1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages (with their options) to the user preamble.

    fontmap: dict mapping LaTeX package name -> list of package options
             (as filled by revert_fonts()).
    """
    # The visible fragment referenced 'pkg' and used 'xoption' without
    # defining them; iterate the map and handle the no-options case.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    "Build the lookup key '<pkg>:<opt1>-<opt2>-...' from a package name and its option list."
    joined_options = "-".join(options)
    return "%s:%s" % (pkg, joined_options)
        self.fontname = None   # key into font2pkgmap (the LyX-visible font name)
        self.fonttype = None   # one of: roman, sans, typewriter, math
        self.scaletype = None  # None, 'sf' or 'tt' (selects the scale header to touch)
        self.scaleopt = None   # None, 'scaled', 'scale' (package option used for scaling)
        self.pkgkey = None     # key into pkg2fontmap (package name + options)
        self.osfopt = None     # None, or the package option string enabling old-style figures
        self.osfdef = "false"  # "false" or "true": whether osf is the package default
        # (body of addkey) recompute the package key from package + options
        self.pkgkey = createkey(self.package, self.options)
        self.font2pkgmap = dict()  # font name -> fontinfo record
        self.pkg2fontmap = dict()  # package key (createkey) -> font name
        self.pkginmap = dict()  # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
        " Expand fontinfo mapping"
        # fontlist: list of fontnames, each element
        #           may contain a ','-separated list of needed options
        #           like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
            # Fill one fontinfo record per listed font
            fe.fonttype = font_type
            fe.scaletype = scale_type
            fe.fontname = font_name
            fe.scaleopt = scaleopt
            # No explicit package given: package name defaults to the font name
                fe.package = font_name
            # Register the entry in both lookup directions
            self.font2pkgmap[font_name] = fe
            if fe.pkgkey in self.pkg2fontmap:
                # Repeated the same entry? Check content
                if self.pkg2fontmap[fe.pkgkey] != font_name:
                    # NOTE(review): 'document' is not defined in this scope —
                    # this call would raise NameError if ever reached; verify.
                    document.error("Something is wrong in pkgname+options <-> fontname mapping")
            self.pkg2fontmap[fe.pkgkey] = font_name
            self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        " Return the registered font name for this package + options combination. "
        pkgkey = createkey(pkg, options)
        if not pkgkey in self.pkg2fontmap:
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            # Inconsistent mapping: the reverse map points at an unknown font.
            # NOTE(review): 'document' is not defined in this scope — verify.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        # Accept only an exact package-key match
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    """Build the fontmapping info for the known font families named in fontlist."""
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    for font in fontlist:
            # DejaVu: only the sans/mono variants take a 'scaled' option
            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
            # IBM Plex: weight variants are encoded as 'FontName,option'
            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                                  'IBMPlexSerifSemibold,semibold'],
                                 "roman", None, "plex-serif")
            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                                 "sans", "sf", "plex-sans", "scale")
            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                  'IBMPlexMonoSemibold,semibold'],
                                 "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
            # Noto: roman takes 'osf', sans/mono take 'scaled'
            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
                                  'NotoSerifExtralight,extralight'],
                                 "roman", None, "noto-serif", None, "osf")
            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                                  'NotoSansThin,thin', 'NotoSansLight,light',
                                  'NotoSansExtralight,extralight'],
                                 "sans", "sf", "noto-sans", "scaled")
            fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
        elif font == 'Chivo':
            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
                                  'Chivo,regular', 'ChivoMedium,medium'],
                                 "sans", "sf", "Chivo", "scale", "oldstyle")
        elif font == 'CrimsonPro':
            # osfdef "true": old-style figures are this package's default
            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
                                  'CrimsonProMedium,medium'],
                                 "roman", None, "CrimsonPro", None, "lf", "true")
            # Fira: lining figures option 'lf', osf is the default
            fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
                                  'FiraSansThin,thin', 'FiraSansLight,light',
                                  'FiraSansExtralight,extralight',
                                  'FiraSansUltralight,ultralight'],
                                 "sans", "sf", "FiraSans", "scaled", "lf", "true")
            fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
def convert_fonts(document, fm, osfoption = "osf"):
    " Handle font definition (LaTeX preamble -> native) "

    # Matches \usepackage[options]{pkg}; group(2)=options, group(3)=package
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    # Matches a 'scale=' / 'scaled=' package option; group(1)=value
    rscaleopt = re.compile(r'^scaled?=(.*)')

    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580

    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        if mo == None or mo.group(2) == None:
            options = mo.group(2).replace(' ', '').split(",")
        # Strip osf and scale options out of the option list, remembering them
        while o < len(options):
            if options[o] == osfoption:
            mo = rscaleopt.search(options[o])
        # Only packages we have a mapping for are converted
        if not pkg in fm.pkginmap:
            # Try with name-option combination first
            # (only one default option supported currently)
            while o < len(options):
                fn = fm.getfontname(pkg, [opt])
            fn = fm.getfontname(pkg, [])
        fn = fm.getfontname(pkg, options)
        # Found a native font: drop the preamble line and set the headers
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
            fontscale = "\\font_" + fontinfo.scaletype + "_scale"
            fontinfo.scaleval = oscale
        # Transfer old-style-figures state to the \font_*_osf header
        if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
            if fontinfo.osfopt == None:
                options.extend(osfoption)
            osf = find_token(document.header, "\\font_osf false")
            osftag = "\\font_osf"
            if osf == -1 and fontinfo.fonttype != "math":
                # Try with newer format
                osftag = "\\font_" + fontinfo.fonttype + "_osf"
                osf = find_token(document.header, osftag + " false")
            document.header[osf] = osftag + " true"
        # Drop the marker comment lyx2lyx itself added before the package line
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
        # Newer formats store remaining package options in \font_*_opts
        if haveFontOpts and fontinfo.fonttype != "math":
            fotag = "\\font_" + fontinfo.fonttype + "_opts"
            fo = find_token(document.header, fotag)
            document.header[fo] = fotag + " \"" + ",".join(options) + "\""
            # Sensible place to insert tag
            fo = find_token(document.header, "\\font_sf_scale")
            document.warning("Malformed LyX document! Missing \\font_sf_scale")
            document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    " Revert native font definition to LaTeX "
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble

    # Matches the four \font_* header lines; group(1) = full header tag
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    # Matches the two integers of a \font_*_scale value
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        # Only fonts we have a mapping for are reverted
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        # Optionally collect extra options from the \font_*_opts header
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # Reset the header to the default font
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        # Transfer a non-default scale value to a package option
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        # Transfer the old-style-figures header back to a package option
        if fontinfo.osfopt != None:
            if fontinfo.osfdef == "true":
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            fontmap[val].extend([fontinfo.osfopt])
        # Finally add the options fixed for this font variant (e.g. 'condensed')
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings: 'auto' -> 'auto-legacy', 'default' -> 'auto-legacy-plain'."""
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding header: nothing to rename (without this guard,
        # i == -1 would silently rewrite the last header line).
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back: 'auto-legacy-plain' -> 'default', 'auto-legacy' -> 'auto'."""
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding header: nothing to rename (without this guard,
        # i == -1 would silently rewrite the last header line).
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only documents using TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definitions to LaTeX preamble code."""
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> options map, filled by revert_fonts(); the visible
        # fragment used it without initialization (NameError).
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Only documents using TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu/IBMPlex font definitions to LaTeX preamble code."""
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> options map, filled by revert_fonts(); the visible
        # fragment used it without initialization (NameError).
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Only documents using TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert native Adobe Source font definitions to LaTeX preamble code."""
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> options map, filled by revert_fonts(); the visible
        # fragment used it without initialization (NameError).
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"

    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    # Length of the layout token prefix, used to slice out the layout name
    tokenend = len('\\begin_layout ')
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Also swallow blank lines following the layout before deleting
        while document.body[j+1].strip() == '':
        document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"

    # Only the elsarticle class has these styles
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Expand the insertion point over surrounding blank lines
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
            '\\begin_inset Note Note',
            '\\begin_layout Plain Layout',
            '\\end_inset', '', '',

    # Layouts that belong to the frontmatter of an elsarticle document
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        k = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Wrap the collected span in Begin/EndFrontmatter (end first, so
        # the 'first' index stays valid)
        insertFrontmatter('End', k+1)
        insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "

        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
        # Skip to the blank line ending the parameter block and insert there
        while i < j and document.body[i].strip() != '':
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "

        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
        # Drop the 'literal' parameter line inside this inset
        del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Locate the ParaType/default header combinations
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
            sfval = find_token(document.header, "\\font_sf_scale", 0)
                document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                sfscale = document.header[sfval].split()
                # Rewrite the scale header after extracting the old value
                document.header[sfval] = " ".join(sfscale)
                    sf_scale = float(val)
                    document.warning("Invalid font_sf_scale value: " + val)
                # NOTE(review): sf_scale is a float but is compared against the
                # string "100.0", so this condition is always true — verify intent.
                if sf_scale != "100.0":
                    sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
            ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # All three families set: load the umbrella 'paratype' package
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
            add_to_preamble(document, ["\\usepackage{PTSerif}"])
            document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
            add_to_preamble(document, ["\\usepackage{PTSans}"])
            document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
            add_to_preamble(document, ["\\usepackage{PTMono}"])
            document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "

    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)

    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):

    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
        document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "

    # Only documents loading the 'landscape' module are affected
    if not "landscape" in document.get_module_list():
        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # Replace end first so the start index i stays valid
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    """Convert the default font encoding setting: 'global' -> 'auto'."""
    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        # Header not present: nothing to convert (without this guard,
        # i == -1 would rewrite the last header line).
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert the default font encoding setting: 'auto' -> 'global'."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        # Header not present: nothing to revert (without this guard,
        # i == -1 would rewrite the last header line).
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Remove all \\nospellcheck font info parameter lines from the body."""
    # The visible fragment called find_token with an undefined index and
    # discarded the result; restore the scan-and-delete loop.
    i = 0
    while True:
        i = find_token(document.body, '\\nospellcheck', i)
        if i == -1:
            return
        del document.body[i]
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "

    del_token(document.header, "\\float_placement class")

        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
        # 'placement class'/'placement document' sit within 2 lines of the inset start
        k = find_token(document.body, 'placement class', i, i + 2)
            k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "

    # Global (document-wide) float alignment; deleted from the header
    galignment = get_value(document.header, "\\float_alignment", delete=True)

        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
        k = find_token(document.body, 'alignment', i, i+4)
        alignment = get_value(document.body, "alignment", k)
        if alignment == "document":
            # 'document' means: fall back to the document-wide default
            alignment = galignment
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
            document.warning("Can't find float layout!")
        # Emit the corresponding alignment command as ERT
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of citation inset at line %d!!" %(i))
        k = find_token(document.body, "LatexCommand", i, j)
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
        cmd = get_value(document.body, "LatexCommand", k)
        # Collect the citation parameters (prenote, postnote, key)
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
            document.warning("Citation inset at line %d does not have a key!" %(i))
        # Replace command with ERT
            res += "[" + pre + "]"
            res += "[" + post + "]"
        res += "{" + key + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of tabular.")
        # Strip the varwidth flag from every column definition of this table
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Could not find end of tabular.")

        # Collect necessary column information
        # Row/column counts are attribute values on the <features ...> line
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])
        for k in range(ncols):
            m = find_token(document.body, "<column", m)
            width = get_option_value(document.body[m], 'width')
            varwidth = get_option_value(document.body[m], 'varwidth')
            alignment = get_option_value(document.body[m], 'alignment')
            special = get_option_value(document.body[m], 'special')
            col_info.append([width, varwidth, alignment, special, m])

        # Walk all cells in row-major order
        for row in range(nrows):
            for col in range(ncols):
                m = find_token(document.body, "<cell", m)
                multicolumn = get_option_value(document.body[m], 'multicolumn')
                multirow = get_option_value(document.body[m], 'multirow')
                width = get_option_value(document.body[m], 'width')
                rotate = get_option_value(document.body[m], 'rotate')
                # Check for: linebreaks, multipars, non-standard environments
                endcell = find_token(document.body, "</cell>", begcell)
                if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
                elif count_pars_in_inset(document.body, begcell + 2) > 1:
                elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                    if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                        alignment = col_info[col][2]
                        col_line = col_info[col][4]
                        # Build the varwidth column 'special' value from the alignment
                        if alignment == "center":
                            vval = ">{\\centering}"
                        elif alignment == "left":
                            vval = ">{\\raggedright}"
                        elif alignment == "right":
                            vval = ">{\\raggedleft}"
                        vval += "V{\\linewidth}"
                        document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                    # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                    # with newlines, and we do not want that)
                    endcell = find_token(document.body, "</cell>", begcell)
                        nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                            nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                        nle = find_end_of_inset(document.body, nl)
                        del(document.body[nle:nle+1])
                        document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                        document.body[nl:nl+1] = put_cmd_in_ert("\\\\")

    # Load the helper packages only if they were actually needed
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "

    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)

    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "utf8-platex" : "utf8",
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encoding = get_quoted_value(document.body, "encoding", i, j)
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        if encoding == "default":
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # biblatex: transfer the encoding to a bibencoding package option
        h = find_token(document.header, "\\biblio_options", 0)
        biblio_options = get_value(document.header, "\\biblio_options", h)
        if not "bibencoding" in biblio_options:
            document.header[h] += ",bibencoding=%s" % encodings[encoding]
        bs = find_token(document.header, "\\biblatex_bibstyle", 0)
            # this should not happen
            document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
            document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
        # bibtex: wrap the inset in \bgroup\inputencoding{...} ... \egroup
        document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
        document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "

    # buffer-type arg -> vcs-type arg
    "vcs-revision" : "revision",
    "vcs-tree-revision" : "tree-revision",
    "vcs-author" : "author",
    "vcs-time" : "time",
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in list(types.keys()):
        # Rewrite the inset to the dedicated 'vcs' type
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "

    # Valid vcs Info args; reverted to buffer args with a 'vcs-' prefix
    args = ["revision", "tree-revision", "author", "time", "date" ]
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "

        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Older formats only know the full 'revision' argument
        if( argv == "revision-abbrev" ):
            document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date Info insets to static text.

    Each ``date``/``fixdate``/``moddate`` Info inset is replaced by the
    formatted date string it would display, using the document's main
    language to pick the format.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # BUGFIX: the original had "%de" (day number + literal "e") in the
        # spanish loclong and spanish-mexico long formats; "de" is intended.
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        # NOTE(review): ukrainian locmedium "%d %m %Y" (numeric month) looks
        # like a typo for "%d %b %Y" — left unchanged pending confirmation.
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"]
        }

    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # no known formats for this language; leave the insets alone
        document.warning("Date formats for language %s unknown!" % lang)
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # argument is "<format>@<iso date>"
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # BUGFIX: date objects have no isodate(); the method is isoformat()
            # (cf. tme.isoformat() in revert_timeinfo).
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Custom Qt-style format: translate to strftime, longest tokens
            # first so "MMMM" is not eaten by the "MM" replacement.
            # BUGFIX: Qt "MMMM" is the *full* month name (%B), not %b.
            fmt = argv.replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # BUGFIX: the original re.sub('[^\'%]d', '%d', ...) also consumed
            # the character preceding the lone "d"; keep it with a group.
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time Info insets to static text.

    Each ``time``/``fixtime``/``modtime`` Info inset is replaced by the
    formatted time string it would display, using the document's main
    language to pick the format.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones
    # (so %Z expands to the empty string).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # no known formats for this language; leave the insets alone
        document.warning("Time formats for language %s unknown!" % lang)
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # argument is "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Custom Qt-style format: translate to strftime.
            fmt = argv.replace("HH", "%H").replace("hh", "%I").replace("h", "%I")
            # BUGFIX: a plain replace("H", "%H") would also hit the "H" inside
            # the "%H" produced above, yielding a literal "%%H".
            fmt = re.sub(r"(?<!%)H", "%H", fmt)
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # BUGFIX: the original called strftime on the undefined name "dte".
            result = tme.strftime(fmt)
        # BUGFIX: assigning a bare string to a slice splices it character by
        # character; wrap it in a list to insert a single line.
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type "name-noext" back to "name"."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # only buffer Info insets carry the "name-noext" argument
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    " Revert l7n Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # BUGFIX: assigning a bare string to a slice splices it character by
        # character; wrap it in a list to insert a single line.
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    " Reverts listpreamble arguments to TeX-code "
    # NOTE(review): this excerpt is missing the surrounding loop header,
    # break/continue guards, and the line that sets `parbeg` (presumably
    # taken from `parent`, the containing layout) — confirm against the
    # full source before editing.
    i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
    j = find_end_of_inset(document.body, i)
    # Find containing paragraph layout
    parent = get_containing_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find parent paragraph layout")
    # Extract the plain-layout contents of the Argument inset.
    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
    endPlain = find_end_of_layout(document.body, beginPlain)
    content = document.body[beginPlain + 1 : endPlain]
    # Remove the Argument inset ...
    del document.body[i:j+1]
    # ... and re-insert its contents as an ERT "{...}" group at the
    # start of the containing paragraph.
    subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
             "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
    document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    " Revert layout format Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # BUGFIX: assigning a bare string to a slice splices it character by
        # character (inserting the two lines "6" and "9"); wrap it in a list
        # to insert the single line "69" (the current layout format).
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Track the language per layout nesting level; the document language
    # is the outermost default.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: the original used line.lstrip('\\lang '), which strips
            # the character *set* {\,l,a,n,g,space} and mangles language
            # names such as "ngerman" -> "erman"; slice off the prefix.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # a new layout inherits the current language
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # swap ( and ) via a NUL placeholder
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    Swapping parentheses is its own inverse, so this simply delegates to
    the converter; it exists only to keep the convert/revert naming pairs.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "
    # Delegate to the shared language-revert helper; arguments are
    # presumably (document, lyxname, babelname, polyglossianame), matching
    # the usage in revert_new_languages — confirm against lyx2lyx_tools.
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    " Revert soul module flex insets to ERT "
    # Flex insets provided by the soul module.
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
    # NOTE(review): the loop header and the "if i != -1" guards around the
    # two preamble additions are missing from this excerpt — confirm
    # against the full source.
    # If any soul inset is present, \usepackage{soul} is needed.
    i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
    add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlighting additionally needs the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    add_to_preamble(document, ["\\usepackage{color}"])
    # Turn each flex inset into the corresponding soul command.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    " Remove tablestyle params "
    # NOTE(review): the "if i != -1" guard between these two lines is
    # missing from this excerpt — confirm against the full source.
    i = find_token(document.header, "\\tablestyle")
    del document.header[i]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "
    # NOTE(review): this excerpt drops several lines of this function
    # (guards, the encoding-dict header/close, some dict entries, loop
    # headers, the encmap initialisation and its loop over enclist);
    # the visible code is annotated as-is — confirm against the full file.
    # Get the cite engine; only biblatex engines support per-file encodings.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
        # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",
        # Iterate over all bibtex command insets in the body.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
        document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        # Read the inset parameters.
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # encodings is a "\t"-separated list of "<bibfile> <encoding>" pairs.
        enclist = encodings.split("\t")
        ppp = pp.split(" ", 1)
        encmap[ppp[0]] = ppp[1]
        # Emit one \addbibresource per file, with bibencoding where known.
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
        pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    " Remove \\cmidrule trimming "

    # FIXME: Revert to TeX code?
    # NOTE(review): the loop header and break/guard lines around this body
    # are missing from this excerpt — i is presumably advanced over all
    # <cell ...> lines until find_token returns -1.
    # first, let's find out if we need to do anything
    i = find_token(document.body, '<cell ', i+1)
    j = document.body[i].find('trim="')
    # matches e.g. ' toplineltrim="true"' / ' bottomlinertrim="true"'
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    # remove trim option
    document.body[i] = rgx.sub('', document.body[i])
    # Items of the local "Flex:Ruby" InsetLayout (ruby/furigana reading
    # aids), used by convert_ruby_module/revert_ruby_module below.
    # NOTE(review): the list's assignment header and some item lines are
    # not visible in this excerpt.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the distributed ruby module instead of a local layout copy."""
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local layout definition."""
    had_module = document.del_module("ruby")
    if had_module:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    platex_utf8 = (lang == "japanese" and inputenc == "utf8-platex")
    cjk_utf8 = (lang == "japanese-cjk" and inputenc == "utf8-cjk")
    if platex_utf8 or cjk_utf8:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    elif lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."
    # NOTE(review): this excerpt is missing the continuation of the
    # get_quoted_value call, the early return after the use_lineno check,
    # the "if options:" guard, and the continuation of the add_to_preamble
    # call — confirm against the full source.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
    # bracket the options for \usepackage[...]{lineno}
    options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."
    # NOTE(review): this excerpt is missing the use_lineno initialisation,
    # the loop/guards around the preamble scan, and the if/else that picks
    # between the two header-splice variants below — confirm against the
    # full source.
    i = find_token(document.preamble, "\\linenumbers", 1)
    # the \usepackage line is expected immediately before \linenumbers
    usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
    # extract the options from \usepackage[...]{lineno}
    options = usepkg.group(1).strip("[]")
    # drop both preamble lines and the lyx2lyx marker before them
    del(document.preamble[i-1:i+1])
    del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
    # insert the native header parameters before \index
    k = find_token(document.header, "\\index ")
    document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                            "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        # BUGFIX: the original added document.language here (already handled
        # above) instead of the language actually found in the body.
        lang = document.body[i][6:].strip()
        if lang in new_languages:
            used_languages.add(lang)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")

    for lang in used_languages:
        # BUGFIX: the original called the undefined name "revert";
        # the helper is revert_language and takes the document first.
        revert_language(document, lang, *new_languages[lang])
    # Items of the local "Flex:Glosse" InsetLayout emulating the deprecated
    # linguistics gloss inset; used by convert_linggloss/revert_linggloss.
    # NOTE(review): the list's assignment header and some item lines are
    # not visible in this excerpt.
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Items of the local "Flex:Tri-Glosse" InsetLayout emulating the deprecated
# three-line linguistics gloss inset.
# NOTE(review): the list's closing bracket and some item lines are not
# visible in this excerpt.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move deprecated ling gloss insets to a local layout definition."""
    body = document.body
    if find_token(body, '\\begin_inset Flex Glosse', 0) != -1:
        document.append_local_layout(gloss_inset_def)
    if find_token(body, '\\begin_inset Flex Tri-Glosse', 0) != -1:
        document.append_local_layout(glosss_inset_def)
# NOTE(review): this chunk is a damaged extraction — original line numbers are
# fused into every line, indentation is lost, and several source lines are
# missing (gaps in the fused numbering: returns, loop setup, else-branches).
# Comments below document only what the visible lines establish.
#
# Revert covington "Interlinear Gloss (2/3 Lines)" flex insets to raw
# \gloss / \trigloss ERT, and remove the local layouts the converter added.
1931 def revert_linggloss(document):
1932 " Revert to old ling gloss definitions "
# Only documents using the linguistics module carry these insets/layouts.
1933 if not "linguistics" in document.get_module_list():
1935 document.del_local_layout(gloss_inset_def)
1936 document.del_local_layout(glosss_inset_def)
1939 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1940 for glosse in glosses:
# Find each occurrence of the inset; the loop scaffolding (initial i, the
# while loop, and the i == -1 break) is among the missing lines.
1943 i = find_token(document.body, glosse, i+1)
1946 j = find_end_of_inset(document.body, i)
1948 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional Argument 1: extracted as optargcontent, then the Arg inset is
# removed (together with its wrapping paragraph when it is the sole content).
1951 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1952 endarg = find_end_of_inset(document.body, arg)
1955 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1956 if argbeginPlain == -1:
1957 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1959 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1960 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1962 # remove Arg insets and paragraph, if it only contains this inset
1963 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1964 del document.body[arg - 1 : endarg + 4]
1966 del document.body[arg : endarg + 1]
# Mandatory Argument post:1 -> marg1content (same extract-and-delete pattern).
1968 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1969 endarg = find_end_of_inset(document.body, arg)
1972 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1973 if argbeginPlain == -1:
1974 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1976 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1977 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1979 # remove Arg insets and paragraph, if it only contains this inset
1980 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1981 del document.body[arg - 1 : endarg + 4]
1983 del document.body[arg : endarg + 1]
# Mandatory Argument post:2 -> marg2content.
1985 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1986 endarg = find_end_of_inset(document.body, arg)
1989 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1990 if argbeginPlain == -1:
1991 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1993 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1994 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1996 # remove Arg insets and paragraph, if it only contains this inset
1997 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1998 del document.body[arg - 1 : endarg + 4]
2000 del document.body[arg : endarg + 1]
# Mandatory Argument post:3 -> marg3content (only used by \trigloss below).
2002 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2003 endarg = find_end_of_inset(document.body, arg)
2006 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2007 if argbeginPlain == -1:
2008 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2010 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2011 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2013 # remove Arg insets and paragraph, if it only contains this inset
2014 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2015 del document.body[arg - 1 : endarg + 4]
2017 del document.body[arg : endarg + 1]
# Pick the LaTeX command: presumably cmd = "\\gloss" by default and
# "\\trigloss" for the 3-line variant — the assignments are in the missing
# lines; TODO confirm against the upstream file.
2020 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset as ERT: \cmd[opt]{m1}{m2}(... {m3} for trigloss ...).
2023 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2024 endInset = find_end_of_inset(document.body, i)
2025 endPlain = find_end_of_layout(document.body, beginPlain)
2026 precontent = put_cmd_in_ert(cmd)
2027 if len(optargcontent) > 0:
2028 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2029 precontent += put_cmd_in_ert("{")
2031 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2032 if cmd == "\\trigloss":
2033 postcontent += put_cmd_in_ert("}{") + marg3content
2034 postcontent += put_cmd_in_ert("}")
# Splice: replace the inset tail, insert the prefix, then drop the inset head.
2036 document.body[endPlain:endInset + 1] = postcontent
2037 document.body[beginPlain + 1:beginPlain] = precontent
2038 del document.body[i : beginPlain + 1]
# The reverted ERT needs the covington package.
2040 document.append_local_layout("Requires covington")
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, loop setup, else-branches). Comments reflect only
# the visible code.
#
# Revert "Subexample" layouts that carry an optional argument to a raw
# \begin{subexamples}[opts] ... \end{subexamples} ERT environment.
2045 def revert_subexarg(document):
2046 " Revert linguistic subexamples with argument to ERT "
2048 if not "linguistics" in document.get_module_list():
# Scan for each Subexample layout (loop scaffolding is among missing lines).
2054 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2057 j = find_end_of_layout(document.body, i)
2059 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2062 # check for consecutive layouts
# Consecutive Subexample paragraphs belong to the same environment, so extend
# j across the whole run.
2063 k = find_token(document.body, "\\begin_layout", j)
2064 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2066 j = find_end_of_layout(document.body, k)
2068 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract Argument 1 content; it is converted to LaTeX (lyx2latex) because it
# ends up inside the [...] optional argument of the ERT environment.
2071 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2075 endarg = find_end_of_inset(document.body, arg)
2077 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2078 if argbeginPlain == -1:
2079 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2081 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2082 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2084 # remove Arg insets and paragraph, if it only contains this inset
2085 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2086 del document.body[arg - 1 : endarg + 4]
2088 del document.body[arg : endarg + 1]
2090 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2092 # re-find end of layout
# Deleting the Arg inset shifted indices, so re-locate the layout end.
2093 j = find_end_of_layout(document.body, i)
2095 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2098 # check for consecutive layouts
# Each following Subexample paragraph becomes a Standard paragraph that
# starts with an ERT "\item ".
2099 k = find_token(document.body, "\\begin_layout", j)
2100 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2102 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2103 j = find_end_of_layout(document.body, k)
2105 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last paragraph, then rewrite the first
# paragraph to open it (plus its own "\item ").
2108 endev = put_cmd_in_ert("\\end{subexamples}")
2110 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2111 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2112 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2114 document.append_local_layout("Requires covington")
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (loop setup, returns, the per-variant cmd assignments).
# Comments reflect only the visible code.
#
# Revert the various DRS flex insets (linguistics module) to raw ERT calls of
# the drs package commands, collecting up to two pre-arguments and four
# post-arguments from the inset's Argument sub-insets.
2118 def revert_drs(document):
2119 " Revert DRS insets (linguistics) to ERT "
2121 if not "linguistics" in document.get_module_list():
2125 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2126 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2127 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2128 "\\begin_inset Flex SDRS"]
# Per-inset scan loop (scaffolding among the missing lines).
2132 i = find_token(document.body, drs, i+1)
2135 j = find_end_of_inset(document.body, i)
2137 document.warning("Malformed LyX document: Can't find end of DRS inset")
2140 # Check for arguments
# Argument 1 -> prearg1content; extract-and-delete, same pattern throughout.
2141 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2142 endarg = find_end_of_inset(document.body, arg)
2145 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2146 if argbeginPlain == -1:
2147 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2149 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2150 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2152 # remove Arg insets and paragraph, if it only contains this inset
2153 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2154 del document.body[arg - 1 : endarg + 4]
2156 del document.body[arg : endarg + 1]
# Deletions shift indices: re-find the inset end before the next argument.
2159 j = find_end_of_inset(document.body, i)
2161 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2 -> prearg2content (used for SDRS only, see below).
2164 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2165 endarg = find_end_of_inset(document.body, arg)
2168 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2169 if argbeginPlain == -1:
2170 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2172 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2173 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2175 # remove Arg insets and paragraph, if it only contains this inset
2176 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2177 del document.body[arg - 1 : endarg + 4]
2179 del document.body[arg : endarg + 1]
2182 j = find_end_of_inset(document.body, i)
2184 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument post:1 -> postarg1content (defaults to [] when absent).
2187 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2188 endarg = find_end_of_inset(document.body, arg)
2189 postarg1content = []
2191 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2192 if argbeginPlain == -1:
2193 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2195 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2196 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2198 # remove Arg insets and paragraph, if it only contains this inset
2199 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2200 del document.body[arg - 1 : endarg + 4]
2202 del document.body[arg : endarg + 1]
2205 j = find_end_of_inset(document.body, i)
2207 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument post:2 -> postarg2content.
2210 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2211 endarg = find_end_of_inset(document.body, arg)
2212 postarg2content = []
2214 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2215 if argbeginPlain == -1:
2216 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2218 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2219 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2221 # remove Arg insets and paragraph, if it only contains this inset
2222 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2223 del document.body[arg - 1 : endarg + 4]
2225 del document.body[arg : endarg + 1]
2228 j = find_end_of_inset(document.body, i)
2230 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument post:3 -> postarg3content.
2233 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2234 endarg = find_end_of_inset(document.body, arg)
2235 postarg3content = []
2237 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2238 if argbeginPlain == -1:
2239 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2241 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2242 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2244 # remove Arg insets and paragraph, if it only contains this inset
2245 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2246 del document.body[arg - 1 : endarg + 4]
2248 del document.body[arg : endarg + 1]
2251 j = find_end_of_inset(document.body, i)
2253 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument post:4 -> postarg4content.
2256 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2257 endarg = find_end_of_inset(document.body, arg)
2258 postarg4content = []
2260 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2261 if argbeginPlain == -1:
2262 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2264 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2265 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2267 # remove Arg insets and paragraph, if it only contains this inset
2268 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2269 del document.body[arg - 1 : endarg + 4]
2271 del document.body[arg : endarg + 1]
2273 # The respective LaTeX command
# The cmd assignments (presumably "\\drs", "\\drs*", "\\ifdrs", "\\condrs",
# "\\qdrs", "\\negdrs", "\\sdrs" — TODO confirm) sit on the missing lines.
2275 if drs == "\\begin_inset Flex DRS*":
2277 elif drs == "\\begin_inset Flex IfThen-DRS":
2279 elif drs == "\\begin_inset Flex Cond-DRS":
2281 elif drs == "\\begin_inset Flex QDRS":
2283 elif drs == "\\begin_inset Flex NegDRS":
2285 elif drs == "\\begin_inset Flex SDRS":
# Rebuild as ERT: \cmd{pre1}(SDRS: {pre2}){body}{post...}.
2288 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2289 endInset = find_end_of_inset(document.body, i)
2290 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2291 precontent = put_cmd_in_ert(cmd)
2292 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2293 if drs == "\\begin_inset Flex SDRS":
2294 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2295 precontent += put_cmd_in_ert("{")
# Only the multi-argument variants append the post-arguments.
2298 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2299 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2300 if cmd == "\\condrs" or cmd == "\\qdrs":
2301 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2303 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2305 postcontent = put_cmd_in_ert("}")
2307 document.body[endPlain:endInset + 1] = postcontent
2308 document.body[beginPlain + 1:beginPlain] = precontent
2309 del document.body[i : beginPlain + 1]
# The drs package bundles covington, hence "Provides covington 1".
2311 document.append_local_layout("Provides covington 1")
2312 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns after warnings, if/else scaffolding around the scale
# parsing and pretext appends). Comments reflect only the visible code.
#
# Revert the native \babelfont support: move the non-TeX font selections
# (roman/sans/typewriter, with scaling and old-style figures) from the header
# into \babelfont preamble code, and reset the header fonts to "default".
# Only applies when non-TeX fonts are on and the language package is babel.
2318 def revert_babelfont(document):
2319 " Reverts the use of \\babelfont to user preamble "
2321 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2323 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2325 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2327 i = find_token(document.header, '\\language_package', 0)
2329 document.warning("Malformed LyX document: Missing \\language_package.")
2331 if get_value(document.header, "\\language_package", 0) != "babel":
2334 # check font settings
# Defaults used when a font family is left at "default".
2336 roman = sans = typew = "default"
2338 sf_scale = tt_scale = 100.0
# Roman font: read the quoted name, then reset the header entry to default.
2340 j = find_token(document.header, "\\font_roman", 0)
2342 document.warning("Malformed LyX document: Missing \\font_roman.")
2344 # We need to use this regex since split() does not handle quote protection
2345 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2346 roman = romanfont[2].strip('"')
2347 romanfont[2] = '"default"'
2348 document.header[j] = " ".join(romanfont)
# Sans font: same pattern.
2350 j = find_token(document.header, "\\font_sans", 0)
2352 document.warning("Malformed LyX document: Missing \\font_sans.")
2354 # We need to use this regex since split() does not handle quote protection
2355 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2356 sans = sansfont[2].strip('"')
2357 sansfont[2] = '"default"'
2358 document.header[j] = " ".join(sansfont)
# Typewriter font: same pattern.
2360 j = find_token(document.header, "\\font_typewriter", 0)
2362 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2364 # We need to use this regex since split() does not handle quote protection
2365 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2366 typew = ttfont[2].strip('"')
2367 ttfont[2] = '"default"'
2368 document.header[j] = " ".join(ttfont)
# Old-style figures flag.
2370 i = find_token(document.header, "\\font_osf", 0)
2372 document.warning("Malformed LyX document: Missing \\font_osf.")
2374 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans scaling: parse value (the val extraction / header rewrite scaffolding
# is among the missing lines), falling back with a warning on bad input.
2376 j = find_token(document.header, "\\font_sf_scale", 0)
2378 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2380 sfscale = document.header[j].split()
2383 document.header[j] = " ".join(sfscale)
2386 sf_scale = float(val)
2388 document.warning("Invalid font_sf_scale value: " + val)
# Typewriter scaling: same pattern.
2390 j = find_token(document.header, "\\font_tt_scale", 0)
2392 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2394 ttscale = document.header[j].split()
2397 document.header[j] = " ".join(ttscale)
2400 tt_scale = float(val)
2402 document.warning("Invalid font_tt_scale value: " + val)
2404 # set preamble stuff
# Build the \babelfont preamble block; \babelfont requires xelatex/lualatex.
2405 pretext = ['%% This document must be processed with xelatex or lualatex!']
2406 pretext.append('\\AtBeginDocument{%')
2407 if roman != "default":
2408 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2409 if sans != "default":
2410 sf = '\\babelfont{sf}['
2411 if sf_scale != 100.0:
2412 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2413 sf += 'Mapping=tex-text]{' + sans + '}'
2415 if typew != "default":
2416 tw = '\\babelfont{tt}'
2417 if tt_scale != 100.0:
2418 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2419 tw += '{' + typew + '}'
# OSF is emulated globally via fontspec's default features.
2422 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2424 insert_to_preamble(document, pretext)
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, the opts/osf concatenation into the preamble).
# Comments reflect only the visible code.
#
# Revert a native MinionPro roman font that carries extra options back to a
# \usepackage[...]{MinionPro} preamble line. TeX-fonts documents only.
2427 def revert_minionpro(document):
2428 " Revert native MinionPro font definition (with extra options) to LaTeX "
2430 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2432 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2434 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Nothing to do unless \font_roman_opts is present.
2437 regexp = re.compile(r'(\\font_roman_opts)')
2438 x = find_re(document.header, regexp, 0)
2442 # We need to use this regex since split() does not handle quote protection
2443 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2444 opts = romanopts[1].strip('"')
2446 i = find_token(document.header, "\\font_roman", 0)
2448 document.warning("Malformed LyX document: Missing \\font_roman.")
2451 # We need to use this regex since split() does not handle quote protection
2452 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2453 roman = romanfont[1].strip('"')
# Only the minionpro selection is handled here.
2454 if roman != "minionpro":
2456 romanfont[1] = '"default"'
2457 document.header[i] = " ".join(romanfont)
# OSF flag is folded into the package options and reset in the header
# (the option concatenation itself is on missing lines).
2459 j = find_token(document.header, "\\font_osf true", 0)
2462 preamble = "\\usepackage["
2464 document.header[j] = "\\font_osf false"
2468 preamble += "]{MinionPro}"
2469 add_to_preamble(document, [preamble])
# Drop the now-obsolete \font_roman_opts header line.
2470 del document.header[x]
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, if-guards, option/scale concatenations). Comments
# reflect only the visible code.
#
# Revert header font options (\font_*_opts) by emitting \setmainfont /
# \setsansfont / \setmonofont (or \babelfont{rm|sf|tt} when babel is the
# language package) into the preamble and resetting the header fonts.
2473 def revert_font_opts(document):
2474 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2476 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2478 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2480 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2481 i = find_token(document.header, '\\language_package', 0)
2483 document.warning("Malformed LyX document: Missing \\language_package.")
2485 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman ---
2488 regexp = re.compile(r'(\\font_roman_opts)')
2489 i = find_re(document.header, regexp, 0)
2491 # We need to use this regex since split() does not handle quote protection
2492 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2493 opts = romanopts[1].strip('"')
2494 del document.header[i]
2496 regexp = re.compile(r'(\\font_roman)')
2497 i = find_re(document.header, regexp, 0)
2499 # We need to use this regex since split() does not handle quote protection
2500 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2501 font = romanfont[2].strip('"')
2502 romanfont[2] = '"default"'
2503 document.header[i] = " ".join(romanfont)
2504 if font != "default":
# babel documents use \babelfont so the font follows language switches.
2506 preamble = "\\babelfont{rm}["
2508 preamble = "\\setmainfont["
2511 preamble += "Mapping=tex-text]{"
2514 add_to_preamble(document, [preamble])
# --- sans ---
2517 regexp = re.compile(r'(\\font_sans_opts)')
2518 i = find_re(document.header, regexp, 0)
2521 # We need to use this regex since split() does not handle quote protection
2522 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2523 opts = sfopts[1].strip('"')
2524 del document.header[i]
# Scale is stored as the second field of \font_sf_scale.
2526 regexp = re.compile(r'(\\font_sf_scale)')
2527 i = find_re(document.header, regexp, 0)
2529 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2530 regexp = re.compile(r'(\\font_sans)')
2531 i = find_re(document.header, regexp, 0)
2533 # We need to use this regex since split() does not handle quote protection
2534 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2535 font = sffont[2].strip('"')
2536 sffont[2] = '"default"'
2537 document.header[i] = " ".join(sffont)
2538 if font != "default":
2540 preamble = "\\babelfont{sf}["
2542 preamble = "\\setsansfont["
# NOTE(review): "Scale=0." + scaleval encodes e.g. scale 85 as Scale=0.85 —
# presumably only emitted for scaleval != 100; the guard is on missing lines.
2546 preamble += "Scale=0."
2547 preamble += scaleval
2549 preamble += "Mapping=tex-text]{"
2552 add_to_preamble(document, [preamble])
# --- typewriter ---
2555 regexp = re.compile(r'(\\font_typewriter_opts)')
2556 i = find_re(document.header, regexp, 0)
2559 # We need to use this regex since split() does not handle quote protection
2560 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2561 opts = ttopts[1].strip('"')
2562 del document.header[i]
2564 regexp = re.compile(r'(\\font_tt_scale)')
2565 i = find_re(document.header, regexp, 0)
2567 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2568 regexp = re.compile(r'(\\font_typewriter)')
2569 i = find_re(document.header, regexp, 0)
2571 # We need to use this regex since split() does not handle quote protection
2572 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2573 font = ttfont[2].strip('"')
2574 ttfont[2] = '"default"'
2575 document.header[i] = " ".join(ttfont)
2576 if font != "default":
2578 preamble = "\\babelfont{tt}["
2580 preamble = "\\setmonofont["
2584 preamble += "Scale=0."
2585 preamble += scaleval
2587 preamble += "Mapping=tex-text]{"
2590 add_to_preamble(document, [preamble])
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, osf handling, opts concatenation). Comments
# reflect only the visible code.
#
# Revert a "plain" Noto setup (roman NotoSerif-TLF with matching default
# sans/typewriter and extra options) to \usepackage[...]{noto}. TeX fonts only.
2593 def revert_plainNotoFonts_xopts(document):
2594 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2596 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2598 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2600 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# osf presumably derives from this token; the assignment is on missing lines.
2604 y = find_token(document.header, "\\font_osf true", 0)
2608 regexp = re.compile(r'(\\font_roman_opts)')
2609 x = find_re(document.header, regexp, 0)
# Nothing to revert without extra options or osf.
2610 if x == -1 and not osf:
2615 # We need to use this regex since split() does not handle quote protection
2616 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2617 opts = romanopts[1].strip('"')
# Only the exact "complete Noto" combination is handled: roman NotoSerif-TLF
# plus (presumably) default sans and typewriter — the comparisons against
# their values are on missing lines; TODO confirm.
2623 i = find_token(document.header, "\\font_roman", 0)
2627 # We need to use this regex since split() does not handle quote protection
2628 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2629 roman = romanfont[1].strip('"')
2630 if roman != "NotoSerif-TLF":
2633 j = find_token(document.header, "\\font_sans", 0)
2637 # We need to use this regex since split() does not handle quote protection
2638 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2639 sf = sffont[1].strip('"')
2643 j = find_token(document.header, "\\font_typewriter", 0)
2647 # We need to use this regex since split() does not handle quote protection
2648 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2649 tt = ttfont[1].strip('"')
2653 # So we have noto as "complete font"
2654 romanfont[1] = '"default"'
2655 document.header[i] = " ".join(romanfont)
2657 preamble = "\\usepackage["
2659 preamble += "]{noto}"
2660 add_to_preamble(document, [preamble])
# Reset osf flag and drop the opts line once reverted.
2662 document.header[y] = "\\font_osf false"
2664 del document.header[x]
# NOTE(review): damaged extraction — the early return after the warning and
# the fontmap initialization (presumably fontmap = dict()) are on missing
# lines; TODO confirm against the upstream file.
#
# Revert the extended Noto font family (with extra options) to LaTeX packages
# via the shared font-mapping machinery. TeX-fonts documents only.
2667 def revert_notoFonts_xopts(document):
2668 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2670 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2672 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2674 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2678 fm = createFontMapping(['Noto'])
2679 if revert_fonts(document, fm, fontmap, True):
2680 add_preamble_fonts(document, fontmap)
# NOTE(review): damaged extraction — the early return after the warning and
# the fontmap initialization are on missing lines; TODO confirm upstream.
#
# Revert the IBM Plex font family (with extra options) to LaTeX packages via
# the shared font-mapping machinery. TeX-fonts documents only.
2683 def revert_IBMFonts_xopts(document):
2684 " Revert native IBM font definition (with extra options) to LaTeX "
2686 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2688 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2690 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2694 fm = createFontMapping(['IBM'])
2696 if revert_fonts(document, fm, fontmap, True):
2697 add_preamble_fonts(document, fontmap)
# NOTE(review): damaged extraction — the early return after the warning and
# the fontmap initialization are on missing lines; TODO confirm upstream.
#
# Revert the Adobe Source font family (with extra options) to LaTeX packages
# via the shared font-mapping machinery. TeX-fonts documents only.
2700 def revert_AdobeFonts_xopts(document):
2701 " Revert native Adobe font definition (with extra options) to LaTeX "
2703 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2705 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2707 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2711 fm = createFontMapping(['Adobe'])
2713 if revert_fonts(document, fm, fontmap, True):
2714 add_preamble_fonts(document, fontmap)
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, the NonTeXFonts branch guard, else-branches).
# Comments reflect only the visible code.
#
# Convert the single \font_osf header flag into the new per-family triple
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf. For non-TeX fonts
# the sans/typewriter values default to false; for TeX fonts they are derived
# from whether the selected font appears in the osfsf/osftt lists.
2717 def convert_osf(document):
2718 " Convert \\font_osf param to new format "
2721 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2723 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2725 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2727 i = find_token(document.header, '\\font_osf', 0)
2729 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans / typewriter variants carry old-style figures.
2732 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2733 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
# Rename the existing flag to \font_roman_osf in place.
2735 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2736 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# Non-TeX fonts branch (guard on missing lines): plain false for both.
2739 document.header.insert(i, "\\font_sans_osf false")
2740 document.header.insert(i + 1, "\\font_typewriter_osf false")
# TeX fonts branch: look the selected sans font up in osfsf.
2744 x = find_token(document.header, "\\font_sans", 0)
2746 document.warning("Malformed LyX document: Missing \\font_sans.")
2748 # We need to use this regex since split() does not handle quote protection
2749 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2750 sf = sffont[1].strip('"')
2752 document.header.insert(i, "\\font_sans_osf true")
2754 document.header.insert(i, "\\font_sans_osf false")
# ... and the typewriter font up in osftt.
2756 x = find_token(document.header, "\\font_typewriter", 0)
2758 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2760 # We need to use this regex since split() does not handle quote protection
2761 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2762 tt = ttfont[1].strip('"')
2764 document.header.insert(i + 1, "\\font_typewriter_osf true")
2766 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Fallback (branch guard on missing lines): insert both flags as false.
2769 document.header.insert(i, "\\font_sans_osf false")
2770 document.header.insert(i + 1, "\\font_typewriter_osf false")
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, the NonTeXFonts / osfval guards before the final
# header rewrite). Comments reflect only the visible code.
#
# Revert the per-family \font_*_osf triple back to a single \font_osf flag:
# keep the roman flag (renamed in place), OR-in sans/typewriter, drop their
# lines, and finally force "\font_osf true" when any family had osf set.
2773 def revert_osf(document):
2774 " Revert \\font_*_osf params "
2777 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2779 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2781 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2783 i = find_token(document.header, '\\font_roman_osf', 0)
2785 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2788 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2789 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# Sans flag: folded into osfval (the |= is on a missing line per numbering
# gap — visible code only re-reads the value), then removed.
2791 i = find_token(document.header, '\\font_sans_osf', 0)
2793 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2796 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2797 del document.header[i]
# Typewriter flag: OR-ed in, then removed.
2799 i = find_token(document.header, '\\font_typewriter_osf', 0)
2801 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2804 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2805 del document.header[i]
# Final rewrite (its osfval guard is on missing lines).
2808 i = find_token(document.header, '\\font_osf', 0)
2810 document.warning("Malformed LyX document: Missing \\font_osf.")
2812 document.header[i] = "\\font_osf true"
# NOTE(review): damaged extraction — fused line numbers, lost indentation,
# missing lines (returns, osf option strings, option concatenations).
# Comments reflect only the visible code.
#
# Revert native TeX font selections that carry extra options back to explicit
# \usepackage[...]{<package>} preamble lines: first sans (biolinum only),
# then roman for the fonts listed in rmfonts. TeX-fonts documents only.
2815 def revert_texfontopts(document):
2816 " Revert native TeX font definitions (with extra options) to LaTeX "
2818 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2820 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2822 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman fonts whose LaTeX package takes the options directly.
2825 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2827 # First the sf (biolinum only)
2828 regexp = re.compile(r'(\\font_sans_opts)')
2829 x = find_re(document.header, regexp, 0)
2831 # We need to use this regex since split() does not handle quote protection
2832 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2833 opts = sfopts[1].strip('"')
2834 i = find_token(document.header, "\\font_sans", 0)
2836 document.warning("Malformed LyX document: Missing \\font_sans.")
2838 # We need to use this regex since split() does not handle quote protection
2839 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2840 sans = sffont[1].strip('"')
2841 if sans == "biolinum":
2843 sffont[1] = '"default"'
2844 document.header[i] = " ".join(sffont)
# osf and scaling are folded into the biolinum package options.
2846 j = find_token(document.header, "\\font_sans_osf true", 0)
2849 k = find_token(document.header, "\\font_sf_scale", 0)
2851 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2853 sfscale = document.header[k].split()
2856 document.header[k] = " ".join(sfscale)
2859 sf_scale = float(val)
2861 document.warning("Invalid font_sf_scale value: " + val)
2862 preamble = "\\usepackage["
2864 document.header[j] = "\\font_sans_osf false"
2866 if sf_scale != 100.0:
2867 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2869 preamble += "]{biolinum}"
2870 add_to_preamble(document, [preamble])
2871 del document.header[x]
# Then the roman font with options.
2873 regexp = re.compile(r'(\\font_roman_opts)')
2874 x = find_re(document.header, regexp, 0)
2878 # We need to use this regex since split() does not handle quote protection
2879 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2880 opts = romanopts[1].strip('"')
2882 i = find_token(document.header, "\\font_roman", 0)
2884 document.warning("Malformed LyX document: Missing \\font_roman.")
2887 # We need to use this regex since split() does not handle quote protection
2888 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2889 roman = romanfont[1].strip('"')
2890 if not roman in rmfonts:
2892 romanfont[1] = '"default"'
2893 document.header[i] = " ".join(romanfont)
# Map the LyX font name to its LaTeX package where they differ
# (default package presumably equals the font name; assignment on missing lines).
2895 if roman == "utopia":
2897 elif roman == "palatino":
2898 package = "mathpazo"
2899 elif roman == "times":
2900 package = "mathptmx"
2901 elif roman == "xcharter":
2902 package = "XCharter"
# Per-font osf option strings (e.g. cochineal's "proportional,osf,"); most
# of the assignments are on missing lines.
2904 j = find_token(document.header, "\\font_roman_osf true", 0)
2906 if roman == "cochineal":
2907 osf = "proportional,osf,"
2908 elif roman == "utopia":
2910 elif roman == "garamondx":
2912 elif roman == "libertine":
2914 elif roman == "palatino":
2916 elif roman == "xcharter":
2918 document.header[j] = "\\font_roman_osf false"
# Small-caps handling (utopia / palatino only, per the visible guards).
2919 k = find_token(document.header, "\\font_sc true", 0)
2921 if roman == "utopia":
2923 if roman == "palatino" and osf == "":
2925 document.header[k] = "\\font_sc false"
2926 preamble = "\\usepackage["
2929 preamble += "]{" + package + "}"
2930 add_to_preamble(document, [preamble])
2931 del document.header[x]
# Convert a native Cantarell font selection via the shared font-mapping
# machinery; "oldstyle" selects the osf variant handling. TeX-fonts docs only.
2934 def convert_CantarellFont(document):
2935 " Handle Cantarell font definition to LaTeX "
2937 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2938 fm = createFontMapping(['Cantarell'])
2939 convert_fonts(document, fm, "oldstyle")
# NOTE(review): damaged extraction — the fontmap initialization (presumably
# fontmap = dict()) is on a missing line; TODO confirm upstream.
#
# Revert a native Cantarell font selection to LaTeX packages via the shared
# font-mapping machinery. TeX-fonts documents only.
2941 def revert_CantarellFont(document):
2942 " Revert native Cantarell font definition to LaTeX "
2944 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2946 fm = createFontMapping(['Cantarell'])
2947 if revert_fonts(document, fm, fontmap, False, True):
2948 add_preamble_fonts(document, fontmap)
# Convert a native Chivo font selection via the shared font-mapping
# machinery; "oldstyle" selects the osf variant handling. TeX-fonts docs only.
2950 def convert_ChivoFont(document):
2951 " Handle Chivo font definition to LaTeX "
2953 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2954 fm = createFontMapping(['Chivo'])
2955 convert_fonts(document, fm, "oldstyle")
# NOTE(review): damaged extraction — the fontmap initialization (presumably
# fontmap = dict()) is on a missing line; TODO confirm upstream.
#
# Revert a native Chivo font selection to LaTeX packages via the shared
# font-mapping machinery. TeX-fonts documents only.
2957 def revert_ChivoFont(document):
2958 " Revert native Chivo font definition to LaTeX "
2960 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2962 fm = createFontMapping(['Chivo'])
2963 if revert_fonts(document, fm, fontmap, False, True):
2964 add_preamble_fonts(document, fontmap)
# Convert a native Fira font selection via the shared font-mapping machinery;
# "lf" selects lining-figure variant handling. TeX-fonts documents only.
2967 def convert_FiraFont(document):
2968 " Handle Fira font definition to LaTeX "
2970 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2971 fm = createFontMapping(['Fira'])
2972 convert_fonts(document, fm, "lf")
# NOTE(review): damaged extraction — the fontmap initialization (presumably
# fontmap = dict()) is on a missing line; TODO confirm upstream.
#
# Revert a native Fira font selection to LaTeX packages via the shared
# font-mapping machinery. TeX-fonts documents only.
2974 def revert_FiraFont(document):
2975 " Revert native Fira font definition to LaTeX "
2977 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2979 fm = createFontMapping(['Fira'])
2980 if revert_fonts(document, fm, fontmap, False, True):
2981 add_preamble_fonts(document, fontmap)
def _convert_semibold_family(document, fonttag, sbfamily, family, optstag, scaletag, nontexfonts):
    """Strip the pseudo "...Semibold" family for one font slot.

    Rewrites the header line `fonttag` (e.g. "\\font_roman") from
    `sbfamily` to `family` and, when TeX fonts are used, records
    "semibold" in the matching `optstag` line (inserting a new opts line
    right before `scaletag` if none exists yet).
    """
    i = find_token(document.header, fonttag, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing %s." % fonttag)
        return
    # We need to use this regex since split() does not handle quote protection
    font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if font[1].strip('"') != sbfamily:
        # Not the semibold pseudo family; nothing to migrate.
        return
    font[1] = '"' + family + '"'
    document.header[i] = " ".join(font)

    if nontexfonts:
        # Non-TeX (system) fonts do not carry the *_opts header tags.
        return
    x = find_re(document.header, re.compile(re.escape(optstag)), 0)
    if x == -1:
        # No opts tag yet: insert one at a sensible place (before the scale tag).
        fo = find_token(document.header, scaletag)
        if fo == -1:
            document.warning("Malformed LyX document! Missing %s" % scaletag)
        else:
            document.header.insert(fo, optstag + " \"semibold\"")
    else:
        # Prepend "semibold" to the already existing options.
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = optstag + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    """Move semibold options to extraopts.

    Replaces the IBM Plex "...Semibold" pseudo families by the plain
    families plus a "semibold" entry in the respective *_opts header tag.
    Fixes the copy/paste bug where the typewriter branch reused the
    sans-serif options list (sfopts) instead of its own (ttopts).
    """
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
        return
    NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    # NOTE: the roman slot historically uses \font_sf_scale as the
    # insertion anchor, matching the original code.
    _convert_semibold_family(document, "\\font_roman", "IBMPlexSerifSemibold",
                             "IBMPlexSerif", "\\font_roman_opts",
                             "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_family(document, "\\font_sans", "IBMPlexSansSemibold",
                             "IBMPlexSans", "\\font_sans_opts",
                             "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_family(document, "\\font_typewriter", "IBMPlexMonoSemibold",
                             "IBMPlexMono", "\\font_typewriter_opts",
                             "\\font_tt_scale", NonTeXFonts)
def convert_NotoRegulars(document):
    """Merge diverse Noto regular fonts.

    The separate "NotoSerif-TLF"/"NotoSans-TLF"/"NotoMono-TLF" family
    names are folded into the corresponding "Noto...Regular" families.
    (Docstring typo "reagular" fixed; missing header guards restored.)
    """
    for fonttag, oldname, newname in (
            ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
            ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
            ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular")):
        i = find_token(document.header, fonttag, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % fonttag)
            continue
        # We need to use this regex since split() does not handle quote protection
        font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if font[1].strip('"') == oldname:
            font[1] = '"' + newname + '"'
            document.header[i] = " ".join(font)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Only documents using TeX fonts carry a LaTeX-side CrimsonPro setup.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    """Revert native CrimsonPro font definition to LaTeX packages.

    Only acts on documents compiled with TeX fonts
    ("\\use_non_tex_fonts false"). Package options collected during the
    reversion are written to the user preamble.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap collects package -> options while reverting; the visible
        # code used it unbound, which would raise NameError.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options.

    Fix: the class check must compare the first *three* characters of the
    textclass with "scr"; the previous two-character slice could never
    equal "scr", so the function bailed out for all KOMA (scr*) classes.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # When geometry is active, the page size is handled there instead.
    if find_token(document.header, "\\use_geometry true", 0) != -1:
        return

    # Sizes older LyX versions already support; these need no treatment.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # Nothing to revert.
        return

    document.header[i] = "\\papersize default"

    # Move the class-specific size into the document class options.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA to options.

    Fix: when switching geometry on, the rewrite must target the header
    line that was found (index i), not the hard-coded header line 1.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # Geometry already drives the page size; nothing to convert then.
    if find_token(document.header, "\\use_geometry true", 0) != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # Standard size: no geometry needed.
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA to class options.

    Non-standard \\paperfontsize values are reset to "default" and moved
    into the class options as "fontsize=<val>". Restores the early
    returns after warnings that the visible code was missing.
    """
    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes supported natively by older LyX versions.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # Nothing to revert.
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    # Append to \options, creating the line before \textclass if absent.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
3220 def revert_dupqualicites(document):
3221 " Revert qualified citation list commands with duplicate keys to ERT "
3223 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3224 # we need to revert those with multiple uses of the same key.
# NOTE(review): several scaffolding lines (early returns, the scan-loop
# header, and the premap/postmap/ql_citations initialisations) appear to
# have been lost from this excerpt — confirm against the full file.
3228 i = find_token(document.header, "\\cite_engine", 0)
3230 document.warning("Malformed document! Missing \\cite_engine")
3232 engine = get_value(document.header, "\\cite_engine", i)
# Only the biblatex engines provide the *cites commands emitted below.
3234 if not engine in ["biblatex", "biblatex-natbib"]:
3237 # Citation insets that support qualified lists, with their LaTeX code
# Mapping: LyX citation command -> biblatex multi-cite macro name.
3241 "citet" : "textcites",
3242 "Citet" : "Textcites",
3243 "citep" : "parencites",
3244 "Citep" : "Parencites",
3245 "Footcite" : "Smartcites",
3246 "footcite" : "smartcites",
3247 "Autocite" : "Autocites",
3248 "autocite" : "autocites",
# Scan the body for citation insets.
3253 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3256 j = find_end_of_inset(document.body, i)
3258 document.warning("Can't find end of citation inset at line %d!!" %(i))
3262 k = find_token(document.body, "LatexCommand", i, j)
3264 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3268 cmd = get_value(document.body, "LatexCommand", k)
# Skip citation commands that do not support qualified lists.
3269 if not cmd in list(ql_citations.keys()):
3273 pres = find_token(document.body, "pretextlist", i, j)
3274 posts = find_token(document.body, "posttextlist", i, j)
# No qualified pre-/post-text at all: nothing to revert for this inset.
3275 if pres == -1 and posts == -1:
3280 key = get_quoted_value(document.body, "key", i, j)
3282 document.warning("Citation inset at line %d does not have a key!" %(i))
# Only insets whose key list contains duplicates need the ERT fallback.
3286 keys = key.split(",")
3287 ukeys = list(set(keys))
3288 if len(keys) == len(ukeys):
3293 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3294 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3296 pre = get_quoted_value(document.body, "before", i, j)
3297 post = get_quoted_value(document.body, "after", i, j)
# Group the tab-separated "key text" entries by key, preserving order;
# repeated keys accumulate their texts tab-joined per key.
3298 prelist = pretexts.split("\t")
3301 ppp = pp.split(" ", 1)
3307 if ppp[0] in premap:
3308 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3310 premap[ppp[0]] = val
3311 postlist = posttexts.split("\t")
3315 ppp = pp.split(" ", 1)
3321 if ppp[0] in postmap:
3322 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3324 postmap[ppp[0]] = val
3325 # Replace known new commands with ERT
# Global pre-/postnotes containing parentheses must be braced so they do
# not terminate the (...) argument of the *cites macro.
3326 if "(" in pre or ")" in pre:
3327 pre = "{" + pre + "}"
3328 if "(" in post or ")" in post:
3329 post = "{" + post + "}"
3330 res = "\\" + ql_citations[cmd]
3332 res += "(" + pre + ")"
3334 res += "(" + post + ")"
# Emit one [prenote][postnote]{key} group per key, consuming the maps so
# repeated keys get their texts in document order.
3338 if premap.get(kk, "") != "":
3339 akeys = premap[kk].split("\t", 1)
3342 res += "[" + akey + "]"
3344 premap[kk] = "\t".join(akeys[1:])
3347 if postmap.get(kk, "") != "":
3348 akeys = postmap[kk].split("\t", 1)
3351 res += "[" + akey + "]"
3353 postmap[kk] = "\t".join(akeys[1:])
3356 elif premap.get(kk, "") != "":
3358 res += "{" + kk + "}"
# Replace the whole citation inset with the assembled ERT command.
3359 document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    """Convert LyX page size names (drop the "paper" suffix).

    E.g. "a4paper" becomes "a4"; values outside the known list are left
    untouched. Restores the missing return after the warning and the
    membership guard before rewriting.
    """
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper",
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper",
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper",
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    """Revert LyX page size names (re-append the "paper" suffix).

    E.g. "a4" becomes "a4paper" again; values outside the known list are
    left untouched. Restores the missing return after the warning and
    the membership guard before rewriting.
    """
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive",
                "a0", "a1", "a2", "a3", "a4", "a5", "a6",
                "b0", "b1", "b2", "b3", "b4", "b5", "b6",
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    """Revert native support of \\theendnotes to TeX code.

    Replaces every endnote FloatList inset with an ERT \\theendnotes.
    Restores the elided scan-loop scaffolding.
    """
    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    """Revert native support of the enotez package to TeX code.

    Flex Endnote insets become \\endnote ERT, FloatList insets become
    \\printendnotes; the enotez package is loaded in the preamble when
    any such construct was found. Restores the elided `use` flag and
    scan-loop scaffolding.
    """
    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Remember whether the package is actually needed.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])

    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    """Revert native support of memoir endnotes to TeX code.

    Flex Endnote insets become \\pagenote ERT (or \\endnote when an
    endnote module is also loaded, to avoid a name clash); FloatList
    pagenote insets become \\printpagenotes[*]. Restores the elided
    scan-loop and else branch.
    """
    if document.textclass != "memoir":
        return

    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")

    add_to_preamble(document, ["\\makepagenote"])
3475 def revert_totalheight(document):
3476 " Reverts graphics height parameter from totalheight to height "
# NOTE(review): loop scaffolding and several guard lines appear to have
# been lost from this excerpt; comments describe the surviving lines.
# Scan all Graphics insets in the body.
3480 i = find_token(document.body, "\\begin_inset Graphics", i)
3483 j = find_end_of_inset(document.body, i)
3485 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Look for an existing "special" parameter line within the inset.
3489 rx = re.compile(r'\s*special\s*(\S+)$')
3490 k = find_re(document.body, rx, i, j)
3494 m = rx.match(document.body[k])
3496 special = m.group(1)
# "special" holds a comma-separated option list; extract any height=
# entry (the pre-conversion way of storing a plain height).
3497 mspecial = special.split(',')
3498 for spc in mspecial:
3499 if spc[:7] == "height=":
3500 oldheight = spc.split('=')[1]
3501 mspecial.remove(spc)
3503 if len(mspecial) > 0:
3504 special = ",".join(mspecial)
# Find the inset's "height" parameter line, if any.
3508 rx = re.compile(r'(\s*height\s*)(\S+)$')
3509 kk = find_re(document.body, rx, i, j)
3511 m = rx.match(document.body[kk])
3517 val = val + "," + special
# Move the height value into special as totalheight=..., either by
# rewriting the existing special line or inserting a new one.
3518 document.body[k] = "\tspecial " + "totalheight=" + val
3520 document.body.insert(kk, "\tspecial totalheight=" + val)
# Restore the former height= from special as the plain height line,
# or drop the height line entirely when there was none.
3522 document.body[kk] = m.group(1) + oldheight
3524 del document.body[kk]
3525 elif oldheight != "":
3526 document.body.insert(k, "\theight " + oldheight)
3530 def convert_totalheight(document):
3531 " Converts graphics height parameter from totalheight to height "
# Inverse of revert_totalheight: moves a totalheight= entry from the
# "special" option list into the inset's plain height parameter.
# NOTE(review): loop scaffolding and several guard lines appear to have
# been lost from this excerpt; comments describe the surviving lines.
3535 i = find_token(document.body, "\\begin_inset Graphics", i)
3538 j = find_end_of_inset(document.body, i)
3540 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Look for an existing "special" parameter line within the inset.
3544 rx = re.compile(r'\s*special\s*(\S+)$')
3545 k = find_re(document.body, rx, i, j)
3549 m = rx.match(document.body[k])
3551 special = m.group(1)
# Extract any totalheight= entry from the comma-separated option list.
3552 mspecial = special.split(',')
3553 for spc in mspecial:
3554 if spc[:12] == "totalheight=":
3555 newheight = spc.split('=')[1]
3556 mspecial.remove(spc)
3558 if len(mspecial) > 0:
3559 special = ",".join(mspecial)
# Find the inset's "height" parameter line, if any.
3563 rx = re.compile(r'(\s*height\s*)(\S+)$')
3564 kk = find_re(document.body, rx, i, j)
3566 m = rx.match(document.body[kk])
3572 val = val + "," + special
# Demote the former height value into special as height=..., either by
# rewriting the existing special line or inserting a new one after kk.
3573 document.body[k] = "\tspecial " + "height=" + val
3575 document.body.insert(kk + 1, "\tspecial height=" + val)
# Promote totalheight= to the plain height line, or drop/insert the
# height line depending on what was present.
3577 document.body[kk] = m.group(1) + newheight
3579 del document.body[kk]
3580 elif newheight != "":
3581 document.body.insert(k, "\theight " + newheight)
# lyx2lyx driver tables: each entry pairs a target file-format number
# with the list of functions implementing that conversion step.
# NOTE(review): this excerpt is elided — the opening "convert = [" line
# and several list entries are not visible here.
3588 supported_versions = ["2.4.0", "2.4"]
3590 [545, [convert_lst_literalparam]],
3595 [550, [convert_fontenc]],
3602 [557, [convert_vcsinfo]],
3603 [558, [removeFrontMatterStyles]],
3606 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3610 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3611 [566, [convert_hebrew_parentheses]],
3617 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3618 [573, [convert_inputencoding_namechange]],
3619 [574, [convert_ruby_module, convert_utf8_japanese]],
3620 [575, [convert_lineno]],
3622 [577, [convert_linggloss]],
3626 [581, [convert_osf]],
3627 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old fonts re-converted due to extra options
3628 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3630 [585, [convert_pagesizes]],
3632 [587, [convert_pagesizenames]],
3634 [589, [convert_totalheight]]
# Reversion chain: downgrade steps, applied from newest format to oldest.
3637 revert = [[588, [revert_totalheight]],
3638 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3639 [586, [revert_pagesizenames]],
3640 [585, [revert_dupqualicites]],
3641 [584, [revert_pagesizes,revert_komafontsizes]],
3642 [583, [revert_vcsinfo_rev_abbrev]],
3643 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3644 [581, [revert_CantarellFont,revert_FiraFont]],
3645 [580, [revert_texfontopts,revert_osf]],
3646 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3647 [578, [revert_babelfont]],
3648 [577, [revert_drs]],
3649 [576, [revert_linggloss, revert_subexarg]],
3650 [575, [revert_new_languages]],
3651 [574, [revert_lineno]],
3652 [573, [revert_ruby_module, revert_utf8_japanese]],
3653 [572, [revert_inputencoding_namechange]],
3654 [571, [revert_notoFonts]],
3655 [570, [revert_cmidruletrimming]],
3656 [569, [revert_bibfileencodings]],
3657 [568, [revert_tablestyle]],
3658 [567, [revert_soul]],
3659 [566, [revert_malayalam]],
3660 [565, [revert_hebrew_parentheses]],
3661 [564, [revert_AdobeFonts]],
3662 [563, [revert_lformatinfo]],
3663 [562, [revert_listpargs]],
3664 [561, [revert_l7ninfo]],
3665 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3666 [559, [revert_timeinfo, revert_namenoextinfo]],
3667 [558, [revert_dateinfo]],
3668 [557, [addFrontMatterStyles]],
3669 [556, [revert_vcsinfo]],
3670 [555, [revert_bibencoding]],
3671 [554, [revert_vcolumns]],
3672 [553, [revert_stretchcolumn]],
3673 [552, [revert_tuftecite]],
3674 [551, [revert_floatpclass, revert_floatalignment]],
3675 [550, [revert_nospellcheck]],
3676 [549, [revert_fontenc]],
3677 [548, []],# dummy format change
3678 [547, [revert_lscape]],
3679 [546, [revert_xcharter]],
3680 [545, [revert_paratype]],
3681 [544, [revert_lst_literalparam]]
3685 if __name__ == "__main__":