1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
30 find_end_of, find_end_of_inset, find_end_of_layout, find_token,
31 find_token_backwards, find_token_exact, find_re, get_bool_value,
32 get_containing_inset, get_containing_layout, get_option_value, get_value,
35 # find_complete_lines,
36 # find_re, find_substring,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages, with their options, to the user preamble.

    fontmap: dict mapping a LaTeX package name to the list of package
    options collected by revert_fonts(). An empty option list yields a
    plain \\usepackage{pkg}; otherwise the options are joined into one
    bracketed list.
    """
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            # Collapse all collected options into a single option list.
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            # No options: emit a bare \usepackage (xoption must still be defined).
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Return the canonical 'pkg:opt1-opt2-...' key used by pkg2fontmap."""
    joined_opts = "-".join(options)
    return "%s:%s" % (pkg, joined_opts)
# fontinfo record: per-font metadata linking LyX font names to LaTeX packages.
self.fontname = None # key into font2pkgmap
self.fonttype = None # roman,sans,typewriter,math
self.scaletype = None # None,sf,tt (selects the \font_*_scale header tag)
self.scaleopt = None # None, 'scaled', 'scale' (name of the package's scaling option)
self.pkgkey = None # key into pkg2fontmap (built by createkey())
self.osfopt = None # None, string (package option enabling old-style figures)
self.osfdef = "false" # "false" or "true" (whether osf is the package default)
# Recompute the package key from the current package name and options.
self.pkgkey = createkey(self.package, self.options)
# Bidirectional maps between LyX font names and LaTeX package keys.
self.font2pkgmap = dict()  # font name -> fontinfo record
self.pkg2fontmap = dict()  # package key (see createkey()) -> font name
self.pkginmap = dict() # defines, if a map for package exists
def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
"""Expand fontinfo mapping"""
# fontlist: list of fontnames, each element
# may contain a ','-separated list of needed options
# like e.g. 'IBMPlexSansCondensed,condensed'
# font_type: one of 'roman', 'sans', 'typewriter', 'math'
# scale_type: one of None, 'sf', 'tt'
# pkg: package defining the font. Defaults to fontname if None
# scaleopt: one of None, 'scale', 'scaled', or some other string
# to be used in scale option (e.g. scaled=0.7)
# osfopt: None or some other string to be used in osf option
# osfdef: "true" if osf is default
fe.fonttype = font_type
fe.scaletype = scale_type
fe.fontname = font_name
fe.scaleopt = scaleopt
# Package defaults to the font name when no explicit package was given.
fe.package = font_name
# Register the record in both direction maps.
self.font2pkgmap[font_name] = fe
if fe.pkgkey in self.pkg2fontmap:
# Repeated the same entry? Check content
if self.pkg2fontmap[fe.pkgkey] != font_name:
# NOTE(review): 'document' is not a parameter of this method — verify it
# is actually reachable here; this looks like it would raise NameError.
document.error("Something is wrong in pkgname+options <-> fontname mapping")
self.pkg2fontmap[fe.pkgkey] = font_name
self.pkginmap[fe.package] = 1
def getfontname(self, pkg, options):
"""Return the LyX font name registered for this package+options, if any."""
pkgkey = createkey(pkg, options)
if not pkgkey in self.pkg2fontmap:
fontname = self.pkg2fontmap[pkgkey]
# Sanity check: both direction maps must agree on this font.
if not fontname in self.font2pkgmap:
# NOTE(review): 'document' is not in scope in this method — confirm.
document.error("Something is wrong in pkgname+options <-> fontname mapping")
if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
"""Build a fontmapping for the requested font families (by family tag)."""
# Create info for known fonts for the use in
# convert_latexFonts() and
# revert_latexFonts()
#
# * Would be more handy to parse latexFonts file,
# but the path to this file is unknown
# * For now, add DejaVu and IBMPlex only.
# * Expand, if desired
for font in fontlist:
# DejaVu family: package name equals the font name (pkg=None).
fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
# IBM Plex family: weight variants map to package options.
fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
'IBMPlexSerifSemibold,semibold'],
"roman", None, "plex-serif")
fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
"sans", "sf", "plex-sans", "scale")
fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
'IBMPlexMonoSemibold,semibold'],
"typewriter", "tt", "plex-mono", "scale")
elif font == 'Adobe':
fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
'NotoSerifThin,thin', 'NotoSerifLight,light',
'NotoSerifExtralight,extralight'],
"roman", None, "noto-serif", None, "osf")
fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
'NotoSansThin,thin', 'NotoSansLight,light',
'NotoSansExtralight,extralight'],
"sans", "sf", "noto-sans", "scaled")
fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
elif font == 'Cantarell':
fm.expandFontMapping(['cantarell,defaultsans'],
"sans", "sf", "cantarell", "scaled", "oldstyle")
elif font == 'Chivo':
fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
'Chivo,regular', 'ChivoMedium,medium'],
"sans", "sf", "Chivo", "scale", "oldstyle")
elif font == 'CrimsonPro':
# CrimsonPro has lining figures as an *option*; osf is the default ("true").
fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
'CrimsonProMedium,medium'],
"roman", None, "CrimsonPro", None, "lf", "true")
fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
'FiraSansThin,thin', 'FiraSansLight,light',
'FiraSansExtralight,extralight',
'FiraSansUltralight,ultralight'],
"sans", "sf", "FiraSans", "scaled", "lf", "true")
fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
elif font == 'libertinus':
fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
def convert_fonts(document, fm, osfoption = "osf"):
"""Handle font definition (LaTeX preamble -> native)"""
# Matches \usepackage[opts]{pkg} lines in the preamble.
rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
# Matches a 'scale=' / 'scaled=' option and captures its value.
rscaleopt = re.compile(r'^scaled?=(.*)')
# Check whether we go beyond font option feature introduction
haveFontOpts = document.end_format > 580
i = find_re(document.preamble, rpkg, i+1)
mo = rpkg.search(document.preamble[i])
if mo == None or mo.group(2) == None:
options = mo.group(2).replace(' ', '').split(",")
# Scan options for the osf flag and a scale value.
while o < len(options):
if options[o] == osfoption:
mo = rscaleopt.search(options[o])
# Only packages registered in the font mapping are converted.
if not pkg in fm.pkginmap:
# Try with name-option combination first
# (only one default option supported currently)
while o < len(options):
fn = fm.getfontname(pkg, [opt])
fn = fm.getfontname(pkg, [])
fn = fm.getfontname(pkg, options)
del document.preamble[i]
fontinfo = fm.font2pkgmap[fn]
if fontinfo.scaletype == None:
fontscale = "\\font_" + fontinfo.scaletype + "_scale"
fontinfo.scaleval = oscale
if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
if fontinfo.osfopt == None:
# NOTE(review): extending with a *string* appends its individual
# characters — confirm [osfoption] was not intended here.
options.extend(osfoption)
osf = find_token(document.header, "\\font_osf false")
osftag = "\\font_osf"
if osf == -1 and fontinfo.fonttype != "math":
# Try with newer format
osftag = "\\font_" + fontinfo.fonttype + "_osf"
osf = find_token(document.header, osftag + " false")
document.header[osf] = osftag + " true"
# Drop the marker comment lyx2lyx itself placed above the package line.
if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
del document.preamble[i-1]
if fontscale != None:
j = find_token(document.header, fontscale, 0)
val = get_value(document.header, fontscale, j)
scale = "%03d" % int(float(oscale) * 100)
document.header[j] = fontscale + " " + scale + " " + vals[1]
ft = "\\font_" + fontinfo.fonttype
j = find_token(document.header, ft, 0)
val = get_value(document.header, ft, j)
words = val.split() # ! splits also values like '"DejaVu Sans"'
words[0] = '"' + fn + '"'
document.header[j] = ft + ' ' + ' '.join(words)
if haveFontOpts and fontinfo.fonttype != "math":
fotag = "\\font_" + fontinfo.fonttype + "_opts"
fo = find_token(document.header, fotag)
document.header[fo] = fotag + " \"" + ",".join(options) + "\""
# Sensible place to insert tag
fo = find_token(document.header, "\\font_sf_scale")
document.warning("Malformed LyX document! Missing \\font_sf_scale")
document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
"""Revert native font definition to LaTeX"""
# fonlist := list of fonts created from the same package
# Empty package means that the font-name is the same as the package-name
# fontmap (key = package, val += found options) will be filled
# and used later in add_preamble_fonts() to be added to user-preamble
rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
while i < len(document.header):
i = find_re(document.header, rfontscale, i+1)
mo = rfontscale.search(document.header[i])
ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
val = get_value(document.header, ft, i)
words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
font = words[0].strip('"') # TeX font name has no whitespace
if not font in fm.font2pkgmap:
fontinfo = fm.font2pkgmap[font]
val = fontinfo.package
if not val in fontmap:
# Optionally pick up the per-family \font_*_opts header line as options.
if OnlyWithXOpts or WithXOpts:
if ft == "\\font_math":
regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
if ft == "\\font_sans":
regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
elif ft == "\\font_typewriter":
regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
x = find_re(document.header, regexp, 0)
if x == -1 and OnlyWithXOpts:
# We need to use this regex since split() does not handle quote protection
xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
opts = xopts[1].strip('"').split(",")
fontmap[val].extend(opts)
del document.header[x]
# Reset the header line to the default font.
words[0] = '"default"'
document.header[i] = ft + ' ' + ' '.join(words)
if fontinfo.scaleopt != None:
xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
mo = rscales.search(xval)
# set correct scale option
fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
if fontinfo.osfopt != None:
if fontinfo.osfdef == "true":
osf = find_token(document.header, "\\font_osf " + oldval)
if osf == -1 and ft != "\\font_math":
# Try with newer format
osftag = "\\font_roman_osf " + oldval
if ft == "\\font_sans":
osftag = "\\font_sans_osf " + oldval
elif ft == "\\font_typewriter":
osftag = "\\font_typewriter_osf " + oldval
osf = find_token(document.header, osftag)
fontmap[val].extend([fontinfo.osfopt])
if len(fontinfo.options) > 0:
fontmap[val].extend(fontinfo.options)
387 ###############################################################################
389 ### Conversion and reversion routines
391 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    'auto' becomes 'auto-legacy' and 'default' becomes 'auto-legacy-plain'
    in the \\inputencoding header line.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding line: nothing to rename. Without this guard the
        # -1 index would silently rewrite the last header line.
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back.

    'auto-legacy-plain' becomes 'default' and 'auto-legacy' becomes 'auto'
    in the \\inputencoding header line (inverse of
    convert_inputencoding_namechange; the longer name must be replaced first).
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding line: nothing to revert (guard against the
        # -1 index clobbering the last header line).
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Translate a Noto font setup in the LaTeX preamble into native settings."""
    # System (non-TeX) fonts need no preamble handling — bail out early.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definitions to LaTeX preamble code."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects package -> options; revert_fonts() fills it and
        # add_preamble_fonts() writes the \usepackage lines.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Translate DejaVu / IBM Plex preamble font setups into native settings."""
    # Only TeX fonts carry preamble packages worth converting.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu / IBM Plex font definitions to LaTeX preamble code."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects package -> options for add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Translate Adobe Source preamble font setups into native settings."""
    # Nothing to do for system (non-TeX) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert Adobe Source font definitions to LaTeX preamble code."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects package -> options for add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
"""Remove styles Begin/EndFrontmatter"""
layouts = ['BeginFrontmatter', 'EndFrontmatter']
tokenend = len('\\begin_layout ')
i = find_token_exact(document.body, '\\begin_layout ', i+1)
# Layout name is everything after the '\begin_layout ' prefix.
layout = document.body[i][tokenend:].strip()
if layout not in layouts:
j = find_end_of_layout(document.body, i)
document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Swallow trailing blank lines so no empty gap is left behind.
while document.body[j+1].strip() == '':
document.body[i:j+1] = []
def addFrontMatterStyles(document):
"""Use styles Begin/EndFrontmatter for elsarticle"""
# Only the elsarticle class defines these styles.
if document.textclass != "elsarticle":
def insertFrontmatter(prefix, line):
# Expand the insertion point over surrounding blank lines.
while above > 0 and document.body[above-1].strip() == '':
while document.body[below].strip() == '':
document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
'\\begin_inset Note Note',
'\\begin_layout Plain Layout',
'\\end_inset', '', '',
# Layouts considered part of the front matter.
layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
tokenend = len('\\begin_layout ')
i = find_token_exact(document.body, '\\begin_layout ', i+1)
layout = document.body[i][tokenend:].strip()
if layout not in layouts:
k = find_end_of_layout(document.body, i)
document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert 'End' first so the earlier 'Begin' index stays valid.
insertFrontmatter('End', k+1)
insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
"""Add param literal to include inset"""
i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip over the inset's parameter lines to the first blank line.
while i < j and document.body[i].strip() != '':
document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
"""Remove param literal from include inset"""
i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
# Drop the 'literal' parameter line from this inset, if present.
del_token(document.body, 'literal', i, j)
def revert_paratype(document):
"""Revert ParaType font definitions to LaTeX"""
if not get_bool_value(document.header, "\\use_non_tex_fonts"):
i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
i2 = find_token(document.header, "\\font_sans \"default\"", 0)
i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
sfval = find_token(document.header, "\\font_sf_scale", 0)
document.warning("Malformed LyX document: Missing \\font_sf_scale.")
sfscale = document.header[sfval].split()
document.header[sfval] = " ".join(sfscale)
sf_scale = float(val)
document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float but is compared against the *string*
# "100.0" — this is always True; confirm 100.0 (float) was intended.
if sf_scale != "100.0":
sfoption = "scaled=" + str(sf_scale / 100.0)
k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
ttval = get_value(document.header, "\\font_tt_scale", 0)
ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: the combined paratype package suffices.
if i1 != -1 and i2 != -1 and i3!= -1:
add_to_preamble(document, ["\\usepackage{paratype}"])
add_to_preamble(document, ["\\usepackage{PTSerif}"])
document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
add_to_preamble(document, ["\\usepackage{PTSans}"])
document.header[j] = document.header[j].replace("PTSans-TLF", "default")
add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
add_to_preamble(document, ["\\usepackage{PTMono}"])
document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
"""Revert XCharter font definitions to LaTeX"""
i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
# replace unsupported font setting
document.header[i] = document.header[i].replace("xcharter", "default")
# no need for preamble code with system fonts
if get_bool_value(document.header, "\\use_non_tex_fonts"):
# transfer old style figures setting to package options
j = find_token(document.header, "\\font_osf true")
document.header[j] = "\\font_osf false"
add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
"""Reverts the landscape environment (Landscape module) to TeX-code"""
if not "landscape" in document.get_module_list():
i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant wraps the environment in \afterpage{...}.
if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{afterpage}"])
document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{pdflscape}"])
document.del_module("landscape")
def convert_fontenc(document):
    """Convert the default fontenc setting from 'global' to 'auto'."""
    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        # Setting not present: nothing to convert (guard against the
        # -1 index rewriting the last header line).
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert the default fontenc setting from 'auto' back to 'global'."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        # Setting not present: nothing to revert (guard against the
        # -1 index rewriting the last header line).
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
"""Remove nospellcheck font info param"""
# Strip every \nospellcheck token from the body.
i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
"""Remove float placement params 'document' and 'class'"""
del_token(document.header, "\\float_placement class")
i = find_token(document.body, '\\begin_inset Float', i + 1)
j = find_end_of_inset(document.body, i)
# Look for either placement param inside this float inset.
k = find_token(document.body, 'placement class', i, j)
k = find_token(document.body, 'placement document', i, j)
def revert_floatalignment(document):
"""Remove float alignment params"""
# Global default alignment from the header (removed as a side effect).
galignment = get_value(document.header, "\\float_alignment", delete=True)
i = find_token(document.body, '\\begin_inset Float', i + 1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
k = find_token(document.body, 'alignment', i, j)
alignment = get_value(document.body, "alignment", k)
# 'document' means: fall back to the header-wide default.
if alignment == "document":
alignment = galignment
l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
document.warning("Can't find float layout!")
# Emit the equivalent LaTeX alignment command as ERT.
if alignment == "left":
alcmd = put_cmd_in_ert("\\raggedright{}")
elif alignment == "center":
alcmd = put_cmd_in_ert("\\centering{}")
elif alignment == "right":
alcmd = put_cmd_in_ert("\\raggedleft{}")
document.body[l+1:l+1] = alcmd
# There might be subfloats, so we do not want to move past
# the end of the inset.
def revert_tuftecite(document):
r"""Revert \cite commands in tufte classes"""
tufte = ["tufte-book", "tufte-handout"]
if document.textclass not in tufte:
i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
j = find_end_of_inset(document.body, i)
document.warning("Can't find end of citation inset at line %d!!" %(i))
k = find_token(document.body, "LatexCommand", i, j)
document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
cmd = get_value(document.body, "LatexCommand", k)
# Optional pre/post note arguments and the mandatory key.
pre = get_quoted_value(document.body, "before", i, j)
post = get_quoted_value(document.body, "after", i, j)
key = get_quoted_value(document.body, "key", i, j)
document.warning("Citation inset at line %d does not have a key!" %(i))
# Replace command with ERT
res += "[" + pre + "]"
res += "[" + post + "]"
res += "{" + key + "}"
document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
"""We remove the column varwidth flags or everything else will become a mess."""
i = find_token(document.body, "\\begin_inset Tabular", i+1)
j = find_end_of_inset(document.body, i+1)
document.warning("Malformed LyX document: Could not find end of tabular.")
# Scan every line of this tabular for varwidth column declarations.
for k in range(i, j):
if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
"""Revert standard columns with line breaks etc."""
i = find_token(document.body, "\\begin_inset Tabular", i+1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Could not find end of tabular.")
# Collect necessary column information
# Row/column counts are attribute values on the <features ...> line.
nrows = int(document.body[i+1].split('"')[3])
ncols = int(document.body[i+1].split('"')[5])
for k in range(ncols):
m = find_token(document.body, "<column", m)
width = get_option_value(document.body[m], 'width')
varwidth = get_option_value(document.body[m], 'varwidth')
alignment = get_option_value(document.body[m], 'alignment')
special = get_option_value(document.body[m], 'special')
# Record: [width, varwidth, alignment, special, body line index].
col_info.append([width, varwidth, alignment, special, m])
for row in range(nrows):
for col in range(ncols):
m = find_token(document.body, "<cell", m)
multicolumn = get_option_value(document.body[m], 'multicolumn')
multirow = get_option_value(document.body[m], 'multirow')
width = get_option_value(document.body[m], 'width')
rotate = get_option_value(document.body[m], 'rotate')
# Check for: linebreaks, multipars, non-standard environments
endcell = find_token(document.body, "</cell>", begcell)
if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
elif count_pars_in_inset(document.body, begcell + 2) > 1:
elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
alignment = col_info[col][2]
col_line = col_info[col][4]
# Translate the LyX alignment into an array-package column prefix.
if alignment == "center":
vval = ">{\\centering}"
elif alignment == "left":
vval = ">{\\raggedright}"
elif alignment == "right":
vval = ">{\\raggedleft}"
vval += "V{\\linewidth}"
document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
# ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
# with newlines, and we do not want that)
endcell = find_token(document.body, "</cell>", begcell)
nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
nle = find_end_of_inset(document.body, nl)
del(document.body[nle:nle+1])
document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
if needarray == True:
add_to_preamble(document, ["\\usepackage{array}"])
if needvarwidth == True:
add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
"""Revert bibliography encoding"""
i = find_token(document.header, "\\cite_engine", 0)
document.warning("Malformed document! Missing \\cite_engine")
engine = get_value(document.header, "\\cite_engine", i)
# biblatex handles the encoding via a package option instead of ERT.
if engine in ["biblatex", "biblatex-natbib"]:
# Map lyx to latex encoding names
"armscii8" : "armscii8",
"iso8859-1" : "latin1",
"iso8859-2" : "latin2",
"iso8859-3" : "latin3",
"iso8859-4" : "latin4",
"iso8859-5" : "iso88595",
"iso8859-6" : "8859-6",
"iso8859-7" : "iso-8859-7",
"iso8859-8" : "8859-8",
"iso8859-9" : "latin5",
"iso8859-13" : "latin7",
"iso8859-15" : "latin9",
"iso8859-16" : "latin10",
"applemac" : "applemac",
"cp437de" : "cp437de",
"utf8-platex" : "utf8",
i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
j = find_end_of_inset(document.body, i)
document.warning("Can't find end of bibtex inset at line %d!!" %(i))
encoding = get_quoted_value(document.body, "encoding", i, j)
# remove encoding line
k = find_token(document.body, "encoding", i, j)
# 'default' needs no special treatment.
if encoding == "default":
# Re-find inset end line
j = find_end_of_inset(document.body, i)
h = find_token(document.header, "\\biblio_options", 0)
biblio_options = get_value(document.header, "\\biblio_options", h)
if not "bibencoding" in biblio_options:
document.header[h] += ",bibencoding=%s" % encodings[encoding]
bs = find_token(document.header, "\\biblatex_bibstyle", 0)
# this should not happen
document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT.
document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
"""Separate vcs Info inset from buffer Info inset."""
# Map 'buffer'-type vcs-* args to their new 'vcs'-type arg names.
"vcs-revision" : "revision",
"vcs-tree-revision" : "tree-revision",
"vcs-author" : "author",
"vcs-time" : "time",
i = find_token(document.body, "\\begin_inset Info", i+1)
j = find_end_of_inset(document.body, i+1)
document.warning("Malformed LyX document: Could not find end of Info inset.")
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
if argv not in list(types.keys()):
# Rewrite both the inset type and its (renamed) argument.
document.body[tp] = "type \"vcs\""
document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
"""Merge vcs Info inset to buffer Info inset."""
args = ["revision", "tree-revision", "author", "time", "date" ]
i = find_token(document.body, "\\begin_inset Info", i+1)
j = find_end_of_inset(document.body, i+1)
document.warning("Malformed LyX document: Could not find end of Info inset.")
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
if argv not in args:
document.warning("Malformed Info inset. Invalid vcs arg.")
# Inverse of convert_vcsinfo: fold back into a 'buffer' inset, 'vcs-' prefix.
document.body[tp] = "type \"buffer\""
document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
" Convert abbreviated revisions to regular revisions. "
i = find_token(document.body, "\\begin_inset Info", i+1)
j = find_end_of_inset(document.body, i+1)
document.warning("Malformed LyX document: Could not find end of Info inset.")
tp = find_token(document.body, 'type', i, j)
tpv = get_quoted_value(document.body, "type", tp)
arg = find_token(document.body, 'arg', i, j)
argv = get_quoted_value(document.body, "arg", arg)
# Downgrade the abbreviated-revision arg to the plain 'revision' arg.
if( argv == "revision-abbrev" ):
document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Replaces each Info inset of type date/fixdate/moddate with the date
    rendered as plain text, formatted per the document's main language.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    # NOTE(review): the `dateformats = {` opener (and its closing brace) are
    # elided in this excerpt; entries map LyX language name -> 5 patterns.
    # NOTE(review): the "spanish"/"spanish-mexico" entries contain "%de",
    # which strftime renders as the day number followed by a literal "e" —
    # presumably a typo for "de"; confirm against the language files.
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],

    # The three date-flavoured Info inset types handled here.
    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    # (the guard around this warning is elided in this excerpt)
        document.warning("Malformed LyX document! No \\language header found!")

    # Iterate over all Info insets in the body; the `while` loop header and
    # the `i == -1` exit test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
        # (skip-on-malformed guard elided)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if tpv == "fixdate":
            # fixdate args look like "<format>@<ISO date>"; extract the date
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
            # NOTE(review): datetime.date has isoformat(), not isodate() —
            # this line would raise AttributeError; TODO confirm intended fix.
            result = dte.isodate()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
            # Custom format: translate Qt-style tokens to strftime codes.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # lone "d" not preceded by quote or "%" also means day-of-month
            fmt = re.sub('[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        # Replace the entire inset with the rendered date text.
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Replaces each Info inset of type time/fixtime/modtime with the time
    rendered as plain text, formatted per the document's main language.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # produce nothing).

    # The time formats for each language using strftime syntax:
    # long, short
    # NOTE(review): the `timeformats = {` opener and closing brace are elided
    # in this excerpt.
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]

    # The three time-flavoured Info inset types handled here.
    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    # (the `i == -1` guard around this warning is elided in this excerpt)
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
    lang = get_value(document.header, "\\language", i)

    # Iterate over all Info insets; loop header and exit test are elided.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # fall back to "now" when no fixed time is given
        dtme = datetime.now()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<ISO time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                # retry without seconds
                m = re.search(r'(\d\d):(\d\d)', isotime)
                    tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
            # Custom format: translate Qt-style tokens to strftime codes.
            # NOTE(review): chained replace is order-sensitive: e.g. after
            # "HH" -> "%H", the following "H" -> "%H" also rewrites the just
            # inserted "%H" to "%%H" — verify against real-world argv values.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # NOTE(review): `dte` is never defined in this function — this
            # most likely should be `tme.strftime(fmt)`; TODO confirm.
            result = dte.strftime(fmt)
        # NOTE(review): assigning a bare string to a list slice splices it
        # character by character; the date variant above uses `[result]` —
        # verify whether `[result]` is intended here as well.
        document.body[i : j+1] = result
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""

    # Iterate over all Info insets; the loop header and the `i == -1`
    # exit test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
        # (skip-on-malformed guard elided)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # (the check that tpv is the "buffer" type is elided here)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
        # Rewrite the argument line: name-noext becomes plain name.
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info insets to plain text."""

    # Iterate over all Info insets; the loop header and the `i == -1`
    # exit test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
        # (skip-on-malformed guard elided)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # (the check that tpv is the "l7n" type is elided here)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # NOTE(review): assigning a bare string to a list slice splices it
        # one character per body line; `[argv]` may be intended — verify.
        document.body[i : j+1] = argv
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code"""
    # Iterate over all listpreamble Argument insets; the loop header and
    # the `i == -1` exit test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        # (failure guard elided)
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
        # NOTE(review): `parbeg` (start of the parent paragraph, taken from
        # `parent`) is assigned on a line elided from this excerpt.
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # Remove the Argument inset ...
        del document.body[i:j+1]
        # ... and re-insert its contents as an ERT "{...}" group at the
        # start of the parent paragraph.
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to text."""

    # Iterate over all Info insets; the loop header and the `i == -1`
    # exit test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
        # (skip-on-malformed guard elided)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
        # "69" is the layout format number this conversion targets.
        # NOTE(review): a bare string in a slice assignment splices per
        # character (-> lines "6" and "9"); `["69"]` may be intended.
        document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Stack of languages in effect; one entry per open layout, seeded with
    # the document's main language.
    lang_stack = [document.language]
    for idx, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # NOTE(review): lstrip() strips a *character set*, so language
            # names starting with one of "\\lang " characters (e.g.
            # "ngerman") lose leading letters; kept for bug-compatibility.
            lang_stack[-1] = line.lstrip('\\lang ')
        elif line.startswith('\\begin_layout'):
            # A nested layout starts out with the current language.
            lang_stack.append(lang_stack[-1])
            # print (line, lang_stack[-1])
        elif line.startswith('\\end_layout'):
            lang_stack.pop()
        elif lang_stack[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap the two parenthesis characters via a NUL placeholder.
            swapped = line.replace('(', '\x00').replace(')', '(')
            document.body[idx] = swapped.replace('\x00', ')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed"""
    # This only exists to keep the convert/revert naming convention
    # (the swap is an involution, so applying it again restores the old form)
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output"""
    # Delegate to the shared language-revert helper from lyx2lyx_tools.
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT"""

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # If any soul flex inset is used, the soul package must be loaded
    # (the `for flex in flexes:` / `if i != -1:` lines are elided here).
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
            add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlight additionally requires the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
        add_to_preamble(document, ["\\usepackage{color}"])

    # Turn each flex inset into the corresponding soul command.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove tablestyle params"""
    # Drop the \tablestyle header line if present (the `if i != -1:`
    # guard is elided in this excerpt).
    i = find_token(document.header, "\\tablestyle")
        del document.header[i]
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings"""

    # Determine the cite engine; only biblatex(-natbib) supports per-file
    # encodings (several setup/guard lines are elided in this excerpt).
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)

    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
    # (the dict opener and several entries are elided in this excerpt)
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",

    # Iterate over all bibtex command insets; the loop header and exit
    # test are elided in this excerpt.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
        # (skip-on-malformed guard elided)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        # (guard `k != -1` elided)
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # encodings is a "\t"-separated list of "<bibfile> <encoding>" pairs
        enclist = encodings.split("\t")
        # (encmap construction loop header elided)
            ppp = pp.split(" ", 1)
            encmap[ppp[0]] = ppp[1]
        for bib in bibfiles:
            # Emit one \addbibresource per file, with its encoding if known.
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
        # (guard `if opts:` elided)
            pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming"""

    # FIXME: Revert to TeX code?
    # Iterate over table cells; the loop header and `i == -1` exit test
    # are elided in this excerpt.
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        j = document.body[i].find('trim="')
        # (skip when no trim attribute is present; guard elided)
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# Local layout definition for the Ruby flex inset; appended to / removed
# from documents by convert_ruby_module()/revert_ruby_module() below.
# NOTE(review): the `ruby_inset_def = [` opener and several layout lines
# are elided in this excerpt; the string contents are kept verbatim.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition"""
    # Adding the module only makes sense if the legacy local layout
    # definition was actually present (and has now been removed).
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition"""
    # del_module() reports whether the module was actually in use; only
    # then is the local fallback layout definition needed.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        # (early `return` elided in this excerpt)
    inputenc = get_value(document.header, "\\inputencoding")
    # Collapse the engine-specific utf8 variants into plain utf8.
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        # (early `return` elided in this excerpt)
    lang = get_value(document.header, "\\language")
    # Restore the engine-specific utf8 variant for each Japanese flavour.
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."

    # Read (and delete) the lineno header entries; the continuation line of
    # this call is elided in this excerpt.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        # (early `return` elided here)
    # Wrap non-empty options in brackets for \usepackage (the `if options:`
    # guard is elided in this excerpt).
        options = "[" + options + "]"
    # Re-create the preamble code (continuation with "\linenumbers" elided).
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."
    # Look for \linenumbers in the user preamble; its preceding line should
    # be the \usepackage{lineno} call it belongs to. (The `use_lineno`/
    # `options` initialisation and the `if i > -1:` guard are elided here.)
    i = find_token(document.preamble, "\\linenumbers", 1)
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        # extract any package options and remove the preamble lines
        options = usepkg.group(1).strip("[]")
        del(document.preamble[i-1:i+1])
        del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Insert the native header entries just before \index.
    k = find_token(document.header, "\\index ")
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    " Convert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        # (early `return` elided in this excerpt)

    i = find_token(document.header, "\\use_default_options true")
    # (guard on i elided here)
    val = get_value(document.header, "\\inputencoding")
    # (missing-value guard elided)
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
    # latin9 replaces the former default encoding of the aa class.
    if val == "auto-legacy" or val == "latin9":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
        # (k == -1 branch: add a fresh \options line; else append)
            document.header.insert(i, "\\options latin9")
            document.header[k] += ",latin9"
def revert_aaencoding(document):
    " Revert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        # (early `return` elided in this excerpt)

    i = find_token(document.header, "\\use_default_options true")
    # (guard on i elided here)
    val = get_value(document.header, "\\inputencoding")
    # (missing-value guard elided)
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
    # Force explicit utf8 instead of the class default options.
        document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
    # (k == -1 branch: add a fresh \options line; else append)
        document.header.insert(i, "\\options utf8")
        document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    # (the dict's closing brace is elided in this excerpt)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
    # Collect every new language actually used, starting with the main one.
    if document.language in new_languages:
        used_languages = {document.language}
    # (the `else:` branch header is elided here)
        used_languages = set()
    # Scan the body for \lang switches; loop header and exit test elided.
        i = find_token(document.body, "\\lang", i+1)
        val = get_value(document.body, "\\lang", i)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")

    # Emulate each remaining language via the generic revert helper.
    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
# Local layout emulating the deprecated linguistics Glosse inset; used by
# convert_linggloss()/revert_linggloss() below.
# NOTE(review): the `gloss_inset_def = [` opener and several layout lines
# are elided in this excerpt; the string contents are kept verbatim.
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Local layout emulating the deprecated linguistics Tri-Glosse inset; the
# companion of gloss_inset_def above. Several layout lines and the closing
# bracket are elided in this excerpt; string contents are kept verbatim.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    " Move old ling glosses to local layout "
    # For each deprecated gloss inset that actually occurs in the body,
    # append the matching emulation layout to the document's local layout.
    legacy_insets = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for inset_token, layout_def in legacy_insets:
        if find_token(document.body, inset_token, 0) != -1:
            document.append_local_layout(layout_def)
# Backward conversion: replace the new "Interlinear Gloss (2/3 Lines)" flex
# insets with raw covington commands in ERT, and drop the legacy local-layout
# definitions that convert_linggloss may have added.
1981 def revert_linggloss(document):
1982 " Revert to old ling gloss definitions "
# Only documents using the linguistics module can contain these insets.
1983 if not "linguistics" in document.get_module_list():
1985 document.del_local_layout(gloss_inset_def)
1986 document.del_local_layout(glosss_inset_def)
1989 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1990 for glosse in glosses:
1993 i = find_token(document.body, glosse, i+1)
1996 j = find_end_of_inset(document.body, i)
1998 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): becomes the [..] option of the command.
2001 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2002 endarg = find_end_of_inset(document.body, arg)
2005 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2006 if argbeginPlain == -1:
2007 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2009 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2010 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2012 # remove Arg insets and paragraph, if it only contains this inset
2013 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2014 del document.body[arg - 1 : endarg + 4]
2016 del document.body[arg : endarg + 1]
# Mandatory arguments post:1..post:3 — same extraction/cleanup pattern
# as above; post:3 is only emitted for the three-line variant below.
2018 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2019 endarg = find_end_of_inset(document.body, arg)
2022 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2023 if argbeginPlain == -1:
2024 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2026 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2027 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2029 # remove Arg insets and paragraph, if it only contains this inset
2030 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2031 del document.body[arg - 1 : endarg + 4]
2033 del document.body[arg : endarg + 1]
2035 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2036 endarg = find_end_of_inset(document.body, arg)
2039 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2040 if argbeginPlain == -1:
2041 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2043 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2044 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2046 # remove Arg insets and paragraph, if it only contains this inset
2047 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2048 del document.body[arg - 1 : endarg + 4]
2050 del document.body[arg : endarg + 1]
2052 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2053 endarg = find_end_of_inset(document.body, arg)
2056 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2057 if argbeginPlain == -1:
2058 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2060 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2061 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2063 # remove Arg insets and paragraph, if it only contains this inset
2064 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2065 del document.body[arg - 1 : endarg + 4]
2067 del document.body[arg : endarg + 1]
# NOTE(review): cmd is presumably set to "\\gloss" / "\\trigloss" depending
# on the 2-/3-line variant (see the \\trigloss test below) — confirm against
# the unabridged file.
2070 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset as ERT: cmd[opt]{arg1}{arg2}[{arg3}].
2073 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2074 endInset = find_end_of_inset(document.body, i)
2075 endPlain = find_end_of_layout(document.body, beginPlain)
2076 precontent = put_cmd_in_ert(cmd)
2077 if len(optargcontent) > 0:
2078 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2079 precontent += put_cmd_in_ert("{")
2081 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2082 if cmd == "\\trigloss":
2083 postcontent += put_cmd_in_ert("}{") + marg3content
2084 postcontent += put_cmd_in_ert("}")
# Splice tail first so earlier indices stay valid, then head, then
# remove the inset opening lines.
2086 document.body[endPlain:endInset + 1] = postcontent
2087 document.body[beginPlain + 1:beginPlain] = precontent
2088 del document.body[i : beginPlain + 1]
# The emitted commands need the covington package.
2090 document.append_local_layout("Requires covington")
# Backward conversion: a run of Subexample paragraphs carrying an Argument 1
# option is rewritten as an explicit ERT subexamples environment with \item
# markers, since older formats cannot represent the argument.
2095 def revert_subexarg(document):
2096 " Revert linguistic subexamples with argument to ERT "
2098 if not "linguistics" in document.get_module_list():
2104 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2107 j = find_end_of_layout(document.body, i)
2109 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extend j over all directly following Subexample paragraphs so the
# whole run is treated as one environment.
2112 # check for consecutive layouts
2113 k = find_token(document.body, "\\begin_layout", j)
2114 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2116 j = find_end_of_layout(document.body, k)
2118 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2121 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2125 endarg = find_end_of_inset(document.body, arg)
2127 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2128 if argbeginPlain == -1:
2129 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2131 argendPlain = find_end_of_inset(document.body, argbeginPlain)
# The option may contain markup, hence the lyx2latex conversion here.
2132 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2134 # remove Arg insets and paragraph, if it only contains this inset
2135 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2136 del document.body[arg - 1 : endarg + 4]
2138 del document.body[arg : endarg + 1]
2140 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
# Indices shifted by the deletions above, so recompute the range.
2142 # re-find end of layout
2143 j = find_end_of_layout(document.body, i)
2145 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2148 # check for consecutive layouts
2149 k = find_token(document.body, "\\begin_layout", j)
2150 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each following subexample becomes a Standard paragraph with \item.
2152 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2153 j = find_end_of_layout(document.body, k)
2155 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2158 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the run, then open it (with \item) at i.
2160 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2161 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2162 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2164 document.append_local_layout("Requires covington")
# Backward conversion: rewrite the Discourse Representation Structure flex
# insets (linguistics module) as raw drs/covington commands in ERT.
# The same extract-and-remove pattern is applied to up to six inset
# arguments (1, 2, post:1..post:4); which of them are emitted depends on
# the DRS flavor.
2168 def revert_drs(document):
2169 " Revert DRS insets (linguistics) to ERT "
2171 if not "linguistics" in document.get_module_list():
2175 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2176 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2177 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2178 "\\begin_inset Flex SDRS"]
2182 i = find_token(document.body, drs, i+1)
2185 j = find_end_of_inset(document.body, i)
2187 document.warning("Malformed LyX document: Can't find end of DRS inset")
2190 # Check for arguments
2191 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2192 endarg = find_end_of_inset(document.body, arg)
2195 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2196 if argbeginPlain == -1:
2197 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2199 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2200 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2202 # remove Arg insets and paragraph, if it only contains this inset
2203 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2204 del document.body[arg - 1 : endarg + 4]
2206 del document.body[arg : endarg + 1]
# After each deletion the inset end moves, so j is recomputed before
# looking for the next argument.
2209 j = find_end_of_inset(document.body, i)
2211 document.warning("Malformed LyX document: Can't find end of DRS inset")
2214 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2215 endarg = find_end_of_inset(document.body, arg)
2218 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2219 if argbeginPlain == -1:
2220 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2222 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2223 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2225 # remove Arg insets and paragraph, if it only contains this inset
2226 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2227 del document.body[arg - 1 : endarg + 4]
2229 del document.body[arg : endarg + 1]
2232 j = find_end_of_inset(document.body, i)
2234 document.warning("Malformed LyX document: Can't find end of DRS inset")
# The post:N arguments default to [] so the assembly code below can
# always concatenate them even when the argument is absent.
2237 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2238 endarg = find_end_of_inset(document.body, arg)
2239 postarg1content = []
2241 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2242 if argbeginPlain == -1:
2243 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2245 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2246 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2248 # remove Arg insets and paragraph, if it only contains this inset
2249 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2250 del document.body[arg - 1 : endarg + 4]
2252 del document.body[arg : endarg + 1]
2255 j = find_end_of_inset(document.body, i)
2257 document.warning("Malformed LyX document: Can't find end of DRS inset")
2260 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2261 endarg = find_end_of_inset(document.body, arg)
2262 postarg2content = []
2264 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2265 if argbeginPlain == -1:
2266 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2268 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2269 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2271 # remove Arg insets and paragraph, if it only contains this inset
2272 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2273 del document.body[arg - 1 : endarg + 4]
2275 del document.body[arg : endarg + 1]
2278 j = find_end_of_inset(document.body, i)
2280 document.warning("Malformed LyX document: Can't find end of DRS inset")
2283 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2284 endarg = find_end_of_inset(document.body, arg)
2285 postarg3content = []
2287 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2288 if argbeginPlain == -1:
2289 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2291 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2292 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2294 # remove Arg insets and paragraph, if it only contains this inset
2295 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2296 del document.body[arg - 1 : endarg + 4]
2298 del document.body[arg : endarg + 1]
2301 j = find_end_of_inset(document.body, i)
2303 document.warning("Malformed LyX document: Can't find end of DRS inset")
2306 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2307 endarg = find_end_of_inset(document.body, arg)
2308 postarg4content = []
2310 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2311 if argbeginPlain == -1:
2312 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2314 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2315 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2317 # remove Arg insets and paragraph, if it only contains this inset
2318 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2319 del document.body[arg - 1 : endarg + 4]
2321 del document.body[arg : endarg + 1]
# NOTE(review): cmd is presumably assigned one of the \drs-family commands
# per branch (the later tests compare cmd against "\\qdrs", "\\condrs",
# "\\ifdrs") — confirm the assignments against the unabridged file.
2323 # The respective LaTeX command
2325 if drs == "\\begin_inset Flex DRS*":
2327 elif drs == "\\begin_inset Flex IfThen-DRS":
2329 elif drs == "\\begin_inset Flex Cond-DRS":
2331 elif drs == "\\begin_inset Flex QDRS":
2333 elif drs == "\\begin_inset Flex NegDRS":
2335 elif drs == "\\begin_inset Flex SDRS":
# Rebuild the inset as ERT: cmd{arg1}[{arg2}]{body}{post args...}.
2338 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2339 endInset = find_end_of_inset(document.body, i)
2340 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2341 precontent = put_cmd_in_ert(cmd)
2342 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2343 if drs == "\\begin_inset Flex SDRS":
2344 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2345 precontent += put_cmd_in_ert("{")
# Only the qdrs/condrs/ifdrs flavors take trailing arguments.
2348 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2349 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2350 if cmd == "\\condrs" or cmd == "\\qdrs":
2351 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2353 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2355 postcontent = put_cmd_in_ert("}")
# Splice tail first so earlier indices stay valid.
2357 document.body[endPlain:endInset + 1] = postcontent
2358 document.body[beginPlain + 1:beginPlain] = precontent
2359 del document.body[i : beginPlain + 1]
2361 document.append_local_layout("Provides covington 1")
2362 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Backward conversion: with non-TeX fonts + babel, move the rm/sf/tt font
# selections out of the header (resetting them to "default") and emit
# equivalent \babelfont preamble commands instead.
2368 def revert_babelfont(document):
2369 " Reverts the use of \\babelfont to user preamble "
# Only applies to non-TeX (system) fonts with the babel language package.
2371 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2374 i = find_token(document.header, '\\language_package', 0)
2376 document.warning("Malformed LyX document: Missing \\language_package.")
2378 if get_value(document.header, "\\language_package", 0) != "babel":
2381 # check font settings
# Defaults used when the corresponding header entry is absent/unset.
2383 roman = sans = typew = "default"
2385 sf_scale = tt_scale = 100.0
2387 j = find_token(document.header, "\\font_roman", 0)
2389 document.warning("Malformed LyX document: Missing \\font_roman.")
2391 # We need to use this regex since split() does not handle quote protection
2392 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
# Remember the selected font, then reset the header entry to default.
2393 roman = romanfont[2].strip('"')
2394 romanfont[2] = '"default"'
2395 document.header[j] = " ".join(romanfont)
2397 j = find_token(document.header, "\\font_sans", 0)
2399 document.warning("Malformed LyX document: Missing \\font_sans.")
2401 # We need to use this regex since split() does not handle quote protection
2402 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2403 sans = sansfont[2].strip('"')
2404 sansfont[2] = '"default"'
2405 document.header[j] = " ".join(sansfont)
2407 j = find_token(document.header, "\\font_typewriter", 0)
2409 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2411 # We need to use this regex since split() does not handle quote protection
2412 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2413 typew = ttfont[2].strip('"')
2414 ttfont[2] = '"default"'
2415 document.header[j] = " ".join(ttfont)
2417 i = find_token(document.header, "\\font_osf", 0)
2419 document.warning("Malformed LyX document: Missing \\font_osf.")
# osf: whether old-style figures were requested.
2421 osf = str2bool(get_value(document.header, "\\font_osf", i))
2423 j = find_token(document.header, "\\font_sf_scale", 0)
2425 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2427 sfscale = document.header[j].split()
2430 document.header[j] = " ".join(sfscale)
2433 sf_scale = float(val)
2435 document.warning("Invalid font_sf_scale value: " + val)
2437 j = find_token(document.header, "\\font_tt_scale", 0)
2439 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2441 ttscale = document.header[j].split()
2444 document.header[j] = " ".join(ttscale)
2447 tt_scale = float(val)
2449 document.warning("Invalid font_tt_scale value: " + val)
# Emit the \babelfont commands inside \AtBeginDocument; the scale
# percentage is converted to the fontspec Scale factor (e.g. 90 -> 0.9).
2451 # set preamble stuff
2452 pretext = ['%% This document must be processed with xelatex or lualatex!']
2453 pretext.append('\\AtBeginDocument{%')
2454 if roman != "default":
2455 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2456 if sans != "default":
2457 sf = '\\babelfont{sf}['
2458 if sf_scale != 100.0:
2459 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2460 sf += 'Mapping=tex-text]{' + sans + '}'
2462 if typew != "default":
2463 tw = '\\babelfont{tt}'
2464 if tt_scale != 100.0:
2465 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2466 tw += '{' + typew + '}'
2469 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2471 insert_to_preamble(document, pretext)
# Backward conversion: a native minionpro roman font that carries extra
# \font_roman_opts is reset to "default" and re-expressed as a
# \usepackage[<opts>]{MinionPro} preamble line.
2474 def revert_minionpro(document):
2475 " Revert native MinionPro font definition (with extra options) to LaTeX "
# MinionPro here is a TeX font; nothing to do with non-TeX fonts.
2477 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Only act when extra options are present (otherwise the plain
# \font_roman value is understood by older formats as well).
2480 regexp = re.compile(r'(\\font_roman_opts)')
2481 x = find_re(document.header, regexp, 0)
2485 # We need to use this regex since split() does not handle quote protection
2486 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2487 opts = romanopts[1].strip('"')
2489 i = find_token(document.header, "\\font_roman", 0)
2491 document.warning("Malformed LyX document: Missing \\font_roman.")
2494 # We need to use this regex since split() does not handle quote protection
2495 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2496 roman = romanfont[1].strip('"')
2497 if roman != "minionpro":
2499 romanfont[1] = '"default"'
2500 document.header[i] = " ".join(romanfont)
# Old-style figures are folded into the package options and the
# header flag is switched off.
2502 j = find_token(document.header, "\\font_osf true", 0)
2505 preamble = "\\usepackage["
2507 document.header[j] = "\\font_osf false"
2511 preamble += "]{MinionPro}"
2512 add_to_preamble(document, [preamble])
# Finally drop the now-converted opts line from the header.
2513 del document.header[x]
# Backward conversion: for each of the three font classes (roman, sans,
# typewriter) that carries a \font_*_opts header line, reset the font to
# "default" and emit the equivalent fontspec command to the preamble —
# \babelfont{rm|sf|tt} when babel is in use, \set{main,sans,mono}font
# otherwise. The three sections below follow an identical pattern.
2516 def revert_font_opts(document):
2517 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2519 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2520 Babel = (get_value(document.header, "\\language_package") == "babel")
# --- roman ---
2523 regexp = re.compile(r'(\\font_roman_opts)')
2524 i = find_re(document.header, regexp, 0)
2526 # We need to use this regex since split() does not handle quote protection
2527 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2528 opts = romanopts[1].strip('"')
2529 del document.header[i]
2531 regexp = re.compile(r'(\\font_roman)')
2532 i = find_re(document.header, regexp, 0)
2534 # We need to use this regex since split() does not handle quote protection
2535 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2536 font = romanfont[2].strip('"')
2537 romanfont[2] = '"default"'
2538 document.header[i] = " ".join(romanfont)
2539 if font != "default":
2541 preamble = "\\babelfont{rm}["
2543 preamble = "\\setmainfont["
2546 preamble += "Mapping=tex-text]{"
2549 add_to_preamble(document, [preamble])
# --- sans ---
2552 regexp = re.compile(r'(\\font_sans_opts)')
2553 i = find_re(document.header, regexp, 0)
2556 # We need to use this regex since split() does not handle quote protection
2557 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2558 opts = sfopts[1].strip('"')
2559 del document.header[i]
2561 regexp = re.compile(r'(\\font_sf_scale)')
2562 i = find_re(document.header, regexp, 0)
# Second field is the non-TeX-fonts scale percentage.
2564 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2565 regexp = re.compile(r'(\\font_sans)')
2566 i = find_re(document.header, regexp, 0)
2568 # We need to use this regex since split() does not handle quote protection
2569 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2570 font = sffont[2].strip('"')
2571 sffont[2] = '"default"'
2572 document.header[i] = " ".join(sffont)
2573 if font != "default":
2575 preamble = "\\babelfont{sf}["
2577 preamble = "\\setsansfont["
# Percentage -> fontspec Scale factor, e.g. "90" -> "Scale=0.90".
2581 preamble += "Scale=0."
2582 preamble += scaleval
2584 preamble += "Mapping=tex-text]{"
2587 add_to_preamble(document, [preamble])
# --- typewriter ---
2590 regexp = re.compile(r'(\\font_typewriter_opts)')
2591 i = find_re(document.header, regexp, 0)
2594 # We need to use this regex since split() does not handle quote protection
2595 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2596 opts = ttopts[1].strip('"')
2597 del document.header[i]
2599 regexp = re.compile(r'(\\font_tt_scale)')
2600 i = find_re(document.header, regexp, 0)
2602 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2603 regexp = re.compile(r'(\\font_typewriter)')
2604 i = find_re(document.header, regexp, 0)
2606 # We need to use this regex since split() does not handle quote protection
2607 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2608 font = ttfont[2].strip('"')
2609 ttfont[2] = '"default"'
2610 document.header[i] = " ".join(ttfont)
2611 if font != "default":
2613 preamble = "\\babelfont{tt}["
2615 preamble = "\\setmonofont["
2619 preamble += "Scale=0."
2620 preamble += scaleval
2622 preamble += "Mapping=tex-text]{"
2625 add_to_preamble(document, [preamble])
# Backward conversion: the "complete Noto" TeX-font combination
# (NotoSerif-TLF roman, with matching sans/typewriter) carrying extra
# options or old-style figures is reset to default and re-expressed as
# \usepackage[<opts>]{noto} in the preamble.
2628 def revert_plainNotoFonts_xopts(document):
2629 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2631 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2635 y = find_token(document.header, "\\font_osf true", 0)
# NOTE(review): osf is presumably derived from y above (old-style
# figures requested) — confirm against the unabridged file.
2639 regexp = re.compile(r'(\\font_roman_opts)')
2640 x = find_re(document.header, regexp, 0)
# Nothing to revert unless extra opts or osf are present.
2641 if x == -1 and not osf:
2646 # We need to use this regex since split() does not handle quote protection
2647 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2648 opts = romanopts[1].strip('"')
2654 i = find_token(document.header, "\\font_roman", 0)
2658 # We need to use this regex since split() does not handle quote protection
2659 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2660 roman = romanfont[1].strip('"')
# Bail out unless all three classes are the plain Noto family.
2661 if roman != "NotoSerif-TLF":
2664 j = find_token(document.header, "\\font_sans", 0)
2668 # We need to use this regex since split() does not handle quote protection
2669 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2670 sf = sffont[1].strip('"')
2674 j = find_token(document.header, "\\font_typewriter", 0)
2678 # We need to use this regex since split() does not handle quote protection
2679 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2680 tt = ttfont[1].strip('"')
2684 # So we have noto as "complete font"
2685 romanfont[1] = '"default"'
2686 document.header[i] = " ".join(romanfont)
2688 preamble = "\\usepackage["
2690 preamble += "]{noto}"
2691 add_to_preamble(document, [preamble])
# Reset the osf flag and drop the converted opts header line.
2693 document.header[y] = "\\font_osf false"
2695 del document.header[x]
# Backward conversion: delegate the extended Noto families (with extra
# options, hence the True flag) to the generic font-mapping machinery and
# emit the collected packages to the preamble.
2698 def revert_notoFonts_xopts(document):
2699 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2701 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2705 fm = createFontMapping(['Noto'])
2706 if revert_fonts(document, fm, fontmap, True):
2707 add_preamble_fonts(document, fontmap)
# Backward conversion: same pattern as revert_notoFonts_xopts, for the
# IBM Plex families with extra options.
2710 def revert_IBMFonts_xopts(document):
2711 " Revert native IBM font definition (with extra options) to LaTeX "
2713 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2717 fm = createFontMapping(['IBM'])
2719 if revert_fonts(document, fm, fontmap, True):
2720 add_preamble_fonts(document, fontmap)
# Backward conversion: same pattern as revert_notoFonts_xopts, for the
# Adobe Source families with extra options.
2723 def revert_AdobeFonts_xopts(document):
2724 " Revert native Adobe font definition (with extra options) to LaTeX "
2726 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2730 fm = createFontMapping(['Adobe'])
2732 if revert_fonts(document, fm, fontmap, True):
2733 add_preamble_fonts(document, fontmap)
# Forward conversion: split the single \font_osf header flag into the new
# per-class flags \font_roman_osf / \font_sans_osf / \font_typewriter_osf.
# For TeX fonts the sans/typewriter flags are derived from whether the
# selected font variant is itself an old-style-figure variant.
2736 def convert_osf(document):
2737 " Convert \\font_osf param to new format "
2739 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2741 i = find_token(document.header, '\\font_osf', 0)
2743 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font variants that imply osf for sans resp. typewriter.
2746 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2747 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
# The old flag becomes the roman flag in place.
2749 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2750 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# Non-TeX fonts: osf is a font feature, so the new flags start false.
2753 document.header.insert(i, "\\font_sans_osf false")
2754 document.header.insert(i + 1, "\\font_typewriter_osf false")
2758 x = find_token(document.header, "\\font_sans", 0)
2760 document.warning("Malformed LyX document: Missing \\font_sans.")
2762 # We need to use this regex since split() does not handle quote protection
2763 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2764 sf = sffont[1].strip('"')
2766 document.header.insert(i, "\\font_sans_osf true")
2768 document.header.insert(i, "\\font_sans_osf false")
2770 x = find_token(document.header, "\\font_typewriter", 0)
2772 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2774 # We need to use this regex since split() does not handle quote protection
2775 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2776 tt = ttfont[1].strip('"')
2778 document.header.insert(i + 1, "\\font_typewriter_osf true")
2780 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Fallback: insert both new flags as false.
2783 document.header.insert(i, "\\font_sans_osf false")
2784 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Backward conversion: collapse the per-class \font_*_osf flags back into
# the single old \font_osf flag. The roman flag is renamed in place; the
# sans/typewriter flags are OR-ed into osfval and deleted, and (with
# non-TeX fonts) force \font_osf true if any of them was set.
2787 def revert_osf(document):
2788 " Revert \\font_*_osf params "
2790 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2792 i = find_token(document.header, '\\font_roman_osf', 0)
2794 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2797 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2798 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2800 i = find_token(document.header, '\\font_sans_osf', 0)
2802 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2805 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2806 del document.header[i]
2808 i = find_token(document.header, '\\font_typewriter_osf', 0)
2810 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Accumulate: any class requesting osf keeps the old flag true.
2813 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2814 del document.header[i]
2817 i = find_token(document.header, '\\font_osf', 0)
2819 document.warning("Malformed LyX document: Missing \\font_osf.")
2821 document.header[i] = "\\font_osf true"
# Backward conversion: native TeX fonts that carry extra \font_*_opts are
# reset to "default" and re-expressed as \usepackage lines, folding in
# osf/sc/scale settings where the target package supports them. Handles
# sans (biolinum only) first, then the supported roman families.
2824 def revert_texfontopts(document):
2825 " Revert native TeX font definitions (with extra options) to LaTeX "
# Only TeX fonts are concerned here.
2827 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Roman families this function knows how to revert.
2830 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2832 # First the sf (biolinum only)
2833 regexp = re.compile(r'(\\font_sans_opts)')
2834 x = find_re(document.header, regexp, 0)
2836 # We need to use this regex since split() does not handle quote protection
2837 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2838 opts = sfopts[1].strip('"')
2839 i = find_token(document.header, "\\font_sans", 0)
2841 document.warning("Malformed LyX document: Missing \\font_sans.")
2843 # We need to use this regex since split() does not handle quote protection
2844 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2845 sans = sffont[1].strip('"')
2846 if sans == "biolinum":
2848 sffont[1] = '"default"'
2849 document.header[i] = " ".join(sffont)
2851 j = find_token(document.header, "\\font_sans_osf true", 0)
2854 k = find_token(document.header, "\\font_sf_scale", 0)
2856 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2858 sfscale = document.header[k].split()
2861 document.header[k] = " ".join(sfscale)
2864 sf_scale = float(val)
2866 document.warning("Invalid font_sf_scale value: " + val)
# Build \usepackage[osf?,scaled=?,opts]{biolinum} and clear the
# header flags that have been folded into it.
2867 preamble = "\\usepackage["
2869 document.header[j] = "\\font_sans_osf false"
2871 if sf_scale != 100.0:
2872 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2874 preamble += "]{biolinum}"
2875 add_to_preamble(document, [preamble])
2876 del document.header[x]
# Now the roman families.
2878 regexp = re.compile(r'(\\font_roman_opts)')
2879 x = find_re(document.header, regexp, 0)
2883 # We need to use this regex since split() does not handle quote protection
2884 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2885 opts = romanopts[1].strip('"')
2887 i = find_token(document.header, "\\font_roman", 0)
2889 document.warning("Malformed LyX document: Missing \\font_roman.")
2892 # We need to use this regex since split() does not handle quote protection
2893 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2894 roman = romanfont[1].strip('"')
2895 if not roman in rmfonts:
2897 romanfont[1] = '"default"'
2898 document.header[i] = " ".join(romanfont)
# Some fonts live in a differently named LaTeX package.
2900 if roman == "utopia":
2902 elif roman == "palatino":
2903 package = "mathpazo"
2904 elif roman == "times":
2905 package = "mathptmx"
2906 elif roman == "xcharter":
2907 package = "XCharter"
# Per-family package option spelling for old-style figures.
2909 j = find_token(document.header, "\\font_roman_osf true", 0)
2911 if roman == "cochineal":
2912 osf = "proportional,osf,"
2913 elif roman == "utopia":
2915 elif roman == "garamondx":
2917 elif roman == "libertine":
2919 elif roman == "palatino":
2921 elif roman == "xcharter":
2923 document.header[j] = "\\font_roman_osf false"
# Small-caps flag: only relevant for utopia and plain palatino.
2924 k = find_token(document.header, "\\font_sc true", 0)
2926 if roman == "utopia":
2928 if roman == "palatino" and osf == "":
2930 document.header[k] = "\\font_sc false"
2931 preamble = "\\usepackage["
2934 preamble += "]{" + package + "}"
2935 add_to_preamble(document, [preamble])
2936 del document.header[x]
# Forward conversion: map Cantarell TeX-font settings to the new native
# representation via the generic font-mapping machinery ("oldstyle" names
# the osf option used by the package).
2939 def convert_CantarellFont(document):
2940 " Handle Cantarell font definition to LaTeX "
2942 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2943 fm = createFontMapping(['Cantarell'])
2944 convert_fonts(document, fm, "oldstyle")
# Backward conversion: counterpart of convert_CantarellFont; collected
# packages/options are emitted to the user preamble.
2946 def revert_CantarellFont(document):
2947 " Revert native Cantarell font definition to LaTeX "
2949 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2951 fm = createFontMapping(['Cantarell'])
2952 if revert_fonts(document, fm, fontmap, False, True):
2953 add_preamble_fonts(document, fontmap)
# Forward conversion: same pattern as convert_CantarellFont, for Chivo.
2955 def convert_ChivoFont(document):
2956 " Handle Chivo font definition to LaTeX "
2958 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2959 fm = createFontMapping(['Chivo'])
2960 convert_fonts(document, fm, "oldstyle")
# Backward conversion: counterpart of convert_ChivoFont.
2962 def revert_ChivoFont(document):
2963 " Revert native Chivo font definition to LaTeX "
2965 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2967 fm = createFontMapping(['Chivo'])
2968 if revert_fonts(document, fm, fontmap, False, True):
2969 add_preamble_fonts(document, fontmap)
# Forward conversion: same pattern as convert_CantarellFont, for Fira
# ("lf" names the lining-figures option used by the package).
2972 def convert_FiraFont(document):
2973 " Handle Fira font definition to LaTeX "
2975 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2976 fm = createFontMapping(['Fira'])
2977 convert_fonts(document, fm, "lf")
# Backward conversion: counterpart of convert_FiraFont.
2979 def revert_FiraFont(document):
2980 " Revert native Fira font definition to LaTeX "
2982 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
# fontmap collects package -> options while reverting.
2984 fm = createFontMapping(['Fira'])
2985 if revert_fonts(document, fm, fontmap, False, True):
2986 add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """Move IBM Plex semibold font choices to the font options.

    Fixes: the typewriter stanza built its options line from ``sfopts``
    (the sans options) instead of its own ``ttopts`` — a copy-paste bug.
    The three near-identical stanzas are now table-driven so they cannot
    drift apart again.
    """
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    # (family token, semibold name, replacement, opts token, scale token
    #  used as a sensible insertion anchor for a new opts line)
    targets = (
        ("\\font_roman", "IBMPlexSerifSemibold", '"IBMPlexSerif"',
         "\\font_roman_opts", "\\font_sf_scale"),
        ("\\font_sans", "IBMPlexSansSemibold", '"IBMPlexSans"',
         "\\font_sans_opts", "\\font_sf_scale"),
        ("\\font_typewriter", "IBMPlexMonoSemibold", '"IBMPlexMono"',
         "\\font_typewriter_opts", "\\font_tt_scale"),
    )
    for ftoken, sbname, plainname, optstoken, anchor in targets:
        i = find_token(document.header, ftoken, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % ftoken)
            continue
        # We need to use this regex since split() does not handle quote protection
        font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if font[1].strip('"') != sbname:
            continue
        font[1] = plainname
        document.header[i] = " ".join(font)

        if NonTeXFonts:
            # with system fonts there is no opts line to maintain
            continue
        x = find_re(document.header, re.compile(r'(%s)' % re.escape(optstoken)), 0)
        if x == -1:
            # Sensible place to insert tag
            fo = find_token(document.header, anchor)
            if fo == -1:
                document.warning("Malformed LyX document! Missing %s" % anchor)
            else:
                document.header.insert(fo, optstoken + " \"semibold\"")
        else:
            # Prepend "semibold" to the existing (quote-protected) options
            opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            document.header[x] = optstoken + " \"semibold, " + opts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    """Merge diverse Noto regular fonts.

    Fixes: restores the ``i == -1`` guards (lost lines) and the
    "reagular" docstring typo; the three stanzas are table-driven.
    """
    replacements = (
        ("\\font_roman", "NotoSerif-TLF", '"NotoSerifRegular"'),
        ("\\font_sans", "NotoSans-TLF", '"NotoSansRegular"'),
        ("\\font_typewriter", "NotoMono-TLF", '"NotoMonoRegular"'),
    )
    for ftoken, oldname, newname in replacements:
        i = find_token(document.header, ftoken, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % ftoken)
            continue
        # We need to use this regex since split() does not handle quote protection
        font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if font[1].strip('"') == oldname:
            font[1] = newname
            document.header[i] = " ".join(font)
def convert_CrimsonProFont(document):
    """Handle CrimsonPro font definition to LaTeX."""
    # Only TeX fonts are renamed; system fonts are untouched.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['CrimsonPro']), "lf")
def revert_CrimsonProFont(document):
    """Revert native CrimsonPro font definition to LaTeX.

    Fix: initialize the fontmap dict consumed by revert_fonts /
    add_preamble_fonts (initialization line was missing).
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()  # filled by revert_fonts: package -> options
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options.

    Fix: the class check used ``textclass[:2] != "scr"`` which can never
    equal "scr" (two chars vs three), so the function never ran for
    KOMA classes; convert_pagesizes correctly uses ``[:3]``.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # with geometry, the page size is handled there
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # move the size to the class options
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA to options.

    Fix: the geometry toggle wrote to ``document.header[1]`` (a fixed
    index) instead of ``document.header[i]``, clobbering an unrelated
    header line.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # with geometry, the page size is handled there already
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA to class options.

    Restores the ``-1`` guards and early returns that were lost.
    """
    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
def revert_dupqualicites(document):
    """Revert qualified citation list commands with duplicate keys to ERT.

    LyX 2.3 only supports qualified citation lists with unique keys, so
    citations that use the same key more than once are turned into raw
    biblatex multicite commands.
    """
    # Get the cite engine; only biblatex engines support qualified lists.
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite": "cites",
        "Cite": "Cites",
        "citet": "textcites",
        "Citet": "Textcites",
        "citep": "parencites",
        "Citep": "Parencites",
        "Footcite": "Smartcites",
        "footcite": "smartcites",
        "Autocite": "Autocites",
        "autocite": "autocites",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" % (i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # not a qualified list: nothing to do
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" % (i))
            key = "???"

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # all keys unique: LyX 2.3 can handle this inset natively
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # Map each key to its (tab-separated) queue of pre/post notes,
        # so repeated keys consume their notes in order.
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # a lone (pre) would be read as (post); add empty (post)
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # pairing bracket: prenote without postnote needs []
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
        i += 1
def convert_pagesizenames(document):
    """Convert LyX page size names (drop the "paper" suffix)."""
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    """Revert LyX page size names (restore the "paper" suffix)."""
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    """Revert native support of \\theendnotes to TeX code."""
    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = -1
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    """Revert native support of the enotez package to TeX code."""
    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Load the package only if endnote machinery is actually used.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = -1
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    """Revert native support of memoir endnotes to TeX code."""
    if document.textclass != "memoir":
        return

    # If an endnote module is also active, its \endnote command wins.
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = -1
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
    add_to_preamble(document, ["\\makepagenote"])
def revert_totalheight(document):
    """Revert graphics height parameter from totalheight to height.

    A ``height=`` entry inside the old ``special`` string becomes the
    regular ``height`` parameter, and the regular height becomes
    ``totalheight=`` in ``special``.
    """
    relative_heights = {
        "\\textwidth": "text%",
        "\\columnwidth": "col%",
        "\\paperwidth": "page%",
        "\\linewidth": "line%",
        "\\textheight": "theight%",
        "\\paperheight": "pheight%",
        "\\baselineskip ": "baselineskip%",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue

        rx = re.compile(r'\s*special\s*(\S+)$')
        rxx = re.compile(r'(\d*\.*\d+)(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc.startswith("height="):
                    oldheight = spc.split('=')[1]
                    ms = rxx.search(oldheight)
                    if ms:
                        # translate LaTeX lengths to LyX relative units
                        oldunit = ms.group(2)
                        if oldunit in list(relative_heights.keys()):
                            oldval = str(float(ms.group(1)) * 100)
                            oldunit = relative_heights[oldunit]
                            oldheight = oldval + oldunit
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    document.body.insert(kk, "\tspecial totalheight=" + val)
                    kk += 1  # the height line moved down by the insert
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1
def convert_totalheight(document):
    """Convert graphics height parameter from totalheight to height.

    The regular ``height`` parameter becomes ``height=`` inside the
    ``special`` string, and an existing ``totalheight=`` entry becomes
    the regular height parameter.
    """
    relative_heights = {
        "text%": "\\textwidth",
        "col%": "\\columnwidth",
        "page%": "\\paperwidth",
        "line%": "\\linewidth",
        "theight%": "\\textheight",
        "pheight%": "\\paperheight",
        "baselineskip%": "\\baselineskip",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue

        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\d+)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                unit = m.group(3)
                # translate LyX relative units to LaTeX lengths
                if unit in list(relative_heights.keys()):
                    val = str(float(val) / 100)
                    unit = relative_heights[unit]
                if k != -1:
                    if special != "":
                        val = val + unit + "," + special
                        document.body[k] = "\tspecial " + "height=" + val
                    else:
                        document.body[k] = "\tspecial " + "height=" + val + unit
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val + unit)
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            document.body.insert(k, "\theight " + newheight)
        i = j + 1
def convert_changebars(document):
    """Convert the changebars module to the native \\change_bars param."""
    if not "changebars" in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    """Convert the native \\change_bars param back to the module."""
    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)
    if val == "true":
        document.add_module("changebars")

    del document.header[i]
def convert_postpone_fragile(document):
    """Add a false \\postpone_fragile_content buffer param."""
    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    """Remove the \\postpone_fragile_content buffer param."""
    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    """Remove change tags from tabular column/row definitions."""
    i = -1
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            # strip change="..." from <column ...> and <row ...> tags
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
def convert_counter_maintenance(document):
    """Convert \\maintain_unincluded_children buffer param from boolean value to tristate."""
    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)
    if val == "true":
        document.header[i] = "\\maintain_unincluded_children strict"
    else:
        document.header[i] = "\\maintain_unincluded_children no"
def revert_counter_maintenance(document):
    """Revert \\maintain_unincluded_children buffer param to previous boolean value."""
    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)
    if val == "no":
        document.header[i] = "\\maintain_unincluded_children false"
    else:
        # both "strict" and "mostly" map back to true
        document.header[i] = "\\maintain_unincluded_children true"
def revert_counter_inset(document):
    """Revert counter insets to ERT, where possible.

    Fixes: removed a stray debug ``document.warning(cmd)`` that spammed
    the log for every inset, and the "Unknown counter command" warning
    now reports the command (``cmd``) instead of the counter name.
    """
    i = 0
    needed_counters = {}
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        cmd = get_quoted_value(document.body, "LatexCommand", i, j)

        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
                i = j + 1
                continue
            ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
                i = j + 1
                continue
            ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
        else:
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))
            i = j + 1
            continue

        document.body[i : j + 1] = ert
        i += 1

    # Declare the save counters used above
    pretext = []
    for cnt in needed_counters:
        pretext.append("\\newcounter{LyXSave%s}" % (cnt))
    if pretext:
        add_to_preamble(document, pretext)
def revert_ams_spaces(document):
    """Revert InsetSpace medspace and thickspace into their TeX-code counterparts."""
    Found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(inset)
            document.body[i : end + 1] = subst
            Found = True

    if Found:
        # load amsmath in the preamble if not already loaded
        i = find_token(document.header, "\\use_package amsmath 2", 0)
        if i == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
def convert_parskip(document):
    """Move old parskip settings to the preamble."""
    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    skipval = "\\medskipamount"
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "\\" + val + "amount"
    else:
        # custom length
        skipval = val

    add_to_preamble(document, ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_parskip(document):
    """Revert new parskip settings to a preamble package load."""
    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    # "halfline" is the parskip package default: no option needed
    skipval = ""
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val != "halfline":
        skipval = "[skip={" + val + "}]"

    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_line_vspaces(document):
    """Revert fullline and halfline vspaces to TeX code."""
    insets = {
        "fullline*": "\\vspace*{\\baselineskip}",
        "fullline": "\\vspace{\\baselineskip}",
        "halfline*": "\\vspace*{0.5\\baselineskip}",
        "halfline": "\\vspace{0.5\\baselineskip}",
    }
    for inset in insets.keys():
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(insets[inset])
            document.body[i : end + 1] = subst
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX."""
    # Only TeX fonts are converted; system fonts keep their name.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Libertinus']))
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX.

    Fix: initialize the fontmap dict consumed by revert_fonts /
    add_preamble_fonts (initialization line was missing).
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()  # filled by revert_fonts: package -> options
        # NOTE(review): lowercase 'libertinus' differs from the convert
        # counterpart's 'Libertinus' — verify against createFontMapping keys.
        fm = createFontMapping(['libertinus'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def revert_libertinus_sftt_fonts(document):
    """Revert Libertinus sans and tt font definitions to LaTeX.

    Fix: the scale checks compared a float to the string "100.0"
    (always unequal), so the scale renewcommand was emitted even for
    the default 100% scale; compare against the float 100.0 instead.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # first the sans font
        i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
        if i != -1:
            j = find_token(document.header, "\\font_sans_osf true", 0)
            if j != -1:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
                document.header[j] = "\\font_sans_osf false"
            else:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
            document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
            sf_scale = 100.0
            sfval = find_token(document.header, "\\font_sf_scale", 0)
            if sfval == -1:
                document.warning("Malformed LyX document: Missing \\font_sf_scale.")
            else:
                sfscale = document.header[sfval].split()
                val = sfscale[1]
                sfscale[1] = "100"
                document.header[sfval] = " ".join(sfscale)
                try:
                    # the value might be e.g. "documentwide"
                    sf_scale = float(val)
                except:
                    document.warning("Invalid font_sf_scale value: " + val)
                if sf_scale != 100.0:
                    add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])
        # now the typewriter font
        i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
            document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
            tt_scale = 100.0
            ttval = find_token(document.header, "\\font_tt_scale", 0)
            if ttval == -1:
                document.warning("Malformed LyX document: Missing \\font_tt_scale.")
            else:
                ttscale = document.header[ttval].split()
                val = ttscale[1]
                ttscale[1] = "100"
                document.header[ttval] = " ".join(ttscale)
                try:
                    tt_scale = float(val)
                except:
                    document.warning("Invalid font_tt_scale value: " + val)
                if tt_scale != 100.0:
                    add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
def revert_docbook_table_output(document):
    """Remove the \\docbook_table_output buffer param."""
    i = find_token(document.header, '\\docbook_table_output')
    if i != -1:
        del document.header[i]
def revert_nopagebreak(document):
    """Revert Newpage nopagebreak insets to TeX code."""
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            # bail out: the same inset would be found again forever
            return
        subst = put_cmd_in_ert("\\nopagebreak{}")
        document.body[i : end + 1] = subst
def revert_hrquotes(document):
    """Revert Hungarian quotation marks."""
    i = find_token(document.header, "\\quotes_style hungarian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style polish"

    while True:
        # Each replacement removes the "h" prefix, so the search converges.
        i = find_token(document.body, "\\begin_inset Quotes h")
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes hld":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes hrd":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes hls":
            document.body[i] = "\\begin_inset Quotes ald"
        elif document.body[i] == "\\begin_inset Quotes hrs":
            document.body[i] = "\\begin_inset Quotes ard"
        else:
            # unknown h-quote variant: stop to avoid an endless loop
            return
def convert_math_refs(document):
    """Convert \\prettyref to \\formatted inside math insets."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
            i += 1
def revert_math_refs(document):
    """Revert \\formatted to \\prettyref and strip \\labelonly in math insets."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in document.body[i]:
                # keep only the label text itself
                document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
            i += 1
def convert_branch_colors(document):
    """Convert branch colors to semantic values."""
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        k = find_token(document.header, "\\color #faf0e6", i, j)
        if k != -1:
            document.header[k] = "\\color background"
        i += 1
def revert_branch_colors(document):
    """Revert semantic branch colors.

    Fix: the hex-color test indexed character 1 (``bcolor[1] != "#"``),
    which misclassifies hex values like ``#faf0e6`` as semantic names
    (and crashes on empty values); test the prefix instead.
    """
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            if not bcolor.startswith("#"):
                # this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1
def revert_darkmode_graphics(document):
    """Revert the darkModeSensitive InsetGraphics param."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "\tdarkModeSensitive", i, j)
        if k != -1:
            del document.body[k]
        i += 1
def revert_branch_darkcols(document):
    """Revert dark branch colors (keep only the light-mode color)."""
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            # "\color <light> <dark>": drop the second (dark) color
            m = re.search('\\\\color (\\S+) (\\S+)', document.header[k])
            if m:
                document.header[k] = "\\color " + m.group(1)
        i += 1
4109 def revert_vcolumns2(document):
4110 """Revert varwidth columns with line breaks etc."""
4112 needvarwidth = False
4114 needcellvarwidth = False
4117 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4120 j = find_end_of_inset(document.body, i)
4122 document.warning("Malformed LyX document: Could not find end of tabular.")
4125 # Collect necessary column information
4127 nrows = int(document.body[i+1].split('"')[3])
4128 ncols = int(document.body[i+1].split('"')[5])
4130 for k in range(ncols):
4131 m = find_token(document.body, "<column", m)
4132 width = get_option_value(document.body[m], 'width')
4133 varwidth = get_option_value(document.body[m], 'varwidth')
4134 alignment = get_option_value(document.body[m], 'alignment')
4135 valignment = get_option_value(document.body[m], 'valignment')
4136 special = get_option_value(document.body[m], 'special')
4137 col_info.append([width, varwidth, alignment, valignment, special, m])
4143 for row in range(nrows):
4144 for col in range(ncols):
4145 m = find_token(document.body, "<cell", m)
4146 multicolumn = get_option_value(document.body[m], 'multicolumn') != ""
4147 multirow = get_option_value(document.body[m], 'multirow') != ""
4148 fixedwidth = get_option_value(document.body[m], 'width') != ""
4149 rotate = get_option_value(document.body[m], 'rotate')
4150 cellalign = get_option_value(document.body[m], 'alignment')
4151 cellvalign = get_option_value(document.body[m], 'valignment')
4152 # Check for: linebreaks, multipars, non-standard environments
4154 endcell = find_token(document.body, "</cell>", begcell)
4156 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
4157 vcand = not fixedwidth
4158 elif count_pars_in_inset(document.body, begcell + 2) > 1:
4159 vcand = not fixedwidth
4160 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
4161 vcand = not fixedwidth
4162 colalignment = col_info[col][2]
4163 colvalignment = col_info[col][3]
4165 if rotate == "" and ((colalignment == "left" and colvalignment == "top") or (multicolumn == True and cellalign == "left" and cellvalign == "top")):
4166 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][4] == "":
4168 col_line = col_info[col][5]
4170 vval = "V{\\linewidth}"
4172 document.body[m] = document.body[m][:-1] + " special=\"" + vval + "\">"
4174 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
4177 if multicolumn or multirow:
4178 if cellvalign == "middle":
4180 elif cellvalign == "bottom":
4183 if colvalignment == "middle":
4185 elif colvalignment == "bottom":
4187 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4188 elt = find_token_backwards(document.body, "\\end_layout", endcell)
4189 if flt != -1 and elt != -1:
4191 # we need to reset character layouts if necessary
4192 el = find_token(document.body, '\\emph on', flt, elt)
4194 extralines.append("\\emph default")
4195 el = find_token(document.body, '\\noun on', flt, elt)
4197 extralines.append("\\noun default")
4198 el = find_token(document.body, '\\series', flt, elt)
4200 extralines.append("\\series default")
4201 el = find_token(document.body, '\\family', flt, elt)
4203 extralines.append("\\family default")
4204 el = find_token(document.body, '\\shape', flt, elt)
4206 extralines.append("\\shape default")
4207 el = find_token(document.body, '\\color', flt, elt)
4209 extralines.append("\\color inherit")
4210 el = find_token(document.body, '\\size', flt, elt)
4212 extralines.append("\\size default")
4213 el = find_token(document.body, '\\bar under', flt, elt)
4215 extralines.append("\\bar default")
4216 el = find_token(document.body, '\\uuline on', flt, elt)
4218 extralines.append("\\uuline default")
4219 el = find_token(document.body, '\\uwave on', flt, elt)
4221 extralines.append("\\uwave default")
4222 el = find_token(document.body, '\\strikeout on', flt, elt)
4224 extralines.append("\\strikeout default")
4225 document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + [r"\end_layout"]
4227 for q in range(flt, elt):
4228 if document.body[q] != "" and document.body[q][0] != "\\":
4230 if document.body[q][:5] == "\\lang":
4234 document.body[parlang+1:parlang+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4236 document.body[flt+1:flt+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4237 needcellvarwidth = True
4239 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
4240 # with newlines, and we do not want that)
4242 endcell = find_token(document.body, "</cell>", begcell)
4244 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
4246 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
4250 nle = find_end_of_inset(document.body, nl)
4251 del(document.body[nle:nle+1])
4253 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
4255 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
4256 # Replace parbreaks in multirow with \\endgraf
4257 if multirow == True:
4258 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4261 elt = find_end_of_layout(document.body, flt)
4263 document.warning("Malformed LyX document! Missing layout end.")
4265 endcell = find_token(document.body, "</cell>", begcell)
4266 flt = find_token(document.body, "\\begin_layout", elt, endcell)
4269 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
4275 if needarray == True:
4276 add_to_preamble(document, ["\\usepackage{array}"])
4277 if needcellvarwidth == True:
4278 add_to_preamble(document, ["%% Variable width box for table cells",
4279 "\\newenvironment{cellvarwidth}[1][t]",
4280 " {\\begin{varwidth}[#1]{\\linewidth}}",
4281 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}"])
4282 if needvarwidth == True:
4283 add_to_preamble(document, ["\\usepackage{varwidth}"])
4286 def convert_vcolumns2(document):
# Reverse of revert_vcolumns2: walk every Tabular inset and turn the ERT
# scaffolding produced by the revert pass (cellvarwidth environment,
# ERT "\\", \linebreak and \endgraf markers) back into native LyX markup.
# NOTE(review): this extraction is missing intermediate source lines
# (loop headers, guards); comments below describe only the visible code.
4287 """Convert varwidth ERT to native"""
# Find the next Tabular inset; `i` advances past each processed table.
4291 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4294 j = find_end_of_inset(document.body, i)
4296 document.warning("Malformed LyX document: Could not find end of tabular.")
# Row/column counts are the 2nd and 3rd quoted attribute values of the
# <lyxtabular ...> tag on the line after the inset opener.
4300 nrows = int(document.body[i+1].split('"')[3])
4301 ncols = int(document.body[i+1].split('"')[5])
# Visit each cell of the table in row-major order.
4304 for row in range(nrows):
4305 for col in range(ncols):
4306 m = find_token(document.body, "<cell", m)
4307 multirow = get_option_value(document.body[m], 'multirow') != ""
4309 endcell = find_token(document.body, "</cell>", begcell)
# A cell is a varwidth candidate if it contains an ERT inset holding
# "\backslash begin{cellvarwidth}".
4311 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4313 vcand = document.body[cvw - 1] == "\\backslash" and get_containing_inset(document.body, cvw)[0] == "ERT"
4315 # Remove ERTs with cellvarwidth env
4316 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
4318 if document.body[ecvw - 1] == "\\backslash":
4319 eertins = get_containing_inset(document.body, ecvw)
4320 if eertins and eertins[0] == "ERT":
# Delete the whole ERT inset that held \end{cellvarwidth} ...
4321 del document.body[eertins[1] : eertins[2] + 1]
# ... then re-locate and delete the one holding \begin{cellvarwidth}.
4323 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4324 ertins = get_containing_inset(document.body, cvw)
4325 if ertins and ertins[0] == "ERT":
4326 del(document.body[ertins[1] : ertins[2] + 1])
4328 # Convert ERT newlines (as cellvarwidth detection relies on that)
4330 endcell = find_token(document.body, "</cell>", begcell)
# An ERT "\\" (two consecutive \backslash lines, checked via nl+2)
# is replaced by a native Newline inset.
4331 nl = find_token(document.body, "\\backslash", begcell, endcell)
4332 if nl == -1 or document.body[nl + 2] != "\\backslash":
4334 ertins = get_containing_inset(document.body, nl)
4335 if ertins and ertins[0] == "ERT":
4336 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline newline", "", "\\end_inset"]
4338 # Same for linebreaks
4340 endcell = find_token(document.body, "</cell>", begcell)
4341 nl = find_token(document.body, "linebreak", begcell, endcell)
4342 if nl == -1 or document.body[nl - 1] != "\\backslash":
4344 ertins = get_containing_inset(document.body, nl)
4345 if ertins and ertins[0] == "ERT":
4346 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline linebreak", "", "\\end_inset"]
# In multirow cells the revert pass used ERT "\endgraf{}" for paragraph
# breaks; restore a real paragraph boundary in their place.
4349 if multirow == True:
4350 endcell = find_token(document.body, "</cell>", begcell)
4351 nl = find_token(document.body, "endgraf{}", begcell, endcell)
4352 if nl == -1 or document.body[nl - 1] != "\\backslash":
4354 ertins = get_containing_inset(document.body, nl)
4355 if ertins and ertins[0] == "ERT":
4356 document.body[ertins[1] : ertins[2] + 1] = ["\\end_layout", "", "\\begin_layout Plain Layout"]
# Finally drop the preamble code the revert pass had added: the
# cellvarwidth environment definition and the varwidth package load.
4362 del_complete_lines(document.preamble,
4363 ['% Added by lyx2lyx',
4364 '%% Variable width box for table cells',
4365 r'\newenvironment{cellvarwidth}[1][t]',
4366 r' {\begin{varwidth}[#1]{\linewidth}}',
4367 r' {\@finalstrut\@arstrutbox\end{varwidth}}'])
4368 del_complete_lines(document.preamble,
4369 ['% Added by lyx2lyx',
4370 r'\usepackage{varwidth}'])
# Local layout snippet for the KOMA "Frontispiece" style, inserted into or
# removed from documents by the convert/revert functions below.
# NOTE(review): the closing lines of this list (layout "End" line and the
# closing bracket) are missing from this extraction — confirm upstream.
4373 frontispiece_def = [
4374 r'### Inserted by lyx2lyx (frontispiece layout) ###',
4375 r'Style Frontispiece',
4376 r' CopyStyle Titlehead',
4377 r' LatexName frontispiece',
4382 def convert_koma_frontispiece(document):
# Forward conversion: the Frontispiece style is native in LyX 2.4's
# KOMA-Script ("scr*") classes, so the locally inserted layout copy
# can be removed from the document.
4383 """Remove local KOMA frontispiece definition"""
# Only KOMA-Script document classes (textclass starting with "scr") apply.
4384 if document.textclass[:3] != "scr":
# NOTE(review): adding the "ruby" module after deleting the frontispiece
# layout looks like a copy/paste from convert_ruby_module — confirm
# against the upstream lyx_2_4.py before relying on this behavior.
4387 if document.del_local_layout(frontispiece_def):
4388 document.add_module("ruby")
4391 def revert_koma_frontispiece(document):
# Backward conversion: older formats do not know the native Frontispiece
# style, so re-insert the local layout definition when the style is used.
4392 """Add local KOMA frontispiece definition"""
# Only KOMA-Script document classes (textclass starting with "scr") apply.
4393 if document.textclass[:3] != "scr":
# Add the layout only if the document actually uses the style.
4396 if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
4397 document.append_local_layout(frontispiece_def)
4400 def revert_spellchecker_ignore(document):
# Backward conversion: strip all "\spellchecker_ignore" header lines,
# which older formats do not understand.
4401 """Revert document spellchecker dictionary"""
# NOTE(review): the enclosing loop and the `i == -1` guard appear to be
# missing from this extraction; visible code finds one token and deletes it.
4403 i = find_token(document.header, "\\spellchecker_ignore")
4406 del document.header[i]
4409 def revert_docbook_mathml_prefix(document):
# Backward conversion: remove the "\docbook_mathml_prefix" header setting
# introduced for DocBook output, unknown to older formats.
4410 """Revert the DocBook parameter to choose the prefix for the MathML name space"""
# NOTE(review): the enclosing loop and the `i == -1` guard appear to be
# missing from this extraction; visible code finds one token and deletes it.
4412 i = find_token(document.header, "\\docbook_mathml_prefix")
4415 del document.header[i]
4417 def revert_document_metadata(document):
# Backward conversion: delete every \begin_metadata ... \end_metadata
# block from the document header, since older formats have no metadata.
4418 """Revert document metadata"""
# NOTE(review): loop header and guards are missing from this extraction;
# visible code locates one block and removes it wholesale.
4421 i = find_token(document.header, "\\begin_metadata", i)
4424 j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
4426 # this should not happen
# Remove the whole block including both delimiter lines.
4428 document.header[i : j + 1] = []
# Conversion hub: these tables map LyX file-format numbers to the
# functions that convert forward to (convert) and backward from (revert)
# each format step. The lyx2lyx driver walks them in order.
4434 supported_versions = ["2.4.0", "2.4"]
# Forward conversion chain, oldest format first.
# NOTE(review): the opening `convert = [` line is missing from this
# extraction, as are several intermediate format entries.
4436 [545, [convert_lst_literalparam]],
4441 [550, [convert_fontenc]],
4448 [557, [convert_vcsinfo]],
4449 [558, [removeFrontMatterStyles]],
4452 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
4456 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
4457 [566, [convert_hebrew_parentheses]],
4463 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
4464 [573, [convert_inputencoding_namechange]],
4465 [574, [convert_ruby_module, convert_utf8_japanese]],
4466 [575, [convert_lineno, convert_aaencoding]],
4468 [577, [convert_linggloss]],
4472 [581, [convert_osf]],
4473 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
4474 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
4476 [585, [convert_pagesizes]],
4478 [587, [convert_pagesizenames]],
4480 [589, [convert_totalheight]],
4481 [590, [convert_changebars]],
4482 [591, [convert_postpone_fragile]],
4484 [593, [convert_counter_maintenance]],
4487 [596, [convert_parskip]],
4488 [597, [convert_libertinus_rm_fonts]],
4492 [601, [convert_math_refs]],
4493 [602, [convert_branch_colors]],
4496 [605, [convert_vcolumns2]],
4497 [606, [convert_koma_frontispiece]],
# Backward (revert) chain, newest format first; each entry undoes the
# corresponding forward step above.
4503 revert = [[608, [revert_document_metadata]],
4504 [607, [revert_docbook_mathml_prefix]],
4505 [606, [revert_spellchecker_ignore]],
4506 [605, [revert_koma_frontispiece]],
4507 [604, [revert_vcolumns2]],
4508 [603, [revert_branch_darkcols]],
4509 [602, [revert_darkmode_graphics]],
4510 [601, [revert_branch_colors]],
4512 [599, [revert_math_refs]],
4513 [598, [revert_hrquotes]],
4514 [598, [revert_nopagebreak]],
4515 [597, [revert_docbook_table_output]],
4516 [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
4517 [595, [revert_parskip,revert_line_vspaces]],
4518 [594, [revert_ams_spaces]],
4519 [593, [revert_counter_inset]],
4520 [592, [revert_counter_maintenance]],
4521 [591, [revert_colrow_tracking]],
4522 [590, [revert_postpone_fragile]],
4523 [589, [revert_changebars]],
4524 [588, [revert_totalheight]],
4525 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
4526 [586, [revert_pagesizenames]],
4527 [585, [revert_dupqualicites]],
4528 [584, [revert_pagesizes,revert_komafontsizes]],
4529 [583, [revert_vcsinfo_rev_abbrev]],
4530 [582, [revert_ChivoFont,revert_CrimsonProFont]],
4531 [581, [revert_CantarellFont,revert_FiraFont]],
4532 [580, [revert_texfontopts,revert_osf]],
4533 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
4534 [578, [revert_babelfont]],
4535 [577, [revert_drs]],
4536 [576, [revert_linggloss, revert_subexarg]],
4537 [575, [revert_new_languages]],
4538 [574, [revert_lineno, revert_aaencoding]],
4539 [573, [revert_ruby_module, revert_utf8_japanese]],
4540 [572, [revert_inputencoding_namechange]],
4541 [571, [revert_notoFonts]],
4542 [570, [revert_cmidruletrimming]],
4543 [569, [revert_bibfileencodings]],
4544 [568, [revert_tablestyle]],
4545 [567, [revert_soul]],
4546 [566, [revert_malayalam]],
4547 [565, [revert_hebrew_parentheses]],
4548 [564, [revert_AdobeFonts]],
4549 [563, [revert_lformatinfo]],
4550 [562, [revert_listpargs]],
4551 [561, [revert_l7ninfo]],
4552 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
4553 [559, [revert_timeinfo, revert_namenoextinfo]],
4554 [558, [revert_dateinfo]],
4555 [557, [addFrontMatterStyles]],
4556 [556, [revert_vcsinfo]],
4557 [555, [revert_bibencoding]],
4558 [554, [revert_vcolumns]],
4559 [553, [revert_stretchcolumn]],
4560 [552, [revert_tuftecite]],
4561 [551, [revert_floatpclass, revert_floatalignment]],
4562 [550, [revert_nospellcheck]],
4563 [549, [revert_fontenc]],
4564 [548, []], # dummy format change
4565 [547, [revert_lscape]],
4566 [546, [revert_xcharter]],
4567 [545, [revert_paratype]],
4568 [544, [revert_lst_literalparam]]
4572 if __name__ == "__main__":