1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
30 find_end_of, find_end_of_inset, find_end_of_layout, find_token,
31 find_token_backwards, find_token_exact, find_re, get_bool_value,
32 get_containing_inset, get_containing_layout, get_option_value, get_value,
35 # find_complete_lines,
36 # find_re, find_substring,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font packages, with their options, to the user preamble.

    fontmap: dict mapping a LaTeX package name to a list of option strings.
    An empty option list produces a plain \\usepackage{pkg} line.
    """
    # NOTE(review): the extracted chunk had lost the loop header and the
    # empty-options branch (original numbering jumped 50->53 and 54->57);
    # restored here so `pkg` and `xoption` are always bound.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the lookup key "<pkg>:<opt1>-<opt2>-..." used by the font maps."""
    joined_opts = "-".join(options)
    return "%s:%s" % (pkg, joined_opts)
# NOTE(review): extraction artifact -- the enclosing `class fontinfo:` header,
# its `__init__(self)` line, the fields at original lines 71-73 (presumably
# self.package / self.options -- TODO confirm against upstream), and the
# `def create_pkgkey(self):` line before original line 79 were all lost
# (numbering jumps 70->74 and 76->79). Field descriptions per the inline
# comments the author left; do not run this fragment as-is.
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Body of the lost create_pkgkey(): derive the map key from package+options.
79 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): fragment of `fontmapping.__init__` -- the class header and
# `def __init__(self):` line were dropped by the extraction (numbering jumps
# 79->83). Three lookup tables relating font names and package keys:
83 self.font2pkgmap = dict()  # fontname -> fontinfo entry
84 self.pkg2fontmap = dict()  # package key (see createkey) -> fontname
85 self.pkginmap = dict() # defines, if a map for package exists
# Register a family of fonts (with per-font extra options) under one package.
# NOTE(review): lossy extraction -- original numbering jumps (88->90, 99->102,
# 103->106, 106->108, 108->112, 112->116) indicate the per-font loop header,
# the `fe = fontinfo()` construction, the option-splitting of each font_list
# entry, and the package-default branch are missing. Keep byte-identical;
# restore from upstream lyx2lyx before executing.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 """Expand fontinfo mapping"""
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# Reverse lookup: package name + option list -> LyX font name, or None.
# NOTE(review): lossy extraction -- numbering jumps (124->126, 127->129,
# 131->133) show the options normalization, the `return None` fallbacks and
# the final `return fontname` are missing. Keep byte-identical.
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Only accept the match when the round-trip through font2pkgmap agrees.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# Build the fontmapping table used by convert_fonts()/revert_fonts() for the
# requested font families.
# NOTE(review): lossy extraction -- missing lines include the
# `fm = fontmapping()` construction (orig. 141), the `if font == 'DejaVu':`
# and `elif font == 'IBM':`/`'Noto':`/`'Fira':` branch headers (orig. 148,
# 152, 169, 190 -- presumably; TODO confirm against upstream) and the trailing
# `return fm` (orig. ~200). Keep byte-identical; do not run as-is.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
197 elif font == 'libertinus':
198 fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
199 fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
# Translate \usepackage font lines found in the LaTeX preamble into native
# LyX header settings (\font_roman, \font_*_scale, \font_*_osf, \font_*_opts),
# deleting the handled preamble lines.
# NOTE(review): lossy extraction -- numbering jumps (205->207, 208->212,
# 212->215, 216->219, 219->224, ...) mean the scan loop header, `-1` guards,
# option bookkeeping and several branches are missing. Keep byte-identical;
# restore from upstream lyx2lyx before executing.
202 def convert_fonts(document, fm, osfoption = "osf"):
203 """Handle font definition (LaTeX preamble -> native)"""
204 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
205 rscaleopt = re.compile(r'^scaled?=(.*)')
207 # Check whether we go beyond font option feature introduction
208 haveFontOpts = document.end_format > 580
212 i = find_re(document.preamble, rpkg, i+1)
215 mo = rpkg.search(document.preamble[i])
216 if mo == None or mo.group(2) == None:
219 options = mo.group(2).replace(' ', '').split(",")
224 while o < len(options):
225 if options[o] == osfoption:
229 mo = rscaleopt.search(options[o])
237 if not pkg in fm.pkginmap:
242 # Try with name-option combination first
243 # (only one default option supported currently)
245 while o < len(options):
247 fn = fm.getfontname(pkg, [opt])
254 fn = fm.getfontname(pkg, [])
256 fn = fm.getfontname(pkg, options)
259 del document.preamble[i]
260 fontinfo = fm.font2pkgmap[fn]
261 if fontinfo.scaletype == None:
264 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
265 fontinfo.scaleval = oscale
# Toggle the osf flag when the package's osf state differs from its default.
266 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
267 if fontinfo.osfopt == None:
268 options.extend(osfoption)
270 osf = find_token(document.header, "\\font_osf false")
271 osftag = "\\font_osf"
272 if osf == -1 and fontinfo.fonttype != "math":
273 # Try with newer format
274 osftag = "\\font_" + fontinfo.fonttype + "_osf"
275 osf = find_token(document.header, osftag + " false")
277 document.header[osf] = osftag + " true"
# Drop the marker comment lyx2lyx leaves above generated preamble lines.
278 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
279 del document.preamble[i-1]
281 if fontscale != None:
282 j = find_token(document.header, fontscale, 0)
284 val = get_value(document.header, fontscale, j)
288 scale = "%03d" % int(float(oscale) * 100)
289 document.header[j] = fontscale + " " + scale + " " + vals[1]
290 ft = "\\font_" + fontinfo.fonttype
291 j = find_token(document.header, ft, 0)
293 val = get_value(document.header, ft, j)
294 words = val.split() # ! splits also values like '"DejaVu Sans"'
295 words[0] = '"' + fn + '"'
296 document.header[j] = ft + ' ' + ' '.join(words)
297 if haveFontOpts and fontinfo.fonttype != "math":
298 fotag = "\\font_" + fontinfo.fonttype + "_opts"
299 fo = find_token(document.header, fotag)
301 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
303 # Sensible place to insert tag
304 fo = find_token(document.header, "\\font_sf_scale")
306 document.warning("Malformed LyX document! Missing \\font_sf_scale")
308 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# Translate native LyX font header settings back to LaTeX preamble code,
# accumulating package -> options in `fontmap` for add_preamble_fonts().
# NOTE(review): lossy extraction -- numbering jumps (319->321, 322->325,
# 325->328, 332->334, 336->339, 361->365, ...) mean `continue` guards,
# `fontmap[val] = []` initialization and scale parsing lines are missing.
# Keep byte-identical; restore from upstream lyx2lyx before executing.
311 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
312 """Revert native font definition to LaTeX"""
313 # fonlist := list of fonts created from the same package
314 # Empty package means that the font-name is the same as the package-name
315 # fontmap (key = package, val += found options) will be filled
316 # and used later in add_preamble_fonts() to be added to user-preamble
318 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
319 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
321 while i < len(document.header):
322 i = find_re(document.header, rfontscale, i+1)
325 mo = rfontscale.search(document.header[i])
328 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
329 val = get_value(document.header, ft, i)
330 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
331 font = words[0].strip('"') # TeX font name has no whitespace
332 if not font in fm.font2pkgmap:
334 fontinfo = fm.font2pkgmap[font]
335 val = fontinfo.package
336 if not val in fontmap:
339 if OnlyWithXOpts or WithXOpts:
340 if ft == "\\font_math":
342 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
343 if ft == "\\font_sans":
344 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
345 elif ft == "\\font_typewriter":
346 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
347 x = find_re(document.header, regexp, 0)
348 if x == -1 and OnlyWithXOpts:
352 # We need to use this regex since split() does not handle quote protection
353 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
354 opts = xopts[1].strip('"').split(",")
355 fontmap[val].extend(opts)
356 del document.header[x]
# Reset the header entry to the stock font; preamble code takes over.
357 words[0] = '"default"'
358 document.header[i] = ft + ' ' + ' '.join(words)
359 if fontinfo.scaleopt != None:
360 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
361 mo = rscales.search(xval)
365 # set correct scale option
366 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
367 if fontinfo.osfopt != None:
369 if fontinfo.osfdef == "true":
371 osf = find_token(document.header, "\\font_osf " + oldval)
372 if osf == -1 and ft != "\\font_math":
373 # Try with newer format
374 osftag = "\\font_roman_osf " + oldval
375 if ft == "\\font_sans":
376 osftag = "\\font_sans_osf " + oldval
377 elif ft == "\\font_typewriter":
378 osftag = "\\font_typewriter_osf " + oldval
379 osf = find_token(document.header, osftag)
381 fontmap[val].extend([fontinfo.osfopt])
382 if len(fontinfo.options) > 0:
383 fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings ("auto" -> "auto-legacy",
    "default" -> "auto-legacy-plain")."""
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard restored (lost in extraction, numbering 394->397): without it a
    # missing \inputencoding line would corrupt document.header[-1].
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back ("auto-legacy-plain" -> "default",
    "auto-legacy" -> "auto")."""
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard restored (lost in extraction, numbering 402->405): without it a
    # missing \inputencoding line would corrupt document.header[-1].
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    # With non-TeX (system) fonts there is nothing to convert.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap init restored (lost in extraction, numbering 418->420);
        # without it revert_fonts() would hit an unbound local.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    # With non-TeX (system) fonts there is nothing to convert.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap init restored (lost in extraction, numbering 434->436);
        # without it revert_fonts() would hit an unbound local.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    # With non-TeX (system) fonts there is nothing to convert.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap init restored (lost in extraction, numbering 450->452);
        # without it revert_fonts() would hit an unbound local.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# Delete every Begin/EndFrontmatter layout (with trailing blank lines).
# NOTE(review): lossy extraction -- numbering jumps (460->463, 463->466,
# 467->469, 469->471, 473->475) mean the scan-loop header, the `-1` exit
# guards, `continue` lines and the `j += 1` body are missing. Keep
# byte-identical; restore from upstream lyx2lyx before executing.
456 def removeFrontMatterStyles(document):
457 """Remove styles Begin/EndFrontmatter"""
459 layouts = ['BeginFrontmatter', 'EndFrontmatter']
460 tokenend = len('\\begin_layout ')
463 i = find_token_exact(document.body, '\\begin_layout ', i+1)
466 layout = document.body[i][tokenend:].strip()
467 if layout not in layouts:
469 j = find_end_of_layout(document.body, i)
471 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Swallow blank lines following the layout before deleting the span.
473 while document.body[j+1].strip() == '':
475 document.body[i:j+1] = []
# Wrap the elsarticle frontmatter layouts in Begin/EndFrontmatter styles.
# NOTE(review): lossy extraction -- numbering jumps (480->483, 485->488,
# 493->496, 501->505, 511->513, 513->520) mean the inner helper's note text,
# the scan loop bookkeeping (`first` tracking) and several guards are
# missing. Keep byte-identical; restore from upstream lyx2lyx.
477 def addFrontMatterStyles(document):
478 """Use styles Begin/EndFrontmatter for elsarticle"""
480 if document.textclass != "elsarticle":
# Local helper: insert a (Begin|End)Frontmatter layout around `line`,
# trimming surrounding blank lines first.
483 def insertFrontmatter(prefix, line):
485 while above > 0 and document.body[above-1].strip() == '':
488 while document.body[below].strip() == '':
490 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
491 '\\begin_inset Note Note',
493 '\\begin_layout Plain Layout',
496 '\\end_inset', '', '',
499 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
500 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
501 tokenend = len('\\begin_layout ')
505 i = find_token_exact(document.body, '\\begin_layout ', i+1)
508 layout = document.body[i][tokenend:].strip()
509 if layout not in layouts:
511 k = find_end_of_layout(document.body, i)
513 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so the earlier index `first` stays valid.
520 insertFrontmatter('End', k+1)
521 insertFrontmatter('Begin', first)
# Insert `literal "true"` into every CommandInset include, after its
# parameter lines.
# NOTE(review): lossy extraction -- numbering jumps (525->529, 529->532,
# 534->536, 536->538) mean the loop header, `-1` guards and the `i += 1`
# advance are missing. Keep byte-identical; restore from upstream lyx2lyx.
524 def convert_lst_literalparam(document):
525 """Add param literal to include inset"""
529 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
532 j = find_end_of_inset(document.body, i)
534 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip forward to the first blank line, i.e. past the parameter block.
536 while i < j and document.body[i].strip() != '':
538 document.body.insert(i, 'literal "true"')
# Strip the `literal` parameter from every CommandInset include.
# NOTE(review): lossy extraction -- numbering jumps (542->546, 546->549,
# 549->551, 551->553) mean the loop header and `-1` guards are missing.
# Keep byte-identical; restore from upstream lyx2lyx.
541 def revert_lst_literalparam(document):
542 """Remove param literal from include inset"""
546 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
549 j = find_end_of_inset(document.body, i)
551 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
553 del_token(document.body, 'literal', i, j)
# Replace the native ParaType header settings with \usepackage{PT...} code.
# NOTE(review): lossy extraction -- numbering jumps (563->566, 568->570,
# 570->573, 573->576, 576->578, 584->589, 591->594, ...) mean guards,
# the scale normalisation, try/except around float(), and the sfoption/
# ttoption defaults are missing. Keep byte-identical; restore from upstream.
556 def revert_paratype(document):
557 """Revert ParaType font definitions to LaTeX"""
559 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
560 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
561 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
562 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
563 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
566 sfval = find_token(document.header, "\\font_sf_scale", 0)
568 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
570 sfscale = document.header[sfval].split()
573 document.header[sfval] = " ".join(sfscale)
576 sf_scale = float(val)
578 document.warning("Invalid font_sf_scale value: " + val)
# Only emit a scaled= option for non-default scales.
581 if sf_scale != "100.0":
582 sfoption = "scaled=" + str(sf_scale / 100.0)
583 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
584 ttval = get_value(document.header, "\\font_tt_scale", 0)
589 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set -> one combined paratype package suffices.
590 if i1 != -1 and i2 != -1 and i3!= -1:
591 add_to_preamble(document, ["\\usepackage{paratype}"])
594 add_to_preamble(document, ["\\usepackage{PTSerif}"])
595 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
598 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
600 add_to_preamble(document, ["\\usepackage{PTSans}"])
601 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
604 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
606 add_to_preamble(document, ["\\usepackage{PTMono}"])
607 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# Replace the native XCharter font setting with \usepackage{XCharter} code.
# NOTE(review): lossy extraction -- numbering jumps (613->617, 620->623,
# 624->627, 627->631) mean the `-1` guard, `return`, and the `options`
# assembly from the osf flag are missing. Keep byte-identical.
610 def revert_xcharter(document):
611 """Revert XCharter font definitions to LaTeX"""
613 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
617 # replace unsupported font setting
618 document.header[i] = document.header[i].replace("xcharter", "default")
619 # no need for preamble code with system fonts
620 if get_bool_value(document.header, "\\use_non_tex_fonts"):
623 # transfer old style figures setting to package options
624 j = find_token(document.header, "\\font_osf true")
627 document.header[j] = "\\font_osf false"
631 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# Turn Flex Landscape insets (Landscape module) back into raw
# \begin{landscape}...\end{landscape} ERT.
# NOTE(review): lossy extraction -- numbering jumps (637->642, 642->645,
# 647->650, 656->658) mean the loop header, exit guards and `continue`
# lines are missing. Keep byte-identical; restore from upstream lyx2lyx.
634 def revert_lscape(document):
635 """Reverts the landscape environment (Landscape module) to TeX-code"""
637 if not "landscape" in document.get_module_list():
642 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
645 j = find_end_of_inset(document.body, i)
647 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant needs afterpage{} so the rotation starts on a fresh page.
650 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
651 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
652 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
653 add_to_preamble(document, ["\\usepackage{afterpage}"])
655 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
656 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
658 add_to_preamble(document, ["\\usepackage{pdflscape}"])
659 document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard restored (lost in extraction, numbering 665->669): without it a
    # document lacking the setting would corrupt document.header[-1].
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard restored (lost in extraction, numbering 675->679): without it a
    # document lacking the setting would corrupt document.header[-1].
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
# Strip the \nospellcheck font-info parameter from the body.
# NOTE(review): lossy extraction -- original lines 684-686 and 688-692 are
# missing (loop header, `-1` guard and the deletion itself); only the scan
# call survives. Keep byte-identical; restore from upstream lyx2lyx.
682 def revert_nospellcheck(document):
683 """Remove nospellcheck font info param"""
687 i = find_token(document.body, '\\nospellcheck', i)
# Remove 'class'/'document' float placement parameters (header and insets).
# NOTE(review): lossy extraction -- numbering jumps (696->700, 700->703,
# 704->706, 706->713) mean the loop header, exit guard and the actual
# deletion of the placement lines are missing. Keep byte-identical.
693 def revert_floatpclass(document):
694 """Remove float placement params 'document' and 'class'"""
696 del_token(document.header, "\\float_placement class")
700 i = find_token(document.body, '\\begin_inset Float', i + 1)
703 j = find_end_of_inset(document.body, i)
704 k = find_token(document.body, 'placement class', i, j)
706 k = find_token(document.body, 'placement document', i, j)
# Remove float alignment params, emitting equivalent ERT alignment commands
# inside each float.
# NOTE(review): lossy extraction -- numbering jumps (716->720, 727->731,
# 733->735, 737->740, 745->747, 749->752) mean the loop header, guards,
# the deletion of the alignment line and the loop-advance are missing.
# Keep byte-identical; restore from upstream lyx2lyx before executing.
713 def revert_floatalignment(document):
714 """Remove float alignment params"""
# Global (document-level) default alignment, removed from the header.
716 galignment = get_value(document.header, "\\float_alignment", delete=True)
720 i = find_token(document.body, '\\begin_inset Float', i + 1)
723 j = find_end_of_inset(document.body, i)
725 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
727 k = find_token(document.body, 'alignment', i, j)
731 alignment = get_value(document.body, "alignment", k)
# 'document' means: fall back to the document-wide setting.
732 if alignment == "document":
733 alignment = galignment
735 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
737 document.warning("Can't find float layout!")
740 if alignment == "left":
741 alcmd = put_cmd_in_ert("\\raggedright{}")
742 elif alignment == "center":
743 alcmd = put_cmd_in_ert("\\centering{}")
744 elif alignment == "right":
745 alcmd = put_cmd_in_ert("\\raggedleft{}")
747 document.body[l+1:l+1] = alcmd
748 # There might be subfloats, so we do not want to move past
749 # the end of the inset.
# In tufte classes, rewrite citation insets as raw \cite ERT.
# NOTE(review): lossy extraction -- numbering jumps (756->761, 761->764,
# 766->768, 773->777, 779->781, 783->786, 788->791) mean the loop header,
# guards, the cmd filter and the base `res = "\\cite"` assembly are
# missing. Keep byte-identical; restore from upstream lyx2lyx.
752 def revert_tuftecite(document):
753 r"""Revert \cite commands in tufte classes"""
755 tufte = ["tufte-book", "tufte-handout"]
756 if document.textclass not in tufte:
761 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
764 j = find_end_of_inset(document.body, i)
766 document.warning("Can't find end of citation inset at line %d!!" %(i))
768 k = find_token(document.body, "LatexCommand", i, j)
770 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
773 cmd = get_value(document.body, "LatexCommand", k)
777 pre = get_quoted_value(document.body, "before", i, j)
778 post = get_quoted_value(document.body, "after", i, j)
779 key = get_quoted_value(document.body, "key", i, j)
781 document.warning("Citation inset at line %d does not have a key!" %(i))
783 # Replace command with ERT
786 res += "[" + pre + "]"
788 res += "[" + post + "]"
791 res += "{" + key + "}"
792 document.body[i:j+1] = put_cmd_in_ert([res])
# Drop varwidth="true" column flags from tabulars (tabularx/xltabular ->
# plain table) so older formats can read the document.
# NOTE(review): lossy extraction -- numbering jumps (798->801, 801->804,
# 806->808) mean the loop header, `-1` exit guard and `continue` are
# missing. Keep byte-identical; restore from upstream lyx2lyx.
797 def revert_stretchcolumn(document):
798 """We remove the column varwidth flags or everything else will become a mess."""
801 i = find_token(document.body, "\\begin_inset Tabular", i+1)
804 j = find_end_of_inset(document.body, i+1)
806 document.warning("Malformed LyX document: Could not find end of tabular.")
808 for k in range(i, j):
809 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
810 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
811 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# Revert standard table columns containing line breaks / multiple paragraphs
# to varwidth (V{...}) special columns plus ERT linebreaks.
# NOTE(review): lossy extraction -- numbering jumps (815->821, 826->829,
# 840->845, 852->854, 854->856, 873->876, 876->878, 886->890, 895->901)
# mean the outer loop header, `m`/`begcell` initialisation, `vcand` flag
# handling, needarray/needvarwidth flag setting and the inner while-loop
# over cells are missing. Keep byte-identical; restore from upstream.
814 def revert_vcolumns(document):
815 """Revert standard columns with line breaks etc."""
821 i = find_token(document.body, "\\begin_inset Tabular", i+1)
824 j = find_end_of_inset(document.body, i)
826 document.warning("Malformed LyX document: Could not find end of tabular.")
829 # Collect necessary column information
# Row/column counts live in the quoted attributes of the <lyxtabular> line.
831 nrows = int(document.body[i+1].split('"')[3])
832 ncols = int(document.body[i+1].split('"')[5])
834 for k in range(ncols):
835 m = find_token(document.body, "<column", m)
836 width = get_option_value(document.body[m], 'width')
837 varwidth = get_option_value(document.body[m], 'varwidth')
838 alignment = get_option_value(document.body[m], 'alignment')
839 special = get_option_value(document.body[m], 'special')
840 col_info.append([width, varwidth, alignment, special, m])
845 for row in range(nrows):
846 for col in range(ncols):
847 m = find_token(document.body, "<cell", m)
848 multicolumn = get_option_value(document.body[m], 'multicolumn')
849 multirow = get_option_value(document.body[m], 'multirow')
850 width = get_option_value(document.body[m], 'width')
851 rotate = get_option_value(document.body[m], 'rotate')
852 # Check for: linebreaks, multipars, non-standard environments
854 endcell = find_token(document.body, "</cell>", begcell)
856 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
858 elif count_pars_in_inset(document.body, begcell + 2) > 1:
860 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
862 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
863 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
865 alignment = col_info[col][2]
866 col_line = col_info[col][4]
868 if alignment == "center":
869 vval = ">{\\centering}"
870 elif alignment == "left":
871 vval = ">{\\raggedright}"
872 elif alignment == "right":
873 vval = ">{\\raggedleft}"
876 vval += "V{\\linewidth}"
878 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
879 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
880 # with newlines, and we do not want that)
882 endcell = find_token(document.body, "</cell>", begcell)
884 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
886 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
890 nle = find_end_of_inset(document.body, nl)
891 del(document.body[nle:nle+1])
893 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
895 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
901 if needarray == True:
902 add_to_preamble(document, ["\\usepackage{array}"])
903 if needvarwidth == True:
904 add_to_preamble(document, ["\\usepackage{varwidth}"])
# Revert per-bibliography encoding: for biblatex move it into biblio_options,
# otherwise wrap the bibtex inset in \bgroup\inputencoding{...}...\egroup.
# NOTE(review): lossy extraction -- numbering jumps (908->912, 916->920,
# 940->942, 942->959, 959->965, 972->975, 979->981, 989->991, 994->996)
# mean guards, the loop header, most of the encodings table (only a subset
# of the LyX->LaTeX names survives) and the biblatex flag are missing.
# Keep byte-identical; restore from upstream lyx2lyx before executing.
907 def revert_bibencoding(document):
908 """Revert bibliography encoding"""
912 i = find_token(document.header, "\\cite_engine", 0)
914 document.warning("Malformed document! Missing \\cite_engine")
916 engine = get_value(document.header, "\\cite_engine", i)
920 if engine in ["biblatex", "biblatex-natbib"]:
923 # Map lyx to latex encoding names
927 "armscii8" : "armscii8",
928 "iso8859-1" : "latin1",
929 "iso8859-2" : "latin2",
930 "iso8859-3" : "latin3",
931 "iso8859-4" : "latin4",
932 "iso8859-5" : "iso88595",
933 "iso8859-6" : "8859-6",
934 "iso8859-7" : "iso-8859-7",
935 "iso8859-8" : "8859-8",
936 "iso8859-9" : "latin5",
937 "iso8859-13" : "latin7",
938 "iso8859-15" : "latin9",
939 "iso8859-16" : "latin10",
940 "applemac" : "applemac",
942 "cp437de" : "cp437de",
959 "utf8-platex" : "utf8",
965 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
968 j = find_end_of_inset(document.body, i)
970 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
972 encoding = get_quoted_value(document.body, "encoding", i, j)
975 # remove encoding line
976 k = find_token(document.body, "encoding", i, j)
979 if encoding == "default":
981 # Re-find inset end line
982 j = find_end_of_inset(document.body, i)
985 h = find_token(document.header, "\\biblio_options", 0)
987 biblio_options = get_value(document.header, "\\biblio_options", h)
988 if not "bibencoding" in biblio_options:
989 document.header[h] += ",bibencoding=%s" % encodings[encoding]
991 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
993 # this should not happen
994 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# Insert the biblio_options line just above \biblatex_bibstyle.
996 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
998 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
999 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# Split vcs-* arguments out of buffer Info insets into the new "vcs" type.
# NOTE(review): lossy extraction -- numbering jumps (1006->1009, 1012->1017,
# 1017->1020, 1025->1028, 1030->1032) mean the types-dict opener, loop
# header, guards and the type filter (`tpv != "buffer"` presumably -- TODO
# confirm) are missing. Keep byte-identical; restore from upstream.
1005 def convert_vcsinfo(document):
1006 """Separate vcs Info inset from buffer Info inset."""
1009 "vcs-revision" : "revision",
1010 "vcs-tree-revision" : "tree-revision",
1011 "vcs-author" : "author",
1012 "vcs-time" : "time",
1017 i = find_token(document.body, "\\begin_inset Info", i+1)
1020 j = find_end_of_inset(document.body, i+1)
1022 document.warning("Malformed LyX document: Could not find end of Info inset.")
1024 tp = find_token(document.body, 'type', i, j)
1025 tpv = get_quoted_value(document.body, "type", tp)
1028 arg = find_token(document.body, 'arg', i, j)
1029 argv = get_quoted_value(document.body, "arg", arg)
1030 if argv not in list(types.keys()):
1032 document.body[tp] = "type \"vcs\""
1033 document.body[arg] = "arg \"" + types[argv] + "\""
# Merge "vcs"-type Info insets back into buffer Info insets (arg gets a
# "vcs-" prefix).
# NOTE(review): lossy extraction -- numbering jumps (1039->1042, 1042->1045,
# 1050->1053, 1056->1058) mean the loop header, guards and the type filter
# are missing. Keep byte-identical; restore from upstream lyx2lyx.
1036 def revert_vcsinfo(document):
1037 """Merge vcs Info inset to buffer Info inset."""
1039 args = ["revision", "tree-revision", "author", "time", "date" ]
1042 i = find_token(document.body, "\\begin_inset Info", i+1)
1045 j = find_end_of_inset(document.body, i+1)
1047 document.warning("Malformed LyX document: Could not find end of Info inset.")
1049 tp = find_token(document.body, 'type', i, j)
1050 tpv = get_quoted_value(document.body, "type", tp)
1053 arg = find_token(document.body, 'arg', i, j)
1054 argv = get_quoted_value(document.body, "arg", arg)
1055 if argv not in args:
1056 document.warning("Malformed Info inset. Invalid vcs arg.")
1058 document.body[tp] = "type \"buffer\""
1059 document.body[arg] = "arg \"vcs-" + argv + "\""
# Downgrade "revision-abbrev" vcs Info args to plain "revision".
# NOTE(review): lossy extraction -- numbering jumps (1062->1066, 1066->1069,
# 1074->1077) mean the loop header, guards and the vcs-type filter are
# missing. Keep byte-identical; restore from upstream lyx2lyx.
1061 def revert_vcsinfo_rev_abbrev(document):
1062 " Convert abbreviated revisions to regular revisions. "
1066 i = find_token(document.body, "\\begin_inset Info", i+1)
1069 j = find_end_of_inset(document.body, i+1)
1071 document.warning("Malformed LyX document: Could not find end of Info inset.")
1073 tp = find_token(document.body, 'type', i, j)
1074 tpv = get_quoted_value(document.body, "type", tp)
1077 arg = find_token(document.body, 'arg', i, j)
1078 argv = get_quoted_value(document.body, "arg", arg)
1079 if( argv == "revision-abbrev" ):
1080 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Fixes vs. the previous revision:
    * ``dte.isodate()`` -> ``dte.isoformat()`` (date has no isodate method).
    * stray ``%de`` in the spanish/spanish-mexico formats -> ``de``.
    * the bare-``d`` regex kept eating the character in front of the ``d``;
      it is now preserved with a capture group.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # was "%d de %B %de %Y" — stray '%' before 'de' fixed:
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        # was "%A, %d de %B %de %Y" — stray '%' before 'de' fixed:
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    }

    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # no format table for this language: leave the insets alone
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args look like "<format>@<iso date>"
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        #        elif tpv == "moddate":
        #            dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # was dte.isodate(): date objects have no such method
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate Qt date format tokens to strftime
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # A bare 'd' (not preceded by % or ') becomes %d; keep the
            # preceding character (the old pattern dropped it).
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Fixes vs. the previous revision:
    * ``result = dte.strftime(fmt)`` referenced the undefined name ``dte``
      (NameError at runtime) — the time object here is ``tme``.
    * ``document.body[i : j+1] = result`` assigned a *string* to a list
      slice, splicing it character by character; it must be ``[result]``.
    * the ``HH`` -> ``%H`` -> ``H`` -> ``%H`` replace chain turned ``%H``
      into ``%%H``; both forms are now handled in one regex pass.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be ignored).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"],
    }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # no format table for this language: leave the insets alone
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        #        elif tpv == "modtime":
        #            dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt time format tokens to strftime.  "HH" and "H"
            # both map to %H; doing them with chained str.replace turned
            # the '%H' just produced into '%%H', so use one regex pass.
            fmt = re.sub(r"H{1,2}", "%H", argv)
            fmt = fmt.replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # was dte.strftime(fmt): 'dte' is undefined in this function
            result = tme.strftime(fmt)
        # was '= result': assigning a str to a list slice splices it
        # character by character
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos+1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos+1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        # Only buffer-type insets carry a name-noext argument.
        tpline = find_token(document.body, 'type', pos, end)
        if get_quoted_value(document.body, "type", tpline) != "buffer":
            continue
        argline = find_token(document.body, 'arg', pos, end)
        if get_quoted_value(document.body, "arg", argline) != "name-noext":
            continue
        document.body[argline] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n Info inset to its static localized text.

    Fix: the replacement used to be ``document.body[i : j+1] = argv`` —
    assigning a *string* to a list slice splices it in character by
    character; it must be a one-element list.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument listpreamble:", pos+1)
        if pos == -1:
            return
        insetend = find_end_of_inset(document.body, pos)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, pos)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        # Pull the argument's contents out of its Plain Layout ...
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", pos)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[pos:insetend+1]
        # ... and re-insert it as an ERT "{ ... }" at paragraph start.
        ert = (["\\begin_inset ERT", "status collapsed", "",
                "\\begin_layout Plain Layout", "{"]
               + content
               + ["}", "\\end_layout", "", "\\end_inset", ""])
        document.body[parbeg : parbeg] = ert
def revert_lformatinfo(document):
    """Revert layout format Info inset to text.

    Fix: the replacement used to be ``document.body[i : j+1] = "69"`` —
    assigning a string to a list slice splices it per character, producing
    the two lines '6' and '9'; it must be ``["69"]``.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded: layout format of the 2.3.x series
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    Fix: the language was extracted with ``line.lstrip('\\lang ')``, but
    lstrip strips a character *set* ({'\\','l','a','n','g',' '}), which
    mangles language names starting with those letters (e.g. "ngerman"
    -> "erman").  Slice off the exact prefix instead.
    """
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # nested layouts inherit the current language
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    Swapping '(' and ')' is an involution, so reverting is the very same
    operation as converting; this wrapper only exists to keep the usual
    convert_/revert_ naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # No babel name, polyglossia name "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT."""
    # (inset name, soul macro) pairs handled by this module
    soul_macros = (("Spaceletters", "\\so"),
                   ("Strikethrough", "\\st"),
                   ("Underline", "\\ul"),
                   ("Highlight", "\\hl"),
                   ("Capitalize", "\\caps"))

    # The soul package is needed as soon as any of the insets occurs.
    for name, _ in soul_macros:
        if find_token(document.body, "\\begin_inset Flex %s" % name, 0) != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # \hl additionally requires the color package.
    if find_token(document.body, "\\begin_inset Flex Highlight", 0) != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    for name, macro in soul_macros:
        revert_flex_inset(document.body, name, macro)
def revert_tablestyle(document):
    """Remove tablestyle params from the header."""
    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        del document.header[pos]
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings."""

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = engine in ["biblatex", "biblatex-natbib"]

    # Map lyx to latex encoding names
    # NOTE(review): several cp* entries were lost by the extraction and are
    # reconstructed here — confirm against the original file.  The map is
    # not referenced by the visible code below; it may have been used by a
    # lost line — verify.
    encodings = {
        "utf8" : "utf8",
        "utf8x" : "utf8x",
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437" : "cp437",
        "cp437de" : "cp437de",
        "cp850" : "cp850",
        "cp852" : "cp852",
        "cp855" : "cp855",
        "cp858" : "cp858",
        "cp862" : "cp862",
        "cp865" : "cp865",
        "cp866" : "cp866",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "pt154" : "pt154",
        "utf8-platex" : "utf8",
        "ascii" : "ascii",
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            continue
        # local renamed from 'encodings' to avoid shadowing the map above
        file_encs = get_quoted_value(document.body, "file_encodings", i, j)
        if not file_encs:
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # file_encodings is a tab-separated list of "<file> <encoding>"
            enclist = file_encs.split("\t")
            encmap = dict()
            for pp in enclist:
                ppp = pp.split(" ", 1)
                encmap[ppp[0]] = ppp[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",
                    "status open", "", "\\begin_layout Plain Layout"]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
        i = j
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming attributes from table cells.

    Improvement: the regex is loop-invariant, so it is compiled once
    instead of on every matching cell.
    """

    # FIXME: Revert to TeX code?
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
1711 r'### Inserted by lyx2lyx (ruby inset) ###',
1712 r'InsetLayout Flex:Ruby',
1713 r' LyxType charstyle',
1714 r' LatexType command',
1718 r' HTMLInnerTag rb',
1719 r' HTMLInnerAttr ""',
1721 r' LabelString "Ruby"',
1722 r' Decoration Conglomerate',
1724 r' \ifdefined\kanjiskip',
1725 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1726 r' \else \ifdefined\luatexversion',
1727 r' \usepackage{luatexja-ruby}',
1728 r' \else \ifdefined\XeTeXversion',
1729 r' \usepackage{ruby}%',
1731 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1733 r' Argument post:1',
1734 r' LabelString "ruby text"',
1735 r' MenuString "Ruby Text|R"',
1736 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1737 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition."""
    # del_local_layout returns True only if the local definition was
    # actually present — only then do we switch to the module.
    if document.del_local_layout(ruby_inset_def):
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition."""
    # del_module returns True only if the module was in use — only then
    # is the local layout needed as a replacement.
    if document.del_module("ruby"):
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    # Each Japanese flavour has its own utf8 variant that maps to plain utf8.
    uses_variant = ((lang == "japanese" and inputenc == "utf8-platex")
                    or (lang == "japanese-cjk" and inputenc == "utf8-cjk"))
    if uses_variant:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    # Map each Japanese flavour back to its dedicated utf8 variant.
    variant_for_lang = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    if lang in variant_for_lang:
        document.set_parameter("inputencoding", variant_for_lang[lang])
def revert_lineno(document):
    """Replace lineno setting with user-preamble code."""
    # Fetch and drop both header lines; emit preamble code only when
    # line numbering was actually enabled.
    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    """Replace user-preamble code with native lineno support."""
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        # \usepackage line precedes the \linenumbers line we found
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del document.preamble[i-1:i+1]
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    k = find_token(document.header, "\\index ")
    new_header = ["\\use_lineno %d" % use_lineno]
    if options:
        new_header.append("\\lineno_options %s" % options)
    document.header[k:k] = new_header
def convert_aaencoding(document):
    """Convert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if val in ("auto-legacy", "latin9"):
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
        if k == -1:
            document.header.insert(i, "\\options latin9")
        else:
            document.header[k] += ",latin9"
def revert_aaencoding(document):
    """Revert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    # NOTE(review): this condition line was lost in extraction; "utf8" is
    # the symmetric counterpart of convert_aaencoding — confirm.
    if val == "utf8":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            document.header.insert(i, "\\options utf8")
        else:
            document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                     }

    # Collect every affected language used in the document.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang", pos+1)
        if pos == -1:
            break
        val = get_value(document.body, "\\lang", pos)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
1897 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1898 r'InsetLayout Flex:Glosse',
1900 r' LabelString "Gloss (old version)"',
1901 r' MenuString "Gloss (old version)"',
1902 r' LatexType environment',
1903 r' LatexName linggloss',
1904 r' Decoration minimalistic',
1909 r' CustomPars false',
1910 r' ForcePlain true',
1911 r' ParbreakIsNewline true',
1912 r' FreeSpacing true',
1913 r' Requires covington',
1916 r' \@ifundefined{linggloss}{%',
1917 r' \newenvironment{linggloss}[2][]{',
1918 r' \def\glosstr{\glt #1}%',
1920 r' {\glosstr\glend}}{}',
1923 r' ResetsFont true',
1925 r' Decoration conglomerate',
1926 r' LabelString "Translation"',
1927 r' MenuString "Glosse Translation|s"',
1928 r' Tooltip "Add a translation for the glosse"',
1933 glosss_inset_def = [
1934 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1935 r'InsetLayout Flex:Tri-Glosse',
1937 r' LabelString "Tri-Gloss (old version)"',
1938 r' MenuString "Tri-Gloss (old version)"',
1939 r' LatexType environment',
1940 r' LatexName lingglosss',
1941 r' Decoration minimalistic',
1946 r' CustomPars false',
1947 r' ForcePlain true',
1948 r' ParbreakIsNewline true',
1949 r' FreeSpacing true',
1951 r' Requires covington',
1954 r' \@ifundefined{lingglosss}{%',
1955 r' \newenvironment{lingglosss}[2][]{',
1956 r' \def\glosstr{\glt #1}%',
1958 r' {\glosstr\glend}}{}',
1960 r' ResetsFont true',
1962 r' Decoration conglomerate',
1963 r' LabelString "Translation"',
1964 r' MenuString "Glosse Translation|s"',
1965 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # Append the matching local layout for each deprecated gloss inset
    # actually present in the document body.
    for token, layout in (('\\begin_inset Flex Glosse', gloss_inset_def),
                          ('\\begin_inset Flex Tri-Glosse', glosss_inset_def)):
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout)
# Revert the new interlinear gloss insets back to raw LaTeX (ERT) using
# the old covington commands, and drop the deprecated local layouts.
# NOTE(review): several lines of this function are not visible in this
# excerpt (e.g. the early `return`, loop setup, and the assignment of
# `cmd`); comments below describe only the visible statements.
1977 def revert_linggloss(document):
1978 " Revert to old ling gloss definitions "
1979 if not "linguistics" in document.get_module_list():
1981 document.del_local_layout(gloss_inset_def)
1982 document.del_local_layout(glosss_inset_def)
# Process both the two-line and three-line interlinear gloss insets.
1985 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1986 for glosse in glosses:
1989 i = find_token(document.body, glosse, i+1)
1992 j = find_end_of_inset(document.body, i)
1994 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Extract the optional argument (Argument 1) content, then delete the
# argument inset (together with its wrapping paragraph when the
# paragraph contains nothing else).
1997 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1998 endarg = find_end_of_inset(document.body, arg)
2001 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2002 if argbeginPlain == -1:
2003 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2005 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2006 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2008 # remove Arg insets and paragraph, if it only contains this inset
2009 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2010 del document.body[arg - 1 : endarg + 4]
2012 del document.body[arg : endarg + 1]
# Same extraction/removal pattern for mandatory post-argument 1.
2014 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2015 endarg = find_end_of_inset(document.body, arg)
2018 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2019 if argbeginPlain == -1:
2020 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2022 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2023 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2025 # remove Arg insets and paragraph, if it only contains this inset
2026 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2027 del document.body[arg - 1 : endarg + 4]
2029 del document.body[arg : endarg + 1]
# ... and for post-argument 2.
2031 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2032 endarg = find_end_of_inset(document.body, arg)
2035 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2036 if argbeginPlain == -1:
2037 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2039 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2040 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2042 # remove Arg insets and paragraph, if it only contains this inset
2043 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2044 del document.body[arg - 1 : endarg + 4]
2046 del document.body[arg : endarg + 1]
# ... and for post-argument 3 (only used by the three-line gloss).
2048 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2049 endarg = find_end_of_inset(document.body, arg)
2052 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2053 if argbeginPlain == -1:
2054 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2056 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2057 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2059 # remove Arg insets and paragraph, if it only contains this inset
2060 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2061 del document.body[arg - 1 : endarg + 4]
2063 del document.body[arg : endarg + 1]
# NOTE(review): the assignment of `cmd` is not visible here; the check
# against "\\trigloss" below implies the three-line inset maps to
# \trigloss — confirm against the full source.
2066 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset content as ERT: \cmd[opt]{m1}{m2} (plus {m3} for
# \trigloss), then splice it in place of the inset.
2069 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2070 endInset = find_end_of_inset(document.body, i)
2071 endPlain = find_end_of_layout(document.body, beginPlain)
2072 precontent = put_cmd_in_ert(cmd)
2073 if len(optargcontent) > 0:
2074 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2075 precontent += put_cmd_in_ert("{")
2077 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2078 if cmd == "\\trigloss":
2079 postcontent += put_cmd_in_ert("}{") + marg3content
2080 postcontent += put_cmd_in_ert("}")
2082 document.body[endPlain:endInset + 1] = postcontent
2083 document.body[beginPlain + 1:beginPlain] = precontent
2084 del document.body[i : beginPlain + 1]
# The reverted commands come from the covington package.
2086 document.append_local_layout("Requires covington")
# Revert Subexample layouts that carry an argument to a raw
# \begin{subexamples}[...] ... \end{subexamples} environment in ERT.
# NOTE(review): several lines of this function are not visible in this
# excerpt (early `return`, loop setup, some `break`/`continue` lines).
2091 def revert_subexarg(document):
2092 " Revert linguistic subexamples with argument to ERT "
2094 if not "linguistics" in document.get_module_list():
2100 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2103 j = find_end_of_layout(document.body, i)
2105 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Consecutive Subexample layouts belong to the same environment; extend
# j over all of them.
2108 # check for consecutive layouts
2109 k = find_token(document.body, "\\begin_layout", j)
2110 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2112 j = find_end_of_layout(document.body, k)
2114 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument and convert its content to LaTeX.
2117 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2121 endarg = find_end_of_inset(document.body, arg)
2123 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2124 if argbeginPlain == -1:
2125 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2127 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2128 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2130 # remove Arg insets and paragraph, if it only contains this inset
2131 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2132 del document.body[arg - 1 : endarg + 4]
2134 del document.body[arg : endarg + 1]
2136 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
# Deleting the argument shifted indices; locate the layout end again.
2138 # re-find end of layout
2139 j = find_end_of_layout(document.body, i)
2141 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2144 # check for consecutive layouts
2145 k = find_token(document.body, "\\begin_layout", j)
2146 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each following Subexample paragraph becomes a Standard paragraph
# starting with an ERT \item.
2148 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2149 j = find_end_of_layout(document.body, k)
2151 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last paragraph and open it (with the
# first \item) in place of the first Subexample layout.
2154 endev = put_cmd_in_ert("\\end{subexamples}")
2156 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2157 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2158 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# subexamples is provided by the covington package.
2160 document.append_local_layout("Requires covington")
# Revert the DRS (Discourse Representation Structure) flex insets of the
# linguistics module to raw LaTeX commands from the drs package.
# NOTE(review): many lines of this function are not visible in this
# excerpt (early `return`, loop setup, the `cmd` assignments); comments
# describe only the visible statements.
2164 def revert_drs(document):
2165 " Revert DRS insets (linguistics) to ERT "
2167 if not "linguistics" in document.get_module_list():
2171 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2172 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2173 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2174 "\\begin_inset Flex SDRS"]
2178 i = find_token(document.body, drs, i+1)
2181 j = find_end_of_inset(document.body, i)
2183 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Extract pre-argument 1, then delete the argument inset (and its
# wrapping paragraph when the paragraph contains nothing else).
2186 # Check for arguments
2187 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2188 endarg = find_end_of_inset(document.body, arg)
2191 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2192 if argbeginPlain == -1:
2193 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2195 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2196 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2198 # remove Arg insets and paragraph, if it only contains this inset
2199 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2200 del document.body[arg - 1 : endarg + 4]
2202 del document.body[arg : endarg + 1]
# Indices shifted by the deletion; re-find the inset end before the
# next argument lookup (same pattern repeats below).
2205 j = find_end_of_inset(document.body, i)
2207 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Pre-argument 2 (used by SDRS, see below).
2210 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2211 endarg = find_end_of_inset(document.body, arg)
2214 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2215 if argbeginPlain == -1:
2216 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2218 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2219 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2221 # remove Arg insets and paragraph, if it only contains this inset
2222 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2223 del document.body[arg - 1 : endarg + 4]
2225 del document.body[arg : endarg + 1]
2228 j = find_end_of_inset(document.body, i)
2230 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Post-arguments 1..4 default to empty lists when absent.
2233 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2234 endarg = find_end_of_inset(document.body, arg)
2235 postarg1content = []
2237 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2238 if argbeginPlain == -1:
2239 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2241 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2242 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2244 # remove Arg insets and paragraph, if it only contains this inset
2245 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2246 del document.body[arg - 1 : endarg + 4]
2248 del document.body[arg : endarg + 1]
2251 j = find_end_of_inset(document.body, i)
2253 document.warning("Malformed LyX document: Can't find end of DRS inset")
2256 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2257 endarg = find_end_of_inset(document.body, arg)
2258 postarg2content = []
2260 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2261 if argbeginPlain == -1:
2262 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2264 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2265 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2267 # remove Arg insets and paragraph, if it only contains this inset
2268 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2269 del document.body[arg - 1 : endarg + 4]
2271 del document.body[arg : endarg + 1]
2274 j = find_end_of_inset(document.body, i)
2276 document.warning("Malformed LyX document: Can't find end of DRS inset")
2279 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2280 endarg = find_end_of_inset(document.body, arg)
2281 postarg3content = []
2283 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2284 if argbeginPlain == -1:
2285 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2287 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2288 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2290 # remove Arg insets and paragraph, if it only contains this inset
2291 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2292 del document.body[arg - 1 : endarg + 4]
2294 del document.body[arg : endarg + 1]
2297 j = find_end_of_inset(document.body, i)
2299 document.warning("Malformed LyX document: Can't find end of DRS inset")
2302 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2303 endarg = find_end_of_inset(document.body, arg)
2304 postarg4content = []
2306 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2307 if argbeginPlain == -1:
2308 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2310 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2311 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2313 # remove Arg insets and paragraph, if it only contains this inset
2314 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2315 del document.body[arg - 1 : endarg + 4]
2317 del document.body[arg : endarg + 1]
# Pick the LaTeX command matching the inset flavor.  NOTE(review): the
# actual `cmd = ...` assignments are not visible in this excerpt.
2319 # The respective LaTeX command
2321 if drs == "\\begin_inset Flex DRS*":
2323 elif drs == "\\begin_inset Flex IfThen-DRS":
2325 elif drs == "\\begin_inset Flex Cond-DRS":
2327 elif drs == "\\begin_inset Flex QDRS":
2329 elif drs == "\\begin_inset Flex NegDRS":
2331 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT replacement: \cmd{prearg1}[{prearg2} for SDRS]{body}
# plus the post-arguments required by the specific command.
2334 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2335 endInset = find_end_of_inset(document.body, i)
2336 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2337 precontent = put_cmd_in_ert(cmd)
2338 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2339 if drs == "\\begin_inset Flex SDRS":
2340 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2341 precontent += put_cmd_in_ert("{")
2344 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2345 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2346 if cmd == "\\condrs" or cmd == "\\qdrs":
2347 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2349 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2351 postcontent = put_cmd_in_ert("}")
2353 document.body[endPlain:endInset + 1] = postcontent
2354 document.body[beginPlain + 1:beginPlain] = precontent
2355 del document.body[i : beginPlain + 1]
# Mark covington as provided and pull in the required LaTeX packages.
2357 document.append_local_layout("Provides covington 1")
2358 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Move non-TeX font selections to the user preamble as \babelfont calls
# (only applies when non-TeX fonts are used with the babel language
# package).  Header font settings are reset to "default".
# NOTE(review): several lines of this function (early `return`s, the
# osf/scale bookkeeping details) are not visible in this excerpt.
2364 def revert_babelfont(document):
2365 " Reverts the use of \\babelfont to user preamble "
2367 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2370 i = find_token(document.header, '\\language_package', 0)
2372 document.warning("Malformed LyX document: Missing \\language_package.")
2374 if get_value(document.header, "\\language_package", 0) != "babel":
2377 # check font settings
2378 # defaults
2379 roman = sans = typew = "default"
2381 sf_scale = tt_scale = 100.0
# Read and reset the roman font.
2383 j = find_token(document.header, "\\font_roman", 0)
2385 document.warning("Malformed LyX document: Missing \\font_roman.")
2387 # We need to use this regex since split() does not handle quote protection
2388 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2389 roman = romanfont[2].strip('"')
2390 romanfont[2] = '"default"'
2391 document.header[j] = " ".join(romanfont)
# Read and reset the sans font.
2393 j = find_token(document.header, "\\font_sans", 0)
2395 document.warning("Malformed LyX document: Missing \\font_sans.")
2397 # We need to use this regex since split() does not handle quote protection
2398 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2399 sans = sansfont[2].strip('"')
2400 sansfont[2] = '"default"'
2401 document.header[j] = " ".join(sansfont)
# Read and reset the typewriter font.
2403 j = find_token(document.header, "\\font_typewriter", 0)
2405 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2407 # We need to use this regex since split() does not handle quote protection
2408 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2409 typew = ttfont[2].strip('"')
2410 ttfont[2] = '"default"'
2411 document.header[j] = " ".join(ttfont)
# Old-style figures flag.
2413 i = find_token(document.header, "\\font_osf", 0)
2415 document.warning("Malformed LyX document: Missing \\font_osf.")
2417 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans/typewriter scaling factors (percent).
2419 j = find_token(document.header, "\\font_sf_scale", 0)
2421 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2423 sfscale = document.header[j].split()
2426 document.header[j] = " ".join(sfscale)
2429 sf_scale = float(val)
2431 document.warning("Invalid font_sf_scale value: " + val)
2433 j = find_token(document.header, "\\font_tt_scale", 0)
2435 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2437 ttscale = document.header[j].split()
2440 document.header[j] = " ".join(ttscale)
2443 tt_scale = float(val)
2445 document.warning("Invalid font_tt_scale value: " + val)
# Emit the collected settings as \babelfont preamble lines.
2447 # set preamble stuff
2448 pretext = ['%% This document must be processed with xelatex or lualatex!']
2449 pretext.append('\\AtBeginDocument{%')
2450 if roman != "default":
2451 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2452 if sans != "default":
2453 sf = '\\babelfont{sf}['
2454 if sf_scale != 100.0:
2455 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2456 sf += 'Mapping=tex-text]{' + sans + '}'
2458 if typew != "default":
2459 tw = '\\babelfont{tt}'
2460 if tt_scale != 100.0:
2461 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2462 tw += '{' + typew + '}'
2465 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2467 insert_to_preamble(document, pretext)
# Revert a native MinionPro selection carrying \font_roman_opts to a
# plain \usepackage[...]{MinionPro} in the preamble.
# NOTE(review): some lines (early `return`s, option concatenation) are
# not visible in this excerpt.
2470 def revert_minionpro(document):
2471 " Revert native MinionPro font definition (with extra options) to LaTeX "
2473 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Only applies when extra roman font options are present.
2476 regexp = re.compile(r'(\\font_roman_opts)')
2477 x = find_re(document.header, regexp, 0)
2481 # We need to use this regex since split() does not handle quote protection
2482 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2483 opts = romanopts[1].strip('"')
2485 i = find_token(document.header, "\\font_roman", 0)
2487 document.warning("Malformed LyX document: Missing \\font_roman.")
2490 # We need to use this regex since split() does not handle quote protection
2491 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2492 roman = romanfont[1].strip('"')
2493 if roman != "minionpro":
2495 romanfont[1] = '"default"'
2496 document.header[i] = " ".join(romanfont)
# Carry the osf setting over into the package options.
2498 j = find_token(document.header, "\\font_osf true", 0)
2501 preamble = "\\usepackage["
2503 document.header[j] = "\\font_osf false"
2507 preamble += "]{MinionPro}"
2508 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2509 del document.header[x]
# Revert the \font_*_opts header params by emitting \setmainfont /
# \setsansfont / \setmonofont (or \babelfont with babel) preamble code.
# NOTE(review): several guard lines (`if i == -1`, early `return`s,
# option concatenation) are not visible in this excerpt.
2512 def revert_font_opts(document):
2513 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2515 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2516 Babel = (get_value(document.header, "\\language_package") == "babel")
# --- roman: read opts, reset the font to default, emit preamble line.
2518 # 1. Roman
2519 regexp = re.compile(r'(\\font_roman_opts)')
2520 i = find_re(document.header, regexp, 0)
2522 # We need to use this regex since split() does not handle quote protection
2523 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2524 opts = romanopts[1].strip('"')
2525 del document.header[i]
2527 regexp = re.compile(r'(\\font_roman)')
2528 i = find_re(document.header, regexp, 0)
2530 # We need to use this regex since split() does not handle quote protection
2531 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2532 font = romanfont[2].strip('"')
2533 romanfont[2] = '"default"'
2534 document.header[i] = " ".join(romanfont)
2535 if font != "default":
# With babel, \babelfont{rm} is needed instead of \setmainfont.
2537 preamble = "\\babelfont{rm}["
2539 preamble = "\\setmainfont["
2542 preamble += "Mapping=tex-text]{"
2545 add_to_preamble(document, [preamble])
# --- sans: same pattern, additionally honoring \font_sf_scale.
2547 # 2. Sans
2548 regexp = re.compile(r'(\\font_sans_opts)')
2549 i = find_re(document.header, regexp, 0)
2552 # We need to use this regex since split() does not handle quote protection
2553 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2554 opts = sfopts[1].strip('"')
2555 del document.header[i]
2557 regexp = re.compile(r'(\\font_sf_scale)')
2558 i = find_re(document.header, regexp, 0)
2560 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2561 regexp = re.compile(r'(\\font_sans)')
2562 i = find_re(document.header, regexp, 0)
2564 # We need to use this regex since split() does not handle quote protection
2565 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2566 font = sffont[2].strip('"')
2567 sffont[2] = '"default"'
2568 document.header[i] = " ".join(sffont)
2569 if font != "default":
2571 preamble = "\\babelfont{sf}["
2573 preamble = "\\setsansfont["
# scaleval is a percentage; "Scale=0." + value builds the fractional
# fontspec Scale option.
2577 preamble += "Scale=0."
2578 preamble += scaleval
2580 preamble += "Mapping=tex-text]{"
2583 add_to_preamble(document, [preamble])
# --- typewriter: same pattern with \font_tt_scale.
2585 # 3. Typewriter
2586 regexp = re.compile(r'(\\font_typewriter_opts)')
2587 i = find_re(document.header, regexp, 0)
2590 # We need to use this regex since split() does not handle quote protection
2591 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2592 opts = ttopts[1].strip('"')
2593 del document.header[i]
2595 regexp = re.compile(r'(\\font_tt_scale)')
2596 i = find_re(document.header, regexp, 0)
2598 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2599 regexp = re.compile(r'(\\font_typewriter)')
2600 i = find_re(document.header, regexp, 0)
2602 # We need to use this regex since split() does not handle quote protection
2603 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2604 font = ttfont[2].strip('"')
2605 ttfont[2] = '"default"'
2606 document.header[i] = " ".join(ttfont)
2607 if font != "default":
2609 preamble = "\\babelfont{tt}["
2611 preamble = "\\setmonofont["
2615 preamble += "Scale=0."
2616 preamble += scaleval
2618 preamble += "Mapping=tex-text]{"
2621 add_to_preamble(document, [preamble])
# Revert a "complete" Noto selection (roman NotoSerif-TLF with default
# sans/typewriter) that carries extra options to \usepackage[...]{noto}.
# NOTE(review): several guard/`return` and option-assembly lines are not
# visible in this excerpt.
2624 def revert_plainNotoFonts_xopts(document):
2625 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2627 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# osf or explicit roman opts trigger the reversion.
2631 y = find_token(document.header, "\\font_osf true", 0)
2635 regexp = re.compile(r'(\\font_roman_opts)')
2636 x = find_re(document.header, regexp, 0)
2637 if x == -1 and not osf:
2642 # We need to use this regex since split() does not handle quote protection
2643 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2644 opts = romanopts[1].strip('"')
2650 i = find_token(document.header, "\\font_roman", 0)
2654 # We need to use this regex since split() does not handle quote protection
2655 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2656 roman = romanfont[1].strip('"')
2657 if roman != "NotoSerif-TLF":
# Only "plain" noto: sans and typewriter must be at their defaults.
2660 j = find_token(document.header, "\\font_sans", 0)
2664 # We need to use this regex since split() does not handle quote protection
2665 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2666 sf = sffont[1].strip('"')
2670 j = find_token(document.header, "\\font_typewriter", 0)
2674 # We need to use this regex since split() does not handle quote protection
2675 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2676 tt = ttfont[1].strip('"')
# Reset the header fonts and emit the package load instead.
2680 # So we have noto as "complete font"
2681 romanfont[1] = '"default"'
2682 document.header[i] = " ".join(romanfont)
2684 preamble = "\\usepackage["
2686 preamble += "]{noto}"
2687 add_to_preamble(document, [preamble])
2689 document.header[y] = "\\font_osf false"
2691 del document.header[x]
# Revert extended Noto font definitions (with extra options) via the
# shared font-mapping machinery.  NOTE(review): the early `return` and
# the `fontmap` initialization are not visible in this excerpt.
2694 def revert_notoFonts_xopts(document):
2695 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2697 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2701 fm = createFontMapping(['Noto'])
2702 if revert_fonts(document, fm, fontmap, True):
2703 add_preamble_fonts(document, fontmap)
# Revert native IBM Plex font definitions (with extra options) via the
# shared font-mapping machinery.  NOTE(review): the early `return` and
# the `fontmap` initialization are not visible in this excerpt.
2706 def revert_IBMFonts_xopts(document):
2707 " Revert native IBM font definition (with extra options) to LaTeX "
2709 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2713 fm = createFontMapping(['IBM'])
2714 if revert_fonts(document, fm, fontmap, True):
2715 add_preamble_fonts(document, fontmap)
# Revert native Adobe Source font definitions (with extra options) via
# the shared font-mapping machinery.  NOTE(review): the early `return`
# and the `fontmap` initialization are not visible in this excerpt.
2718 def revert_AdobeFonts_xopts(document):
2719 " Revert native Adobe font definition (with extra options) to LaTeX "
2721 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2725 fm = createFontMapping(['Adobe'])
2726 if revert_fonts(document, fm, fontmap, True):
2727 add_preamble_fonts(document, fontmap)
# Split the single \font_osf header param into the per-family params
# \font_roman_osf, \font_sans_osf and \font_typewriter_osf.
# NOTE(review): some guard and branch lines are not visible in this
# excerpt.
2730 def convert_osf(document):
2731 " Convert \\font_osf param to new format "
2733 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2735 i = find_token(document.header, '\\font_osf', 0)
2737 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans/typewriter variants support old-style figures.
2740 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2741 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2743 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2744 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# With non-TeX fonts, osf is a roman-only feature in the old format.
2747 document.header.insert(i, "\\font_sans_osf false")
2748 document.header.insert(i + 1, "\\font_typewriter_osf false")
# With TeX fonts and osf enabled, propagate the flag to sans and
# typewriter only when the selected font supports it.
2752 x = find_token(document.header, "\\font_sans", 0)
2754 document.warning("Malformed LyX document: Missing \\font_sans.")
2756 # We need to use this regex since split() does not handle quote protection
2757 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2758 sf = sffont[1].strip('"')
2760 document.header.insert(i, "\\font_sans_osf true")
2762 document.header.insert(i, "\\font_sans_osf false")
2764 x = find_token(document.header, "\\font_typewriter", 0)
2766 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2768 # We need to use this regex since split() does not handle quote protection
2769 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2770 tt = ttfont[1].strip('"')
2772 document.header.insert(i + 1, "\\font_typewriter_osf true")
2774 document.header.insert(i + 1, "\\font_typewriter_osf false")
2777 document.header.insert(i, "\\font_sans_osf false")
2778 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapse the per-family \font_*_osf params back into the single old
# \font_osf param.  NOTE(review): some guard/branch lines are not
# visible in this excerpt.
2781 def revert_osf(document):
2782 " Revert \\font_*_osf params "
2784 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2786 i = find_token(document.header, '\\font_roman_osf', 0)
2788 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2791 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2792 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# The sans and typewriter flags are removed; their values are folded
# into osfval (logical or) below.
2794 i = find_token(document.header, '\\font_sans_osf', 0)
2796 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2799 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2800 del document.header[i]
2802 i = find_token(document.header, '\\font_typewriter_osf', 0)
2804 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2807 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2808 del document.header[i]
# If any family had osf enabled, enable the legacy \font_osf flag.
2811 i = find_token(document.header, '\\font_osf', 0)
2813 document.warning("Malformed LyX document: Missing \\font_osf.")
2815 document.header[i] = "\\font_osf true"
# Revert native TeX font selections carrying \font_*_opts back to
# explicit \usepackage loads in the preamble (biolinum for sans, a list
# of known packages for roman).  NOTE(review): many guard/`return` and
# option-assembly lines are not visible in this excerpt.
2818 def revert_texfontopts(document):
2819 " Revert native TeX font definitions (with extra options) to LaTeX "
2821 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Roman fonts that this reversion knows how to map to a package.
2824 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2826 # First the sf (biolinum only)
2827 regexp = re.compile(r'(\\font_sans_opts)')
2828 x = find_re(document.header, regexp, 0)
2830 # We need to use this regex since split() does not handle quote protection
2831 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2832 opts = sfopts[1].strip('"')
2833 i = find_token(document.header, "\\font_sans", 0)
2835 document.warning("Malformed LyX document: Missing \\font_sans.")
2837 # We need to use this regex since split() does not handle quote protection
2838 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2839 sans = sffont[1].strip('"')
2840 if sans == "biolinum":
2842 sffont[1] = '"default"'
2843 document.header[i] = " ".join(sffont)
# Collect osf and scaling for the biolinum package options.
2845 j = find_token(document.header, "\\font_sans_osf true", 0)
2848 k = find_token(document.header, "\\font_sf_scale", 0)
2850 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2852 sfscale = document.header[k].split()
2855 document.header[k] = " ".join(sfscale)
2858 sf_scale = float(val)
2860 document.warning("Invalid font_sf_scale value: " + val)
2861 preamble = "\\usepackage["
2863 document.header[j] = "\\font_sans_osf false"
2865 if sf_scale != 100.0:
2866 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2868 preamble += "]{biolinum}"
2869 add_to_preamble(document, [preamble])
2870 del document.header[x]
# Then the roman font with extra options.
2872 regexp = re.compile(r'(\\font_roman_opts)')
2873 x = find_re(document.header, regexp, 0)
2877 # We need to use this regex since split() does not handle quote protection
2878 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2879 opts = romanopts[1].strip('"')
2881 i = find_token(document.header, "\\font_roman", 0)
2883 document.warning("Malformed LyX document: Missing \\font_roman.")
2886 # We need to use this regex since split() does not handle quote protection
2887 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2888 roman = romanfont[1].strip('"')
2889 if not roman in rmfonts:
2891 romanfont[1] = '"default"'
2892 document.header[i] = " ".join(romanfont)
# Map LyX font name to the LaTeX package name where they differ.
2894 if roman == "utopia":
2896 elif roman == "palatino":
2897 package = "mathpazo"
2898 elif roman == "times":
2899 package = "mathptmx"
2900 elif roman == "xcharter":
2901 package = "XCharter"
# Per-package osf option names differ; selected by roman font below.
2903 j = find_token(document.header, "\\font_roman_osf true", 0)
2905 if roman == "cochineal":
2906 osf = "proportional,osf,"
2907 elif roman == "utopia":
2909 elif roman == "garamondx":
2911 elif roman == "libertine":
2913 elif roman == "palatino":
2915 elif roman == "xcharter":
2917 document.header[j] = "\\font_roman_osf false"
# Small-caps handling (package dependent).
2918 k = find_token(document.header, "\\font_sc true", 0)
2920 if roman == "utopia":
2922 if roman == "palatino" and osf == "":
2924 document.header[k] = "\\font_sc false"
2925 preamble = "\\usepackage["
2928 preamble += "]{" + package + "}"
2929 add_to_preamble(document, [preamble])
2930 del document.header[x]
def convert_CantarellFont(document):
    """Handle Cantarell font definition to LaTeX (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Cantarell'])
    convert_fonts(document, mapping, "oldstyle")
# Revert the native Cantarell font definition via the shared
# font-mapping machinery.  NOTE(review): the `fontmap` initialization
# line is not visible in this excerpt.
2940 def revert_CantarellFont(document):
2941 " Revert native Cantarell font definition to LaTeX "
2943 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2945 fm = createFontMapping(['Cantarell'])
2946 if revert_fonts(document, fm, fontmap, False, True):
2947 add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Handle Chivo font definition to LaTeX (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Chivo'])
    convert_fonts(document, mapping, "oldstyle")
# Revert the native Chivo font definition via the shared font-mapping
# machinery.  NOTE(review): the `fontmap` initialization line is not
# visible in this excerpt.
2956 def revert_ChivoFont(document):
2957 " Revert native Chivo font definition to LaTeX "
2959 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2961 fm = createFontMapping(['Chivo'])
2962 if revert_fonts(document, fm, fontmap, False, True):
2963 add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Handle Fira font definition to LaTeX (TeX fonts only)."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Fira'])
    convert_fonts(document, mapping, "lf")
# Revert the native Fira font definition via the shared font-mapping
# machinery.  NOTE(review): the `fontmap` initialization line is not
# visible in this excerpt.
2973 def revert_FiraFont(document):
2974 " Revert native Fira font definition to LaTeX "
2976 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2978 fm = createFontMapping(['Fira'])
2979 if revert_fonts(document, fm, fontmap, False, True):
2980 add_preamble_fonts(document, fontmap)
2983 def convert_Semibolds(document):
2984 " Move semibold options to extraopts "
2986 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2988 i = find_token(document.header, "\\font_roman", 0)
2990 document.warning("Malformed LyX document: Missing \\font_roman.")
2992 # We need to use this regex since split() does not handle quote protection
2993 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2994 roman = romanfont[1].strip('"')
2995 if roman == "IBMPlexSerifSemibold":
2996 romanfont[1] = '"IBMPlexSerif"'
2997 document.header[i] = " ".join(romanfont)
2999 if NonTeXFonts == False:
3000 regexp = re.compile(r'(\\font_roman_opts)')
3001 x = find_re(document.header, regexp, 0)
3003 # Sensible place to insert tag
3004 fo = find_token(document.header, "\\font_sf_scale")
3006 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3008 document.header.insert(fo, "\\font_roman_opts \"semibold\"")
3010 # We need to use this regex since split() does not handle quote protection
3011 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3012 document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""
3014 i = find_token(document.header, "\\font_sans", 0)
3016 document.warning("Malformed LyX document: Missing \\font_sans.")
3018 # We need to use this regex since split() does not handle quote protection
3019 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3020 sf = sffont[1].strip('"')
3021 if sf == "IBMPlexSansSemibold":
3022 sffont[1] = '"IBMPlexSans"'
3023 document.header[i] = " ".join(sffont)
3025 if NonTeXFonts == False:
3026 regexp = re.compile(r'(\\font_sans_opts)')
3027 x = find_re(document.header, regexp, 0)
3029 # Sensible place to insert tag
3030 fo = find_token(document.header, "\\font_sf_scale")
3032 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3034 document.header.insert(fo, "\\font_sans_opts \"semibold\"")
3036 # We need to use this regex since split() does not handle quote protection
3037 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3038 document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""
3040 i = find_token(document.header, "\\font_typewriter", 0)
3042 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3044 # We need to use this regex since split() does not handle quote protection
3045 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3046 tt = ttfont[1].strip('"')
3047 if tt == "IBMPlexMonoSemibold":
3048 ttfont[1] = '"IBMPlexMono"'
3049 document.header[i] = " ".join(ttfont)
3051 if NonTeXFonts == False:
3052 regexp = re.compile(r'(\\font_typewriter_opts)')
3053 x = find_re(document.header, regexp, 0)
3055 # Sensible place to insert tag
3056 fo = find_token(document.header, "\\font_tt_scale")
3058 document.warning("Malformed LyX document! Missing \\font_tt_scale")
3060 document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
3062 # We need to use this regex since split() does not handle quote protection
3063 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3064 document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
3067 def convert_NotoRegulars(document):
3068 " Merge diverse noto reagular fonts "
3070 i = find_token(document.header, "\\font_roman", 0)
3072 document.warning("Malformed LyX document: Missing \\font_roman.")
3074 # We need to use this regex since split() does not handle quote protection
3075 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3076 roman = romanfont[1].strip('"')
3077 if roman == "NotoSerif-TLF":
3078 romanfont[1] = '"NotoSerifRegular"'
3079 document.header[i] = " ".join(romanfont)
3081 i = find_token(document.header, "\\font_sans", 0)
3083 document.warning("Malformed LyX document: Missing \\font_sans.")
3085 # We need to use this regex since split() does not handle quote protection
3086 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3087 sf = sffont[1].strip('"')
3088 if sf == "NotoSans-TLF":
3089 sffont[1] = '"NotoSansRegular"'
3090 document.header[i] = " ".join(sffont)
3092 i = find_token(document.header, "\\font_typewriter", 0)
3094 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3096 # We need to use this regex since split() does not handle quote protection
3097 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3098 tt = ttfont[1].strip('"')
3099 if tt == "NotoMono-TLF":
3100 ttfont[1] = '"NotoMonoRegular"'
3101 document.header[i] = " ".join(ttfont)
3104 def convert_CrimsonProFont(document):
3105 " Handle CrimsonPro font definition to LaTeX "
3107 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3108 fm = createFontMapping(['CrimsonPro'])
3109 convert_fonts(document, fm, "lf")
3111 def revert_CrimsonProFont(document):
3112 " Revert native CrimsonPro font definition to LaTeX "
3114 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3116 fm = createFontMapping(['CrimsonPro'])
3117 if revert_fonts(document, fm, fontmap, False, True):
3118 add_preamble_fonts(document, fontmap)
3121 def revert_pagesizes(document):
3122 " Revert new page sizes in memoir and KOMA to options "
3124 if document.textclass != "memoir" and document.textclass[:2] != "scr":
3127 i = find_token(document.header, "\\use_geometry true", 0)
3131 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3133 i = find_token(document.header, "\\papersize", 0)
3135 document.warning("Malformed LyX document! Missing \\papersize header.")
3137 val = get_value(document.header, "\\papersize", i)
3142 document.header[i] = "\\papersize default"
3144 i = find_token(document.header, "\\options", 0)
3146 i = find_token(document.header, "\\textclass", 0)
3148 document.warning("Malformed LyX document! Missing \\textclass header.")
3150 document.header.insert(i, "\\options " + val)
3152 document.header[i] = document.header[i] + "," + val
3155 def convert_pagesizes(document):
3156 " Convert to new page sizes in memoir and KOMA to options "
3158 if document.textclass != "memoir" and document.textclass[:3] != "scr":
3161 i = find_token(document.header, "\\use_geometry true", 0)
3165 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3167 i = find_token(document.header, "\\papersize", 0)
3169 document.warning("Malformed LyX document! Missing \\papersize header.")
3171 val = get_value(document.header, "\\papersize", i)
3176 i = find_token(document.header, "\\use_geometry false", 0)
3178 # Maintain use of geometry
3179 document.header[1] = "\\use_geometry true"
3181 def revert_komafontsizes(document):
3182 " Revert new font sizes in KOMA to options "
3184 if document.textclass[:3] != "scr":
3187 i = find_token(document.header, "\\paperfontsize", 0)
3189 document.warning("Malformed LyX document! Missing \\paperfontsize header.")
3192 defsizes = ["default", "10", "11", "12"]
3194 val = get_value(document.header, "\\paperfontsize", i)
3199 document.header[i] = "\\paperfontsize default"
3201 fsize = "fontsize=" + val
3203 i = find_token(document.header, "\\options", 0)
3205 i = find_token(document.header, "\\textclass", 0)
3207 document.warning("Malformed LyX document! Missing \\textclass header.")
3209 document.header.insert(i, "\\options " + fsize)
3211 document.header[i] = document.header[i] + "," + fsize
3214 def revert_dupqualicites(document):
3215 " Revert qualified citation list commands with duplicate keys to ERT "
3217 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3218 # we need to revert those with multiple uses of the same key.
3222 i = find_token(document.header, "\\cite_engine", 0)
3224 document.warning("Malformed document! Missing \\cite_engine")
3226 engine = get_value(document.header, "\\cite_engine", i)
3228 if not engine in ["biblatex", "biblatex-natbib"]:
3231 # Citation insets that support qualified lists, with their LaTeX code
3235 "citet" : "textcites",
3236 "Citet" : "Textcites",
3237 "citep" : "parencites",
3238 "Citep" : "Parencites",
3239 "Footcite" : "Smartcites",
3240 "footcite" : "smartcites",
3241 "Autocite" : "Autocites",
3242 "autocite" : "autocites",
3247 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3250 j = find_end_of_inset(document.body, i)
3252 document.warning("Can't find end of citation inset at line %d!!" %(i))
3256 k = find_token(document.body, "LatexCommand", i, j)
3258 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3262 cmd = get_value(document.body, "LatexCommand", k)
3263 if not cmd in list(ql_citations.keys()):
3267 pres = find_token(document.body, "pretextlist", i, j)
3268 posts = find_token(document.body, "posttextlist", i, j)
3269 if pres == -1 and posts == -1:
3274 key = get_quoted_value(document.body, "key", i, j)
3276 document.warning("Citation inset at line %d does not have a key!" %(i))
3280 keys = key.split(",")
3281 ukeys = list(set(keys))
3282 if len(keys) == len(ukeys):
3287 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3288 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3290 pre = get_quoted_value(document.body, "before", i, j)
3291 post = get_quoted_value(document.body, "after", i, j)
3292 prelist = pretexts.split("\t")
3295 ppp = pp.split(" ", 1)
3301 if ppp[0] in premap:
3302 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3304 premap[ppp[0]] = val
3305 postlist = posttexts.split("\t")
3308 ppp = pp.split(" ", 1)
3314 if ppp[0] in postmap:
3315 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3317 postmap[ppp[0]] = val
3318 # Replace known new commands with ERT
3319 if "(" in pre or ")" in pre:
3320 pre = "{" + pre + "}"
3321 if "(" in post or ")" in post:
3322 post = "{" + post + "}"
3323 res = "\\" + ql_citations[cmd]
3325 res += "(" + pre + ")"
3327 res += "(" + post + ")"
3331 if premap.get(kk, "") != "":
3332 akeys = premap[kk].split("\t", 1)
3335 res += "[" + akey + "]"
3337 premap[kk] = "\t".join(akeys[1:])
3340 if postmap.get(kk, "") != "":
3341 akeys = postmap[kk].split("\t", 1)
3344 res += "[" + akey + "]"
3346 postmap[kk] = "\t".join(akeys[1:])
3349 elif premap.get(kk, "") != "":
3351 res += "{" + kk + "}"
3352 document.body[i:j+1] = put_cmd_in_ert([res])
3355 def convert_pagesizenames(document):
3356 " Convert LyX page sizes names "
3358 i = find_token(document.header, "\\papersize", 0)
3360 document.warning("Malformed LyX document! Missing \\papersize header.")
3362 oldnames = ["letterpaper", "legalpaper", "executivepaper", \
3363 "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
3364 "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
3365 "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
3366 val = get_value(document.header, "\\papersize", i)
3368 newval = val.replace("paper", "")
3369 document.header[i] = "\\papersize " + newval
3371 def revert_pagesizenames(document):
3372 " Convert LyX page sizes names "
3374 i = find_token(document.header, "\\papersize", 0)
3376 document.warning("Malformed LyX document! Missing \\papersize header.")
3378 newnames = ["letter", "legal", "executive", \
3379 "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
3380 "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
3381 "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
3382 val = get_value(document.header, "\\papersize", i)
3384 newval = val + "paper"
3385 document.header[i] = "\\papersize " + newval
3388 def revert_theendnotes(document):
3389 " Reverts native support of \\theendnotes to TeX-code "
3391 if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
3396 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3399 j = find_end_of_inset(document.body, i)
3401 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3404 document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
3407 def revert_enotez(document):
3408 " Reverts native support of enotez package to TeX-code "
3410 if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
3414 if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
3417 revert_flex_inset(document.body, "Endnote", "\\endnote")
3421 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3424 j = find_end_of_inset(document.body, i)
3426 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3430 document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
3433 add_to_preamble(document, ["\\usepackage{enotez}"])
3434 document.del_module("enotez")
3435 document.del_module("foottoenotez")
3438 def revert_memoir_endnotes(document):
3439 " Reverts native support of memoir endnotes to TeX-code "
3441 if document.textclass != "memoir":
3444 encommand = "\\pagenote"
3445 modules = document.get_module_list()
3446 if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
3447 encommand = "\\endnote"
3449 revert_flex_inset(document.body, "Endnote", encommand)
3453 i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
3456 j = find_end_of_inset(document.body, i)
3458 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3461 if document.body[i] == "\\begin_inset FloatList pagenote*":
3462 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
3464 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
3465 add_to_preamble(document, ["\\makepagenote"])
3468 def revert_totalheight(document):
3469 " Reverts graphics height parameter from totalheight to height "
3471 relative_heights = {
3472 "\\textwidth" : "text%",
3473 "\\columnwidth" : "col%",
3474 "\\paperwidth" : "page%",
3475 "\\linewidth" : "line%",
3476 "\\textheight" : "theight%",
3477 "\\paperheight" : "pheight%",
3478 "\\baselineskip " : "baselineskip%"
3482 i = find_token(document.body, "\\begin_inset Graphics", i)
3485 j = find_end_of_inset(document.body, i)
3487 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3491 rx = re.compile(r'\s*special\s*(\S+)$')
3492 rxx = re.compile(r'(\d*\.*\d+)(\S+)$')
3493 k = find_re(document.body, rx, i, j)
3497 m = rx.match(document.body[k])
3499 special = m.group(1)
3500 mspecial = special.split(',')
3501 for spc in mspecial:
3502 if spc.startswith("height="):
3503 oldheight = spc.split('=')[1]
3504 ms = rxx.search(oldheight)
3506 oldunit = ms.group(2)
3507 if oldunit in list(relative_heights.keys()):
3508 oldval = str(float(ms.group(1)) * 100)
3509 oldunit = relative_heights[oldunit]
3510 oldheight = oldval + oldunit
3511 mspecial.remove(spc)
3513 if len(mspecial) > 0:
3514 special = ",".join(mspecial)
3518 rx = re.compile(r'(\s*height\s*)(\S+)$')
3519 kk = find_re(document.body, rx, i, j)
3521 m = rx.match(document.body[kk])
3527 val = val + "," + special
3528 document.body[k] = "\tspecial " + "totalheight=" + val
3530 document.body.insert(kk, "\tspecial totalheight=" + val)
3532 document.body[kk] = m.group(1) + oldheight
3534 del document.body[kk]
3535 elif oldheight != "":
3537 document.body[k] = "\tspecial " + special
3538 document.body.insert(k, "\theight " + oldheight)
3540 document.body[k] = "\theight " + oldheight
3544 def convert_totalheight(document):
3545 " Converts graphics height parameter from totalheight to height "
3547 relative_heights = {
3548 "text%" : "\\textwidth",
3549 "col%" : "\\columnwidth",
3550 "page%" : "\\paperwidth",
3551 "line%" : "\\linewidth",
3552 "theight%" : "\\textheight",
3553 "pheight%" : "\\paperheight",
3554 "baselineskip%" : "\\baselineskip"
3558 i = find_token(document.body, "\\begin_inset Graphics", i)
3561 j = find_end_of_inset(document.body, i)
3563 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3567 rx = re.compile(r'\s*special\s*(\S+)$')
3568 k = find_re(document.body, rx, i, j)
3572 m = rx.match(document.body[k])
3574 special = m.group(1)
3575 mspecial = special.split(',')
3576 for spc in mspecial:
3577 if spc[:12] == "totalheight=":
3578 newheight = spc.split('=')[1]
3579 mspecial.remove(spc)
3581 if len(mspecial) > 0:
3582 special = ",".join(mspecial)
3586 rx = re.compile(r'(\s*height\s*)(\d+)(\S+)$')
3587 kk = find_re(document.body, rx, i, j)
3589 m = rx.match(document.body[kk])
3594 if unit in list(relative_heights.keys()):
3595 val = str(float(val) / 100)
3596 unit = relative_heights[unit]
3599 val = val + unit + "," + special
3600 document.body[k] = "\tspecial " + "height=" + val
3602 document.body.insert(kk + 1, "\tspecial height=" + val + unit)
3604 document.body[kk] = m.group(1) + newheight
3606 del document.body[kk]
3607 elif newheight != "":
3608 document.body.insert(k, "\theight " + newheight)
3612 def convert_changebars(document):
3613 " Converts the changebars module to native solution "
3615 if not "changebars" in document.get_module_list():
3618 i = find_token(document.header, "\\output_changes", 0)
3620 document.warning("Malformed LyX document! Missing \\output_changes header.")
3621 document.del_module("changebars")
3624 document.header.insert(i, "\\change_bars true")
3625 document.del_module("changebars")
3628 def revert_changebars(document):
3629 " Converts native changebar param to module "
3631 i = find_token(document.header, "\\change_bars", 0)
3633 document.warning("Malformed LyX document! Missing \\change_bars header.")
3636 val = get_value(document.header, "\\change_bars", i)
3639 document.add_module("changebars")
3641 del document.header[i]
3644 def convert_postpone_fragile(document):
3645 " Adds false \\postpone_fragile_content buffer param "
3647 i = find_token(document.header, "\\output_changes", 0)
3649 document.warning("Malformed LyX document! Missing \\output_changes header.")
3651 # Set this to false for old documents (see #2154)
3652 document.header.insert(i, "\\postpone_fragile_content false")
3655 def revert_postpone_fragile(document):
3656 " Remove \\postpone_fragile_content buffer param "
3658 i = find_token(document.header, "\\postpone_fragile_content", 0)
3660 document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
3663 del document.header[i]
3666 def revert_colrow_tracking(document):
3667 " Remove change tag from tabular columns/rows "
3670 i = find_token(document.body, "\\begin_inset Tabular", i+1)
3673 j = find_end_of_inset(document.body, i+1)
3675 document.warning("Malformed LyX document: Could not find end of tabular.")
3677 for k in range(i, j):
3678 m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
3680 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3681 m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
3683 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3686 def convert_counter_maintenance(document):
3687 " Convert \\maintain_unincluded_children buffer param from boolean value tro tristate "
3689 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3691 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3694 val = get_value(document.header, "\\maintain_unincluded_children", i)
3697 document.header[i] = "\\maintain_unincluded_children strict"
3699 document.header[i] = "\\maintain_unincluded_children no"
3702 def revert_counter_maintenance(document):
3703 " Revert \\maintain_unincluded_children buffer param to previous boolean value "
3705 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3707 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3710 val = get_value(document.header, "\\maintain_unincluded_children", i)
3713 document.header[i] = "\\maintain_unincluded_children false"
3715 document.header[i] = "\\maintain_unincluded_children true"
3718 def revert_counter_inset(document):
3719 " Revert counter inset to ERT, where possible"
3721 needed_counters = {}
3723 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3726 j = find_end_of_inset(document.body, i)
3728 document.warning("Can't find end of counter inset at line %d!" % i)
3731 lyx = get_quoted_value(document.body, "lyxonly", i, j)
3733 # there is nothing we can do to affect the LyX counters
3734 document.body[i : j + 1] = []
3737 cnt = get_quoted_value(document.body, "counter", i, j)
3739 document.warning("No counter given for inset at line %d!" % i)
3743 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
3744 document.warning(cmd)
3747 val = get_quoted_value(document.body, "value", i, j)
3749 document.warning("Can't convert counter inset at line %d!" % i)
3751 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3752 elif cmd == "addto":
3753 val = get_quoted_value(document.body, "value", i, j)
3755 document.warning("Can't convert counter inset at line %d!" % i)
3757 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3758 elif cmd == "reset":
3759 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
3761 needed_counters[cnt] = 1
3762 savecnt = "LyXSave" + cnt
3763 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3764 elif cmd == "restore":
3765 needed_counters[cnt] = 1
3766 savecnt = "LyXSave" + cnt
3767 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
3769 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
3772 document.body[i : j + 1] = ert
3777 for cnt in needed_counters:
3778 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3780 add_to_preamble(document, pretext)
3783 def revert_ams_spaces(document):
3784 "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
3786 insets = ["\\medspace{}", "\\thickspace{}"]
3787 for inset in insets:
3789 i = find_token(document.body, "\\begin_inset space " + inset, i)
3792 end = find_end_of_inset(document.body, i)
3793 subst = put_cmd_in_ert(inset)
3794 document.body[i : end + 1] = subst
3798 # load amsmath in the preamble if not already loaded
3799 i = find_token(document.header, "\\use_package amsmath 2", 0)
3801 add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
3805 def convert_parskip(document):
3806 " Move old parskip settings to preamble "
3808 i = find_token(document.header, "\\paragraph_separation skip", 0)
3812 j = find_token(document.header, "\\defskip", 0)
3814 document.warning("Malformed LyX document! Missing \\defskip.")
3817 val = get_value(document.header, "\\defskip", j)
3819 skipval = "\\medskipamount"
3820 if val == "smallskip" or val == "medskip" or val == "bigskip":
3821 skipval = "\\" + val + "amount"
3825 add_to_preamble(document, ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"])
3827 document.header[i] = "\\paragraph_separation indent"
3828 document.header[j] = "\\paragraph_indentation default"
3831 def revert_parskip(document):
3832 " Revert new parskip settings to preamble "
3834 i = find_token(document.header, "\\paragraph_separation skip", 0)
3838 j = find_token(document.header, "\\defskip", 0)
3840 document.warning("Malformed LyX document! Missing \\defskip.")
3843 val = get_value(document.header, "\\defskip", j)
3846 if val == "smallskip" or val == "medskip" or val == "bigskip":
3847 skipval = "[skip=\\" + val + "amount]"
3848 elif val == "fullline":
3849 skipval = "[skip=\\baselineskip]"
3850 elif val != "halfline":
3851 skipval = "[skip={" + val + "}]"
3853 add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])
3855 document.header[i] = "\\paragraph_separation indent"
3856 document.header[j] = "\\paragraph_indentation default"
3859 def revert_line_vspaces(document):
3860 " Revert fulline and halfline vspaces to TeX "
3862 "fullline*" : "\\vspace*{\\baselineskip}",
3863 "fullline" : "\\vspace{\\baselineskip}",
3864 "halfline*" : "\\vspace*{0.5\\baselineskip}",
3865 "halfline" : "\\vspace{0.5\\baselineskip}",
3867 for inset in insets.keys():
3869 i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
3872 end = find_end_of_inset(document.body, i)
3873 subst = put_cmd_in_ert(insets[inset])
3874 document.body[i : end + 1] = subst
3876 def convert_libertinus_rm_fonts(document):
3877 """Handle Libertinus serif fonts definition to LaTeX"""
3879 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3880 fm = createFontMapping(['Libertinus'])
3881 convert_fonts(document, fm)
3883 def revert_libertinus_rm_fonts(document):
3884 """Revert Libertinus serif font definition to LaTeX"""
3886 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3888 fm = createFontMapping(['libertinus'])
3889 if revert_fonts(document, fm, fontmap):
3890 add_preamble_fonts(document, fontmap)
3892 def revert_libertinus_sftt_fonts(document):
3893 " Revert Libertinus sans and tt font definitions to LaTeX "
3895 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
3897 i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
3899 j = find_token(document.header, "\\font_sans_osf true", 0)
3901 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
3902 document.header[j] = "\\font_sans_osf false"
3904 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
3905 document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
3907 sfval = find_token(document.header, "\\font_sf_scale", 0)
3909 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3911 sfscale = document.header[sfval].split()
3914 document.header[sfval] = " ".join(sfscale)
3917 sf_scale = float(val)
3919 document.warning("Invalid font_sf_scale value: " + val)
3920 if sf_scale != "100.0":
3921 add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])
3923 i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
3925 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
3926 document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
3928 ttval = find_token(document.header, "\\font_tt_scale", 0)
3930 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
3932 ttscale = document.header[ttval].split()
3935 document.header[ttval] = " ".join(ttscale)
3938 tt_scale = float(val)
3940 document.warning("Invalid font_tt_scale value: " + val)
3941 if tt_scale != "100.0":
3942 add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
3945 def revert_docbook_table_output(document):
3946 i = find_token(document.header, '\\docbook_table_output')
3948 del document.header[i]
3951 def revert_nopagebreak(document):
3953 i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
3956 end = find_end_of_inset(document.body, i)
3958 document.warning("Malformed LyX document: Could not find end of Newpage inset.")
3960 subst = put_cmd_in_ert("\\nopagebreak{}")
3961 document.body[i : end + 1] = subst
3964 def revert_hrquotes(document):
3965 " Revert Hungarian Quotation marks "
3967 i = find_token(document.header, "\\quotes_style hungarian", 0)
3969 document.header[i] = "\\quotes_style polish"
3973 i = find_token(document.body, "\\begin_inset Quotes h")
3976 if document.body[i] == "\\begin_inset Quotes hld":
3977 document.body[i] = "\\begin_inset Quotes pld"
3978 elif document.body[i] == "\\begin_inset Quotes hrd":
3979 document.body[i] = "\\begin_inset Quotes prd"
3980 elif document.body[i] == "\\begin_inset Quotes hls":
3981 document.body[i] = "\\begin_inset Quotes ald"
3982 elif document.body[i] == "\\begin_inset Quotes hrs":
3983 document.body[i] = "\\begin_inset Quotes ard"
3986 def convert_math_refs(document):
3989 i = find_token(document.body, "\\begin_inset Formula", i)
3992 j = find_end_of_inset(document.body, i)
3994 document.warning("Can't find end of inset at line %d of body!" % i)
3998 document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
4002 def revert_math_refs(document):
4005 i = find_token(document.body, "\\begin_inset Formula", i)
4008 j = find_end_of_inset(document.body, i)
4010 document.warning("Can't find end of inset at line %d of body!" % i)
4014 document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
4015 if "\\labelonly" in document.body[i]:
4016 document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
4020 def convert_branch_colors(document):
4021 " Convert branch colors to semantic values "
4025 i = find_token(document.header, "\\branch", i)
4028 j = find_token(document.header, "\\end_branch", i)
4030 document.warning("Malformed LyX document. Can't find end of branch definition!")
4032 # We only support the standard LyX background for now
4033 k = find_token(document.header, "\\color #faf0e6", i, j)
4035 document.header[k] = "\\color background"
4039 def revert_branch_colors(document):
4040 " Revert semantic branch colors "
4044 i = find_token(document.header, "\\branch", i)
4047 j = find_token(document.header, "\\end_branch", i)
4049 document.warning("Malformed LyX document. Can't find end of branch definition!")
4051 k = find_token(document.header, "\\color", i, j)
4053 bcolor = get_value(document.header, "\\color", k)
4054 if bcolor[1] != "#":
4055 # this will be read as background by LyX 2.3
4056 document.header[k] = "\\color none"
4060 def revert_darkmode_graphics(document):
4061 " Revert darkModeSensitive InsetGraphics param "
4065 i = find_token(document.body, "\\begin_inset Graphics", i)
4068 j = find_end_of_inset(document.body, i)
4070 document.warning("Can't find end of graphics inset at line %d!!" %(i))
4073 k = find_token(document.body, "\tdarkModeSensitive", i, j)
4075 del document.body[k]
4079 def revert_branch_darkcols(document):
4080 " Revert dark branch colors "
4084 i = find_token(document.header, "\\branch", i)
4087 j = find_token(document.header, "\\end_branch", i)
4089 document.warning("Malformed LyX document. Can't find end of branch definition!")
4091 k = find_token(document.header, "\\color", i, j)
4093 m = re.search('\\\\color (\\S+) (\\S+)', document.header[k])
4095 document.header[k] = "\\color " + m.group(1)
4099 def revert_vcolumns2(document):
4100 """Revert varwidth columns with line breaks etc."""
4102 needvarwidth = False
4104 needcellvarwidth = False
4107 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4110 j = find_end_of_inset(document.body, i)
4112 document.warning("Malformed LyX document: Could not find end of tabular.")
4115 # Collect necessary column information
4117 nrows = int(document.body[i+1].split('"')[3])
4118 ncols = int(document.body[i+1].split('"')[5])
4120 for k in range(ncols):
4121 m = find_token(document.body, "<column", m)
4122 width = get_option_value(document.body[m], 'width')
4123 varwidth = get_option_value(document.body[m], 'varwidth')
4124 alignment = get_option_value(document.body[m], 'alignment')
4125 valignment = get_option_value(document.body[m], 'valignment')
4126 special = get_option_value(document.body[m], 'special')
4127 col_info.append([width, varwidth, alignment, valignment, special, m])
4132 for row in range(nrows):
4133 for col in range(ncols):
4134 m = find_token(document.body, "<cell", m)
4135 multicolumn = get_option_value(document.body[m], 'multicolumn') != ""
4136 multirow = get_option_value(document.body[m], 'multirow') != ""
4137 fixedwidth = get_option_value(document.body[m], 'width') != ""
4138 rotate = get_option_value(document.body[m], 'rotate')
4139 cellalign = get_option_value(document.body[m], 'alignment')
4140 cellvalign = get_option_value(document.body[m], 'valignment')
4141 # Check for: linebreaks, multipars, non-standard environments
4143 endcell = find_token(document.body, "</cell>", begcell)
4145 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
4146 vcand = not fixedwidth
4147 elif count_pars_in_inset(document.body, begcell + 2) > 1:
4148 vcand = not fixedwidth
4149 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
4150 vcand = not fixedwidth
4151 colalignment = col_info[col][2]
4152 colvalignment = col_info[col][3]
4154 if rotate == "" and ((colalignment == "left" and colvalignment == "top") or (multicolumn == True and cellalign == "left" and cellvalign == "top")):
4155 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][4] == "":
4157 col_line = col_info[col][5]
4159 vval = "V{\\linewidth}"
4161 document.body[m] = document.body[m][:-1] + " special=\"" + vval + "\">"
4163 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
4166 if multicolumn or multirow:
4167 if cellvalign == "middle":
4169 elif cellvalign == "bottom":
4172 if colvalignment == "middle":
4174 elif colvalignment == "bottom":
4176 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4177 elt = find_token_backwards(document.body, "\\end_layout", endcell)
4178 if flt != -1 and elt != -1:
4180 # we need to reset character layouts if necessary
4181 el = find_token(document.body, '\\emph on', flt, elt)
4183 extralines.append("\\emph default")
4184 el = find_token(document.body, '\\noun on', flt, elt)
4186 extralines.append("\\noun default")
4187 el = find_token(document.body, '\\series', flt, elt)
4189 extralines.append("\\series default")
4190 el = find_token(document.body, '\\family', flt, elt)
4192 extralines.append("\\family default")
4193 el = find_token(document.body, '\\shape', flt, elt)
4195 extralines.append("\\shape default")
4196 el = find_token(document.body, '\\color', flt, elt)
4198 extralines.append("\\color inherit")
4199 el = find_token(document.body, '\\size', flt, elt)
4201 extralines.append("\\size default")
4202 el = find_token(document.body, '\\bar under', flt, elt)
4204 extralines.append("\\bar default")
4205 el = find_token(document.body, '\\uuline on', flt, elt)
4207 extralines.append("\\uuline default")
4208 el = find_token(document.body, '\\uwave on', flt, elt)
4210 extralines.append("\\uwave default")
4211 el = find_token(document.body, '\\strikeout on', flt, elt)
4213 extralines.append("\\strikeout default")
4214 document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + [r"\end_layout"]
4216 for q in range(flt, elt):
4217 if document.body[q] != "" and document.body[q][0] != "\\":
4219 if document.body[q][:5] == "\\lang":
4223 document.body[parlang+1:parlang+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4225 document.body[flt+1:flt+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4226 needcellvarwidth = True
4228 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
4229 # with newlines, and we do not want that)
4231 endcell = find_token(document.body, "</cell>", begcell)
4233 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
4235 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
4239 nle = find_end_of_inset(document.body, nl)
4240 del(document.body[nle:nle+1])
4242 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
4244 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
4245 # Replace parbreaks in multirow with \\endgraf
4246 if multirow == True:
4247 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4250 elt = find_end_of_layout(document.body, flt)
4252 document.warning("Malformed LyX document! Missing layout end.")
4254 endcell = find_token(document.body, "</cell>", begcell)
4255 flt = find_token(document.body, "\\begin_layout", elt, endcell)
4258 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
4264 if needarray == True:
4265 add_to_preamble(document, ["\\usepackage{array}"])
4266 if needcellvarwidth == True:
4267 add_to_preamble(document, ["%% Variable width box for table cells",
4268 "\\newenvironment{cellvarwidth}[1][t]",
4269 " {\\begin{varwidth}[#1]{\\linewidth}}",
4270 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}"])
4271 if needvarwidth == True:
4272 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): this chunk embeds the original file's line numbers and that
# numbering has gaps, i.e. many source lines (loop setup, early-exit and
# "continue" branches) are missing from this extraction.  Code lines are kept
# byte-identical below; only comments are added.
4275 def convert_vcolumns2(document):
4276 """Convert varwidth ERT to native"""
# Walk every Tabular inset in the document body.
4280 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4283 j = find_end_of_inset(document.body, i)
4285 document.warning("Malformed LyX document: Could not find end of tabular.")
# Row/column counts come from the quoted attributes of the line right after
# the inset start (presumably the <features rows=".." columns=".."> line --
# TODO confirm against the full file).
4289 nrows = int(document.body[i+1].split('"')[3])
4290 ncols = int(document.body[i+1].split('"')[5])
4293 for row in range(nrows):
4294 for col in range(ncols):
4295 m = find_token(document.body, "<cell", m)
4296 multirow = get_option_value(document.body[m], 'multirow') != ""
4298 endcell = find_token(document.body, "</cell>", begcell)
4300 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
# Candidate: the cellvarwidth environment was written as ERT, i.e. a
# "\backslash" line directly precedes the begin{cellvarwidth} token and the
# token lives inside an ERT inset.
4302 vcand = document.body[cvw - 1] == "\\backslash" and get_containing_inset(document.body, cvw)[0] == "ERT"
4304 # Remove ERTs with cellvarwidth env
4305 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
4307 if document.body[ecvw - 1] == "\\backslash":
4308 eertins = get_containing_inset(document.body, ecvw)
4309 if eertins and eertins[0] == "ERT":
4310 del document.body[eertins[1] : eertins[2] + 1]
# Re-find the begin token after the deletion above shifted line numbers.
4312 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4313 ertins = get_containing_inset(document.body, cvw)
4314 if ertins and ertins[0] == "ERT":
4315 del(document.body[ertins[1] : ertins[2] + 1])
4317 # Convert ERT newlines (as cellvarwidth detection relies on that)
4319 endcell = find_token(document.body, "</cell>", begcell)
4320 nl = find_token(document.body, "\\backslash", begcell, endcell)
4321 if nl == -1 or document.body[nl + 2] != "\\backslash":
4323 ertins = get_containing_inset(document.body, nl)
4324 if ertins and ertins[0] == "ERT":
# Replace the whole ERT inset with a native Newline inset.
4325 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline newline", "", "\\end_inset"]
4327 # Same for linebreaks
4329 endcell = find_token(document.body, "</cell>", begcell)
4330 nl = find_token(document.body, "linebreak", begcell, endcell)
4331 if nl == -1 or document.body[nl - 1] != "\\backslash":
4333 ertins = get_containing_inset(document.body, nl)
4334 if ertins and ertins[0] == "ERT":
4335 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline linebreak", "", "\\end_inset"]
# In multirow cells, ERT "\endgraf{}" markers become real paragraph breaks.
4338 if multirow == True:
4339 endcell = find_token(document.body, "</cell>", begcell)
4340 nl = find_token(document.body, "endgraf{}", begcell, endcell)
4341 if nl == -1 or document.body[nl - 1] != "\\backslash":
4343 ertins = get_containing_inset(document.body, nl)
4344 if ertins and ertins[0] == "ERT":
4345 document.body[ertins[1] : ertins[2] + 1] = ["\\end_layout", "", "\\begin_layout Plain Layout"]
# NOTE(review): the enclosing "def" line is missing from this extraction (it
# falls in a gap of the embedded numbering); judging by the revert table at
# the bottom of the file this is presumably the body of revert_vcolumns2 --
# confirm against the full file.
# Remove the preamble lines that convert-time support added for
# variable-width table cells: the cellvarwidth environment and the
# varwidth package load.
4351 del_complete_lines(document.preamble,
4352 ['% Added by lyx2lyx',
4353 '%% Variable width box for table cells',
4354 r'\newenvironment{cellvarwidth}[1][t]',
4355 r' {\begin{varwidth}[#1]{\linewidth}}',
4356 r' {\@finalstrut\@arstrutbox\end{varwidth}}'])
4357 del_complete_lines(document.preamble,
4358 ['% Added by lyx2lyx',
4359 r'\usepackage{varwidth}'])
# Local-layout snippet used by the koma_frontispiece conversion/reversion
# below: defines a Frontispiece style copying Titlehead and emitting the
# \frontispiece LaTeX name.
# NOTE(review): the closing lines of this list literal ("End" and the
# closing bracket) fall in a gap of this extraction.
4362 frontispiece_def = [
4363 r'### Inserted by lyx2lyx (frontispiece layout) ###',
4364 r'Style Frontispiece',
4365 r' CopyStyle Titlehead',
4366 r' LatexName frontispiece',
4371 def convert_koma_frontispiece(document):
4372 """Remove local KOMA frontispiece definition"""
# Only KOMA-Script classes (textclass names starting with "scr") carry
# this local layout; others are left alone.
4373 if document.textclass[:3] != "scr":
# If the local layout was present and removed, a module is added instead.
# NOTE(review): the module name "ruby" looks unrelated to frontispiece --
# verify against the full source whether this string is intended.
4376 if document.del_local_layout(frontispiece_def):
4377 document.add_module("ruby")
4380 def revert_koma_frontispiece(document):
4381 """Add local KOMA frontispiece definition"""
# Reversion is only relevant for KOMA-Script classes ("scr..." textclasses).
4382 if document.textclass[:3] != "scr":
# Re-insert the local layout only if the document actually uses a
# Frontispiece paragraph.
4385 if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
4386 document.append_local_layout(frontispiece_def)
4389 def revert_spellchecker_ignore(document):
4390 """Revert document spellchecker dictionary"""
# Older file formats do not know \spellchecker_ignore: delete the header
# line(s).  (The loop/exit lines around this fall in extraction gaps.)
4392 i = find_token(document.header, "\\spellchecker_ignore")
4395 del document.header[i]
4398 def revert_docbook_mathml_prefix(document):
4399 """Revert the DocBook parameter to choose the prefix for the MathML name space"""
# Drop the \docbook_mathml_prefix header line, unknown to older formats.
4401 i = find_token(document.header, "\\docbook_mathml_prefix")
4404 del document.header[i]
4407 def revert_document_metadata(document):
4408 """Revert document metadata"""
# Remove \begin_metadata ... \end_metadata section(s) from the header.
4411 i = find_token(document.header, "\\begin_metadata", i)
4414 j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
4416 # this should not happen
# Delete the whole section including both delimiter lines.
4418 document.header[i : j + 1] = []
4421 def revert_index_macros(document):
4422 " Revert inset index macros "
# Converts the native IndexMacro subinsets (see/seealso/subentry/sortkey)
# and the Index inset's range/pageformat parameters back to the classic
# makeindex "!", "@", "|" syntax wrapped in ERT.
4426 # trailing blank needed here to exclude IndexMacro insets
4427 i = find_token(document.body, '\\begin_inset Index ', i+1)
4430 j = find_end_of_inset(document.body, i)
4432 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4434 pl = find_token(document.body, '\\begin_layout Plain Layout', i, j)
4436 document.warning("Malformed LyX document: Can't find plain layout in index inset at line %d" % i)
4438 # find, store and remove inset params
4439 pr = find_token(document.body, 'range', i, pl)
4440 prval = get_quoted_value(document.body, "range", pr)
4442 if prval == "start":
4444 elif prval == "end":
4446 pf = find_token(document.body, 'pageformat', i, pl)
4447 pageformat = get_quoted_value(document.body, "pageformat", pf)
# Drop the parameter lines from "range" through "pageformat" inclusive.
4448 del document.body[pr:pf+1]
4449 # Now re-find (potentially moved) inset end again, and search for subinsets
4450 j = find_end_of_inset(document.body, i)
4452 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4454 # We search for all possible subentries in turn, store their
4455 # content and delete them
4461 # Two subentries are allowed, thus the duplication
4462 imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
4463 for imacro in imacros:
4464 iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
4467 iime = find_end_of_inset(document.body, iim)
4469 document.warning("Malformed LyX document: Can't find end of index macro inset at line %d" % i)
4471 iimpl = find_token(document.body, '\\begin_layout Plain Layout', iim, iime)
4473 document.warning("Malformed LyX document: Can't find plain layout in index macro inset at line %d" % i)
4475 iimple = find_end_of_layout(document.body, iimpl)
4477 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
4479 icont = document.body[iimpl:iimple]
4480 if imacro == "seealso":
4482 elif imacro == "see":
4484 elif imacro == "subentry":
4485 # subentries might have their own sortkey!
4486 xiim = find_token(document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple)
4488 xiime = find_end_of_inset(document.body, xiim)
4490 document.warning("Malformed LyX document: Can't find end of index macro inset at line %d" % i)
4492 xiimpl = find_token(document.body, '\\begin_layout Plain Layout', xiim, xiime)
4494 document.warning("Malformed LyX document: Can't find plain layout in index macro inset at line %d" % i)
4496 xiimple = find_end_of_layout(document.body, xiimpl)
4498 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
# Content of the nested sortkey subinset ...
4501 xicont = document.body[xiimpl+1:xiimple]
4502 # ... and everything before the sortkey subinset or after it
4503 xxicont = document.body[iimpl+1:xiim] + document.body[xiime+1:iimple]
4504 # construct the latex sequence
4505 icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
# First subentry already stored -> this is the second one.
4506 if len(subentry) > 0:
4507 subentry2 = icont[1:]
4509 subentry = icont[1:]
4510 elif imacro == "sortkey":
4512 # Everything stored. Delete subinset.
4513 del document.body[iim:iime+1]
4514 # Again re-find (potentially moved) index inset end
4515 j = find_end_of_inset(document.body, i)
4517 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4519 # Now insert all stuff, starting from the inset end
4520 pl = find_token(document.body, '\\begin_layout Plain Layout', i, j)
4522 document.warning("Malformed LyX document: Can't find plain layout in index inset at line %d" % i)
4524 ple = find_end_of_layout(document.body, pl)
4526 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
# Emit see / seealso / pageformat as "|..." ERT at the end of the entry.
4529 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
4530 elif len(seealso) > 0:
4531 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
4532 elif pageformat != "default":
4533 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
# Subentries become "!..." ERT; the second one is inserted first so it
# ends up after the first in the final document.
4534 if len(subentry2) > 0:
4535 document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
4536 if len(subentry) > 0:
4537 document.body[ple:ple] = put_cmd_in_ert("!") + subentry
# The sortkey goes to the front, followed by an "@" separator.
4538 if len(sortkey) > 0:
4539 document.body[pl:pl+1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
4542 def revert_starred_refs(document):
4543 " Revert starred refs "
# Older formats do not know the "nolink" parameter of ref insets.  Without
# hyperref the line is simply dropped; with hyperref a starred command
# (\ref*{...} etc.) must be emitted as ERT instead.
4544 i = find_token(document.header, "\\use_hyperref true", 0)
4545 use_hyperref = (i != -1)
4553 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
4557 end = find_end_of_inset(document.body, i)
4559 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
4562 # If we are not using hyperref, then we just need to delete the line
4563 if not use_hyperref:
4564 k = find_token(document.body, "nolink", i, end)
4568 del document.body[k]
4571 # If we are using hyperref, then we'll need to do more.
4575 # so we are in an InsetRef
4578 # If nolink is False, just remove that line
4579 if nolink == False or cmd == "formatted" or cmd == "labelonly":
4580 # document.warning("Skipping " + cmd + " " + ref)
4581 if nolinkline != -1:
4582 del document.body[nolinkline]
4585 # We need to construct a new command and put it in ERT
4586 newcmd = "\\" + cmd + "*{" + ref + "}"
4587 # document.warning(newcmd)
4588 newlines = put_cmd_in_ert(newcmd)
# Replace the whole inset by the ERT and re-sync the scan position.
4589 document.body[start:end+1] = newlines
4590 i += len(newlines) - (end - start) + 1
# Collect the inset parameters (cmd, ref target, nolink flag) from the
# inset body lines.  (The surrounding loop lines fall in extraction gaps.)
4596 l = document.body[i]
4597 if l.startswith("LatexCommand"):
4599 elif l.startswith("reference"):
4601 elif l.startswith("nolink"):
4603 nolink = (tmp == "true")
4608 def convert_starred_refs(document):
4609 " Convert starred refs "
# Add the new 'nolink "false"' parameter to every ref inset so documents
# round-trip cleanly with the newer format.
4612 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
4615 end = find_end_of_inset(document.body, i)
4617 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
4621 document.body.insert(newlineat, "nolink \"false\"")
4625 def revert_familydefault(document):
4626 " Revert \\font_default_family for non-TeX fonts "
# Only applies when non-TeX fonts are in use.
4628 if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
4631 i = find_token(document.header, "\\font_default_family", 0)
4633 document.warning("Malformed LyX document: Can't find \\font_default_family header")
4636 dfamily = get_value(document.header, "\\font_default_family", i)
4637 if dfamily == "default":
# Reset the header to "default" and move the family choice into the
# user preamble via \renewcommand{\familydefault}.
4640 document.header[i] = "\\font_default_family default"
4641 add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
# Conversion chain: each entry is [target_format, [conversion functions]].
# NOTE(review): the "convert = [" opener and many entries fall in gaps of
# this extraction; the entries below are kept verbatim.
4647 supported_versions = ["2.4.0", "2.4"]
4649 [545, [convert_lst_literalparam]],
4654 [550, [convert_fontenc]],
4661 [557, [convert_vcsinfo]],
4662 [558, [removeFrontMatterStyles]],
4665 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
4669 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
4670 [566, [convert_hebrew_parentheses]],
4676 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
4677 [573, [convert_inputencoding_namechange]],
4678 [574, [convert_ruby_module, convert_utf8_japanese]],
4679 [575, [convert_lineno, convert_aaencoding]],
4681 [577, [convert_linggloss]],
4685 [581, [convert_osf]],
4686 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converted due to extra options
4687 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
4689 [585, [convert_pagesizes]],
4691 [587, [convert_pagesizenames]],
4693 [589, [convert_totalheight]],
4694 [590, [convert_changebars]],
4695 [591, [convert_postpone_fragile]],
4697 [593, [convert_counter_maintenance]],
4700 [596, [convert_parskip]],
4701 [597, [convert_libertinus_rm_fonts]],
4705 [601, [convert_math_refs]],
4706 [602, [convert_branch_colors]],
4709 [605, [convert_vcolumns2]],
4710 [606, [convert_koma_frontispiece]],
4716 [612, [convert_starred_refs]],
# Revert chain: mirrors the convert chain in reverse format order, mapping
# each format back down one step.
4720 revert = [[612, [revert_familydefault]],
4721 [611, [revert_starred_refs]],
4723 [609, [revert_index_macros]],
4724 [608, [revert_document_metadata]],
4725 [607, [revert_docbook_mathml_prefix]],
4726 [606, [revert_spellchecker_ignore]],
4727 [605, [revert_koma_frontispiece]],
4728 [604, [revert_vcolumns2]],
4729 [603, [revert_branch_darkcols]],
4730 [602, [revert_darkmode_graphics]],
4731 [601, [revert_branch_colors]],
4733 [599, [revert_math_refs]],
4734 [598, [revert_hrquotes]],
4735 [598, [revert_nopagebreak]],
4736 [597, [revert_docbook_table_output]],
4737 [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
4738 [595, [revert_parskip,revert_line_vspaces]],
4739 [594, [revert_ams_spaces]],
4740 [593, [revert_counter_inset]],
4741 [592, [revert_counter_maintenance]],
4742 [591, [revert_colrow_tracking]],
4743 [590, [revert_postpone_fragile]],
4744 [589, [revert_changebars]],
4745 [588, [revert_totalheight]],
4746 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
4747 [586, [revert_pagesizenames]],
4748 [585, [revert_dupqualicites]],
4749 [584, [revert_pagesizes,revert_komafontsizes]],
4750 [583, [revert_vcsinfo_rev_abbrev]],
4751 [582, [revert_ChivoFont,revert_CrimsonProFont]],
4752 [581, [revert_CantarellFont,revert_FiraFont]],
4753 [580, [revert_texfontopts,revert_osf]],
4754 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
4755 [578, [revert_babelfont]],
4756 [577, [revert_drs]],
4757 [576, [revert_linggloss, revert_subexarg]],
4758 [575, [revert_new_languages]],
4759 [574, [revert_lineno, revert_aaencoding]],
4760 [573, [revert_ruby_module, revert_utf8_japanese]],
4761 [572, [revert_inputencoding_namechange]],
4762 [571, [revert_notoFonts]],
4763 [570, [revert_cmidruletrimming]],
4764 [569, [revert_bibfileencodings]],
4765 [568, [revert_tablestyle]],
4766 [567, [revert_soul]],
4767 [566, [revert_malayalam]],
4768 [565, [revert_hebrew_parentheses]],
4769 [564, [revert_AdobeFonts]],
4770 [563, [revert_lformatinfo]],
4771 [562, [revert_listpargs]],
4772 [561, [revert_l7ninfo]],
4773 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
4774 [559, [revert_timeinfo, revert_namenoextinfo]],
4775 [558, [revert_dateinfo]],
4776 [557, [addFrontMatterStyles]],
4777 [556, [revert_vcsinfo]],
4778 [555, [revert_bibencoding]],
4779 [554, [revert_vcolumns]],
4780 [553, [revert_stretchcolumn]],
4781 [552, [revert_tuftecite]],
4782 [551, [revert_floatpclass, revert_floatalignment]],
4783 [550, [revert_nospellcheck]],
4784 [549, [revert_fontenc]],
4785 [548, []], # dummy format change
4786 [547, [revert_lscape]],
4787 [546, [revert_xcharter]],
4788 [545, [revert_paratype]],
4789 [544, [revert_lst_literalparam]]
4793 if __name__ == "__main__":