1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
30 find_end_of_inset, find_end_of_layout, find_token, find_token_backwards,
31 find_token_exact, find_re, get_bool_value, get_containing_inset,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # is_in_inset, set_bool_value
37 # find_tokens, check_token
39 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
40 revert_language, revert_flex_inset, str2bool)
41 # revert_font_attrs, latex_length
42 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
43 # revert_flex_inset, hex2ratio
45 ####################################################################
46 # Private helper functions
48 def add_preamble_fonts(document, fontmap):
49 """Add collected font-packages with their option to user-preamble"""
52 if len(fontmap[pkg]) > 0:
53 xoption = "[" + ",".join(fontmap[pkg]) + "]"
56 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
57 add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Return a unique map key built from a package name and its option list.

    Options are joined with '-' so that differing option sets of the same
    package yield distinct keys (e.g. "plex-sans:scale").
    """
    return pkg + ':' + "-".join(options)
66 self.fontname = None # key into font2pkgmap
67 self.fonttype = None # roman,sans,typewriter,math
68 self.scaletype = None # None,sf,tt
69 self.scaleopt = None # None, 'scaled', 'scale'
73 self.pkgkey = None # key into pkg2fontmap
74 self.osfopt = None # None, string
75 self.osfdef = "false" # "false" or "true"
78 self.pkgkey = createkey(self.package, self.options)
82 self.font2pkgmap = dict()
83 self.pkg2fontmap = dict()
84 self.pkginmap = dict() # defines, if a map for package exists
86 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
87 """Expand fontinfo mapping"""
89 # fontlist: list of fontnames, each element
90 # may contain a ','-separated list of needed options
91 # like e.g. 'IBMPlexSansCondensed,condensed'
92 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
93 # scale_type: one of None, 'sf', 'tt'
94 # pkg: package defining the font. Defaults to fontname if None
95 # scaleopt: one of None, 'scale', 'scaled', or some other string
96 # to be used in scale option (e.g. scaled=0.7)
97 # osfopt: None or some other string to be used in osf option
98 # osfdef: "true" if osf is default
101 fe.fonttype = font_type
102 fe.scaletype = scale_type
105 fe.fontname = font_name
107 fe.scaleopt = scaleopt
111 fe.package = font_name
115 self.font2pkgmap[font_name] = fe
116 if fe.pkgkey in self.pkg2fontmap:
117 # Repeated the same entry? Check content
118 if self.pkg2fontmap[fe.pkgkey] != font_name:
119 document.error("Something is wrong in pkgname+options <-> fontname mapping")
120 self.pkg2fontmap[fe.pkgkey] = font_name
121 self.pkginmap[fe.package] = 1
123 def getfontname(self, pkg, options):
125 pkgkey = createkey(pkg, options)
126 if not pkgkey in self.pkg2fontmap:
128 fontname = self.pkg2fontmap[pkgkey]
129 if not fontname in self.font2pkgmap:
130 document.error("Something is wrong in pkgname+options <-> fontname mapping")
132 if pkgkey == self.font2pkgmap[fontname].pkgkey:
136 def createFontMapping(fontlist):
137 # Create info for known fonts for the use in
138 # convert_latexFonts() and
139 # revert_latexFonts()
141 # * Would be more handy to parse latexFonts file,
142 # but the path to this file is unknown
143 # * For now, add DejaVu and IBMPlex only.
144 # * Expand, if desired
146 for font in fontlist:
148 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
149 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
150 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
152 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
153 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
154 'IBMPlexSerifSemibold,semibold'],
155 "roman", None, "plex-serif")
156 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
157 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
158 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
159 "sans", "sf", "plex-sans", "scale")
160 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
161 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
162 'IBMPlexMonoSemibold,semibold'],
163 "typewriter", "tt", "plex-mono", "scale")
164 elif font == 'Adobe':
165 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
166 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
167 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
169 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
170 'NotoSerifThin,thin', 'NotoSerifLight,light',
171 'NotoSerifExtralight,extralight'],
172 "roman", None, "noto-serif", None, "osf")
173 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
174 'NotoSansThin,thin', 'NotoSansLight,light',
175 'NotoSansExtralight,extralight'],
176 "sans", "sf", "noto-sans", "scaled")
177 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
178 elif font == 'Cantarell':
179 fm.expandFontMapping(['cantarell,defaultsans'],
180 "sans", "sf", "cantarell", "scaled", "oldstyle")
181 elif font == 'Chivo':
182 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
183 'Chivo,regular', 'ChivoMedium,medium'],
184 "sans", "sf", "Chivo", "scale", "oldstyle")
185 elif font == 'CrimsonPro':
186 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
187 'CrimsonProMedium,medium'],
188 "roman", None, "CrimsonPro", None, "lf", "true")
190 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
191 'FiraSansThin,thin', 'FiraSansLight,light',
192 'FiraSansExtralight,extralight',
193 'FiraSansUltralight,ultralight'],
194 "sans", "sf", "FiraSans", "scaled", "lf", "true")
195 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
196 elif font == 'libertinus':
197 fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
198 fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
201 def convert_fonts(document, fm, osfoption = "osf"):
202 """Handle font definition (LaTeX preamble -> native)"""
203 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
204 rscaleopt = re.compile(r'^scaled?=(.*)')
206 # Check whether we go beyond font option feature introduction
207 haveFontOpts = document.end_format > 580
211 i = find_re(document.preamble, rpkg, i+1)
214 mo = rpkg.search(document.preamble[i])
215 if mo == None or mo.group(2) == None:
218 options = mo.group(2).replace(' ', '').split(",")
223 while o < len(options):
224 if options[o] == osfoption:
228 mo = rscaleopt.search(options[o])
236 if not pkg in fm.pkginmap:
241 # Try with name-option combination first
242 # (only one default option supported currently)
244 while o < len(options):
246 fn = fm.getfontname(pkg, [opt])
253 fn = fm.getfontname(pkg, [])
255 fn = fm.getfontname(pkg, options)
258 del document.preamble[i]
259 fontinfo = fm.font2pkgmap[fn]
260 if fontinfo.scaletype == None:
263 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
264 fontinfo.scaleval = oscale
265 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
266 if fontinfo.osfopt == None:
267 options.extend(osfoption)
269 osf = find_token(document.header, "\\font_osf false")
270 osftag = "\\font_osf"
271 if osf == -1 and fontinfo.fonttype != "math":
272 # Try with newer format
273 osftag = "\\font_" + fontinfo.fonttype + "_osf"
274 osf = find_token(document.header, osftag + " false")
276 document.header[osf] = osftag + " true"
277 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
278 del document.preamble[i-1]
280 if fontscale != None:
281 j = find_token(document.header, fontscale, 0)
283 val = get_value(document.header, fontscale, j)
287 scale = "%03d" % int(float(oscale) * 100)
288 document.header[j] = fontscale + " " + scale + " " + vals[1]
289 ft = "\\font_" + fontinfo.fonttype
290 j = find_token(document.header, ft, 0)
292 val = get_value(document.header, ft, j)
293 words = val.split() # ! splits also values like '"DejaVu Sans"'
294 words[0] = '"' + fn + '"'
295 document.header[j] = ft + ' ' + ' '.join(words)
296 if haveFontOpts and fontinfo.fonttype != "math":
297 fotag = "\\font_" + fontinfo.fonttype + "_opts"
298 fo = find_token(document.header, fotag)
300 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
302 # Sensible place to insert tag
303 fo = find_token(document.header, "\\font_sf_scale")
305 document.warning("Malformed LyX document! Missing \\font_sf_scale")
307 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
310 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
311 """Revert native font definition to LaTeX"""
312 # fontlist := list of fonts created from the same package
313 # Empty package means that the font-name is the same as the package-name
314 # fontmap (key = package, val += found options) will be filled
315 # and used later in add_preamble_fonts() to be added to user-preamble
317 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
318 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
320 while i < len(document.header):
321 i = find_re(document.header, rfontscale, i+1)
324 mo = rfontscale.search(document.header[i])
327 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
328 val = get_value(document.header, ft, i)
329 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
330 font = words[0].strip('"') # TeX font name has no whitespace
331 if not font in fm.font2pkgmap:
333 fontinfo = fm.font2pkgmap[font]
334 val = fontinfo.package
335 if not val in fontmap:
338 if OnlyWithXOpts or WithXOpts:
339 if ft == "\\font_math":
341 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
342 if ft == "\\font_sans":
343 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
344 elif ft == "\\font_typewriter":
345 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
346 x = find_re(document.header, regexp, 0)
347 if x == -1 and OnlyWithXOpts:
351 # We need to use this regex since split() does not handle quote protection
352 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
353 opts = xopts[1].strip('"').split(",")
354 fontmap[val].extend(opts)
355 del document.header[x]
356 words[0] = '"default"'
357 document.header[i] = ft + ' ' + ' '.join(words)
358 if fontinfo.scaleopt != None:
359 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
360 mo = rscales.search(xval)
365 # set correct scale option
366 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
367 if fontinfo.osfopt != None:
369 if fontinfo.osfdef == "true":
371 osf = find_token(document.header, "\\font_osf " + oldval)
372 if osf == -1 and ft != "\\font_math":
373 # Try with newer format
374 osftag = "\\font_roman_osf " + oldval
375 if ft == "\\font_sans":
376 osftag = "\\font_sans_osf " + oldval
377 elif ft == "\\font_typewriter":
378 osftag = "\\font_typewriter_osf " + oldval
379 osf = find_token(document.header, osftag)
381 fontmap[val].extend([fontinfo.osfopt])
382 if len(fontinfo.options) > 0:
383 fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    "auto" becomes "auto-legacy" and "default" becomes "auto-legacy-plain"
    (the more explicit names used by the 2.4 file format).
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): this early-return guard was lost in the mangled paste
    # and has been reconstructed — confirm against the original file.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back to their pre-2.4 names.

    Inverse of convert_inputencoding_namechange: "auto-legacy-plain"
    becomes "default" and "auto-legacy" becomes "auto".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): this early-return guard was lost in the mangled paste
    # and has been reconstructed — confirm against the original file.
    if i == -1:
        return
    # Replace the longer name first so "auto-legacy" is not clobbered
    # by a partial match.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_noto_fonts_impl(document):
    """Handle Noto fonts definition to LaTeX."""
    # Only documents using TeX fonts carry preamble font packages worth
    # converting; non-TeX (system) fonts are left untouched.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Noto'])
        convert_fonts(document, fm)


def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX (public entry point)."""
    convert_noto_fonts_impl(document)
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): this initializer was lost in the mangled paste and
        # has been reconstructed — fontmap (package -> collected options)
        # is filled by revert_fonts() and consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX."""
    # Only documents using TeX fonts carry preamble font packages worth
    # converting; non-TeX (system) fonts are left untouched.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['DejaVu', 'IBM'])
        convert_fonts(document, fm)
def revert_latexFonts(document):
    """Revert native DejaVu and IBMPlex font definitions to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): this initializer was lost in the mangled paste and
        # has been reconstructed — fontmap (package -> collected options)
        # is filled by revert_fonts() and consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX."""
    # Only documents using TeX fonts carry preamble font packages worth
    # converting; non-TeX (system) fonts are left untouched.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Adobe'])
        convert_fonts(document, fm)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): this initializer was lost in the mangled paste and
        # has been reconstructed — fontmap (package -> collected options)
        # is filled by revert_fonts() and consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
456 def removeFrontMatterStyles(document):
457 """Remove styles Begin/EndFrontmatter"""
459 layouts = ['BeginFrontmatter', 'EndFrontmatter']
460 tokenend = len('\\begin_layout ')
463 i = find_token_exact(document.body, '\\begin_layout ', i+1)
466 layout = document.body[i][tokenend:].strip()
467 if layout not in layouts:
469 j = find_end_of_layout(document.body, i)
471 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
473 while document.body[j+1].strip() == '':
475 document.body[i:j+1] = []
477 def addFrontMatterStyles(document):
478 """Use styles Begin/EndFrontmatter for elsarticle"""
480 if document.textclass != "elsarticle":
483 def insertFrontmatter(prefix, line):
485 while above > 0 and document.body[above-1].strip() == '':
488 while document.body[below].strip() == '':
490 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
491 '\\begin_inset Note Note',
493 '\\begin_layout Plain Layout',
496 '\\end_inset', '', '',
499 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
500 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
501 tokenend = len('\\begin_layout ')
505 i = find_token_exact(document.body, '\\begin_layout ', i+1)
508 layout = document.body[i][tokenend:].strip()
509 if layout not in layouts:
511 k = find_end_of_layout(document.body, i)
513 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
520 insertFrontmatter('End', k+1)
521 insertFrontmatter('Begin', first)
524 def convert_lst_literalparam(document):
525 """Add param literal to include inset"""
529 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
532 j = find_end_of_inset(document.body, i)
534 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
536 while i < j and document.body[i].strip() != '':
538 document.body.insert(i, 'literal "true"')
541 def revert_lst_literalparam(document):
542 """Remove param literal from include inset"""
546 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
549 j = find_end_of_inset(document.body, i)
551 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
553 del_token(document.body, 'literal', i, j)
556 def revert_paratype(document):
557 """Revert ParaType font definitions to LaTeX"""
559 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
561 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
562 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
563 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
564 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
567 sfval = find_token(document.header, "\\font_sf_scale", 0)
569 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
571 sfscale = document.header[sfval].split()
574 document.header[sfval] = " ".join(sfscale)
577 sf_scale = float(val)
579 document.warning("Invalid font_sf_scale value: " + val)
582 if sf_scale != "100.0":
583 sfoption = "scaled=" + str(sf_scale / 100.0)
584 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
585 ttval = get_value(document.header, "\\font_tt_scale", 0)
590 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
591 if i1 != -1 and i2 != -1 and i3!= -1:
592 add_to_preamble(document, ["\\usepackage{paratype}"])
595 add_to_preamble(document, ["\\usepackage{PTSerif}"])
596 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
599 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
601 add_to_preamble(document, ["\\usepackage{PTSans}"])
602 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
605 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
607 add_to_preamble(document, ["\\usepackage{PTMono}"])
608 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
611 def revert_xcharter(document):
612 """Revert XCharter font definitions to LaTeX"""
614 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
618 # replace unsupported font setting
619 document.header[i] = document.header[i].replace("xcharter", "default")
620 # no need for preamble code with system fonts
621 if get_bool_value(document.header, "\\use_non_tex_fonts"):
624 # transfer old style figures setting to package options
625 j = find_token(document.header, "\\font_osf true")
628 document.header[j] = "\\font_osf false"
632 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
635 def revert_lscape(document):
636 """Reverts the landscape environment (Landscape module) to TeX-code"""
638 if not "landscape" in document.get_module_list():
643 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
646 j = find_end_of_inset(document.body, i)
648 document.warning("Malformed LyX document: Can't find end of Landscape inset")
651 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
652 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
653 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
654 add_to_preamble(document, ["\\usepackage{afterpage}"])
656 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
657 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
659 add_to_preamble(document, ["\\usepackage{pdflscape}"])
660 document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # NOTE(review): this early-return guard was lost in the mangled paste
    # and has been reconstructed — confirm against the original file.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # NOTE(review): this early-return guard was lost in the mangled paste
    # and has been reconstructed — confirm against the original file.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
683 def revert_nospellcheck(document):
684 """Remove nospellcheck font info param"""
688 i = find_token(document.body, '\\nospellcheck', i)
694 def revert_floatpclass(document):
695 """Remove float placement params 'document' and 'class'"""
697 del_token(document.header, "\\float_placement class")
701 i = find_token(document.body, '\\begin_inset Float', i + 1)
704 j = find_end_of_inset(document.body, i)
705 k = find_token(document.body, 'placement class', i, j)
707 k = find_token(document.body, 'placement document', i, j)
714 def revert_floatalignment(document):
715 """Remove float alignment params"""
717 galignment = get_value(document.header, "\\float_alignment", delete=True)
721 i = find_token(document.body, '\\begin_inset Float', i + 1)
724 j = find_end_of_inset(document.body, i)
726 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
728 k = find_token(document.body, 'alignment', i, j)
732 alignment = get_value(document.body, "alignment", k)
733 if alignment == "document":
734 alignment = galignment
736 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
738 document.warning("Can't find float layout!")
741 if alignment == "left":
742 alcmd = put_cmd_in_ert("\\raggedright{}")
743 elif alignment == "center":
744 alcmd = put_cmd_in_ert("\\centering{}")
745 elif alignment == "right":
746 alcmd = put_cmd_in_ert("\\raggedleft{}")
748 document.body[l+1:l+1] = alcmd
749 # There might be subfloats, so we do not want to move past
750 # the end of the inset.
753 def revert_tuftecite(document):
754 """Revert \cite commands in tufte classes"""
756 tufte = ["tufte-book", "tufte-handout"]
757 if document.textclass not in tufte:
762 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
765 j = find_end_of_inset(document.body, i)
767 document.warning("Can't find end of citation inset at line %d!!" %(i))
769 k = find_token(document.body, "LatexCommand", i, j)
771 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
774 cmd = get_value(document.body, "LatexCommand", k)
778 pre = get_quoted_value(document.body, "before", i, j)
779 post = get_quoted_value(document.body, "after", i, j)
780 key = get_quoted_value(document.body, "key", i, j)
782 document.warning("Citation inset at line %d does not have a key!" %(i))
784 # Replace command with ERT
787 res += "[" + pre + "]"
789 res += "[" + post + "]"
792 res += "{" + key + "}"
793 document.body[i:j+1] = put_cmd_in_ert([res])
798 def revert_stretchcolumn(document):
799 """We remove the column varwidth flags or everything else will become a mess."""
802 i = find_token(document.body, "\\begin_inset Tabular", i+1)
805 j = find_end_of_inset(document.body, i+1)
807 document.warning("Malformed LyX document: Could not find end of tabular.")
809 for k in range(i, j):
810 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
811 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
812 document.body[k] = document.body[k].replace(' varwidth="true"', '')
815 def revert_vcolumns(document):
816 """Revert standard columns with line breaks etc."""
822 i = find_token(document.body, "\\begin_inset Tabular", i+1)
825 j = find_end_of_inset(document.body, i)
827 document.warning("Malformed LyX document: Could not find end of tabular.")
830 # Collect necessary column information
832 nrows = int(document.body[i+1].split('"')[3])
833 ncols = int(document.body[i+1].split('"')[5])
835 for k in range(ncols):
836 m = find_token(document.body, "<column", m)
837 width = get_option_value(document.body[m], 'width')
838 varwidth = get_option_value(document.body[m], 'varwidth')
839 alignment = get_option_value(document.body[m], 'alignment')
840 special = get_option_value(document.body[m], 'special')
841 col_info.append([width, varwidth, alignment, special, m])
846 for row in range(nrows):
847 for col in range(ncols):
848 m = find_token(document.body, "<cell", m)
849 multicolumn = get_option_value(document.body[m], 'multicolumn')
850 multirow = get_option_value(document.body[m], 'multirow')
851 width = get_option_value(document.body[m], 'width')
852 rotate = get_option_value(document.body[m], 'rotate')
853 # Check for: linebreaks, multipars, non-standard environments
855 endcell = find_token(document.body, "</cell>", begcell)
857 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
859 elif count_pars_in_inset(document.body, begcell + 2) > 1:
861 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
863 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
864 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
866 alignment = col_info[col][2]
867 col_line = col_info[col][4]
869 if alignment == "center":
870 vval = ">{\\centering}"
871 elif alignment == "left":
872 vval = ">{\\raggedright}"
873 elif alignment == "right":
874 vval = ">{\\raggedleft}"
877 vval += "V{\\linewidth}"
879 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
880 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
881 # with newlines, and we do not want that)
883 endcell = find_token(document.body, "</cell>", begcell)
885 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
887 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
891 nle = find_end_of_inset(document.body, nl)
892 del(document.body[nle:nle+1])
894 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
896 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
902 if needarray == True:
903 add_to_preamble(document, ["\\usepackage{array}"])
904 if needvarwidth == True:
905 add_to_preamble(document, ["\\usepackage{varwidth}"])
908 def revert_bibencoding(document):
909 """Revert bibliography encoding"""
913 i = find_token(document.header, "\\cite_engine", 0)
915 document.warning("Malformed document! Missing \\cite_engine")
917 engine = get_value(document.header, "\\cite_engine", i)
921 if engine in ["biblatex", "biblatex-natbib"]:
924 # Map lyx to latex encoding names
928 "armscii8" : "armscii8",
929 "iso8859-1" : "latin1",
930 "iso8859-2" : "latin2",
931 "iso8859-3" : "latin3",
932 "iso8859-4" : "latin4",
933 "iso8859-5" : "iso88595",
934 "iso8859-6" : "8859-6",
935 "iso8859-7" : "iso-8859-7",
936 "iso8859-8" : "8859-8",
937 "iso8859-9" : "latin5",
938 "iso8859-13" : "latin7",
939 "iso8859-15" : "latin9",
940 "iso8859-16" : "latin10",
941 "applemac" : "applemac",
943 "cp437de" : "cp437de",
960 "utf8-platex" : "utf8",
967 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
970 j = find_end_of_inset(document.body, i)
972 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
974 encoding = get_quoted_value(document.body, "encoding", i, j)
977 # remove encoding line
978 k = find_token(document.body, "encoding", i, j)
981 if encoding == "default":
983 # Re-find inset end line
984 j = find_end_of_inset(document.body, i)
987 h = find_token(document.header, "\\biblio_options", 0)
989 biblio_options = get_value(document.header, "\\biblio_options", h)
990 if not "bibencoding" in biblio_options:
991 document.header[h] += ",bibencoding=%s" % encodings[encoding]
993 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
995 # this should not happen
996 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
998 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
1000 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
1001 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
1007 def convert_vcsinfo(document):
1008 """Separate vcs Info inset from buffer Info inset."""
1011 "vcs-revision" : "revision",
1012 "vcs-tree-revision" : "tree-revision",
1013 "vcs-author" : "author",
1014 "vcs-time" : "time",
1019 i = find_token(document.body, "\\begin_inset Info", i+1)
1022 j = find_end_of_inset(document.body, i+1)
1024 document.warning("Malformed LyX document: Could not find end of Info inset.")
1026 tp = find_token(document.body, 'type', i, j)
1027 tpv = get_quoted_value(document.body, "type", tp)
1030 arg = find_token(document.body, 'arg', i, j)
1031 argv = get_quoted_value(document.body, "arg", arg)
1032 if argv not in list(types.keys()):
1034 document.body[tp] = "type \"vcs\""
1035 document.body[arg] = "arg \"" + types[argv] + "\""
1038 def revert_vcsinfo(document):
1039 """Merge vcs Info inset to buffer Info inset."""
1041 args = ["revision", "tree-revision", "author", "time", "date" ]
1044 i = find_token(document.body, "\\begin_inset Info", i+1)
1047 j = find_end_of_inset(document.body, i+1)
1049 document.warning("Malformed LyX document: Could not find end of Info inset.")
1051 tp = find_token(document.body, 'type', i, j)
1052 tpv = get_quoted_value(document.body, "type", tp)
1055 arg = find_token(document.body, 'arg', i, j)
1056 argv = get_quoted_value(document.body, "arg", arg)
1057 if argv not in args:
1058 document.warning("Malformed Info inset. Invalid vcs arg.")
1060 document.body[tp] = "type \"buffer\""
1061 document.body[arg] = "arg \"vcs-" + argv + "\""
1063 def revert_vcsinfo_rev_abbrev(document):
1064 " Convert abbreviated revisions to regular revisions. "
1068 i = find_token(document.body, "\\begin_inset Info", i+1)
1071 j = find_end_of_inset(document.body, i+1)
1073 document.warning("Malformed LyX document: Could not find end of Info inset.")
1075 tp = find_token(document.body, 'type', i, j)
1076 tpv = get_quoted_value(document.body, "type", tp)
1079 arg = find_token(document.body, 'arg', i, j)
1080 argv = get_quoted_value(document.body, "arg", arg)
1081 if( argv == "revision-abbrev" ):
1082 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Each Info inset of type date/fixdate/moddate is replaced by the
    formatted date string it would display, using the main document
    language's strftime patterns from `dateformats`.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        }

    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # Robustness: fall back to English formats rather than raising
        # a KeyError for a language missing from the table.
        document.warning("No date formats for language %s; using English." % lang)
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args look like "<format>@<iso date>".
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # Fix: was dte.isodate(), which does not exist on datetime.date
            # (AttributeError); the correct method is isoformat().
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate a Qt date format string to strftime syntax.
            # NOTE(review): "MMMM" (long month name in Qt) maps to "%b"
            # (abbreviated) here — looks like it should be "%B"; kept
            # as-is pending confirmation.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # NOTE(review): this also consumes the character preceding a
            # lone 'd'; kept for behavior compatibility.
            fmt = re.sub(r'[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Each Info inset of type time/fixtime/modtime is replaced by the
    formatted time string it would display, using the main document
    language's strftime patterns from `timeformats`.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # Robustness: fall back to English formats rather than raising
        # a KeyError for a language missing from the table.
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>".
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate a Qt time format string to strftime syntax.
            # Fix: the original chained str.replace calls re-matched their
            # own output ("HH" -> "%H", then "H" -> "%H" turned it into
            # "%%H").  Single-pass re.sub never rescans replacements.
            fmt = re.sub("HH?", "%H", argv)
            fmt = re.sub("hh?", "%I", fmt)
            fmt = re.sub("mm?", "%M", fmt)
            fmt = re.sub("ss?", "%S", fmt)
            fmt = re.sub("zzz|z", "%f", fmt)
            fmt = fmt.replace("t", "%Z")
            fmt = re.sub("AP|ap|A|a", "%p", fmt)
            fmt = fmt.replace("'", "")
            # Fix: was dte.strftime(fmt) — `dte` is undefined in this
            # function (NameError); the time object is `tme`.
            result = tme.strftime(fmt)
        # Fix: assigning a bare string to a list slice splices it in
        # character by character; wrap in a list as revert_dateinfo does.
        document.body[i : j+1] = [result]
# Rewrites buffer Info insets: `arg "name-noext"` becomes `arg "name"`.
# (This listing elides interior control-flow lines — the scan loop and
# -1 guards fall between the numbered statements.)
1433 def revert_namenoextinfo(document):
1434 """Merge buffer Info inset type name-noext to name."""
# Locate the next Info inset and its end; warn on malformed documents.
1438 i = find_token(document.body, "\\begin_inset Info", i+1)
1441 j = find_end_of_inset(document.body, i+1)
1443 document.warning("Malformed LyX document: Could not find end of Info inset.")
# Read the inset's type and argument values.
1445 tp = find_token(document.body, 'type', i, j)
1446 tpv = get_quoted_value(document.body, "type", tp)
1449 arg = find_token(document.body, 'arg', i, j)
1450 argv = get_quoted_value(document.body, "arg", arg)
1451 if argv != "name-noext":
# Replace the whole `arg` line in place.
1453 document.body[arg] = "arg \"name\""
# Replaces l7n (localization) Info insets with their plain-text label.
# (This listing elides interior control-flow lines — the scan loop and
# -1 guards fall between the numbered statements.)
1456 def revert_l7ninfo(document):
1457 """Revert l7n Info inset to text."""
1461 i = find_token(document.body, "\\begin_inset Info", i+1)
1464 j = find_end_of_inset(document.body, i+1)
1466 document.warning("Malformed LyX document: Could not find end of Info inset.")
1468 tp = find_token(document.body, 'type', i, j)
1469 tpv = get_quoted_value(document.body, "type", tp)
1472 arg = find_token(document.body, 'arg', i, j)
1473 argv = get_quoted_value(document.body, "arg", arg)
1474 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
1475 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning a bare string to a list slice splices it in
# character by character — presumably this should be [argv]; confirm
# against the upstream file.
1476 document.body[i : j+1] = argv
# Converts "Argument listpreamble:" insets back to a brace-wrapped ERT
# inset placed at the start of the containing paragraph.
# (This listing elides interior control-flow lines; in particular the
# line defining `parbeg` — presumably parent[3], the paragraph start
# from get_containing_layout — is missing.  Confirm upstream.)
1479 def revert_listpargs(document):
1480 """Reverts listpreamble arguments to TeX-code"""
1483 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1486 j = find_end_of_inset(document.body, i)
1487 # Find containing paragraph layout
1488 parent = get_containing_layout(document.body, i)
1490 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's Plain Layout content.
1493 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1494 endPlain = find_end_of_layout(document.body, beginPlain)
1495 content = document.body[beginPlain + 1 : endPlain]
1496 del document.body[i:j+1]
# Re-insert the content wrapped in a collapsed ERT inset with braces.
1497 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1498 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1499 document.body[parbeg : parbeg] = subst
# Replaces lyxinfo/layoutformat Info insets with the literal layout
# format number "69".  (This listing elides interior control-flow
# lines — the scan loop and guards fall between numbered statements.)
1502 def revert_lformatinfo(document):
1503 """Revert layout format Info inset to text."""
1507 i = find_token(document.body, "\\begin_inset Info", i+1)
1510 j = find_end_of_inset(document.body, i+1)
1512 document.warning("Malformed LyX document: Could not find end of Info inset.")
1514 tp = find_token(document.body, 'type', i, j)
1515 tpv = get_quoted_value(document.body, "type", tp)
1516 if tpv != "lyxinfo":
1518 arg = find_token(document.body, 'arg', i, j)
1519 argv = get_quoted_value(document.body, "arg", arg)
1520 if argv != "layoutformat":
# NOTE(review): assigning the bare string "69" to a list slice splices
# in the two characters "6" and "9" as separate lines — presumably this
# should be ["69"]; confirm against the upstream file.
1523 document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    # Stack of the current language, one entry per open layout.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Fix: the original used line.lstrip('\\lang '), which strips
            # *characters* from the set {\, l, a, n, g, space} and mangles
            # language names (e.g. "ngerman" -> "erman").  Slice off the
            # literal prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # Nested layouts inherit the enclosing language.
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap ( and ) using NUL as a temporary placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed (pre-2.4 convention).

    The parenthesis swap is its own inverse, so the convert routine is
    reused; this wrapper exists only to keep the convert/revert naming
    convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # Delegate to the generic language-reversion helper: no babel name,
    # polyglossia name "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
# Reverts the soul module's flex insets to raw LaTeX commands, adding
# the required packages to the preamble first.  (This listing elides
# interior control-flow lines — the for-loop over `flexes` and the
# `if i != -1:` guards fall between the numbered statements.)
1558 def revert_soul(document):
1559 """Revert soul module flex insets to ERT"""
1561 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# If any soul flex inset is present, soul.sty is needed.
1564 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1566 add_to_preamble(document, ["\\usepackage{soul}"])
# \hl (Highlight) additionally needs the color package.
1568 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1570 add_to_preamble(document, ["\\usepackage{color}"])
# Map each flex inset to its soul command.
1572 revert_flex_inset(document.body, "Spaceletters", "\\so")
1573 revert_flex_inset(document.body, "Strikethrough", "\\st")
1574 revert_flex_inset(document.body, "Underline", "\\ul")
1575 revert_flex_inset(document.body, "Highlight", "\\hl")
1576 revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove the \\tablestyle header parameter."""
    i = find_token(document.header, "\\tablestyle")
    # Guard against the not-found case: find_token returns -1, and
    # `del document.header[-1]` would silently drop the last header line.
    if i != -1:
        del document.header[i]
# Reverts per-bibliography-file encodings (new in 2.4) for Biblatex:
# moves each bibfile to a preamble \addbibresource (with bibencoding
# option where one was recorded), emits an ERT \printbibliography, and
# wraps the original bibtex inset in a Note.  (This listing elides many
# interior lines — guards, loop headers, and part of the encoding map.)
1587 def revert_bibfileencodings(document):
1588 """Revert individual Biblatex bibliography encodings"""
# Only relevant when the cite engine is biblatex(-natbib).
1592 i = find_token(document.header, "\\cite_engine", 0)
1594 document.warning("Malformed document! Missing \\cite_engine")
1596 engine = get_value(document.header, "\\cite_engine", i)
1600 if engine in ["biblatex", "biblatex-natbib"]:
1603 # Map lyx to latex encoding names
1607 "armscii8" : "armscii8",
1608 "iso8859-1" : "latin1",
1609 "iso8859-2" : "latin2",
1610 "iso8859-3" : "latin3",
1611 "iso8859-4" : "latin4",
1612 "iso8859-5" : "iso88595",
1613 "iso8859-6" : "8859-6",
1614 "iso8859-7" : "iso-8859-7",
1615 "iso8859-8" : "8859-8",
1616 "iso8859-9" : "latin5",
1617 "iso8859-13" : "latin7",
1618 "iso8859-15" : "latin9",
1619 "iso8859-16" : "latin10",
1620 "applemac" : "applemac",
1622 "cp437de" : "cp437de",
1630 "cp1250" : "cp1250",
1631 "cp1251" : "cp1251",
1632 "cp1252" : "cp1252",
1633 "cp1255" : "cp1255",
1634 "cp1256" : "cp1256",
1635 "cp1257" : "cp1257",
1636 "koi8-r" : "koi8-r",
1637 "koi8-u" : "koi8-u",
1639 "utf8-platex" : "utf8",
# Scan every bibtex CommandInset in the body.
1646 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1649 j = find_end_of_inset(document.body, i)
1651 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
# Per-file encodings are stored tab-separated as "<file> <encoding>".
1653 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1657 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1658 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1659 if len(bibfiles) == 0:
1660 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1661 # remove encoding line
1662 k = find_token(document.body, "file_encodings", i, j)
1664 del document.body[k]
1665 # Re-find inset end line
1666 j = find_end_of_inset(document.body, i)
# Build the file -> encoding map from the tab-separated list.
1668 enclist = encodings.split("\t")
1671 ppp = pp.split(" ", 1)
1672 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per bibfile, with bibencoding if recorded.
1673 for bib in bibfiles:
1674 pr = "\\addbibresource"
1675 if bib in encmap.keys():
1676 pr += "[bibencoding=" + encmap[bib] + "]"
1677 pr += "{" + bib + "}"
1678 add_to_preamble(document, [pr])
1679 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1680 pcmd = "printbibliography"
1682 pcmd += "[" + opts + "]"
1683 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1684 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1685 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1686 "status open", "", "\\begin_layout Plain Layout" ]
1687 repl += document.body[i:j+1]
1688 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1689 document.body[i:j+1] = repl
# Strips the 2.4 cmidrule trim attributes (e.g. toplineltrim="true")
# from table <cell ...> lines.  (This listing elides interior
# control-flow lines — the scan loop and guards are missing.)
1695 def revert_cmidruletrimming(document):
1696 """Remove \\cmidrule trimming"""
1698 # FIXME: Revert to TeX code?
1701 # first, let's find out if we need to do anything
1702 i = find_token(document.body, '<cell ', i+1)
1705 j = document.body[i].find('trim="')
# Matches e.g. ` toplineltrim="true"` or ` bottomlinertrim="true"`.
1708 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1709 # remove trim option
1710 document.body[i] = rgx.sub('', document.body[i])
# Local layout definition for the Flex:Ruby inset (furigana reading
# aids), used by convert_ruby_module/revert_ruby_module below.  The
# assignment opener (`ruby_inset_def = [` — TODO confirm) and several
# layout lines fall on elided lines of this listing.
1714 r'### Inserted by lyx2lyx (ruby inset) ###',
1715 r'InsetLayout Flex:Ruby',
1716 r' LyxType charstyle',
1717 r' LatexType command',
1721 r' HTMLInnerTag rb',
1722 r' HTMLInnerAttr ""',
1724 r' LabelString "Ruby"',
1725 r' Decoration Conglomerate',
# Preamble: pick a ruby implementation per engine (pLaTeX, LuaTeX, XeTeX).
1727 r' \ifdefined\kanjiskip',
1728 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1729 r' \else \ifdefined\luatexversion',
1730 r' \usepackage{luatexja-ruby}',
1731 r' \else \ifdefined\XeTeXversion',
1732 r' \usepackage{ruby}%',
1734 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1736 r' Argument post:1',
1737 r' LabelString "ruby text"',
1738 r' MenuString "Ruby Text|R"',
1739 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1740 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition."""
    # del_local_layout reports whether the exact local layout block was
    # present and removed; only then does the document need the module.
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition."""
    # del_module reports whether the module was actually loaded; only
    # then must the local layout be re-added.
    had_module = document.del_module("ruby")
    if had_module:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    Maps the Japanese-specific input encodings (utf8-platex for
    "japanese", utf8-cjk for "japanese-cjk") to plain utf8.
    """
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        # Restored early return: the listing showed this guard with no
        # body, which would wrongly fall through for all languages.
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    Inverse of convert_utf8_japanese: plain utf8 becomes utf8-platex
    for "japanese" and utf8-cjk for "japanese-cjk".
    """
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        # Restored early return: the listing showed this guard with no
        # body, which would wrongly rewrite non-utf8 documents too.
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
# Moves native \use_lineno / \lineno_options header support back into
# user-preamble \usepackage{lineno} code.  (This listing elides interior
# lines — including the continuation of the get_quoted_value call and
# the \usepackage argument list closing.)
1788 def revert_lineno(document):
1789 " Replace lineno setting with user-preamble code."
# Read (and delete) the header tokens; options become [..] if present.
1791 options = get_quoted_value(document.header, "\\lineno_options",
1793 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1796 options = "[" + options + "]"
1797 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# Replaces user-preamble \usepackage{lineno} + \linenumbers code with
# the native \use_lineno (and \lineno_options) header settings.  (This
# listing elides interior lines — guards and the `use_lineno` flag
# assignment fall between the numbered statements.)
1800 def convert_lineno(document):
1801 " Replace user-preamble code with native lineno support."
# \linenumbers can only appear from line 1 on (preceded by \usepackage).
1804 i = find_token(document.preamble, "\\linenumbers", 1)
1806 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1809 options = usepkg.group(1).strip("[]")
# Remove the two preamble lines and the lyx2lyx marker comment.
1810 del(document.preamble[i-1:i+1])
1811 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the header token(s) before \index.
1813 k = find_token(document.header, "\\index ")
1815 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1817 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1818 "\\lineno_options %s" % options]
# The aa (Astronomy & Astrophysics) class changed its default encoding;
# documents relying on the old default get an explicit latin9 class
# option.  (This listing elides interior control-flow lines — early
# returns and guards fall between the numbered statements.)
1821 def convert_aaencoding(document):
1822 " Convert default document option due to encoding change in aa class. "
1824 if document.textclass != "aa":
1827 i = find_token(document.header, "\\use_default_options true")
1830 val = get_value(document.header, "\\inputencoding")
1832 document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
1834 if val == "auto-legacy" or val == "latin9":
1835 document.header[i] = "\\use_default_options false"
# Append latin9 to existing \options, or create the header line.
1836 k = find_token(document.header, "\\options")
1838 document.header.insert(i, "\\options latin9")
1840 document.header[k] += ",latin9"
# Inverse of convert_aaencoding: force the utf8 class option for aa
# documents.  (This listing elides interior control-flow lines — early
# returns and guards fall between the numbered statements.)
1843 def revert_aaencoding(document):
1844 " Revert default document option due to encoding change in aa class. "
1846 if document.textclass != "aa":
1849 i = find_token(document.header, "\\use_default_options true")
1852 val = get_value(document.header, "\\inputencoding")
1854 document.warning("Malformed LyX Document! Missing \\inputencoding header.")
1857 document.header[i] = "\\use_default_options false"
# Append utf8 to existing \options, or create the header line.
1858 k = find_token(document.header, "\\options", 0)
1860 document.header.insert(i, "\\options utf8")
1862 document.header[k] = document.header[k] + ",utf8"
# Emulates languages unknown to older formats via the generic
# revert_language helper.  (This listing elides interior lines — the
# dict closer, else-branch, scan loop and break guards are missing.)
1865 def revert_new_languages(document):
1866 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1867 and Russian (Petrine orthography)."""
1869 # lyxname: (babelname, polyglossianame)
1870 new_languages = {"azerbaijani": ("azerbaijani", ""),
1871 "bengali": ("", "bengali"),
1872 "churchslavonic": ("", "churchslavonic"),
1873 "oldrussian": ("", "russian"),
1874 "korean": ("", "korean"),
# Collect the affected languages: main language plus any \lang switches.
1876 if document.language in new_languages:
1877 used_languages = set((document.language, ))
1879 used_languages = set()
1882 i = find_token(document.body, "\\lang", i+1)
1885 val = get_value(document.body, "\\lang", i)
1886 if val in new_languages:
1887 used_languages.add(val)
1889 # Korean is already supported via CJK, so leave as-is for Babel
1890 if ("korean" in used_languages
1891 and (not get_bool_value(document.header, "\\use_non_tex_fonts")
1892 or get_value(document.header, "\\language_package") == "babel")):
1893 used_languages.discard("korean")
1895 for lang in used_languages:
1896 revert_language(document, lang, *new_languages[lang])
# Local layout definition for the deprecated Flex:Glosse linguistics
# inset, used by convert_linggloss/revert_linggloss.  The assignment
# opener (`gloss_inset_def = [` — TODO confirm) and several layout
# lines fall on elided lines of this listing.
1900 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1901 r'InsetLayout Flex:Glosse',
1903 r' LabelString "Gloss (old version)"',
1904 r' MenuString "Gloss (old version)"',
1905 r' LatexType environment',
1906 r' LatexName linggloss',
1907 r' Decoration minimalistic',
1912 r' CustomPars false',
1913 r' ForcePlain true',
1914 r' ParbreakIsNewline true',
1915 r' FreeSpacing true',
1916 r' Requires covington',
# Preamble: define the linggloss environment unless already provided.
1919 r' \@ifundefined{linggloss}{%',
1920 r' \newenvironment{linggloss}[2][]{',
1921 r' \def\glosstr{\glt #1}%',
1923 r' {\glosstr\glend}}{}',
1926 r' ResetsFont true',
1928 r' Decoration conglomerate',
1929 r' LabelString "Translation"',
1930 r' MenuString "Glosse Translation|s"',
1931 r' Tooltip "Add a translation for the glosse"',
# Local layout definition for the deprecated Flex:Tri-Glosse (three-line
# interlinear gloss) inset; companion to gloss_inset_def above.
# Several layout lines fall on elided lines of this listing.
1936 glosss_inset_def = [
1937 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1938 r'InsetLayout Flex:Tri-Glosse',
1940 r' LabelString "Tri-Gloss (old version)"',
1941 r' MenuString "Tri-Gloss (old version)"',
1942 r' LatexType environment',
1943 r' LatexName lingglosss',
1944 r' Decoration minimalistic',
1949 r' CustomPars false',
1950 r' ForcePlain true',
1951 r' ParbreakIsNewline true',
1952 r' FreeSpacing true',
1954 r' Requires covington',
# Preamble: define the lingglosss environment unless already provided.
1957 r' \@ifundefined{lingglosss}{%',
1958 r' \newenvironment{lingglosss}[2][]{',
1959 r' \def\glosstr{\glt #1}%',
1961 r' {\glosstr\glend}}{}',
1963 r' ResetsFont true',
1965 r' Decoration conglomerate',
1966 r' LabelString "Translation"',
1967 r' MenuString "Glosse Translation|s"',
1968 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # Append the matching deprecated layout definition for each gloss
    # flavor that actually occurs in the body.
    gloss_layouts = (('\\begin_inset Flex Glosse', gloss_inset_def),
                     ('\\begin_inset Flex Tri-Glosse', glosss_inset_def))
    for inset_token, layout in gloss_layouts:
        if find_token(document.body, inset_token, 0) != -1:
            document.append_local_layout(layout)
# Inverse of convert_linggloss: drop the compatibility local layouts and
# rewrite the new "Interlinear Gloss (2/3 Lines)" insets as raw LaTeX
# (ERT) calls to the covington gloss commands.
# NOTE(review): the body shown here is elided (embedded line numbers
# jump); guards such as `if j == -1:`/`continue`, the `while True:` loop
# header, and the `cmd = ...` assignments are among the missing lines.
1980 def revert_linggloss(document):
1981 " Revert to old ling gloss definitions "
1982 if not "linguistics" in document.get_module_list():
1984 document.del_local_layout(gloss_inset_def)
1985 document.del_local_layout(glosss_inset_def)
1988 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1989 for glosse in glosses:
1992 i = find_token(document.body, glosse, i+1)
1995 j = find_end_of_inset(document.body, i)
1997 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1) -> LaTeX optional argument content.
2000 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2001 endarg = find_end_of_inset(document.body, arg)
2004 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2005 if argbeginPlain == -1:
2006 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2008 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2009 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2011 # remove Arg insets and paragraph, if it only contains this inset
2012 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2013 del document.body[arg - 1 : endarg + 4]
2015 del document.body[arg : endarg + 1]
# Mandatory argument post:1 (first gloss line).
2017 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2018 endarg = find_end_of_inset(document.body, arg)
2021 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2022 if argbeginPlain == -1:
2023 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2025 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2026 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2028 # remove Arg insets and paragraph, if it only contains this inset
2029 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2030 del document.body[arg - 1 : endarg + 4]
2032 del document.body[arg : endarg + 1]
# Mandatory argument post:2 (second gloss line).
2034 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2035 endarg = find_end_of_inset(document.body, arg)
2038 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2039 if argbeginPlain == -1:
2040 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2042 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2043 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2045 # remove Arg insets and paragraph, if it only contains this inset
2046 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2047 del document.body[arg - 1 : endarg + 4]
2049 del document.body[arg : endarg + 1]
# Mandatory argument post:3 (third line; only used for tri-glosses).
2051 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2052 endarg = find_end_of_inset(document.body, arg)
2055 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2056 if argbeginPlain == -1:
2057 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2059 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2060 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2062 # remove Arg insets and paragraph, if it only contains this inset
2063 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2064 del document.body[arg - 1 : endarg + 4]
2066 del document.body[arg : endarg + 1]
# cmd is presumably "\gll" vs "\trigloss" depending on the inset kind
# (its assignment is among the elided lines) — TODO confirm upstream.
2069 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Wrap the remaining inset content between ERT pre-/post-ambles.
2072 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2073 endInset = find_end_of_inset(document.body, i)
2074 endPlain = find_end_of_layout(document.body, beginPlain)
2075 precontent = put_cmd_in_ert(cmd)
2076 if len(optargcontent) > 0:
2077 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2078 precontent += put_cmd_in_ert("{")
2080 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2081 if cmd == "\\trigloss":
2082 postcontent += put_cmd_in_ert("}{") + marg3content
2083 postcontent += put_cmd_in_ert("}")
# Replace tail first so the earlier indices stay valid.
2085 document.body[endPlain:endInset + 1] = postcontent
2086 document.body[beginPlain + 1:beginPlain] = precontent
2087 del document.body[i : beginPlain + 1]
2089 document.append_local_layout("Requires covington")
# Rewrites "Subexample" layouts that carry an optional argument as an
# explicit ERT \begin{subexamples}[opt] ... \end{subexamples} block,
# since older formats cannot represent the argument natively.
# NOTE(review): body is elided (embedded line numbers jump); loop
# headers, `continue`/`break` guards and early returns are missing here.
2094 def revert_subexarg(document):
2095 " Revert linguistic subexamples with argument to ERT "
2097 if not "linguistics" in document.get_module_list():
2103 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2106 j = find_end_of_layout(document.body, i)
2108 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2111 # check for consecutive layouts
2112 k = find_token(document.body, "\\begin_layout", j)
2113 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2115 j = find_end_of_layout(document.body, k)
2117 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2120 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2124 endarg = find_end_of_inset(document.body, arg)
2126 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2127 if argbeginPlain == -1:
2128 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2130 argendPlain = find_end_of_inset(document.body, argbeginPlain)
# The argument may contain markup, hence the lyx2latex conversion.
2131 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2133 # remove Arg insets and paragraph, if it only contains this inset
2134 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2135 del document.body[arg - 1 : endarg + 4]
2137 del document.body[arg : endarg + 1]
2139 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2141 # re-find end of layout
2142 j = find_end_of_layout(document.body, i)
2144 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2147 # check for consecutive layouts
2148 k = find_token(document.body, "\\begin_layout", j)
2149 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each subsequent Subexample paragraph becomes a Standard one with \item.
2151 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2152 j = find_end_of_layout(document.body, k)
2154 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2157 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the last subexample, then rewrite the first.
2159 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2160 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2161 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2163 document.append_local_layout("Requires covington")
# Rewrites the linguistics DRS (Discourse Representation Structure) flex
# insets as ERT calls to the drs.sty commands.  Each DRS variant takes a
# different set of pre-/post-arguments, which are extracted one by one.
# NOTE(review): body is elided (embedded line numbers jump); the loop
# headers, `if j == -1:` guards, `continue` statements and the `cmd =`
# assignments in the variant dispatch are among the missing lines.
2167 def revert_drs(document):
2168 " Revert DRS insets (linguistics) to ERT "
2170 if not "linguistics" in document.get_module_list():
2174 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2175 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2176 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2177 "\\begin_inset Flex SDRS"]
2181 i = find_token(document.body, drs, i+1)
2184 j = find_end_of_inset(document.body, i)
2186 document.warning("Malformed LyX document: Can't find end of DRS inset")
2189 # Check for arguments
2190 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2191 endarg = find_end_of_inset(document.body, arg)
2194 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2195 if argbeginPlain == -1:
2196 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2198 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2199 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2201 # remove Arg insets and paragraph, if it only contains this inset
2202 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2203 del document.body[arg - 1 : endarg + 4]
2205 del document.body[arg : endarg + 1]
# Re-find the inset end: the deletions above shifted indices.
2208 j = find_end_of_inset(document.body, i)
2210 document.warning("Malformed LyX document: Can't find end of DRS inset")
2213 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2214 endarg = find_end_of_inset(document.body, arg)
2217 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2218 if argbeginPlain == -1:
2219 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2221 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2222 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2224 # remove Arg insets and paragraph, if it only contains this inset
2225 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2226 del document.body[arg - 1 : endarg + 4]
2228 del document.body[arg : endarg + 1]
2231 j = find_end_of_inset(document.body, i)
2233 document.warning("Malformed LyX document: Can't find end of DRS inset")
2236 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2237 endarg = find_end_of_inset(document.body, arg)
2238 postarg1content = []
2240 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2241 if argbeginPlain == -1:
2242 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2244 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2245 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2247 # remove Arg insets and paragraph, if it only contains this inset
2248 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2249 del document.body[arg - 1 : endarg + 4]
2251 del document.body[arg : endarg + 1]
2254 j = find_end_of_inset(document.body, i)
2256 document.warning("Malformed LyX document: Can't find end of DRS inset")
2259 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2260 endarg = find_end_of_inset(document.body, arg)
2261 postarg2content = []
2263 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2264 if argbeginPlain == -1:
2265 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2267 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2268 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2270 # remove Arg insets and paragraph, if it only contains this inset
2271 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2272 del document.body[arg - 1 : endarg + 4]
2274 del document.body[arg : endarg + 1]
2277 j = find_end_of_inset(document.body, i)
2279 document.warning("Malformed LyX document: Can't find end of DRS inset")
2282 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2283 endarg = find_end_of_inset(document.body, arg)
2284 postarg3content = []
2286 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2287 if argbeginPlain == -1:
2288 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2290 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2291 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2293 # remove Arg insets and paragraph, if it only contains this inset
2294 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2295 del document.body[arg - 1 : endarg + 4]
2297 del document.body[arg : endarg + 1]
2300 j = find_end_of_inset(document.body, i)
2302 document.warning("Malformed LyX document: Can't find end of DRS inset")
2305 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2306 endarg = find_end_of_inset(document.body, arg)
2307 postarg4content = []
2309 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2310 if argbeginPlain == -1:
2311 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2313 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2314 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2316 # remove Arg insets and paragraph, if it only contains this inset
2317 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2318 del document.body[arg - 1 : endarg + 4]
2320 del document.body[arg : endarg + 1]
2322 # The respective LaTeX command
# Variant dispatch; the `cmd = "\\..."` assignments are elided here.
2324 if drs == "\\begin_inset Flex DRS*":
2326 elif drs == "\\begin_inset Flex IfThen-DRS":
2328 elif drs == "\\begin_inset Flex Cond-DRS":
2330 elif drs == "\\begin_inset Flex QDRS":
2332 elif drs == "\\begin_inset Flex NegDRS":
2334 elif drs == "\\begin_inset Flex SDRS":
2337 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2338 endInset = find_end_of_inset(document.body, i)
2339 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2340 precontent = put_cmd_in_ert(cmd)
2341 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# Only SDRS takes a second pre-argument.
2342 if drs == "\\begin_inset Flex SDRS":
2343 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2344 precontent += put_cmd_in_ert("{")
2347 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2348 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2349 if cmd == "\\condrs" or cmd == "\\qdrs":
2350 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2352 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2354 postcontent = put_cmd_in_ert("}")
# Replace tail first so earlier indices remain valid.
2356 document.body[endPlain:endInset + 1] = postcontent
2357 document.body[beginPlain + 1:beginPlain] = precontent
2358 del document.body[i : beginPlain + 1]
2360 document.append_local_layout("Provides covington 1")
2361 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Moves non-TeX font selections into user-preamble \babelfont commands
# (for babel documents compiled with xelatex/lualatex) and resets the
# header font settings to "default".
# NOTE(review): body is elided (embedded line numbers jump); the early
# returns and several `if j == -1:` guards are among the missing lines.
2367 def revert_babelfont(document):
2368 " Reverts the use of \\babelfont to user preamble "
# Only applies to non-TeX (OpenType) fonts with the babel language package.
2370 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2373 i = find_token(document.header, '\\language_package', 0)
2375 document.warning("Malformed LyX document: Missing \\language_package.")
2377 if get_value(document.header, "\\language_package", 0) != "babel":
2380 # check font settings
2382 roman = sans = typew = "default"
2384 sf_scale = tt_scale = 100.0
2386 j = find_token(document.header, "\\font_roman", 0)
2388 document.warning("Malformed LyX document: Missing \\font_roman.")
2390 # We need to use this regex since split() does not handle quote protection
2391 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2392 roman = romanfont[2].strip('"')
2393 romanfont[2] = '"default"'
2394 document.header[j] = " ".join(romanfont)
2396 j = find_token(document.header, "\\font_sans", 0)
2398 document.warning("Malformed LyX document: Missing \\font_sans.")
2400 # We need to use this regex since split() does not handle quote protection
2401 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2402 sans = sansfont[2].strip('"')
2403 sansfont[2] = '"default"'
2404 document.header[j] = " ".join(sansfont)
2406 j = find_token(document.header, "\\font_typewriter", 0)
2408 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2410 # We need to use this regex since split() does not handle quote protection
2411 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2412 typew = ttfont[2].strip('"')
2413 ttfont[2] = '"default"'
2414 document.header[j] = " ".join(ttfont)
2416 i = find_token(document.header, "\\font_osf", 0)
2418 document.warning("Malformed LyX document: Missing \\font_osf.")
2420 osf = str2bool(get_value(document.header, "\\font_osf", i))
2422 j = find_token(document.header, "\\font_sf_scale", 0)
2424 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2426 sfscale = document.header[j].split()
2429 document.header[j] = " ".join(sfscale)
2432 sf_scale = float(val)
2434 document.warning("Invalid font_sf_scale value: " + val)
2436 j = find_token(document.header, "\\font_tt_scale", 0)
2438 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2440 ttscale = document.header[j].split()
2443 document.header[j] = " ".join(ttscale)
2446 tt_scale = float(val)
2448 document.warning("Invalid font_tt_scale value: " + val)
2450 # set preamble stuff
2451 pretext = ['%% This document must be processed with xelatex or lualatex!']
2452 pretext.append('\\AtBeginDocument{%')
2453 if roman != "default":
2454 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2455 if sans != "default":
2456 sf = '\\babelfont{sf}['
2457 if sf_scale != 100.0:
2458 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2459 sf += 'Mapping=tex-text]{' + sans + '}'
2461 if typew != "default":
2462 tw = '\\babelfont{tt}'
2463 if tt_scale != 100.0:
2464 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2465 tw += '{' + typew + '}'
2468 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2470 insert_to_preamble(document, pretext)
# Converts the native MinionPro roman-font selection (when extra roman
# options are present) back to an explicit \usepackage[...]{MinionPro}
# in the preamble, resetting the header font to "default".
# NOTE(review): body is elided (embedded line numbers jump); early
# returns and some option-assembly lines are missing from this view.
2473 def revert_minionpro(document):
2474 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2476 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2479 regexp = re.compile(r'(\\font_roman_opts)')
2480 x = find_re(document.header, regexp, 0)
2484 # We need to use this regex since split() does not handle quote protection
2485 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2486 opts = romanopts[1].strip('"')
2488 i = find_token(document.header, "\\font_roman", 0)
2490 document.warning("Malformed LyX document: Missing \\font_roman.")
2493 # We need to use this regex since split() does not handle quote protection
2494 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2495 roman = romanfont[1].strip('"')
2496 if roman != "minionpro":
2498 romanfont[1] = '"default"'
2499 document.header[i] = " ".join(romanfont)
# Old-style figures become a package option; the header flag is reset.
2501 j = find_token(document.header, "\\font_osf true", 0)
2504 preamble = "\\usepackage["
2506 document.header[j] = "\\font_osf false"
2510 preamble += "]{MinionPro}"
2511 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2512 del document.header[x]
# Reverts the new \font_*_opts header parameters by emitting the
# equivalent \setmainfont/\setsansfont/\setmonofont (or \babelfont for
# babel documents) preamble commands, then deleting the opts lines.
# NOTE(review): body is elided (embedded line numbers jump); the
# `if i != -1:` guards, `opts = ""` resets and several string-assembly
# lines are among the missing lines in this view.
2515 def revert_font_opts(document):
2516 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2518 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2519 Babel = (get_value(document.header, "\\language_package") == "babel")
# roman
2522 regexp = re.compile(r'(\\font_roman_opts)')
2523 i = find_re(document.header, regexp, 0)
2525 # We need to use this regex since split() does not handle quote protection
2526 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2527 opts = romanopts[1].strip('"')
2528 del document.header[i]
2530 regexp = re.compile(r'(\\font_roman)')
2531 i = find_re(document.header, regexp, 0)
2533 # We need to use this regex since split() does not handle quote protection
2534 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2535 font = romanfont[2].strip('"')
2536 romanfont[2] = '"default"'
2537 document.header[i] = " ".join(romanfont)
2538 if font != "default":
2540 preamble = "\\babelfont{rm}["
2542 preamble = "\\setmainfont["
2545 preamble += "Mapping=tex-text]{"
2548 add_to_preamble(document, [preamble])
# sans serif
2551 regexp = re.compile(r'(\\font_sans_opts)')
2552 i = find_re(document.header, regexp, 0)
2555 # We need to use this regex since split() does not handle quote protection
2556 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2557 opts = sfopts[1].strip('"')
2558 del document.header[i]
2560 regexp = re.compile(r'(\\font_sf_scale)')
2561 i = find_re(document.header, regexp, 0)
2563 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2564 regexp = re.compile(r'(\\font_sans)')
2565 i = find_re(document.header, regexp, 0)
2567 # We need to use this regex since split() does not handle quote protection
2568 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2569 font = sffont[2].strip('"')
2570 sffont[2] = '"default"'
2571 document.header[i] = " ".join(sffont)
2572 if font != "default":
2574 preamble = "\\babelfont{sf}["
2576 preamble = "\\setsansfont["
# A scale != 100 becomes e.g. Scale=0.95 in the font options.
2580 preamble += "Scale=0."
2581 preamble += scaleval
2583 preamble += "Mapping=tex-text]{"
2586 add_to_preamble(document, [preamble])
# typewriter
2589 regexp = re.compile(r'(\\font_typewriter_opts)')
2590 i = find_re(document.header, regexp, 0)
2593 # We need to use this regex since split() does not handle quote protection
2594 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2595 opts = ttopts[1].strip('"')
2596 del document.header[i]
2598 regexp = re.compile(r'(\\font_tt_scale)')
2599 i = find_re(document.header, regexp, 0)
2601 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2602 regexp = re.compile(r'(\\font_typewriter)')
2603 i = find_re(document.header, regexp, 0)
2605 # We need to use this regex since split() does not handle quote protection
2606 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2607 font = ttfont[2].strip('"')
2608 ttfont[2] = '"default"'
2609 document.header[i] = " ".join(ttfont)
2610 if font != "default":
2612 preamble = "\\babelfont{tt}["
2614 preamble = "\\setmonofont["
2618 preamble += "Scale=0."
2619 preamble += scaleval
2621 preamble += "Mapping=tex-text]{"
2624 add_to_preamble(document, [preamble])
# When the document uses plain Noto as the complete TeX font family (and
# has extra roman options or OSF), replace the header settings with a
# \usepackage[...]{noto} preamble line.
# NOTE(review): body is elided (embedded line numbers jump); early
# returns, `osf = ...` and the option-string assembly are missing here.
2627 def revert_plainNotoFonts_xopts(document):
2628 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2630 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2634 y = find_token(document.header, "\\font_osf true", 0)
2638 regexp = re.compile(r'(\\font_roman_opts)')
2639 x = find_re(document.header, regexp, 0)
2640 if x == -1 and not osf:
2645 # We need to use this regex since split() does not handle quote protection
2646 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2647 opts = romanopts[1].strip('"')
2653 i = find_token(document.header, "\\font_roman", 0)
2657 # We need to use this regex since split() does not handle quote protection
2658 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2659 roman = romanfont[1].strip('"')
# All three families must be the matching Noto faces to count as
# a "complete" noto setup; otherwise bail out (elided branches).
2660 if roman != "NotoSerif-TLF":
2663 j = find_token(document.header, "\\font_sans", 0)
2667 # We need to use this regex since split() does not handle quote protection
2668 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2669 sf = sffont[1].strip('"')
2673 j = find_token(document.header, "\\font_typewriter", 0)
2677 # We need to use this regex since split() does not handle quote protection
2678 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2679 tt = ttfont[1].strip('"')
2683 # So we have noto as "complete font"
2684 romanfont[1] = '"default"'
2685 document.header[i] = " ".join(romanfont)
2687 preamble = "\\usepackage["
2689 preamble += "]{noto}"
2690 add_to_preamble(document, [preamble])
2692 document.header[y] = "\\font_osf false"
2694 del document.header[x]
# Reverts the extended Noto font variants (with extra options) via the
# shared font-mapping machinery; collected packages go to the preamble.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2697 def revert_notoFonts_xopts(document):
2698 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2700 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2704 fm = createFontMapping(['Noto'])
2705 if revert_fonts(document, fm, fontmap, True):
2706 add_preamble_fonts(document, fontmap)
# Reverts native IBM Plex font variants (with extra options) via the
# shared font-mapping machinery; collected packages go to the preamble.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2709 def revert_IBMFonts_xopts(document):
2710 " Revert native IBM font definition (with extra options) to LaTeX "
2712 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2716 fm = createFontMapping(['IBM'])
2718 if revert_fonts(document, fm, fontmap, True):
2719 add_preamble_fonts(document, fontmap)
# Reverts native Adobe Source font variants (with extra options) via the
# shared font-mapping machinery; collected packages go to the preamble.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2722 def revert_AdobeFonts_xopts(document):
2723 " Revert native Adobe font definition (with extra options) to LaTeX "
2725 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2729 fm = createFontMapping(['Adobe'])
2731 if revert_fonts(document, fm, fontmap, True):
2732 add_preamble_fonts(document, fontmap)
# Splits the single \font_osf header flag into the per-family flags
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf, inferring the
# sans/typewriter values from fonts that are inherently old-style.
# NOTE(review): body is elided (embedded line numbers jump); branch
# conditions (e.g. the NonTeXFonts/osfval tests) are partly missing.
2735 def convert_osf(document):
2736 " Convert \\font_osf param to new format "
2738 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2740 i = find_token(document.header, '\\font_osf', 0)
2742 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts that always come with old-style figures in sans/typewriter.
2745 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2746 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2748 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2749 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2752 document.header.insert(i, "\\font_sans_osf false")
2753 document.header.insert(i + 1, "\\font_typewriter_osf false")
2757 x = find_token(document.header, "\\font_sans", 0)
2759 document.warning("Malformed LyX document: Missing \\font_sans.")
2761 # We need to use this regex since split() does not handle quote protection
2762 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2763 sf = sffont[1].strip('"')
2765 document.header.insert(i, "\\font_sans_osf true")
2767 document.header.insert(i, "\\font_sans_osf false")
2769 x = find_token(document.header, "\\font_typewriter", 0)
2771 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2773 # We need to use this regex since split() does not handle quote protection
2774 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2775 tt = ttfont[1].strip('"')
2777 document.header.insert(i + 1, "\\font_typewriter_osf true")
2779 document.header.insert(i + 1, "\\font_typewriter_osf false")
2782 document.header.insert(i, "\\font_sans_osf false")
2783 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapses the per-family \font_*_osf flags back into the single
# \font_osf parameter: the roman flag is renamed in place, while the
# sans/typewriter flags are OR-ed in (non-TeX fonts) and deleted.
# NOTE(review): body is elided (embedded line numbers jump); early
# returns and the NonTeXFonts branch condition are partly missing.
2786 def revert_osf(document):
2787 " Revert \\font_*_osf params "
2789 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2791 i = find_token(document.header, '\\font_roman_osf', 0)
2793 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2796 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2797 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2799 i = find_token(document.header, '\\font_sans_osf', 0)
2801 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2804 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2805 del document.header[i]
2807 i = find_token(document.header, '\\font_typewriter_osf', 0)
2809 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2812 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2813 del document.header[i]
2816 i = find_token(document.header, '\\font_osf', 0)
2818 document.warning("Malformed LyX document: Missing \\font_osf.")
2820 document.header[i] = "\\font_osf true"
# Reverts TeX-font selections that carry extra options back to explicit
# \usepackage lines: biolinum for sans, then a whitelist of roman fonts
# (ccfonts, cochineal, utopia, garamondx, libertine, lmodern, palatino,
# times, xcharter), translating OSF and small-caps flags into package
# options where the package supports them.
# NOTE(review): body is elided (embedded line numbers jump); early
# returns, `osf = ""` initialization, several option assignments and
# `if x == -1:`/`if j != -1:` guards are missing from this view.
2823 def revert_texfontopts(document):
2824 " Revert native TeX font definitions (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2826 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2829 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2831 # First the sf (biolinum only)
2832 regexp = re.compile(r'(\\font_sans_opts)')
2833 x = find_re(document.header, regexp, 0)
2835 # We need to use this regex since split() does not handle quote protection
2836 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2837 opts = sfopts[1].strip('"')
2838 i = find_token(document.header, "\\font_sans", 0)
2840 document.warning("Malformed LyX document: Missing \\font_sans.")
2842 # We need to use this regex since split() does not handle quote protection
2843 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2844 sans = sffont[1].strip('"')
2845 if sans == "biolinum":
2847 sffont[1] = '"default"'
2848 document.header[i] = " ".join(sffont)
2850 j = find_token(document.header, "\\font_sans_osf true", 0)
2853 k = find_token(document.header, "\\font_sf_scale", 0)
2855 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2857 sfscale = document.header[k].split()
2860 document.header[k] = " ".join(sfscale)
2863 sf_scale = float(val)
2865 document.warning("Invalid font_sf_scale value: " + val)
2866 preamble = "\\usepackage["
2868 document.header[j] = "\\font_sans_osf false"
2870 if sf_scale != 100.0:
2871 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2873 preamble += "]{biolinum}"
2874 add_to_preamble(document, [preamble])
2875 del document.header[x]
# Now the roman fonts.
2877 regexp = re.compile(r'(\\font_roman_opts)')
2878 x = find_re(document.header, regexp, 0)
2882 # We need to use this regex since split() does not handle quote protection
2883 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2884 opts = romanopts[1].strip('"')
2886 i = find_token(document.header, "\\font_roman", 0)
2888 document.warning("Malformed LyX document: Missing \\font_roman.")
2891 # We need to use this regex since split() does not handle quote protection
2892 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2893 roman = romanfont[1].strip('"')
2894 if not roman in rmfonts:
2896 romanfont[1] = '"default"'
2897 document.header[i] = " ".join(romanfont)
# Some fonts are provided by a differently named LaTeX package.
2899 if roman == "utopia":
2901 elif roman == "palatino":
2902 package = "mathpazo"
2903 elif roman == "times":
2904 package = "mathptmx"
2905 elif roman == "xcharter":
2906 package = "XCharter"
# Per-package spelling of the old-style-figures option (elided values).
2908 j = find_token(document.header, "\\font_roman_osf true", 0)
2910 if roman == "cochineal":
2911 osf = "proportional,osf,"
2912 elif roman == "utopia":
2914 elif roman == "garamondx":
2916 elif roman == "libertine":
2918 elif roman == "palatino":
2920 elif roman == "xcharter":
2922 document.header[j] = "\\font_roman_osf false"
2923 k = find_token(document.header, "\\font_sc true", 0)
2925 if roman == "utopia":
2927 if roman == "palatino" and osf == "":
2929 document.header[k] = "\\font_sc false"
2930 preamble = "\\usepackage["
2933 preamble += "]{" + package + "}"
2934 add_to_preamble(document, [preamble])
2935 del document.header[x]
# Converts the native Cantarell TeX-font selection to the new header
# format via the shared font-mapping machinery ("oldstyle" option key).
2938 def convert_CantarellFont(document):
2939 " Handle Cantarell font definition to LaTeX "
2941 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2942 fm = createFontMapping(['Cantarell'])
2943 convert_fonts(document, fm, "oldstyle")
# Reverts the native Cantarell font selection to explicit preamble
# packages via the shared font-mapping machinery.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2945 def revert_CantarellFont(document):
2946 " Revert native Cantarell font definition to LaTeX "
2948 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2950 fm = createFontMapping(['Cantarell'])
2951 if revert_fonts(document, fm, fontmap, False, True):
2952 add_preamble_fonts(document, fontmap)
# Converts the native Chivo TeX-font selection to the new header format
# via the shared font-mapping machinery ("oldstyle" option key).
2954 def convert_ChivoFont(document):
2955 " Handle Chivo font definition to LaTeX "
2957 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2958 fm = createFontMapping(['Chivo'])
2959 convert_fonts(document, fm, "oldstyle")
# Reverts the native Chivo font selection to explicit preamble packages
# via the shared font-mapping machinery.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2961 def revert_ChivoFont(document):
2962 " Revert native Chivo font definition to LaTeX "
2964 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2966 fm = createFontMapping(['Chivo'])
2967 if revert_fonts(document, fm, fontmap, False, True):
2968 add_preamble_fonts(document, fontmap)
# Converts the native Fira TeX-font selection to the new header format
# via the shared font-mapping machinery ("lf" = lining-figures key).
2971 def convert_FiraFont(document):
2972 " Handle Fira font definition to LaTeX "
2974 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2975 fm = createFontMapping(['Fira'])
2976 convert_fonts(document, fm, "lf")
# Reverts the native Fira font selection to explicit preamble packages
# via the shared font-mapping machinery.
# NOTE(review): elided — the `fontmap` initialization is not visible.
2978 def revert_FiraFont(document):
2979 " Revert native Fira font definition to LaTeX "
2981 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2983 fm = createFontMapping(['Fira'])
2984 if revert_fonts(document, fm, fontmap, False, True):
2985 add_preamble_fonts(document, fontmap)
def _convert_semibold_slot(document, token, sbfont, plainfont, optstoken,
                           scaletoken, nontexfonts):
    """Strip the Semibold face from one font slot and move a "semibold"
    entry into the matching *_opts header tag (TeX fonts only).

    token      -- header tag of the slot (e.g. "\\font_roman")
    sbfont     -- the Semibold font name to look for
    plainfont  -- the regular font name to substitute
    optstoken  -- the *_opts tag receiving the "semibold" option
    scaletoken -- tag before which a missing *_opts tag is inserted
    """
    i = find_token(document.header, token, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing %s." % token)
        return
    # We need to use this regex since split() does not handle quote protection
    fontline = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if fontline[1].strip('"') != sbfont:
        return
    fontline[1] = '"' + plainfont + '"'
    document.header[i] = " ".join(fontline)

    if nontexfonts:
        # no *_opts handling for system fonts
        return
    x = find_re(document.header, re.compile("(" + re.escape(optstoken) + ")"), 0)
    if x == -1:
        # Sensible place to insert tag
        fo = find_token(document.header, scaletoken)
        if fo == -1:
            document.warning("Malformed LyX document! Missing %s" % scaletoken)
        else:
            document.header.insert(fo, optstoken + " \"semibold\"")
    else:
        # We need to use this regex since split() does not handle quote protection
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = optstoken + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    " Move semibold options to extraopts "

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    # Note: the typewriter slot previously re-used the sans options
    # (sfopts) when rewriting \font_typewriter_opts — fixed by the
    # shared helper, which always uses the slot's own options.
    _convert_semibold_slot(document, "\\font_roman", "IBMPlexSerifSemibold",
                           "IBMPlexSerif", "\\font_roman_opts",
                           "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_slot(document, "\\font_sans", "IBMPlexSansSemibold",
                           "IBMPlexSans", "\\font_sans_opts",
                           "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_slot(document, "\\font_typewriter", "IBMPlexMonoSemibold",
                           "IBMPlexMono", "\\font_typewriter_opts",
                           "\\font_tt_scale", NonTeXFonts)
def _convert_noto_regular(document, token, tlffont, regularfont):
    """Rename one Noto *-TLF font in header slot `token` to the merged
    Regular font name."""
    i = find_token(document.header, token, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing %s." % token)
        return
    # We need to use this regex since split() does not handle quote protection
    fontline = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if fontline[1].strip('"') == tlffont:
        fontline[1] = '"' + regularfont + '"'
        document.header[i] = " ".join(fontline)


def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    _convert_noto_regular(document, "\\font_roman", "NotoSerif-TLF",
                          "NotoSerifRegular")
    _convert_noto_regular(document, "\\font_sans", "NotoSans-TLF",
                          "NotoSansRegular")
    _convert_noto_regular(document, "\\font_typewriter", "NotoMono-TLF",
                          "NotoMonoRegular")
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "
    # System (non-TeX) fonts need no LaTeX package handling.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects the packages/options revert_fonts() found; it was
        # previously referenced without ever being initialized.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # Only memoir and KOMA-script (scr*) classes support these sizes.
    # Was `[:2] != "scr"`, which never matches a 3-char prefix — use
    # [:3] as in convert_pagesizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active the page size is handled there.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes LyX 2.3 already understands need no reversion.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    # Fall back to the default size; pass the real size as class option.
    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    # Only memoir and KOMA-script (scr*) classes support these sizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry already active nothing needs to change.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # standard size — no geometry needed
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # (was `document.header[1]`, clobbering an unrelated header line;
        # the found "\\use_geometry false" line itself must be flipped)
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes LyX 2.3 already understands need no reversion.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    # Fall back to the default size; pass the real size as class option.
    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
# NOTE(review): this block is a mangled paste — indentation is stripped,
# each line carries a stray line number, and many control-flow lines
# (if/else/return, loop headers, dict opener for ql_citations, premap/
# postmap initializers) are missing, so it does not parse as-is.  The
# surviving statements are kept byte-identical below; the comments only
# record what can still be read from them.  Intent: for biblatex engines,
# find citation insets with a pre/posttext list whose key list contains
# duplicates (unsupported in LyX 2.3) and replace the inset with the
# equivalent \*cites(...)[...]{...} LaTeX command in ERT.
3219 def revert_dupqualicites(document):
3220 " Revert qualified citation list commands with duplicate keys to ERT "
3222 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3223 # we need to revert those with multiple uses of the same key.
3227 i = find_token(document.header, "\\cite_engine", 0)
3229 document.warning("Malformed document! Missing \\cite_engine")
3231 engine = get_value(document.header, "\\cite_engine", i)
3233 if not engine in ["biblatex", "biblatex-natbib"]:
3236 # Citation insets that support qualified lists, with their LaTeX code
# presumably the opener `ql_citations = {` was lost here — TODO confirm
3240 "citet" : "textcites",
3241 "Citet" : "Textcites",
3242 "citep" : "parencites",
3243 "Citep" : "Parencites",
3244 "Footcite" : "Smartcites",
3245 "footcite" : "smartcites",
3246 "Autocite" : "Autocites",
3247 "autocite" : "autocites",
3252 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3255 j = find_end_of_inset(document.body, i)
3257 document.warning("Can't find end of citation inset at line %d!!" %(i))
3261 k = find_token(document.body, "LatexCommand", i, j)
3263 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3267 cmd = get_value(document.body, "LatexCommand", k)
3268 if not cmd in list(ql_citations.keys()):
3272 pres = find_token(document.body, "pretextlist", i, j)
3273 posts = find_token(document.body, "posttextlist", i, j)
3274 if pres == -1 and posts == -1:
3279 key = get_quoted_value(document.body, "key", i, j)
3281 document.warning("Citation inset at line %d does not have a key!" %(i))
3285 keys = key.split(",")
3286 ukeys = list(set(keys))
# duplicate keys exist only when the deduplicated list is shorter
3287 if len(keys) == len(ukeys):
3292 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3293 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3295 pre = get_quoted_value(document.body, "before", i, j)
3296 post = get_quoted_value(document.body, "after", i, j)
# pretextlist entries are tab-separated "key text" pairs
3297 prelist = pretexts.split("\t")
3300 ppp = pp.split(" ", 1)
3306 if ppp[0] in premap:
3307 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3309 premap[ppp[0]] = val
3310 postlist = posttexts.split("\t")
3314 ppp = pp.split(" ", 1)
3320 if ppp[0] in postmap:
3321 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3323 postmap[ppp[0]] = val
3324 # Replace known new commands with ERT
# parentheses in pre/post text must be braced, or biblatex would
# mistake them for the (pre)(post) argument delimiters
3325 if "(" in pre or ")" in pre:
3326 pre = "{" + pre + "}"
3327 if "(" in post or ")" in post:
3328 post = "{" + post + "}"
3329 res = "\\" + ql_citations[cmd]
3331 res += "(" + pre + ")"
3333 res += "(" + post + ")"
3337 if premap.get(kk, "") != "":
3338 akeys = premap[kk].split("\t", 1)
3341 res += "[" + akey + "]"
3343 premap[kk] = "\t".join(akeys[1:])
3346 if postmap.get(kk, "") != "":
3347 akeys = postmap[kk].split("\t", 1)
3350 res += "[" + akey + "]"
3352 postmap[kk] = "\t".join(akeys[1:])
3355 elif premap.get(kk, "") != "":
3357 res += "{" + kk + "}"
3358 document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    " Convert LyX page sizes names (drop the 'paper' suffix) "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    # only rename known sizes; leave "default", "custom" etc. alone
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    " Revert LyX page sizes names (re-add the 'paper' suffix) "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    # only rename known sizes; leave "default", "custom" etc. alone
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # only load the package if endnotes are actually used
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    # if an endnote module is also active, its \endnote command wins
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
    add_to_preamble(document, ["\\makepagenote"])
# NOTE(review): this block is a mangled paste — indentation is stripped,
# stray line numbers prefix each line, and control-flow lines (the main
# loop header, several if/else branches) are missing, so it does not
# parse as-is.  Statements kept byte-identical; comments only record
# what is still readable.  Intent: for each Graphics inset, move a
# "height=" entry from the `special` parameter into the inset's plain
# `height` parameter, renaming the old `height` to special totalheight.
3474 def revert_totalheight(document):
3475 " Reverts graphics height parameter from totalheight to height "
# maps LaTeX relative length macros to LyX's percent units
3477 relative_heights = {
3478 "\\textwidth" : "text%",
3479 "\\columnwidth" : "col%",
3480 "\\paperwidth" : "page%",
3481 "\\linewidth" : "line%",
3482 "\\textheight" : "theight%",
3483 "\\paperheight" : "pheight%",
# NOTE(review): the key "\\baselineskip " has a trailing space — looks
# suspicious; verify against the unit actually parsed below
3484 "\\baselineskip " : "baselineskip%"
3488 i = find_token(document.body, "\\begin_inset Graphics", i)
3491 j = find_end_of_inset(document.body, i)
3493 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# rx matches the "special ..." parameter line; rxx splits value+unit
3497 rx = re.compile(r'\s*special\s*(\S+)$')
3498 rxx = re.compile(r'(\d*\.*\d+)(\S+)$')
3499 k = find_re(document.body, rx, i, j)
3503 m = rx.match(document.body[k])
3505 special = m.group(1)
3506 mspecial = special.split(',')
3507 for spc in mspecial:
3508 if spc.startswith("height="):
3509 oldheight = spc.split('=')[1]
3510 ms = rxx.search(oldheight)
3512 oldunit = ms.group(2)
3513 if oldunit in list(relative_heights.keys()):
# relative units are stored as value*100 with a percent unit
3514 oldval = str(float(ms.group(1)) * 100)
3515 oldunit = relative_heights[oldunit]
3516 oldheight = oldval + oldunit
# NOTE(review): removing from the list being iterated — works here only
# if at most one height= entry exists; confirm
3517 mspecial.remove(spc)
3519 if len(mspecial) > 0:
3520 special = ",".join(mspecial)
3524 rx = re.compile(r'(\s*height\s*)(\S+)$')
3525 kk = find_re(document.body, rx, i, j)
3527 m = rx.match(document.body[kk])
3533 val = val + "," + special
3534 document.body[k] = "\tspecial " + "totalheight=" + val
3536 document.body.insert(kk, "\tspecial totalheight=" + val)
3538 document.body[kk] = m.group(1) + oldheight
3540 del document.body[kk]
3541 elif oldheight != "":
3543 document.body[k] = "\tspecial " + special
3544 document.body.insert(k, "\theight " + oldheight)
3546 document.body[k] = "\theight " + oldheight
# NOTE(review): this block is a mangled paste — indentation is stripped,
# stray line numbers prefix each line, and control-flow lines (the main
# loop header, several if/else branches) are missing, so it does not
# parse as-is.  Statements kept byte-identical; comments only record
# what is still readable.  Intent: inverse of revert_totalheight —
# move a "totalheight=" entry from `special` into a "height=" entry,
# converting the existing `height` parameter as needed.
3550 def convert_totalheight(document):
3551 " Converts graphics height parameter from totalheight to height "
# maps LyX percent units back to LaTeX relative length macros
3553 relative_heights = {
3554 "text%" : "\\textwidth",
3555 "col%" : "\\columnwidth",
3556 "page%" : "\\paperwidth",
3557 "line%" : "\\linewidth",
3558 "theight%" : "\\textheight",
3559 "pheight%" : "\\paperheight",
3560 "baselineskip%" : "\\baselineskip"
3564 i = find_token(document.body, "\\begin_inset Graphics", i)
3567 j = find_end_of_inset(document.body, i)
3569 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3573 rx = re.compile(r'\s*special\s*(\S+)$')
3574 k = find_re(document.body, rx, i, j)
3578 m = rx.match(document.body[k])
3580 special = m.group(1)
3581 mspecial = special.split(',')
3582 for spc in mspecial:
3583 if spc[:12] == "totalheight=":
3584 newheight = spc.split('=')[1]
# NOTE(review): removing from the list being iterated — works here only
# if at most one totalheight= entry exists; confirm
3585 mspecial.remove(spc)
3587 if len(mspecial) > 0:
3588 special = ",".join(mspecial)
3592 rx = re.compile(r'(\s*height\s*)(\d+)(\S+)$')
3593 kk = find_re(document.body, rx, i, j)
3595 m = rx.match(document.body[kk])
3600 if unit in list(relative_heights.keys()):
# percent units are stored as value/100 of the LaTeX macro
3601 val = str(float(val) / 100)
3602 unit = relative_heights[unit]
3605 val = val + unit + "," + special
3606 document.body[k] = "\tspecial " + "height=" + val
3608 document.body.insert(kk + 1, "\tspecial height=" + val + unit)
3610 document.body[kk] = m.group(1) + newheight
3612 del document.body[kk]
3613 elif newheight != "":
3614 document.body.insert(k, "\theight " + newheight)
def convert_changebars(document):
    " Converts the changebars module to native solution "

    if not "changebars" in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        # still drop the module so the document stays loadable
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    " Converts native changebar param to module "

    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)

    if val == "true":
        document.add_module("changebars")

    # the native param is unknown to older LyX — drop it in any case
    del document.header[i]
def convert_postpone_fragile(document):
    " Adds false \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    " Remove \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    " Remove change tag from tabular columns/rows "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            # strip the change="..." attribute from <column ...> and <row ...>
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
def convert_counter_maintenance(document):
    " Convert \\maintain_unincluded_children buffer param from boolean value to tristate "

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    # true -> strict, false -> no
    if val == "true":
        document.header[i] = "\\maintain_unincluded_children strict"
    else:
        document.header[i] = "\\maintain_unincluded_children no"
def revert_counter_maintenance(document):
    " Revert \\maintain_unincluded_children buffer param to previous boolean value "

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    # no -> false, everything else (strict/mostly) -> true
    if val == "no":
        document.header[i] = "\\maintain_unincluded_children false"
    else:
        document.header[i] = "\\maintain_unincluded_children true"
# NOTE(review): this block is a mangled paste — indentation is stripped,
# stray line numbers prefix each line, and control-flow lines (the main
# loop header, several if/else and guard lines) are missing, so it does
# not parse as-is.  Statements kept byte-identical; comments only record
# what is still readable.  Intent: replace counter insets with the
# equivalent \setcounter/\addtocounter ERT where a LaTeX counterpart
# exists; save/restore go through auxiliary LyXSave<name> counters that
# are declared in the preamble at the end.
3724 def revert_counter_inset(document):
3725 " Revert counter inset to ERT, where possible"
# counters that need a \newcounter{LyXSave...} declaration
3727 needed_counters = {}
3729 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3732 j = find_end_of_inset(document.body, i)
3734 document.warning("Can't find end of counter inset at line %d!" % i)
3737 lyx = get_quoted_value(document.body, "lyxonly", i, j)
# lyxonly insets have no LaTeX counterpart — just drop them
3739 # there is nothing we can do to affect the LyX counters
3740 document.body[i : j + 1] = []
3743 cnt = get_quoted_value(document.body, "counter", i, j)
3745 document.warning("No counter given for inset at line %d!" % i)
3749 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
# NOTE(review): this warning emits the bare command name — looks like a
# leftover debug statement; verify whether it should be removed
3750 document.warning(cmd)
3753 val = get_quoted_value(document.body, "value", i, j)
3755 document.warning("Can't convert counter inset at line %d!" % i)
3757 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3758 elif cmd == "addto":
3759 val = get_quoted_value(document.body, "value", i, j)
3761 document.warning("Can't convert counter inset at line %d!" % i)
3763 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3764 elif cmd == "reset":
3765 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
# save: copy current value into the LyXSave counter
3767 needed_counters[cnt] = 1
3768 savecnt = "LyXSave" + cnt
3769 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3770 elif cmd == "restore":
3771 needed_counters[cnt] = 1
3772 savecnt = "LyXSave" + cnt
3773 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
3775 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
3778 document.body[i : j + 1] = ert
3783 for cnt in needed_counters:
3784 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3786 add_to_preamble(document, pretext)
def revert_ams_spaces(document):
    "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"

    found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(inset)
            document.body[i : end + 1] = subst
            found = True

    if found:
        # load amsmath in the preamble if not already loaded
        i = find_token(document.header, "\\use_package amsmath 2", 0)
        if i == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
def convert_parskip(document):
    " Move old parskip settings to preamble "

    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    skipval = "\\medskipamount"
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "\\" + val + "amount"
    else:
        # a custom length was given verbatim
        skipval = val

    add_to_preamble(document, ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_parskip(document):
    " Revert new parskip settings to preamble "

    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    # halfline is the parskip package default and needs no option
    skipval = ""
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val != "halfline":
        skipval = "[skip={" + val + "}]"

    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_line_vspaces(document):
    " Revert fulline and halfline vspaces to TeX "

    # the dict opener was lost in the mangled original; mapping taken
    # from the surviving key/value lines
    insets = {
        "fullline*" : "\\vspace*{\\baselineskip}",
        "fullline" : "\\vspace{\\baselineskip}",
        "halfline*" : "\\vspace*{0.5\\baselineskip}",
        "halfline" : "\\vspace{0.5\\baselineskip}",
        }
    for inset in insets.keys():
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(insets[inset])
            document.body[i : end + 1] = subst
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX"""
    # System (non-TeX) fonts need no LaTeX package handling.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Libertinus'])
    convert_fonts(document, mapping)
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects the packages/options revert_fonts() found; it was
        # previously referenced without ever being initialized.
        fontmap = dict()
        fm = createFontMapping(['libertinus'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# NOTE(review): this block is a mangled paste — indentation is stripped,
# stray line numbers prefix each line, and control-flow lines (if/else
# guards, the try/except presumably wrapping the float() calls, the
# assignments extracting `val` from the scale lines) are missing, so it
# does not parse as-is.  Statements kept byte-identical; comments only
# record what is still readable.  Intent: for TeX fonts, revert native
# LibertinusSans-LF / LibertinusMono-TLF header values to preamble
# \renewcommand definitions, including the scale factors.
3900 def revert_libertinus_sftt_fonts(document):
3901 " Revert Libertinus sans and tt font definitions to LaTeX "
3903 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
3906 i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
# with OsF requested, the OsF family is used and the flag is cleared
3908 j = find_token(document.header, "\\font_sans_osf true", 0)
3910 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
3911 document.header[j] = "\\font_sans_osf false"
3913 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
3914 document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
3916 sfval = find_token(document.header, "\\font_sf_scale", 0)
3918 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3920 sfscale = document.header[sfval].split()
3923 document.header[sfval] = " ".join(sfscale)
3926 sf_scale = float(val)
3928 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): comparing a float against the string "100.0" is always
# True — likely meant `sf_scale != 100.0`; verify upstream
3929 if sf_scale != "100.0":
3930 add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])
3932 i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
3934 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
3935 document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
3937 ttval = find_token(document.header, "\\font_tt_scale", 0)
3939 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
3941 ttscale = document.header[ttval].split()
3944 document.header[ttval] = " ".join(ttscale)
3947 tt_scale = float(val)
3949 document.warning("Invalid font_tt_scale value: " + val)
# NOTE(review): same float-vs-string comparison as above — verify
3950 if tt_scale != "100.0":
3951 add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
def revert_docbook_table_output(document):
    " Remove the \\docbook_table_output buffer param "
    i = find_token(document.header, '\\docbook_table_output')
    if i != -1:
        del document.header[i]
def revert_nopagebreak(document):
    " Revert Newpage nopagebreak insets to TeX code "
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            # bail out: the search restarts from the top each pass, so
            # continuing on a malformed inset would loop forever
            return
        subst = put_cmd_in_ert("\\nopagebreak{}")
        document.body[i : end + 1] = subst
def revert_hrquotes(document):
    " Revert Hungarian Quotation marks "

    i = find_token(document.header, "\\quotes_style hungarian", 0)
    if i != -1:
        # Polish style is the closest match known to older LyX
        document.header[i] = "\\quotes_style polish"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Quotes h", i)
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes hld":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes hrd":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes hls":
            document.body[i] = "\\begin_inset Quotes ald"
        elif document.body[i] == "\\begin_inset Quotes hrs":
            document.body[i] = "\\begin_inset Quotes ard"
        i += 1
def convert_math_refs(document):
    " Rename \\prettyref to \\formatted inside formulas "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
            i += 1
def revert_math_refs(document):
    " Revert \\formatted to \\prettyref and strip \\labelonly inside formulas "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in document.body[i]:
                # \labelonly{foo} has no LaTeX counterpart: keep only the label
                document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
            i += 1
def convert_branch_colors(document):
    " Convert branch colors to semantic values "

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        k = find_token(document.header, "\\color #faf0e6", i, j)
        if k != -1:
            document.header[k] = "\\color background"
        i += 1
def revert_branch_colors(document):
    " Revert semantic branch colors "

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            # Only semantic (non-hex) colors need reverting.  Checking
            # the FIRST character for "#" (was bcolor[1], which also
            # rejected hex colors like "#faf0e6" and crashed on empty
            # values).
            if bcolor and bcolor[0] != "#":
                # this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1
def revert_darkmode_graphics(document):
    " Revert darkModeSensitive InsetGraphics param "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "\tdarkModeSensitive", i, j)
        if k != -1:
            # the param is unknown to older LyX — simply drop it
            del document.body[k]
        i += 1
def revert_branch_darkcols(document):
    " Revert dark branch colors "

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            # "\color <light> <dark>" -> keep only the light color
            m = re.search(r'\\color (\S+) (\S+)', document.header[k])
            if m:
                document.header[k] = "\\color " + m.group(1)
        i += 1
4108 def revert_vcolumns2(document):
4109 """Revert varwidth columns with line breaks etc."""
4111 needvarwidth = False
4113 needcellvarwidth = False
4116 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4119 j = find_end_of_inset(document.body, i)
4121 document.warning("Malformed LyX document: Could not find end of tabular.")
4124 # Collect necessary column information
4126 nrows = int(document.body[i+1].split('"')[3])
4127 ncols = int(document.body[i+1].split('"')[5])
4129 for k in range(ncols):
4130 m = find_token(document.body, "<column", m)
4131 width = get_option_value(document.body[m], 'width')
4132 varwidth = get_option_value(document.body[m], 'varwidth')
4133 alignment = get_option_value(document.body[m], 'alignment')
4134 valignment = get_option_value(document.body[m], 'valignment')
4135 special = get_option_value(document.body[m], 'special')
4136 col_info.append([width, varwidth, alignment, valignment, special, m])
4142 for row in range(nrows):
4143 for col in range(ncols):
4144 m = find_token(document.body, "<cell", m)
4145 multicolumn = get_option_value(document.body[m], 'multicolumn') != ""
4146 multirow = get_option_value(document.body[m], 'multirow') != ""
4147 fixedwidth = get_option_value(document.body[m], 'width') != ""
4148 rotate = get_option_value(document.body[m], 'rotate')
4149 cellalign = get_option_value(document.body[m], 'alignment')
4150 cellvalign = get_option_value(document.body[m], 'valignment')
4151 # Check for: linebreaks, multipars, non-standard environments
4153 endcell = find_token(document.body, "</cell>", begcell)
4155 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
4156 vcand = not fixedwidth
4157 elif count_pars_in_inset(document.body, begcell + 2) > 1:
4158 vcand = not fixedwidth
4159 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
4160 vcand = not fixedwidth
4161 colalignment = col_info[col][2]
4162 colvalignment = col_info[col][3]
4164 if rotate == "" and ((colalignment == "left" and colvalignment == "top") or (multicolumn == True and cellalign == "left" and cellvalign == "top")):
4165 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][4] == "":
4167 col_line = col_info[col][5]
4169 vval = "V{\\linewidth}"
4171 document.body[m] = document.body[m][:-1] + " special=\"" + vval + "\">"
4173 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
4176 if multicolumn or multirow:
4177 if cellvalign == "middle":
4179 elif cellvalign == "bottom":
4182 if colvalignment == "middle":
4184 elif colvalignment == "bottom":
4186 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4187 elt = find_token_backwards(document.body, "\\end_layout", endcell)
4188 if flt != -1 and elt != -1:
4190 # we need to reset character layouts if necessary
4191 el = find_token(document.body, '\\emph on', flt, elt)
4193 extralines.append("\\emph default")
4194 el = find_token(document.body, '\\noun on', flt, elt)
4196 extralines.append("\\noun default")
4197 el = find_token(document.body, '\\series', flt, elt)
4199 extralines.append("\\series default")
4200 el = find_token(document.body, '\\family', flt, elt)
4202 extralines.append("\\family default")
4203 el = find_token(document.body, '\\shape', flt, elt)
4205 extralines.append("\\shape default")
4206 el = find_token(document.body, '\\color', flt, elt)
4208 extralines.append("\\color inherit")
4209 el = find_token(document.body, '\\size', flt, elt)
4211 extralines.append("\\size default")
4212 el = find_token(document.body, '\\bar under', flt, elt)
4214 extralines.append("\\bar default")
4215 el = find_token(document.body, '\\uuline on', flt, elt)
4217 extralines.append("\\uuline default")
4218 el = find_token(document.body, '\\uwave on', flt, elt)
4220 extralines.append("\\uwave default")
4221 el = find_token(document.body, '\\strikeout on', flt, elt)
4223 extralines.append("\\strikeout default")
4224 document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + ["\end_layout"]
4226 for q in range(flt, elt):
4227 if document.body[q] != "" and document.body[q][0] != "\\":
4229 if document.body[q][:5] == "\\lang":
4233 document.body[parlang+1:parlang+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4235 document.body[flt+1:flt+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4236 needcellvarwidth = True
4238 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
4239 # with newlines, and we do not want that)
4241 endcell = find_token(document.body, "</cell>", begcell)
4243 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
4245 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
4249 nle = find_end_of_inset(document.body, nl)
4250 del(document.body[nle:nle+1])
4252 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
4254 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
4255 # Replace parbreaks in multirow with \\endgraf
4256 if multirow == True:
4257 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4260 elt = find_end_of_layout(document.body, flt)
4262 document.warning("Malformed LyX document! Missing layout end.")
4264 endcell = find_token(document.body, "</cell>", begcell)
4265 flt = find_token(document.body, "\\begin_layout", elt, endcell)
4268 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
4274 if needarray == True:
4275 add_to_preamble(document, ["\\usepackage{array}"])
4276 if needcellvarwidth == True:
4277 add_to_preamble(document, ["%% Variable width box for table cells",
4278 "\\newenvironment{cellvarwidth}[1][t]",
4279 " {\\begin{varwidth}[#1]{\\linewidth}}",
4280 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}"])
4281 if needvarwidth == True:
4282 add_to_preamble(document, ["\\usepackage{varwidth}"])
4285 def convert_vcolumns2(document):
# Convert cell contents that revert_vcolumns2() wrapped in "cellvarwidth" ERT
# back to native LyX varwidth table cells.
# NOTE(review): this excerpt is mangled — original file line numbers are fused
# into the text, indentation is stripped, and several lines (while/for headers,
# `begcell` assignments, guards, loop-advance statements) are missing from this
# view.  Code is kept byte-identical; only comments were added.
4286 """Convert varwidth ERT to native"""
# Locate the next tabular inset and its matching end; warn if unterminated.
4290 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4293 j = find_end_of_inset(document.body, i)
4295 document.warning("Malformed LyX document: Could not find end of tabular.")
# Table geometry is read from the quoted attribute values on the line
# following the inset opening (split('"') picks attribute fields).
4299 nrows = int(document.body[i+1].split('"')[3])
4300 ncols = int(document.body[i+1].split('"')[5])
# Walk every cell of the table (row-major).
4303 for row in range(nrows):
4304 for col in range(ncols):
4305 m = find_token(document.body, "<cell", m)
4306 multirow = get_option_value(document.body[m], 'multirow') != ""
4308 endcell = find_token(document.body, "</cell>", begcell)
# A cell is a conversion candidate when it contains "begin{cellvarwidth}"
# preceded by an escaped backslash, inside an ERT inset.
4310 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4312 vcand = document.body[cvw - 1] == "\\backslash" and get_containing_inset(document.body, cvw)[0] == "ERT"
4314 # Remove ERTs with cellvarwidth env
# Delete the closing \end{cellvarwidth} ERT first, then re-find and delete
# the opening \begin{cellvarwidth} ERT (indices shift after the first del).
4315 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
4317 if document.body[ecvw - 1] == "\\backslash":
4318 eertins = get_containing_inset(document.body, ecvw)
4319 if eertins and eertins[0] == "ERT":
4320 del document.body[eertins[1] : eertins[2] + 1]
4322 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4323 ertins = get_containing_inset(document.body, cvw)
4324 if ertins and ertins[0] == "ERT":
4325 del(document.body[ertins[1] : ertins[2] + 1])
4327 # Convert ERT newlines (as cellvarwidth detection relies on that)
# An ERT "\\" newline shows up as "\backslash" with another "\backslash"
# two lines later; replace the whole ERT inset with a native Newline inset.
4329 endcell = find_token(document.body, "</cell>", begcell)
4330 nl = find_token(document.body, "\\backslash", begcell, endcell)
4331 if nl == -1 or document.body[nl + 2] != "\\backslash":
4333 ertins = get_containing_inset(document.body, nl)
4334 if ertins and ertins[0] == "ERT":
4335 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline newline", "", "\\end_inset"]
4337 # Same for linebreaks
4339 endcell = find_token(document.body, "</cell>", begcell)
4340 nl = find_token(document.body, "linebreak", begcell, endcell)
4341 if nl == -1 or document.body[nl - 1] != "\\backslash":
4343 ertins = get_containing_inset(document.body, nl)
4344 if ertins and ertins[0] == "ERT":
4345 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline linebreak", "", "\\end_inset"]
# In multirow cells, ERT "\endgraf{}" markers stand for paragraph breaks;
# turn each one back into a real \end_layout / \begin_layout pair.
4348 if multirow == True:
4349 endcell = find_token(document.body, "</cell>", begcell)
4350 nl = find_token(document.body, "endgraf{}", begcell, endcell)
4351 if nl == -1 or document.body[nl - 1] != "\\backslash":
4353 ertins = get_containing_inset(document.body, nl)
4354 if ertins and ertins[0] == "ERT":
4355 document.body[ertins[1] : ertins[2] + 1] = ["\\end_layout", "", "\\begin_layout Plain Layout"]
# Finally drop the preamble code that revert_vcolumns2() had inserted
# (cellvarwidth environment definition and the varwidth package load).
4361 del_complete_lines(document.preamble,
4362 ['% Added by lyx2lyx',
4363 '%% Variable width box for table cells',
4364 r'\newenvironment{cellvarwidth}[1][t]',
4365 r' {\begin{varwidth}[#1]{\linewidth}}',
4366 r' {\@finalstrut\@arstrutbox\end{varwidth}}'])
4367 del_complete_lines(document.preamble,
4368 ['% Added by lyx2lyx',
4369 r'\usepackage{varwidth}'])
# Local layout definition of the KOMA-Script "Frontispiece" paragraph style,
# used by convert_koma_frontispiece()/revert_koma_frontispiece() to round-trip
# documents between formats with and without native frontispiece support.
# NOTE(review): the tail of this list literal ('End' terminator and closing
# bracket) was missing from the excerpt and has been restored; internal
# whitespace of the layout lines could not be recovered from the mangled
# source — confirm against upstream lyx_2_4.py.
frontispiece_def = [
    r'### Inserted by lyx2lyx (frontispiece layout) ###',
    r'Style Frontispiece',
    r' CopyStyle Titlehead',
    r' LatexName frontispiece',
    r'End',
]
def convert_koma_frontispiece(document):
    """Remove the local KOMA frontispiece layout definition.

    From this format on, the KOMA-Script (scr*) classes provide the
    Frontispiece style natively, so a locally inserted copy of the layout
    (frontispiece_def) is no longer needed and is deleted from the
    document's local layout.
    """
    # Only KOMA-Script classes (scrartcl, scrreprt, scrbook, ...) are affected.
    if document.textclass[:3] != "scr":
        return

    # NOTE(review): the original excerpt called document.add_module("ruby")
    # when the layout was deleted — an apparent copy-paste from
    # convert_ruby_module(); the Frontispiece style needs no module, so the
    # call was dropped.  The guard `return` above was missing from the
    # truncated excerpt and has been restored.
    document.del_local_layout(frontispiece_def)
def revert_koma_frontispiece(document):
    """Re-add the local KOMA frontispiece layout definition.

    Older formats do not know the native Frontispiece style of the
    KOMA-Script classes, so if the style is used in the document body,
    the layout definition (frontispiece_def) is appended as local layout.
    """
    # Only KOMA-Script classes (scr*) have this style.
    if document.textclass[:3] != "scr":
        return

    # NOTE(review): the guard `return` above was missing from the truncated
    # excerpt and has been restored.
    if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
        document.append_local_layout(frontispiece_def)
def revert_spellchecker_ignore(document):
    """Revert the document-local spellchecker dictionary.

    Removes every \\spellchecker_ignore line from the header (the format
    stores one ignored word per line), since older formats do not know
    this header token.
    """
    # NOTE(review): the `i == -1` guard was missing from the truncated
    # excerpt; without it, a miss (find_token returning -1) would delete
    # the last header line.  Loop until no entry is left.
    while True:
        i = find_token(document.header, "\\spellchecker_ignore")
        if i == -1:
            return
        del document.header[i]
def revert_docbook_mathml_prefix(document):
    """Revert the DocBook parameter that chooses the MathML name-space prefix.

    Deletes the \\docbook_mathml_prefix header line, which older formats
    do not know.
    """
    i = find_token(document.header, "\\docbook_mathml_prefix")
    # NOTE(review): this guard was missing from the truncated excerpt;
    # without it, a miss (find_token returning -1) would delete the last
    # header line instead of being a no-op.
    if i == -1:
        return
    del document.header[i]
# Conversion-chain tables for lyx2lyx: `supported_versions` lists the LyX
# releases this module targets; the convert/revert tables map each file-format
# number to the functions that convert to / revert from that format.
# NOTE(review): this excerpt is truncated — the `convert = [` opener and many
# format entries are missing from this view, and original file line numbers
# are fused into the text.  Data kept byte-identical; comments only.
4420 supported_versions = ["2.4.0", "2.4"]
# Forward conversions (ascending format numbers).
4422 [545, [convert_lst_literalparam]],
4427 [550, [convert_fontenc]],
4434 [557, [convert_vcsinfo]],
4435 [558, [removeFrontMatterStyles]],
4438 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
4442 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
4443 [566, [convert_hebrew_parentheses]],
4449 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
4450 [573, [convert_inputencoding_namechange]],
4451 [574, [convert_ruby_module, convert_utf8_japanese]],
4452 [575, [convert_lineno, convert_aaencoding]],
4454 [577, [convert_linggloss]],
4458 [581, [convert_osf]],
4459 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old fonts re-converted due to extra options
4460 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
4462 [585, [convert_pagesizes]],
4464 [587, [convert_pagesizenames]],
4466 [589, [convert_totalheight]],
4467 [590, [convert_changebars]],
4468 [591, [convert_postpone_fragile]],
4470 [593, [convert_counter_maintenance]],
4473 [596, [convert_parskip]],
4474 [597, [convert_libertinus_rm_fonts]],
4478 [601, [convert_math_refs]],
4479 [602, [convert_branch_colors]],
4482 [605, [convert_vcolumns2]],
4483 [606, [convert_koma_frontispiece]],
# Backward conversions (descending format numbers, newest first).
4488 revert = [[607, [revert_docbook_mathml_prefix]],
4489 [606, [revert_spellchecker_ignore]],
4490 [605, [revert_koma_frontispiece]],
4491 [604, [revert_vcolumns2]],
4492 [603, [revert_branch_darkcols]],
4493 [602, [revert_darkmode_graphics]],
4494 [601, [revert_branch_colors]],
4496 [599, [revert_math_refs]],
4497 [598, [revert_hrquotes]],
4498 [598, [revert_nopagebreak]],
4499 [597, [revert_docbook_table_output]],
4500 [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
4501 [595, [revert_parskip,revert_line_vspaces]],
4502 [594, [revert_ams_spaces]],
4503 [593, [revert_counter_inset]],
4504 [592, [revert_counter_maintenance]],
4505 [591, [revert_colrow_tracking]],
4506 [590, [revert_postpone_fragile]],
4507 [589, [revert_changebars]],
4508 [588, [revert_totalheight]],
4509 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
4510 [586, [revert_pagesizenames]],
4511 [585, [revert_dupqualicites]],
4512 [584, [revert_pagesizes,revert_komafontsizes]],
4513 [583, [revert_vcsinfo_rev_abbrev]],
4514 [582, [revert_ChivoFont,revert_CrimsonProFont]],
4515 [581, [revert_CantarellFont,revert_FiraFont]],
4516 [580, [revert_texfontopts,revert_osf]],
4517 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
4518 [578, [revert_babelfont]],
4519 [577, [revert_drs]],
4520 [576, [revert_linggloss, revert_subexarg]],
4521 [575, [revert_new_languages]],
4522 [574, [revert_lineno, revert_aaencoding]],
4523 [573, [revert_ruby_module, revert_utf8_japanese]],
4524 [572, [revert_inputencoding_namechange]],
4525 [571, [revert_notoFonts]],
4526 [570, [revert_cmidruletrimming]],
4527 [569, [revert_bibfileencodings]],
4528 [568, [revert_tablestyle]],
4529 [567, [revert_soul]],
4530 [566, [revert_malayalam]],
4531 [565, [revert_hebrew_parentheses]],
4532 [564, [revert_AdobeFonts]],
4533 [563, [revert_lformatinfo]],
4534 [562, [revert_listpargs]],
4535 [561, [revert_l7ninfo]],
4536 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
4537 [559, [revert_timeinfo, revert_namenoextinfo]],
4538 [558, [revert_dateinfo]],
4539 [557, [addFrontMatterStyles]],
4540 [556, [revert_vcsinfo]],
4541 [555, [revert_bibencoding]],
4542 [554, [revert_vcolumns]],
4543 [553, [revert_stretchcolumn]],
4544 [552, [revert_tuftecite]],
4545 [551, [revert_floatpclass, revert_floatalignment]],
4546 [550, [revert_nospellcheck]],
4547 [549, [revert_fontenc]],
4548 [548, []], # dummy format change
4549 [547, [revert_lscape]],
4550 [546, [revert_xcharter]],
4551 [545, [revert_paratype]],
4552 [544, [revert_lst_literalparam]]
4556 if __name__ == "__main__":