1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
# Append \usepackage lines to the user preamble for every font package
# collected in fontmap (package name -> list of option strings).
# NOTE(review): numbered listing has gaps (51-52, 55-56 absent) — the loop
# header over fontmap and presumably an else branch for the no-options case
# were dropped; restore from upstream lyx_2_4.py before running.
49 def add_preamble_fonts(document, fontmap):
50 " Add collected font-packages with their option to user-preamble"
# Only bracket the options when at least one was collected.
53 if len(fontmap[pkg]) > 0:
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
# Emits e.g. \usepackage[opt1,opt2]{pkg} into the document preamble.
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
# Build a canonical lookup key "pkg:opt1-opt2" from a package name and its
# option list; used as key into pkg2fontmap.
# NOTE(review): inner line 62 is absent from this listing — likely a
# normalization step on `options` (e.g. a sort) — verify against upstream.
61 def createkey(pkg, options):
63 return pkg + ':' + "-".join(options)
# Fragment of the fontinfo class (its `class`/`def __init__` lines were
# sampled out of this listing). Attribute defaults for one font entry:
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Belongs to a separate method (addkey, per the line-number gap at 77-78):
# recompute the pkg2fontmap key from package + options.
79 self.pkgkey = createkey(self.package, self.options)
# Fragment of the fontmapping class constructor (header lines sampled out).
# Three lookup tables kept in sync by expandFontMapping():
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# Register a family of font names under one LaTeX package, filling
# font2pkgmap / pkg2fontmap / pkginmap.
# NOTE(review): gaps (89, 100-101, 104-105, 107, 109-115) hide the loop over
# font_list, fontinfo construction, and option-splitting; verify upstream.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 " Expand fontinfo mapping"
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
# When pkg is None the font name itself is the package name.
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# Reverse lookup: map a package + option list back to the LyX font name,
# cross-checking both tables for consistency.
# NOTE(review): gaps (125, 128, 132, 134-135) hide the early-return branches
# for the not-found / mismatch cases; restore from upstream.
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Round-trip check: the font's own key must match the one we derived.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# Build a fontmapping object covering the font families named in fontlist
# ('DejaVu', 'IBM', 'Adobe', 'Noto', 'Cantarell', 'Chivo', 'CrimsonPro',
# 'Fira' per the visible branches). Used by the convert_*/revert_* routines.
# NOTE(review): gaps (141, 146, 148, 152, 169, 190, 197-198) hide the
# fontmapping construction, the 'DejaVu'/'IBM'/'Noto'/'Fira' elif headers and
# the return statement; restore from upstream before running.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# Scan the LaTeX preamble for \usepackage[opts]{pkg} lines matching a known
# font package (per fm) and convert them into native LyX header settings
# (\font_roman / \font_*_scale / \font_*_osf / \font_*_opts).
# NOTE(review): many interior lines are absent (loop headers at 207-209,
# option-scan bodies 218-234, branches 236-256, 260-261, 267, 274, 278,
# 281-285, 290, 298, 300, 303-309) — this listing is not runnable as-is.
199 def convert_fonts(document, fm, osfoption = "osf"):
200 " Handle font definition (LaTeX preamble -> native) "
202 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
203 rscaleopt = re.compile(r'^scaled?=(.*)')
205 # Check whether we go beyond font option feature introduction
206 haveFontOpts = document.end_format > 580
# Find the next \usepackage line in the preamble.
210 i = find_re(document.preamble, rpkg, i+1)
213 mo = rpkg.search(document.preamble[i])
214 if mo == None or mo.group(2) == None:
# Normalize the option string: strip blanks, split on commas.
217 options = mo.group(2).replace(' ', '').split(",")
222 while o < len(options):
223 if options[o] == osfoption:
227 mo = rscaleopt.search(options[o])
235 if not pkg in fm.pkginmap:
240 # Try with name-option combination first
241 # (only one default option supported currently)
243 while o < len(options):
245 fn = fm.getfontname(pkg, [opt])
252 fn = fm.getfontname(pkg, [])
254 fn = fm.getfontname(pkg, options)
# Recognized: drop the \usepackage line and transfer info to the header.
257 del document.preamble[i]
258 fontinfo = fm.font2pkgmap[fn]
259 if fontinfo.scaletype == None:
262 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
263 fontinfo.scaleval = oscale
# Toggle the osf header flag when the package's osf state differs from its default.
264 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
265 if fontinfo.osfopt == None:
# NOTE(review): extend() with a plain string appends its characters
# ('o','s','f'), not the option — looks like it should be
# options.extend([osfoption]); confirm against upstream.
266 options.extend(osfoption)
268 osf = find_token(document.header, "\\font_osf false")
269 osftag = "\\font_osf"
270 if osf == -1 and fontinfo.fonttype != "math":
271 # Try with newer format
272 osftag = "\\font_" + fontinfo.fonttype + "_osf"
273 osf = find_token(document.header, osftag + " false")
275 document.header[osf] = osftag + " true"
# Also remove the lyx2lyx marker comment that preceded the package line.
276 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
277 del document.preamble[i-1]
279 if fontscale != None:
280 j = find_token(document.header, fontscale, 0)
282 val = get_value(document.header, fontscale, j)
286 scale = "%03d" % int(float(oscale) * 100)
287 document.header[j] = fontscale + " " + scale + " " + vals[1]
# Replace the font name in the matching \font_<type> header line.
288 ft = "\\font_" + fontinfo.fonttype
289 j = find_token(document.header, ft, 0)
291 val = get_value(document.header, ft, j)
292 words = val.split() # ! splits also values like '"DejaVu Sans"'
293 words[0] = '"' + fn + '"'
294 document.header[j] = ft + ' ' + ' '.join(words)
# Newer formats (> 580) carry leftover package options in \font_<type>_opts.
295 if haveFontOpts and fontinfo.fonttype != "math":
296 fotag = "\\font_" + fontinfo.fonttype + "_opts"
297 fo = find_token(document.header, fotag)
299 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
301 # Sensible place to insert tag
302 fo = find_token(document.header, "\\font_sf_scale")
304 document.warning("Malformed LyX document! Missing \\font_sf_scale")
306 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# Inverse of convert_fonts: turn native \font_* header settings back into
# package names + options collected in fontmap (consumed later by
# add_preamble_fonts). Resets the header font to "default".
# NOTE(review): interior lines absent (316, 319, 322-323, 325-326, 332,
# 336-337, 340, 348-350, 361-364, 368, 370, 380, 384-385) — loop guards,
# \font_roman regexp default branch, scale parsing and the return value are
# among the dropped lines; restore from upstream.
310 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
311 " Revert native font definition to LaTeX "
312 # fonlist := list of fonts created from the same package
313 # Empty package means that the font-name is the same as the package-name
314 # fontmap (key = package, val += found options) will be filled
315 # and used later in add_preamble_fonts() to be added to user-preamble
317 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
318 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
320 while i < len(document.header):
321 i = find_re(document.header, rfontscale, i+1)
324 mo = rfontscale.search(document.header[i])
327 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
328 val = get_value(document.header, ft, i)
329 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
330 font = words[0].strip('"') # TeX font name has no whitespace
331 if not font in fm.font2pkgmap:
333 fontinfo = fm.font2pkgmap[font]
334 val = fontinfo.package
335 if not val in fontmap:
# Optionally transfer \font_<type>_opts into the package options.
338 if OnlyWithXOpts or WithXOpts:
339 if ft == "\\font_math":
341 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
342 if ft == "\\font_sans":
343 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
344 elif ft == "\\font_typewriter":
345 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
346 x = find_re(document.header, regexp, 0)
347 if x == -1 and OnlyWithXOpts:
351 # We need to use this regex since split() does not handle quote protection
352 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
353 opts = xopts[1].strip('"').split(",")
354 fontmap[val].extend(opts)
355 del document.header[x]
# Reset the header entry to the default font.
356 words[0] = '"default"'
357 document.header[i] = ft + ' ' + ' '.join(words)
# Translate the stored percentage scale back into a scaled=X.XX option.
358 if fontinfo.scaleopt != None:
359 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
360 mo = rscales.search(xval)
365 # set correct scale option
366 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
# Carry the old-style-figures option over when the osf flag differs from default.
367 if fontinfo.osfopt != None:
369 if fontinfo.osfdef == "true":
371 osf = find_token(document.header, "\\font_osf " + oldval)
372 if osf == -1 and ft != "\\font_math":
373 # Try with newer format
374 osftag = "\\font_roman_osf " + oldval
375 if ft == "\\font_sans":
376 osftag = "\\font_sans_osf " + oldval
377 elif ft == "\\font_typewriter":
378 osftag = "\\font_typewriter_osf " + oldval
379 osf = find_token(document.header, osftag)
381 fontmap[val].extend([fontinfo.osfopt])
382 if len(fontinfo.options) > 0:
383 fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
# Rename legacy \inputencoding values: "auto" -> "auto-legacy",
# "default" -> "auto-legacy-plain".
# NOTE(review): lines 395-396 absent — presumably the `if i == -1: return`
# guard; without it header[i] would index with -1.
392 def convert_inputencoding_namechange(document):
393 " Rename inputencoding settings. "
394 i = find_token(document.header, "\\inputencoding", 0)
397 s = document.header[i].replace("auto", "auto-legacy")
398 document.header[i] = s.replace("default", "auto-legacy-plain")
# Inverse of convert_inputencoding_namechange: restore the old names.
# Replacement order matters: the longer "auto-legacy-plain" must go first.
# NOTE(review): lines 403-404 absent — presumably the not-found guard.
400 def revert_inputencoding_namechange(document):
401 " Rename inputencoding settings. "
402 i = find_token(document.header, "\\inputencoding", 0)
405 s = document.header[i].replace("auto-legacy-plain", "default")
406 document.header[i] = s.replace("auto-legacy", "auto")
# Convert Noto \usepackage preamble lines to native settings (TeX fonts only).
408 def convert_notoFonts(document):
409 " Handle Noto fonts definition to LaTeX "
411 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
412 fm = createFontMapping(['Noto'])
413 convert_fonts(document, fm)
# Revert native Noto settings to preamble \usepackage lines.
# NOTE(review): line 419 absent — presumably `fontmap = dict()`.
415 def revert_notoFonts(document):
416 " Revert native Noto font definition to LaTeX "
418 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
420 fm = createFontMapping(['Noto'])
421 if revert_fonts(document, fm, fontmap):
422 add_preamble_fonts(document, fontmap)
# Convert DejaVu/IBMPlex \usepackage preamble lines to native settings.
424 def convert_latexFonts(document):
425 " Handle DejaVu and IBMPlex fonts definition to LaTeX "
427 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
428 fm = createFontMapping(['DejaVu', 'IBM'])
429 convert_fonts(document, fm)
# Revert native DejaVu/IBMPlex settings to preamble \usepackage lines.
# NOTE(review): line 435 absent — presumably `fontmap = dict()`.
431 def revert_latexFonts(document):
432 " Revert native DejaVu font definition to LaTeX "
434 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
436 fm = createFontMapping(['DejaVu', 'IBM'])
437 if revert_fonts(document, fm, fontmap):
438 add_preamble_fonts(document, fontmap)
# Convert Adobe Source Pro \usepackage preamble lines to native settings.
440 def convert_AdobeFonts(document):
441 " Handle Adobe Source fonts definition to LaTeX "
443 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
444 fm = createFontMapping(['Adobe'])
445 convert_fonts(document, fm)
# Revert native Adobe Source Pro settings to preamble \usepackage lines.
# NOTE(review): line 451 absent — presumably `fontmap = dict()`.
447 def revert_AdobeFonts(document):
448 " Revert Adobe Source font definition to LaTeX "
450 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
452 fm = createFontMapping(['Adobe'])
453 if revert_fonts(document, fm, fontmap):
454 add_preamble_fonts(document, fontmap)
# Delete every Begin/EndFrontmatter layout (plus trailing blank lines)
# from the body.
# NOTE(review): gaps (458, 461-462, 464-465, 468, 470, 472, 474) hide the
# scan loop header, break/continue guards and the blank-line advance.
456 def removeFrontMatterStyles(document):
457 " Remove styles Begin/EndFrontmatter"
459 layouts = ['BeginFrontmatter', 'EndFrontmatter']
460 tokenend = len('\\begin_layout ')
463 i = find_token_exact(document.body, '\\begin_layout ', i+1)
# Extract the layout name that follows '\begin_layout '.
466 layout = document.body[i][tokenend:].strip()
467 if layout not in layouts:
469 j = find_end_of_layout(document.body, i)
471 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Swallow blank lines following the layout before deleting.
473 while document.body[j+1].strip() == '':
475 document.body[i:j+1] = []
# For elsarticle documents, wrap the run of frontmatter layouts (Title,
# Author, Abstract, ...) in Begin/EndFrontmatter layouts, each carrying an
# explanatory Note inset.
# NOTE(review): heavy sampling gaps (479, 481-482, 484, 486-487, 489, 492,
# 494-495, 497-498, 502-504, 506-507, 510, 512, 514-519) hide the inserted
# note text, the `first` tracking and the scan-loop guards.
477 def addFrontMatterStyles(document):
478 " Use styles Begin/EndFrontmatter for elsarticle"
480 if document.textclass != "elsarticle":
# Local helper: insert a (Begin|End)Frontmatter layout at `line`,
# trimming surrounding blank lines first.
483 def insertFrontmatter(prefix, line):
485 while above > 0 and document.body[above-1].strip() == '':
488 while document.body[below].strip() == '':
490 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
491 '\\begin_inset Note Note',
493 '\\begin_layout Plain Layout',
496 '\\end_inset', '', '',
# Layouts considered part of the elsarticle frontmatter.
499 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
500 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
501 tokenend = len('\\begin_layout ')
505 i = find_token_exact(document.body, '\\begin_layout ', i+1)
508 layout = document.body[i][tokenend:].strip()
509 if layout not in layouts:
511 k = find_end_of_layout(document.body, i)
513 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so the earlier index `first` stays valid.
520 insertFrontmatter('End', k+1)
521 insertFrontmatter('Begin', first)
# Add `literal "true"` to every CommandInset include, placed before the
# first blank line inside the inset.
# NOTE(review): gaps (526-528, 530-531, 533, 535, 537, 539-540) hide the
# loop header, break guard and the index advance.
524 def convert_lst_literalparam(document):
525 " Add param literal to include inset "
529 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
532 j = find_end_of_inset(document.body, i)
534 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Advance to the first blank line (end of the parameter list).
536 while i < j and document.body[i].strip() != '':
538 document.body.insert(i, 'literal "true"')
# Remove the `literal` parameter from every CommandInset include.
# NOTE(review): gaps (543-545, 547-548, 550, 552) hide the loop header and
# the break/warning guards.
541 def revert_lst_literalparam(document):
542 " Remove param literal from include inset "
546 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
549 j = find_end_of_inset(document.body, i)
551 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
553 del_token(document.body, 'literal', i, j)
# Revert ParaType (PTSerif/PTSans/PTMono) native font settings to
# \usepackage preamble code, preserving sf/tt scaling as scaled= options.
# NOTE(review): gaps (558, 560, 565-566, 568, 570, 572-573, 575-576, 578,
# 580-581, 586-589, 593-594, 597-598, 600, 603-604, 606) hide the scale
# defaults, try/except around float(), and several if/else headers.
556 def revert_paratype(document):
557 " Revert ParaType font definitions to LaTeX "
559 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
# Locate the three header font lines plus the PTSans variant.
561 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
562 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
563 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
564 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
567 sfval = find_token(document.header, "\\font_sf_scale", 0)
569 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
571 sfscale = document.header[sfval].split()
574 document.header[sfval] = " ".join(sfscale)
577 sf_scale = float(val)
579 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float but is compared to the string
# "100.0" — this is always True; looks like a latent bug, confirm
# against upstream before relying on the comparison.
582 if sf_scale != "100.0":
583 sfoption = "scaled=" + str(sf_scale / 100.0)
584 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
585 ttval = get_value(document.header, "\\font_tt_scale", 0)
590 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families present -> the combined paratype package suffices.
591 if i1 != -1 and i2 != -1 and i3!= -1:
592 add_to_preamble(document, ["\\usepackage{paratype}"])
595 add_to_preamble(document, ["\\usepackage{PTSerif}"])
596 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
599 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
601 add_to_preamble(document, ["\\usepackage{PTSans}"])
602 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
605 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
607 add_to_preamble(document, ["\\usepackage{PTMono}"])
608 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# Revert the XCharter roman font to a \usepackage{XCharter} preamble line,
# mapping \font_osf true to the package's osf option.
# NOTE(review): gaps (613, 615-617, 622-623, 626-627, 629-631, 633) hide
# the not-found guard, the `options` default and the osf option assembly.
611 def revert_xcharter(document):
612 " Revert XCharter font definitions to LaTeX "
614 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
618 # replace unsupported font setting
619 document.header[i] = document.header[i].replace("xcharter", "default")
620 # no need for preamble code with system fonts
621 if get_bool_value(document.header, "\\use_non_tex_fonts"):
624 # transfer old style figures setting to package options
625 j = find_token(document.header, "\\font_osf true")
628 document.header[j] = "\\font_osf false"
632 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# Replace Flex Landscape insets (Landscape module) with ERT
# \begin{landscape}...\end{landscape}; the floating variant additionally
# gets \afterpage{...}. Loads pdflscape (and afterpage) in the preamble and
# removes the module.
# NOTE(review): gaps (637, 639-642, 644-645, 647, 649-650, 655, 658) hide
# the loop header, the early return and the break guards.
635 def revert_lscape(document):
636 " Reverts the landscape environment (Landscape module) to TeX-code "
638 if not "landscape" in document.get_module_list():
643 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
646 j = find_end_of_inset(document.body, i)
648 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant: wrap in \afterpage so the rotation starts on a new page.
651 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
# Replace the closing inset lines first so index i stays valid.
652 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
653 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
654 add_to_preamble(document, ["\\usepackage{afterpage}"])
656 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
657 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
659 add_to_preamble(document, ["\\usepackage{pdflscape}"])
660 document.del_module("landscape")
# Rename the default \fontencoding value "global" to "auto".
# NOTE(review): lines 665, 667-669 absent — presumably the `if i == -1:
# return` guard.
663 def convert_fontenc(document):
664 " Convert default fontenc setting "
666 i = find_token(document.header, "\\fontencoding global", 0)
670 document.header[i] = document.header[i].replace("global", "auto")
# Inverse of convert_fontenc: rename "auto" back to "global".
# NOTE(review): lines 675, 677-679 absent — presumably the not-found guard.
673 def revert_fontenc(document):
674 " Revert default fontenc setting "
676 i = find_token(document.header, "\\fontencoding auto", 0)
680 document.header[i] = document.header[i].replace("auto", "global")
# Strip every \nospellcheck font-info parameter from the body.
# NOTE(review): lines 685-687, 689-693 absent — the loop header, break
# guard and the actual deletion are among the dropped lines.
683 def revert_nospellcheck(document):
684 " Remove nospellcheck font info param "
688 i = find_token(document.body, '\\nospellcheck', i)
# Remove the 'class'/'document' float placement parameters from the header
# and from each Float inset.
# NOTE(review): gaps (696, 698-700, 702-703, 706, 708-713) hide the loop
# header and the per-inset deletions.
694 def revert_floatpclass(document):
695 " Remove float placement params 'document' and 'class' "
697 del_token(document.header, "\\float_placement class")
701 i = find_token(document.body, '\\begin_inset Float', i+1)
704 j = find_end_of_inset(document.body, i)
# Placement line, if present, sits within the first two lines of the inset.
705 k = find_token(document.body, 'placement class', i, i + 2)
707 k = find_token(document.body, 'placement document', i, i + 2)
# Remove per-float alignment parameters, emitting equivalent ERT
# (\raggedright / \centering / \raggedleft) at the top of each float whose
# alignment differs from the (deleted) global \float_alignment.
# NOTE(review): gaps (716, 718-720, 722-723, 725, 727, 729-731, 735, 737,
# 739-740, 747, 749-750) hide loop/guard lines and the alignment-line
# deletion; `alcmd` may be used uninitialized for unlisted alignments —
# verify upstream.
714 def revert_floatalignment(document):
715 " Remove float alignment params "
# Global default, removed from the header as we read it.
717 galignment = get_value(document.header, "\\float_alignment", delete=True)
721 i = find_token(document.body, '\\begin_inset Float', i+1)
724 j = find_end_of_inset(document.body, i)
726 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
728 k = find_token(document.body, 'alignment', i, i+4)
732 alignment = get_value(document.body, "alignment", k)
# 'document' means: inherit the global setting.
733 if alignment == "document":
734 alignment = galignment
736 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
738 document.warning("Can't find float layout!")
741 if alignment == "left":
742 alcmd = put_cmd_in_ert("\\raggedright{}")
743 elif alignment == "center":
744 alcmd = put_cmd_in_ert("\\centering{}")
745 elif alignment == "right":
746 alcmd = put_cmd_in_ert("\\raggedleft{}")
# Insert the ERT right after the float's first Plain Layout line.
748 document.body[l+1:l+1] = alcmd
# In tufte-book/tufte-handout documents, replace citation insets with ERT
# \cite-style commands ("\<cmd>[pre][post]{key}").
# NOTE(review): gaps (753, 756-759, 761-762, 764, 766, 768, 770-771,
# 773-775, 779, 781, 783-784, 786, 788-789, 792-795) hide the loop header,
# the cmd filter and the res initialisation ("\\" + cmd presumably).
751 def revert_tuftecite(document):
752 " Revert \cite commands in tufte classes "
754 tufte = ["tufte-book", "tufte-handout"]
755 if document.textclass not in tufte:
760 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
763 j = find_end_of_inset(document.body, i)
765 document.warning("Can't find end of citation inset at line %d!!" %(i))
767 k = find_token(document.body, "LatexCommand", i, j)
769 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
772 cmd = get_value(document.body, "LatexCommand", k)
776 pre = get_quoted_value(document.body, "before", i, j)
777 post = get_quoted_value(document.body, "after", i, j)
778 key = get_quoted_value(document.body, "key", i, j)
780 document.warning("Citation inset at line %d does not have a key!" %(i))
782 # Replace command with ERT
785 res += "[" + pre + "]"
787 res += "[" + post + "]"
790 res += "{" + key + "}"
791 document.body[i:j+1] = put_cmd_in_ert([res])
# Strip varwidth="true" from tabular <column ...> tags, demoting
# tabularx/xltabular tables to normal tables.
# NOTE(review): gaps (798-799, 801-802, 804, 806, 811-812) hide the loop
# header and break guards.
796 def revert_stretchcolumn(document):
797 " We remove the column varwidth flags or everything else will become a mess. "
800 i = find_token(document.body, "\\begin_inset Tabular", i+1)
803 j = find_end_of_inset(document.body, i+1)
805 document.warning("Malformed LyX document: Could not find end of tabular.")
807 for k in range(i, j):
808 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
809 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
810 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# Revert varwidth table cells: give qualifying columns a
# ">{alignment}V{\linewidth}" special, and turn in-cell Newline insets into
# ERT \\ or \linebreak{} (pre-2.4 LyX would otherwise wrap cells in
# parboxes). Adds array/varwidth packages to the preamble when used.
# NOTE(review): extensive gaps (815-819, 821-822, 824, 826-827, 829, 832,
# 840-843, 852, 854, 856, 858, 860, 863, 866, 873-874, 876, 880, 882, 884,
# 886-888, 891, 893, 895-899, 904-905) hide needarray/needvarwidth
# initialisation, begcell tracking and several loop guards.
813 def revert_vcolumns(document):
814 " Revert standard columns with line breaks etc. "
820 i = find_token(document.body, "\\begin_inset Tabular", i+1)
823 j = find_end_of_inset(document.body, i)
825 document.warning("Malformed LyX document: Could not find end of tabular.")
828 # Collect necessary column information
# Row/column counts come from the quoted attributes of the <lyxtabular> tag.
830 nrows = int(document.body[i+1].split('"')[3])
831 ncols = int(document.body[i+1].split('"')[5])
833 for k in range(ncols):
834 m = find_token(document.body, "<column", m)
835 width = get_option_value(document.body[m], 'width')
836 varwidth = get_option_value(document.body[m], 'varwidth')
837 alignment = get_option_value(document.body[m], 'alignment')
838 special = get_option_value(document.body[m], 'special')
# [width, varwidth, alignment, special, line-index] per column.
839 col_info.append([width, varwidth, alignment, special, m])
844 for row in range(nrows):
845 for col in range(ncols):
846 m = find_token(document.body, "<cell", m)
847 multicolumn = get_option_value(document.body[m], 'multicolumn')
848 multirow = get_option_value(document.body[m], 'multirow')
849 width = get_option_value(document.body[m], 'width')
850 rotate = get_option_value(document.body[m], 'rotate')
851 # Check for: linebreaks, multipars, non-standard environments
853 endcell = find_token(document.body, "</cell>", begcell)
855 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
857 elif count_pars_in_inset(document.body, begcell + 2) > 1:
859 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
# Candidate cells: no rotation, and either not multi-span or no width.
861 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
862 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
864 alignment = col_info[col][2]
865 col_line = col_info[col][4]
867 if alignment == "center":
868 vval = ">{\\centering}"
869 elif alignment == "left":
870 vval = ">{\\raggedright}"
871 elif alignment == "right":
872 vval = ">{\\raggedleft}"
875 vval += "V{\\linewidth}"
# Inject the special= attribute before the column tag's closing '>'.
877 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
878 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
879 # with newlines, and we do not want that)
881 endcell = find_token(document.body, "</cell>", begcell)
883 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
885 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
889 nle = find_end_of_inset(document.body, nl)
890 del(document.body[nle:nle+1])
892 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
894 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
900 if needarray == True:
901 add_to_preamble(document, ["\\usepackage{array}"])
902 if needvarwidth == True:
903 add_to_preamble(document, ["\\usepackage{varwidth}"])
# Revert per-bibliography encoding: for biblatex engines, move it into
# \biblio_options bibencoding=...; otherwise wrap each bibtex inset in ERT
# \bgroup\inputencoding{...} ... \egroup. Removes the inset's encoding line.
# NOTE(review): large sampling gaps (908-910, 912, 914, 916-918, 920-921,
# 923-925, 940, 942-957, 959-964, 966-967, 969, 971, 973-974, 977-978, 980,
# 983-984, 986, 990, 992, 995, 997, 1000-1004) — most of the encodings
# table and the loop/guard lines are missing from this listing.
906 def revert_bibencoding(document):
907 " Revert bibliography encoding "
911 i = find_token(document.header, "\\cite_engine", 0)
913 document.warning("Malformed document! Missing \\cite_engine")
915 engine = get_value(document.header, "\\cite_engine", i)
919 if engine in ["biblatex", "biblatex-natbib"]:
922 # Map lyx to latex encoding names
926 "armscii8" : "armscii8",
927 "iso8859-1" : "latin1",
928 "iso8859-2" : "latin2",
929 "iso8859-3" : "latin3",
930 "iso8859-4" : "latin4",
931 "iso8859-5" : "iso88595",
932 "iso8859-6" : "8859-6",
933 "iso8859-7" : "iso-8859-7",
934 "iso8859-8" : "8859-8",
935 "iso8859-9" : "latin5",
936 "iso8859-13" : "latin7",
937 "iso8859-15" : "latin9",
938 "iso8859-16" : "latin10",
939 "applemac" : "applemac",
941 "cp437de" : "cp437de",
958 "utf8-platex" : "utf8",
965 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
968 j = find_end_of_inset(document.body, i)
970 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
972 encoding = get_quoted_value(document.body, "encoding", i, j)
975 # remove encoding line
976 k = find_token(document.body, "encoding", i, j)
979 if encoding == "default":
981 # Re-find inset end line
982 j = find_end_of_inset(document.body, i)
# biblatex route: append/insert bibencoding= into \biblio_options.
985 h = find_token(document.header, "\\biblio_options", 0)
987 biblio_options = get_value(document.header, "\\biblio_options", h)
988 if not "bibencoding" in biblio_options:
989 document.header[h] += ",bibencoding=%s" % encodings[encoding]
991 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
993 # this should not happen
994 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
996 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# bibtex route: group the inset and switch inputencoding locally.
998 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
999 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# Split VCS-related Info insets out of the "buffer" type into a dedicated
# "vcs" type, dropping the "vcs-" prefix from the argument.
# NOTE(review): gaps (1007-1008, 1013-1016, 1018-1019, 1021, 1023,
# 1026-1027, 1031, 1034-1035) hide part of the types table (e.g. a
# vcs-date entry, given revert lists "date"), the loop header and guards.
1005 def convert_vcsinfo(document):
1006 " Separate vcs Info inset from buffer Info inset. "
1009 "vcs-revision" : "revision",
1010 "vcs-tree-revision" : "tree-revision",
1011 "vcs-author" : "author",
1012 "vcs-time" : "time",
1017 i = find_token(document.body, "\\begin_inset Info", i+1)
1020 j = find_end_of_inset(document.body, i+1)
1022 document.warning("Malformed LyX document: Could not find end of Info inset.")
1024 tp = find_token(document.body, 'type', i, j)
1025 tpv = get_quoted_value(document.body, "type", tp)
1028 arg = find_token(document.body, 'arg', i, j)
1029 argv = get_quoted_value(document.body, "arg", arg)
1030 if argv not in list(types.keys()):
1032 document.body[tp] = "type \"vcs\""
1033 document.body[arg] = "arg \"" + types[argv] + "\""
# Inverse of convert_vcsinfo: fold "vcs" Info insets back into "buffer"
# type, restoring the "vcs-" argument prefix.
# NOTE(review): gaps (1038, 1040-1041, 1043-1044, 1046, 1048, 1051-1052,
# 1057, 1060) hide the loop header and the type filter/continue guards.
1036 def revert_vcsinfo(document):
1037 " Merge vcs Info inset to buffer Info inset. "
1039 args = ["revision", "tree-revision", "author", "time", "date" ]
1042 i = find_token(document.body, "\\begin_inset Info", i+1)
1045 j = find_end_of_inset(document.body, i+1)
1047 document.warning("Malformed LyX document: Could not find end of Info inset.")
1049 tp = find_token(document.body, 'type', i, j)
1050 tpv = get_quoted_value(document.body, "type", tp)
1053 arg = find_token(document.body, 'arg', i, j)
1054 argv = get_quoted_value(document.body, "arg", arg)
# Unknown vcs argument: warn but leave the inset untouched.
1055 if argv not in args:
1056 document.warning("Malformed Info inset. Invalid vcs arg.")
1058 document.body[tp] = "type \"buffer\""
1059 document.body[arg] = "arg \"vcs-" + argv + "\""
# Downgrade "revision-abbrev" vcs Info arguments to plain "revision"
# (abbreviated revisions are unknown to older formats).
# NOTE(review): gaps (1063-1065, 1067-1068, 1070, 1072, 1075-1076, 1081)
# hide the loop header and the type filter.
1061 def revert_vcsinfo_rev_abbrev(document):
1062 " Convert abbreviated revisions to regular revisions. "
1066 i = find_token(document.body, "\\begin_inset Info", i+1)
1069 j = find_end_of_inset(document.body, i+1)
1071 document.warning("Malformed LyX document: Could not find end of Info inset.")
1073 tp = find_token(document.body, 'type', i, j)
1074 tpv = get_quoted_value(document.body, "type", tp)
1077 arg = find_token(document.body, 'arg', i, j)
1078 argv = get_quoted_value(document.body, "arg", arg)
1079 if( argv == "revision-abbrev" ):
1080 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date Info insets to static text.

    Replaces every Info inset of type date/fixdate/moddate with the date
    rendered as plain text, using the per-language strftime patterns below.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # NOTE: fixed stray '%' before "de" in the two entries below
        # ("%de" rendered the day number a second time)
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        }

    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args look like "<format>@<iso date>"
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

        result = ""
        if argv == "ISO":
            # was dte.isodate(): datetime.date has no such method
            # (AttributeError); the ISO form comes from isoformat()
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate a Qt date format string to strftime.
            # "MMMM" is the *long* month name -> %B (it was mapped to %b,
            # the abbreviated name, before).
            fmt = argv.replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # keep the character preceding a bare "d": the old pattern
            # '[^\'%]d' replaced it together with the "d" (e.g. "-d" -> "%d"
            # dropped the hyphen)
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time Info insets to static text.

    Replaces every Info inset of type time/fixtime/modtime with the time
    rendered as plain text, using the per-language strftime patterns below.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate a Qt time format string to strftime. "HH"/"H" are
            # handled in one regex pass: the old chained
            # replace("HH", "%H").replace("H", "%H") re-matched the H
            # inside "%H" and produced "%%H" (a literal "%H" in the output).
            fmt = re.sub(r'HH?', '%H', argv)
            fmt = fmt.replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # was dte.strftime(fmt): 'dte' is not defined in this function
            # (NameError); the time object here is 'tme'
            result = tme.strftime(fmt)
        # wrap in a list: assigning a bare string to a list slice inserts
        # one body line per *character* (cf. revert_dateinfo above)
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    " Merge buffer Info inset type name-noext to name. "

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos+1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos+1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        # Only buffer-type insets carry the name-noext argument
        tline = find_token(document.body, 'type', pos, end)
        if get_quoted_value(document.body, "type", tline) != "buffer":
            continue
        aline = find_token(document.body, 'arg', pos, end)
        if get_quoted_value(document.body, "arg", aline) != "name-noext":
            continue
        # Downgrade the argument to the plain "name" variant
        document.body[aline] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info insets to plain text.

    The inset is replaced by its argument, stripped of menu decorations.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # wrap in a list: assigning a bare string to a list slice would
        # insert one body line per character
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    " Reverts listpreamble arguments to TeX-code "

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument listpreamble:", pos+1)
        if pos == -1:
            return
        insetend = find_end_of_inset(document.body, pos)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, pos)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        # The argument's content lives inside a Plain Layout
        plain = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plainend = find_end_of_layout(document.body, plain)
        inner = document.body[plain + 1 : plainend]
        # Drop the Argument inset and re-insert its content as an ERT
        # group at the start of the paragraph
        del document.body[pos:insetend+1]
        ert = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
               "{"] + inner + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = ert
def revert_lformatinfo(document):
    """Revert layout format Info insets to plain text.

    The inset is replaced with the literal layout format number "69".
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # wrap in a list: assigning the bare string "69" to a list slice
        # would insert *two* body lines, "6" and "9"
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    # Stack of layout languages; the innermost is current_languages[-1].
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # was line.lstrip('\\lang '): lstrip strips a character *set*
            # ({'\\','l','a','n','g',' '}), mangling names such as
            # "ngerman" -> "erman". Slice off the prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # swap ( and ) using a NUL byte as temporary placeholder
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The swap is its own inverse; this wrapper only exists to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""

    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    " Revert soul module flex insets to ERT "

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # The soul package is needed as soon as any of these insets occurs
    for flex in flexes:
        if find_token(document.body, "\\begin_inset Flex %s" % flex, 0) != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlighting additionally needs the color package
    if find_token(document.body, "\\begin_inset Flex Highlight", 0) != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    " Remove tablestyle params "

    # Drop the \tablestyle header line, if present
    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        del document.header[pos]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "
    # NOTE(review): this listing is incomplete — loop headers, guards and
    # the encoding-dict opener/closer are elided. Only comments were added;
    # all visible code lines are kept as-is.

    # Locate the cite engine header (warning when absent)
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)

    # Per-file encodings are only relevant for the Biblatex engines
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",

        # Process every bibtex CommandInset in the body
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
        document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # 'encodings' appears to be a tab-separated list of
        # "<bibfile> <encoding>" pairs — TODO confirm against writer code
        enclist = encodings.split("\t")
        ppp = pp.split(" ", 1)
        encmap[ppp[0]] = ppp[1]
        # Emit one \addbibresource per bibfile, with bibencoding when known
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
        pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming attributes from table cells."""

    # FIXME: Revert to TeX code?
    # hoisted out of the loop: the pattern is loop-invariant
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# NOTE(review): visible remainder of the local layout definition for the
# Ruby (furigana) Flex inset; the list opener (ruby_inset_def = [) and some
# items are elided from this listing. Consumed by convert_ruby_module /
# revert_ruby_module below.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    # preamble fragment: select a \ruby implementation per TeX engine
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    # Enable the module only when the matching local layout was present
    # (and has just been removed).
    if document.del_local_layout(ruby_inset_def):
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # Re-add the local layout only when the module was actually enabled.
    if document.del_module("ruby"):
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    " Use generic utf8 with Japanese documents."

    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    # Only the Japanese-specific utf8 variants are converted
    wants_generic = ((lang == "japanese" and inputenc == "utf8-platex")
                     or (lang == "japanese-cjk" and inputenc == "utf8-cjk"))
    if wants_generic:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    " Use Japanese utf8 variants with Japanese documents."

    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    # Map each Japanese language back to its dedicated utf8 variant
    variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    lang = get_value(document.header, "\\language")
    if lang in variants:
        document.set_parameter("inputencoding", variants[lang])
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."

    # Pull (and remove) the options before checking the switch, so the
    # header is cleaned up in either case.
    lineno_opts = get_quoted_value(document.header, "\\lineno_options",
                                   delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if lineno_opts:
        lineno_opts = "[" + lineno_opts + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % lineno_opts,
                               "\\linenumbers"])
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."

    use_lineno = 0
    options = ""
    pos = find_token(document.preamble, "\\linenumbers", 1)
    if pos > -1:
        # \linenumbers must be preceded by the \usepackage line to count
        pkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[pos-1])
        if pkg:
            use_lineno = 1
            options = pkg.group(1).strip("[]")
            del(document.preamble[pos-1:pos+1])
            del_token(document.preamble, "% Added by lyx2lyx", pos-2, pos-1)

    # Insert the native header settings before \index
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    " Convert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        return

    pos = find_token(document.header, "\\use_default_options true")
    if pos == -1:
        return
    enc = get_value(document.header, "\\inputencoding")
    if enc == "":
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if enc in ("auto-legacy", "latin9"):
        # latin9 was implied by the default options; make it explicit
        document.header[pos] = "\\use_default_options false"
        optline = find_token(document.header, "\\options")
        if optline == -1:
            document.header.insert(pos, "\\options latin9")
        else:
            document.header[optline] += ",latin9"
def revert_aaencoding(document):
    " Revert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        return

    pos = find_token(document.header, "\\use_default_options true")
    if pos == -1:
        return
    enc = get_value(document.header, "\\inputencoding")
    if enc == "":
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if enc == "utf8":
        # utf8 must be passed explicitly once default options are off
        document.header[pos] = "\\use_default_options false"
        optline = find_token(document.header, "\\options", 0)
        if optline == -1:
            document.header.insert(pos, "\\options utf8")
        else:
            document.header[optline] = document.header[optline] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                     }
    # Collect which of the affected languages the document actually uses
    if document.language in new_languages:
        used_languages = set((document.language, ))
    else:
        used_languages = set()
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang", pos+1)
        if pos == -1:
            break
        val = get_value(document.body, "\\lang", pos)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
# NOTE(review): visible remainder of the local layout definition for the
# deprecated two-line "Glosse" Flex inset (linguistics module); the list
# opener (gloss_inset_def = [) and several items are elided from this
# listing. Consumed by convert_linggloss / revert_linggloss.
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    # preamble fragment: define linggloss only if covington did not
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Local layout definition for the deprecated three-line "Tri-Glosse" Flex
# inset (linguistics module). NOTE(review): some list items and the list
# closer are elided from this listing. Consumed by convert_linggloss /
# revert_linggloss.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    # preamble fragment: define lingglosss only if covington did not
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move the deprecated ling gloss insets to a local layout."""
    # Append the matching layout definition for each inset kind in use.
    for inset, layout in (('\\begin_inset Flex Glosse', gloss_inset_def),
                          ('\\begin_inset Flex Tri-Glosse', glosss_inset_def)):
        if find_token(document.body, inset, 0) != -1:
            document.append_local_layout(layout)
def revert_linggloss(document):
    """Revert to old ling gloss definitions.

    Converts the new-style Interlinear Gloss flex insets back to raw ERT
    calls of covington's \\gloss / \\trigloss commands, and drops the
    compatibility local layouts again.
    NOTE(review): this excerpt elides several control-flow lines (loop
    setup, break/continue, else branches); the comments below describe
    only the code that is visible here.
    """
    # only relevant when the linguistics module is in use
    if not "linguistics" in document.get_module_list():
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
            # locate the next inset of this flavour
            i = find_token(document.body, glosse, i+1)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of Gloss inset")

            # optional Argument 1 (intro text of the gloss)
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                # content without the plain-layout scaffolding
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # mandatory Argument post:1 (first gloss line)
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # mandatory Argument post:2 (second gloss line)
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # Argument post:3 (third line, used by the 3-line variant)
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # pick the LaTeX command; the 3-line flavour presumably sets
            # cmd = "\trigloss" here (assignment elided in this excerpt)
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            # ERT placed before the inset content: \cmd[opt]{
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            # ERT after the content: }{line1}{line2} and, for trigloss, {line3}
            postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")

            # splice the ERT in and drop the flex inset wrapper
            document.body[endPlain:endInset + 1] = postcontent
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]

    # the reverted commands are provided by covington.sty
    document.append_local_layout("Requires covington")
def revert_subexarg(document):
    """Revert linguistic subexamples with an argument to ERT.

    Rewrites runs of consecutive Subexample layouts as a plain
    \\begin{subexamples}[...] ... \\end{subexamples} environment in ERT,
    with each paragraph introduced by \\item.
    NOTE(review): this excerpt elides several control-flow lines (loop
    setup, break/continue); the comments describe only the visible code.
    """
    # only relevant when the linguistics module is in use
    if not "linguistics" in document.get_module_list():

        # find the next Subexample paragraph
        i = find_token(document.body, "\\begin_layout Subexample", i+1)
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")

        # check for consecutive layouts
        k = find_token(document.body, "\\begin_layout", j)
        if k == -1 or document.body[k] != "\\begin_layout Subexample":
        j = find_end_of_layout(document.body, k)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")

        # the optional argument attached to the first subexample
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)

        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        # convert the argument content to raw LaTeX for the env option
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            del document.body[arg : endarg + 1]

        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")

        # re-find end of layout
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")

        # check for consecutive layouts
        k = find_token(document.body, "\\begin_layout", j)
        if k == -1 or document.body[k] != "\\begin_layout Subexample":
        # turn each follow-up Subexample into a Standard paragraph + \item
        document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
        j = find_end_of_layout(document.body, k)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")

        endev = put_cmd_in_ert("\\end{subexamples}")
        # close the environment after the last subexample ...
        document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        # ... and open it (plus the first \item) where the run started
        document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
            + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")

    # subexamples is provided by covington.sty
    document.append_local_layout("Requires covington")
def revert_drs(document):
    """Revert DRS insets (linguistics) to ERT.

    Each DRS flex inset flavour is mapped to a drs/covington LaTeX
    command; the command's arguments are collected from the inset's
    Argument sub-insets before the wrapper is replaced by ERT.
    NOTE(review): this excerpt elides several control-flow lines (loop
    setup, guards, the cmd assignments in the elif chain); comments
    describe only the visible code.
    """
    # only relevant when the linguistics module is in use
    if not "linguistics" in document.get_module_list():

    # all DRS flavours introduced with the linguistics module
    drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
             "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
             "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
             "\\begin_inset Flex SDRS"]

            # locate the next DRS inset of this flavour
            i = find_token(document.body, drs, i+1)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            # Check for arguments
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # re-find the inset end (deletions above shifted indices)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # re-find the inset end again
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            # post arguments default to empty when absent
            postarg1content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg2content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg3content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")

            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg4content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]

            # The respective LaTeX command
            # (the cmd assignments in these branches are elided here)
            if drs == "\\begin_inset Flex DRS*":
            elif drs == "\\begin_inset Flex IfThen-DRS":
            elif drs == "\\begin_inset Flex Cond-DRS":
            elif drs == "\\begin_inset Flex QDRS":
            elif drs == "\\begin_inset Flex NegDRS":
            elif drs == "\\begin_inset Flex SDRS":

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            # ERT placed before the content: \cmd{arg1}[{arg2}]{
            precontent = put_cmd_in_ert(cmd)
            precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
            if drs == "\\begin_inset Flex SDRS":
                precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
            precontent += put_cmd_in_ert("{")

            # ERT after the content: closing brace plus post arguments
            if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
                postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
                if cmd == "\\condrs" or cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
                    postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
                postcontent = put_cmd_in_ert("}")

            # splice the ERT in and drop the flex inset wrapper
            document.body[endPlain:endInset + 1] = postcontent
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]

    # the reverted commands come from drs.sty and covington.sty
    document.append_local_layout("Provides covington 1")
    add_to_preamble(document, ["\\usepackage{drs,covington}"])
def revert_babelfont(document):
    """Revert the use of \\babelfont to the user preamble.

    With non-TeX fonts and babel as language package, moves the roman,
    sans and typewriter font selections into \\babelfont calls wrapped
    in \\AtBeginDocument, and resets the header values to "default".
    NOTE(review): this excerpt elides several guard lines (returns,
    if/else/try headers); comments describe only the visible code.
    """
    # only applies to non-TeX (system) fonts
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):

    i = find_token(document.header, '\\language_package', 0)
        document.warning("Malformed LyX document: Missing \\language_package.")
    # \babelfont is only relevant with babel
    if get_value(document.header, "\\language_package", 0) != "babel":

    # check font settings
    # defaults used when a header entry is missing
    roman = sans = typew = "default"
    sf_scale = tt_scale = 100.0

    j = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        roman = romanfont[2].strip('"')
        # reset the header entry to the default font
        romanfont[2] = '"default"'
        document.header[j] = " ".join(romanfont)

    j = find_token(document.header, "\\font_sans", 0)
        document.warning("Malformed LyX document: Missing \\font_sans.")
        # We need to use this regex since split() does not handle quote protection
        sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        sans = sansfont[2].strip('"')
        sansfont[2] = '"default"'
        document.header[j] = " ".join(sansfont)

    j = find_token(document.header, "\\font_typewriter", 0)
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        typew = ttfont[2].strip('"')
        ttfont[2] = '"default"'
        document.header[j] = " ".join(ttfont)

    i = find_token(document.header, "\\font_osf", 0)
        document.warning("Malformed LyX document: Missing \\font_osf.")
    osf = str2bool(get_value(document.header, "\\font_osf", i))

    j = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        sfscale = document.header[j].split()
        document.header[j] = " ".join(sfscale)
        # float() can throw (guarded by an elided try/except here)
        sf_scale = float(val)
        document.warning("Invalid font_sf_scale value: " + val)

    j = find_token(document.header, "\\font_tt_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
        ttscale = document.header[j].split()
        document.header[j] = " ".join(ttscale)
        tt_scale = float(val)
        document.warning("Invalid font_tt_scale value: " + val)

    # set preamble stuff
    pretext = ['%% This document must be processed with xelatex or lualatex!']
    pretext.append('\\AtBeginDocument{%')
    if roman != "default":
        pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
    if sans != "default":
        sf = '\\babelfont{sf}['
        if sf_scale != 100.0:
            sf += 'Scale=' + str(sf_scale / 100.0) + ','
        sf += 'Mapping=tex-text]{' + sans + '}'
    if typew != "default":
        tw = '\\babelfont{tt}'
        if tt_scale != 100.0:
            tw += '[Scale=' + str(tt_scale / 100.0) + ']'
        tw += '{' + typew + '}'
        # old-style figures for all text (guarded by an elided 'if osf:')
        pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
    insert_to_preamble(document, pretext)
def revert_minionpro(document):
    """Revert native MinionPro font definition (with extra options) to LaTeX.

    Replaces the minionpro roman font plus its \\font_roman_opts entry
    with an explicit \\usepackage[...]{MinionPro} in the preamble.
    NOTE(review): this excerpt elides several guard lines (returns,
    if/else headers); comments describe only the visible code.
    """
    # only applies to TeX fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):

    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)

    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")

        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        # only act on the minionpro font
        if roman != "minionpro":
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        j = find_token(document.header, "\\font_osf true", 0)
        preamble = "\\usepackage["
        # osf becomes a package option; clear the header flag
        document.header[j] = "\\font_osf false"
        preamble += "]{MinionPro}"
        add_to_preamble(document, [preamble])
        # the opts header line is no longer needed
        del document.header[x]
def revert_font_opts(document):
    """Revert font options by outputting \\setxxxfont or \\babelfont to the preamble.

    For each family (roman/sans/typewriter) whose \\font_*_opts header
    entry exists, emits a fontspec \\set*font (or \\babelfont with babel)
    call carrying those options, and resets the header font to default.
    NOTE(review): this excerpt elides several guard lines (if i != -1:,
    if NonTeXFonts:, if Babel:/else:, scale checks); comments describe
    only the visible code.
    """
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    Babel = (get_value(document.header, "\\language_package") == "babel")

    # 1. roman
    regexp = re.compile(r'(\\font_roman_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = romanopts[1].strip('"')
        del document.header[i]

        regexp = re.compile(r'(\\font_roman)')
        i = find_re(document.header, regexp, 0)
            # We need to use this regex since split() does not handle quote protection
            romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            font = romanfont[2].strip('"')
            romanfont[2] = '"default"'
            document.header[i] = " ".join(romanfont)
            if font != "default":
                # babel documents use \babelfont, plain fontspec otherwise
                preamble = "\\babelfont{rm}["
                preamble = "\\setmainfont["
                preamble += "Mapping=tex-text]{"
                add_to_preamble(document, [preamble])

    # 2. sans
    regexp = re.compile(r'(\\font_sans_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = sfopts[1].strip('"')
        del document.header[i]

        regexp = re.compile(r'(\\font_sf_scale)')
        i = find_re(document.header, regexp, 0)
            # second field of the scale line holds the percentage
            scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
        regexp = re.compile(r'(\\font_sans)')
        i = find_re(document.header, regexp, 0)
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            font = sffont[2].strip('"')
            sffont[2] = '"default"'
            document.header[i] = " ".join(sffont)
            if font != "default":
                preamble = "\\babelfont{sf}["
                preamble = "\\setsansfont["
                    # e.g. scaleval "85" becomes Scale=0.85
                    preamble += "Scale=0."
                    preamble += scaleval
                preamble += "Mapping=tex-text]{"
                add_to_preamble(document, [preamble])

    # 3. typewriter
    regexp = re.compile(r'(\\font_typewriter_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = ttopts[1].strip('"')
        del document.header[i]

        regexp = re.compile(r'(\\font_tt_scale)')
        i = find_re(document.header, regexp, 0)
            scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
        regexp = re.compile(r'(\\font_typewriter)')
        i = find_re(document.header, regexp, 0)
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            font = ttfont[2].strip('"')
            ttfont[2] = '"default"'
            document.header[i] = " ".join(ttfont)
            if font != "default":
                preamble = "\\babelfont{tt}["
                preamble = "\\setmonofont["
                    preamble += "Scale=0."
                    preamble += scaleval
                preamble += "Mapping=tex-text]{"
                add_to_preamble(document, [preamble])
def revert_plainNotoFonts_xopts(document):
    """Revert native (straight) Noto font definition (with extra options) to LaTeX.

    Applies only when roman is NotoSerif-TLF and sans/typewriter are left
    at default, i.e. noto is used as a "complete font"; then a single
    \\usepackage[opts]{noto} replaces the native settings.
    NOTE(review): this excerpt elides several guard lines (returns,
    osf handling); comments describe only the visible code.
    """
    # only applies to TeX fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):

    # old-style-figure flag (osf bool set from this in an elided line)
    y = find_token(document.header, "\\font_osf true", 0)

    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)
    # nothing to do without extra options or osf
    if x == -1 and not osf:

        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)

    # We need to use this regex since split() does not handle quote protection
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    roman = romanfont[1].strip('"')
    # only the plain Noto roman font qualifies
    if roman != "NotoSerif-TLF":

    j = find_token(document.header, "\\font_sans", 0)

    # We need to use this regex since split() does not handle quote protection
    sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    sf = sffont[1].strip('"')

    j = find_token(document.header, "\\font_typewriter", 0)

    # We need to use this regex since split() does not handle quote protection
    ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    tt = ttfont[1].strip('"')

    # So we have noto as "complete font"
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)

    preamble = "\\usepackage["
    preamble += "]{noto}"
    add_to_preamble(document, [preamble])
    # clear the osf flag and the now-reverted opts line
    document.header[y] = "\\font_osf false"
    del document.header[x]
def revert_notoFonts_xopts(document):
    """Revert native (extended) Noto font definition (with extra options) to LaTeX.

    Only applies to documents using TeX fonts; with non-TeX fonts the
    native setting is kept as-is.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option lists for the preamble
    # (fix: the guard 'return' and this initialization were missing,
    # leaving the 'if' without a body and fontmap undefined)
    fontmap = dict()
    fm = createFontMapping(['Noto'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_IBMFonts_xopts(document):
    """Revert native IBM font definition (with extra options) to LaTeX.

    Only applies to documents using TeX fonts; with non-TeX fonts the
    native setting is kept as-is.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option lists for the preamble
    # (fix: the guard 'return' and this initialization were missing,
    # leaving the 'if' without a body and fontmap undefined)
    fontmap = dict()
    fm = createFontMapping(['IBM'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_AdobeFonts_xopts(document):
    """Revert native Adobe font definition (with extra options) to LaTeX.

    Only applies to documents using TeX fonts; with non-TeX fonts the
    native setting is kept as-is.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option lists for the preamble
    # (fix: the guard 'return' and this initialization were missing,
    # leaving the 'if' without a body and fontmap undefined)
    fontmap = dict()
    fm = createFontMapping(['Adobe'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def convert_osf(document):
    """Convert the single \\font_osf param to the new per-family format.

    Renames \\font_osf to \\font_roman_osf and inserts \\font_sans_osf /
    \\font_typewriter_osf, set true only for TeX fonts known to carry
    old-style figures in those families.
    NOTE(review): this excerpt elides several guard lines (returns,
    if NonTeXFonts:, if osfval:, else headers); comments describe only
    the visible code.
    """
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, '\\font_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_osf.")

    # TeX sans/typewriter fonts that support old-style figures
    osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
    osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]

    osfval = str2bool(get_value(document.header, "\\font_osf", i))
    # rename the roman entry in place
    document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")

        # non-TeX fonts: osf only ever applied to roman
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")

        x = find_token(document.header, "\\font_sans", 0)
            document.warning("Malformed LyX document: Missing \\font_sans.")
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            sf = sffont[1].strip('"')
            # true only if the sans font actually has old-style figures
            document.header.insert(i, "\\font_sans_osf true")
            document.header.insert(i, "\\font_sans_osf false")

        x = find_token(document.header, "\\font_typewriter", 0)
            document.warning("Malformed LyX document: Missing \\font_typewriter.")
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            tt = ttfont[1].strip('"')
            document.header.insert(i + 1, "\\font_typewriter_osf true")
            document.header.insert(i + 1, "\\font_typewriter_osf false")

        # osf was off: both new params default to false
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
def revert_osf(document):
    """Revert the per-family \\font_*_osf params to the single \\font_osf.

    NOTE(review): this excerpt elides several guard lines (returns,
    conditionals); comments describe only the visible code.
    """
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, '\\font_roman_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_roman_osf.")

    osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
    # rename back to the old single param
    document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")

    i = find_token(document.header, '\\font_sans_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_sans_osf.")

    # NOTE(review): plain '=' here vs '|=' below would drop the roman
    # value — possibly an artifact of elided context; verify upstream.
    osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
    del document.header[i]

    i = find_token(document.header, '\\font_typewriter_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")

    # any family with osf turns the old single flag on
    osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
    del document.header[i]

    i = find_token(document.header, '\\font_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_osf.")
    document.header[i] = "\\font_osf true"
def revert_texfontopts(document):
    """Revert native TeX font definitions (with extra options) to LaTeX.

    Handles biolinum for sans and a fixed list of roman fonts, emitting
    \\usepackage[...]{<package>} with osf/sc/scale options and resetting
    the header font entries to default.
    NOTE(review): this excerpt elides several guard/else lines and the
    option assignments in the elif chains; comments describe only the
    visible code.
    """
    # only applies to TeX fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):

    # roman fonts this routine knows how to revert
    rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]

    # First the sf (biolinum only)
    regexp = re.compile(r'(\\font_sans_opts)')
    x = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = sfopts[1].strip('"')
        i = find_token(document.header, "\\font_sans", 0)
            document.warning("Malformed LyX document: Missing \\font_sans.")
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            sans = sffont[1].strip('"')
            if sans == "biolinum":
                sffont[1] = '"default"'
                document.header[i] = " ".join(sffont)
                j = find_token(document.header, "\\font_sans_osf true", 0)
                k = find_token(document.header, "\\font_sf_scale", 0)
                    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                    sfscale = document.header[k].split()
                    document.header[k] = " ".join(sfscale)
                    # float() can throw (elided try/except around this)
                    sf_scale = float(val)
                    document.warning("Invalid font_sf_scale value: " + val)
                preamble = "\\usepackage["
                    # osf becomes a package option; clear the header flag
                    document.header[j] = "\\font_sans_osf false"
                if sf_scale != 100.0:
                    preamble += 'scaled=' + str(sf_scale / 100.0) + ','
                preamble += "]{biolinum}"
                add_to_preamble(document, [preamble])
                del document.header[x]

    # Now the roman fonts
    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)

    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")

        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        # skip fonts this routine does not handle
        if not roman in rmfonts:
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        # map LyX font name to the LaTeX package where they differ
        if roman == "utopia":
        elif roman == "palatino":
            package = "mathpazo"
        elif roman == "times":
            package = "mathptmx"
        elif roman == "xcharter":
            package = "XCharter"
        j = find_token(document.header, "\\font_roman_osf true", 0)
        # per-font old-style-figure package options
        if roman == "cochineal":
            osf = "proportional,osf,"
        elif roman == "utopia":
        elif roman == "garamondx":
        elif roman == "libertine":
        elif roman == "palatino":
        elif roman == "xcharter":
        document.header[j] = "\\font_roman_osf false"
        k = find_token(document.header, "\\font_sc true", 0)
        # small-caps / expert package options
        if roman == "utopia":
        if roman == "palatino" and osf == "":
        document.header[k] = "\\font_sc false"
        preamble = "\\usepackage["
        preamble += "]{" + package + "}"
        add_to_preamble(document, [preamble])
        del document.header[x]
def convert_CantarellFont(document):
    """Handle the native Cantarell font definition (TeX fonts only)."""
    # Non-TeX (system) fonts need no conversion.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Cantarell']), "oldstyle")
def revert_CantarellFont(document):
    """Revert native Cantarell font definition to LaTeX.

    Only applies to documents using TeX fonts.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option lists for the preamble
        # (fix: this initialization was missing, leaving fontmap undefined)
        fontmap = dict()
        fm = createFontMapping(['Cantarell'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Handle the native Chivo font definition (TeX fonts only)."""
    # Non-TeX (system) fonts need no conversion.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Chivo']), "oldstyle")
def revert_ChivoFont(document):
    """Revert native Chivo font definition to LaTeX.

    Only applies to documents using TeX fonts.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option lists for the preamble
        # (fix: this initialization was missing, leaving fontmap undefined)
        fontmap = dict()
        fm = createFontMapping(['Chivo'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Handle the native Fira font definition (TeX fonts only)."""
    # Non-TeX (system) fonts need no conversion.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Fira']), "lf")
def revert_FiraFont(document):
    """Revert native Fira font definition to LaTeX.

    Only applies to documents using TeX fonts.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option lists for the preamble
        # (fix: this initialization was missing, leaving fontmap undefined)
        fontmap = dict()
        fm = createFontMapping(['Fira'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
2985 def convert_Semibolds(document):
2986 " Move semibold options to extraopts "
2988 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2990 i = find_token(document.header, "\\font_roman", 0)
2992 document.warning("Malformed LyX document: Missing \\font_roman.")
2994 # We need to use this regex since split() does not handle quote protection
2995 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2996 roman = romanfont[1].strip('"')
2997 if roman == "IBMPlexSerifSemibold":
2998 romanfont[1] = '"IBMPlexSerif"'
2999 document.header[i] = " ".join(romanfont)
3001 if NonTeXFonts == False:
3002 regexp = re.compile(r'(\\font_roman_opts)')
3003 x = find_re(document.header, regexp, 0)
3005 # Sensible place to insert tag
3006 fo = find_token(document.header, "\\font_sf_scale")
3008 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3010 document.header.insert(fo, "\\font_roman_opts \"semibold\"")
3012 # We need to use this regex since split() does not handle quote protection
3013 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3014 document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""
3016 i = find_token(document.header, "\\font_sans", 0)
3018 document.warning("Malformed LyX document: Missing \\font_sans.")
3020 # We need to use this regex since split() does not handle quote protection
3021 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3022 sf = sffont[1].strip('"')
3023 if sf == "IBMPlexSansSemibold":
3024 sffont[1] = '"IBMPlexSans"'
3025 document.header[i] = " ".join(sffont)
3027 if NonTeXFonts == False:
3028 regexp = re.compile(r'(\\font_sans_opts)')
3029 x = find_re(document.header, regexp, 0)
3031 # Sensible place to insert tag
3032 fo = find_token(document.header, "\\font_sf_scale")
3034 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3036 document.header.insert(fo, "\\font_sans_opts \"semibold\"")
3038 # We need to use this regex since split() does not handle quote protection
3039 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3040 document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""
3042 i = find_token(document.header, "\\font_typewriter", 0)
3044 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3046 # We need to use this regex since split() does not handle quote protection
3047 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3048 tt = ttfont[1].strip('"')
3049 if tt == "IBMPlexMonoSemibold":
3050 ttfont[1] = '"IBMPlexMono"'
3051 document.header[i] = " ".join(ttfont)
3053 if NonTeXFonts == False:
3054 regexp = re.compile(r'(\\font_typewriter_opts)')
3055 x = find_re(document.header, regexp, 0)
3057 # Sensible place to insert tag
3058 fo = find_token(document.header, "\\font_tt_scale")
3060 document.warning("Malformed LyX document! Missing \\font_tt_scale")
3062 document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
3064 # We need to use this regex since split() does not handle quote protection
3065 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3066 document.header[x] = "\\font_typewriter_opts \"semibold, " + sfopts[1].strip('"') + "\""
# Map the old Noto "-TLF" font names (roman/sans/typewriter) onto the new
# unified "...Regular" names in the respective \font_* header lines.
3069 def convert_NotoRegulars(document):
3070 " Merge diverse noto regular fonts "
3072 i = find_token(document.header, "\\font_roman", 0)
3074 document.warning("Malformed LyX document: Missing \\font_roman.")
3076 # We need to use this regex since split() does not handle quote protection
3077 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3078 roman = romanfont[1].strip('"')
3079 if roman == "NotoSerif-TLF":
3080 romanfont[1] = '"NotoSerifRegular"'
3081 document.header[i] = " ".join(romanfont)
3083 i = find_token(document.header, "\\font_sans", 0)
3085 document.warning("Malformed LyX document: Missing \\font_sans.")
3087 # We need to use this regex since split() does not handle quote protection
3088 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3089 sf = sffont[1].strip('"')
3090 if sf == "NotoSans-TLF":
3091 sffont[1] = '"NotoSansRegular"'
3092 document.header[i] = " ".join(sffont)
3094 i = find_token(document.header, "\\font_typewriter", 0)
3096 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3098 # We need to use this regex since split() does not handle quote protection
3099 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3100 tt = ttfont[1].strip('"')
3101 if tt == "NotoMono-TLF":
3102 ttfont[1] = '"NotoMonoRegular"'
3103 document.header[i] = " ".join(ttfont)
# Convert a LaTeX-package CrimsonPro setup to the native LyX font setting,
# defaulting to "lf" (lining figures). TeX-fonts mode only.
3106 def convert_CrimsonProFont(document):
3107 " Handle CrimsonPro font definition to LaTeX "
3109 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3110 fm = createFontMapping(['CrimsonPro'])
3111 convert_fonts(document, fm, "lf")
# Revert the native CrimsonPro font selection to LaTeX preamble/package code.
# NOTE(review): `fontmap` has no visible initialisation — dropped lines in this chunk.
3113 def revert_CrimsonProFont(document):
3114 " Revert native CrimsonPro font definition to LaTeX "
3116 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3118 fm = createFontMapping(['CrimsonPro'])
3119 if revert_fonts(document, fm, fontmap, False, True):
3120 add_preamble_fonts(document, fontmap)
3123 def revert_pagesizes(document):
3124 " Revert new page sizes in memoir and KOMA to options "
3126 if document.textclass != "memoir" and document.textclass[:2] != "scr":
3129 i = find_token(document.header, "\\use_geometry true", 0)
3133 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3135 i = find_token(document.header, "\\papersize", 0)
3137 document.warning("Malformed LyX document! Missing \\papersize header.")
3139 val = get_value(document.header, "\\papersize", i)
3144 document.header[i] = "\\papersize default"
3146 i = find_token(document.header, "\\options", 0)
3148 i = find_token(document.header, "\\textclass", 0)
3150 document.warning("Malformed LyX document! Missing \\textclass header.")
3152 document.header.insert(i, "\\options " + val)
3154 document.header[i] = document.header[i] + "," + val
3157 def convert_pagesizes(document):
3158 " Convert to new page sizes in memoir and KOMA to options "
3160 if document.textclass != "memoir" and document.textclass[:3] != "scr":
3163 i = find_token(document.header, "\\use_geometry true", 0)
3167 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3169 i = find_token(document.header, "\\papersize", 0)
3171 document.warning("Malformed LyX document! Missing \\papersize header.")
3173 val = get_value(document.header, "\\papersize", i)
3178 i = find_token(document.header, "\\use_geometry false", 0)
3180 # Maintain use of geometry
3181 document.header[1] = "\\use_geometry true"
# Revert the new KOMA-script base font sizes to a "fontsize=<pt>" class option.
# Stock sizes (default/10/11/12) are left untouched; other values are reset to
# "default" and appended to \options (or inserted before \textclass).
3183 def revert_komafontsizes(document):
3184 " Revert new font sizes in KOMA to options "
3186 if document.textclass[:3] != "scr":
3189 i = find_token(document.header, "\\paperfontsize", 0)
3191 document.warning("Malformed LyX document! Missing \\paperfontsize header.")
3194 defsizes = ["default", "10", "11", "12"]
3196 val = get_value(document.header, "\\paperfontsize", i)
3201 document.header[i] = "\\paperfontsize default"
3203 fsize = "fontsize=" + val
3205 i = find_token(document.header, "\\options", 0)
3207 i = find_token(document.header, "\\textclass", 0)
3209 document.warning("Malformed LyX document! Missing \\textclass header.")
3211 document.header.insert(i, "\\options " + fsize)
3213 document.header[i] = document.header[i] + "," + fsize
# Revert qualified (multi-key) biblatex citation insets that reuse a key to
# plain ERT \*cites commands, since LyX 2.3 requires unique keys per list.
# NOTE(review): this chunk has dropped many lines (loop headers, the
# `ql_citations = {` opener, `premap`/`postmap` initialisers, -1 guards) —
# stale fused line numbers show the gaps; restore from upstream before running.
3216 def revert_dupqualicites(document):
3217 " Revert qualified citation list commands with duplicate keys to ERT "
3219 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3220 # we need to revert those with multiple uses of the same key.
3224 i = find_token(document.header, "\\cite_engine", 0)
3226 document.warning("Malformed document! Missing \\cite_engine")
3228 engine = get_value(document.header, "\\cite_engine", i)
3230 if not engine in ["biblatex", "biblatex-natbib"]:
3233 # Citation insets that support qualified lists, with their LaTeX code
3237 "citet" : "textcites",
3238 "Citet" : "Textcites",
3239 "citep" : "parencites",
3240 "Citep" : "Parencites",
3241 "Footcite" : "Smartcites",
3242 "footcite" : "smartcites",
3243 "Autocite" : "Autocites",
3244 "autocite" : "autocites",
3249 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3252 j = find_end_of_inset(document.body, i)
3254 document.warning("Can't find end of citation inset at line %d!!" %(i))
3258 k = find_token(document.body, "LatexCommand", i, j)
3260 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3264 cmd = get_value(document.body, "LatexCommand", k)
3265 if not cmd in list(ql_citations.keys()):
3269 pres = find_token(document.body, "pretextlist", i, j)
3270 posts = find_token(document.body, "posttextlist", i, j)
3271 if pres == -1 and posts == -1:
# Only insets whose keys actually contain duplicates are reverted.
3276 key = get_quoted_value(document.body, "key", i, j)
3278 document.warning("Citation inset at line %d does not have a key!" %(i))
3282 keys = key.split(",")
3283 ukeys = list(set(keys))
3284 if len(keys) == len(ukeys):
# Build per-key maps of pre-/post-texts (tab-separated multi-values).
3289 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3290 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3292 pre = get_quoted_value(document.body, "before", i, j)
3293 post = get_quoted_value(document.body, "after", i, j)
3294 prelist = pretexts.split("\t")
3297 ppp = pp.split(" ", 1)
3303 if ppp[0] in premap:
3304 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3306 premap[ppp[0]] = val
3307 postlist = posttexts.split("\t")
3311 ppp = pp.split(" ", 1)
3317 if ppp[0] in postmap:
3318 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3320 postmap[ppp[0]] = val
3321 # Replace known new commands with ERT
3322 if "(" in pre or ")" in pre:
3323 pre = "{" + pre + "}"
3324 if "(" in post or ")" in post:
3325 post = "{" + post + "}"
3326 res = "\\" + ql_citations[cmd]
3328 res += "(" + pre + ")"
3330 res += "(" + post + ")"
# Emit one [pre][post]{key} group per key, consuming the maps as we go.
3334 if premap.get(kk, "") != "":
3335 akeys = premap[kk].split("\t", 1)
3338 res += "[" + akey + "]"
3340 premap[kk] = "\t".join(akeys[1:])
3343 if postmap.get(kk, "") != "":
3344 akeys = postmap[kk].split("\t", 1)
3347 res += "[" + akey + "]"
3349 postmap[kk] = "\t".join(akeys[1:])
3352 elif premap.get(kk, "") != "":
3354 res += "{" + kk + "}"
3355 document.body[i:j+1] = put_cmd_in_ert([res])
# Rename old LyX paper-size names by stripping the "paper" suffix
# (e.g. "a4paper" -> "a4") in the \papersize header.
3358 def convert_pagesizenames(document):
3359 " Convert LyX page sizes names "
3361 i = find_token(document.header, "\\papersize", 0)
3363 document.warning("Malformed LyX document! Missing \\papersize header.")
3365 oldnames = ["letterpaper", "legalpaper", "executivepaper", \
3366 "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
3367 "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
3368 "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
3369 val = get_value(document.header, "\\papersize", i)
3371 newval = val.replace("paper", "")
3372 document.header[i] = "\\papersize " + newval
# Reverse of convert_pagesizenames: append the "paper" suffix to the new
# short paper-size names (e.g. "a4" -> "a4paper") in the \papersize header.
3374 def revert_pagesizenames(document):
3375 " Convert LyX page sizes names "
3377 i = find_token(document.header, "\\papersize", 0)
3379 document.warning("Malformed LyX document! Missing \\papersize header.")
3381 newnames = ["letter", "legal", "executive", \
3382 "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
3383 "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
3384 "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
3385 val = get_value(document.header, "\\papersize", i)
3387 newval = val + "paper"
3388 document.header[i] = "\\papersize " + newval
# Replace native endnote FloatList insets with ERT \theendnotes when the
# endnotes/foottoend module is active.
3391 def revert_theendnotes(document):
3392 " Reverts native support of \\theendnotes to TeX-code "
3394 if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
3399 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3402 j = find_end_of_inset(document.body, i)
3404 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3407 document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
# Revert native enotez support: Endnote flex insets become \endnote ERT,
# FloatList insets become \printendnotes, the package is added to the
# preamble and the enotez/foottoenotez modules are removed.
3410 def revert_enotez(document):
3411 " Reverts native support of enotez package to TeX-code "
3413 if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
3417 if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
3420 revert_flex_inset(document.body, "Endnote", "\\endnote")
3424 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3427 j = find_end_of_inset(document.body, i)
3429 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3433 document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
3436 add_to_preamble(document, ["\\usepackage{enotez}"])
3437 document.del_module("enotez")
3438 document.del_module("foottoenotez")
# Revert native memoir endnotes: Endnote flex insets become \pagenote ERT
# (or \endnote when an endnote module is also loaded, to avoid a clash),
# FloatList insets become \printpagenotes[*], and \makepagenote is added.
3441 def revert_memoir_endnotes(document):
3442 " Reverts native support of memoir endnotes to TeX-code "
3444 if document.textclass != "memoir":
3447 encommand = "\\pagenote"
3448 modules = document.get_module_list()
3449 if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
3450 encommand = "\\endnote"
3452 revert_flex_inset(document.body, "Endnote", encommand)
3456 i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
3459 j = find_end_of_inset(document.body, i)
3461 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3464 if document.body[i] == "\\begin_inset FloatList pagenote*":
3465 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
3467 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
3468 add_to_preamble(document, ["\\makepagenote"])
# For every Graphics inset: turn a "height" parameter back into a
# "totalheight=" entry inside the "special" parameter (the old encoding),
# restoring any height previously stashed in "special" as height=.
# NOTE(review): several guard/branch lines were dropped from this extraction
# (stale fused line numbers show the gaps), so the exact branch nesting
# between lines 3513-3526 cannot be verified from here.
3471 def revert_totalheight(document):
3472 " Reverts graphics height parameter from totalheight to height "
3476 i = find_token(document.body, "\\begin_inset Graphics", i)
3479 j = find_end_of_inset(document.body, i)
3481 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3485 rx = re.compile(r'\s*special\s*(\S+)$')
3486 k = find_re(document.body, rx, i, j)
3490 m = rx.match(document.body[k])
3492 special = m.group(1)
3493 mspecial = special.split(',')
3494 for spc in mspecial:
3495 if spc[:7] == "height=":
3496 oldheight = spc.split('=')[1]
3497 mspecial.remove(spc)
3499 if len(mspecial) > 0:
3500 special = ",".join(mspecial)
3504 rx = re.compile(r'(\s*height\s*)(\S+)$')
3505 kk = find_re(document.body, rx, i, j)
3507 m = rx.match(document.body[kk])
3513 val = val + "," + special
3514 document.body[k] = "\tspecial " + "totalheight=" + val
3516 document.body.insert(kk, "\tspecial totalheight=" + val)
3518 document.body[kk] = m.group(1) + oldheight
3520 del document.body[kk]
3521 elif oldheight != "":
3523 document.body[k] = "\tspecial " + special
3524 document.body.insert(k, "\theight " + oldheight)
3526 document.body[k] = "\theight " + oldheight
# Inverse of revert_totalheight: move a "totalheight=" entry out of the
# "special" parameter into the Graphics inset's height parameter.
# NOTE(review): line 3575 inserts at `kk + 1` while the revert twin (line 3516)
# inserts at `kk` — looks like an off-by-one; confirm against upstream
# lyx_2_4.py before relying on this (dropped guard lines hide the branching).
3530 def convert_totalheight(document):
3531 " Converts graphics height parameter from totalheight to height "
3535 i = find_token(document.body, "\\begin_inset Graphics", i)
3538 j = find_end_of_inset(document.body, i)
3540 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3544 rx = re.compile(r'\s*special\s*(\S+)$')
3545 k = find_re(document.body, rx, i, j)
3549 m = rx.match(document.body[k])
3551 special = m.group(1)
3552 mspecial = special.split(',')
3553 for spc in mspecial:
3554 if spc[:12] == "totalheight=":
3555 newheight = spc.split('=')[1]
3556 mspecial.remove(spc)
3558 if len(mspecial) > 0:
3559 special = ",".join(mspecial)
3563 rx = re.compile(r'(\s*height\s*)(\S+)$')
3564 kk = find_re(document.body, rx, i, j)
3566 m = rx.match(document.body[kk])
3572 val = val + "," + special
3573 document.body[k] = "\tspecial " + "height=" + val
3575 document.body.insert(kk + 1, "\tspecial height=" + val)
3577 document.body[kk] = m.group(1) + newheight
3579 del document.body[kk]
3580 elif newheight != "":
3581 document.body.insert(k, "\theight " + newheight)
# Replace the changebars module with the native \change_bars header flag,
# inserted next to \output_changes; the module is removed either way.
3585 def convert_changebars(document):
3586 " Converts the changebars module to native solution "
3588 if not "changebars" in document.get_module_list():
3591 i = find_token(document.header, "\\output_changes", 0)
3593 document.warning("Malformed LyX document! Missing \\output_changes header.")
3594 document.del_module("changebars")
3597 document.header.insert(i, "\\change_bars true")
3598 document.del_module("changebars")
# Inverse of convert_changebars: drop the \change_bars header line and,
# when it was enabled, re-add the changebars module.
3601 def revert_changebars(document):
3602 " Converts native changebar param to module "
3604 i = find_token(document.header, "\\change_bars", 0)
3606 document.warning("Malformed LyX document! Missing \\change_bars header.")
3609 val = get_value(document.header, "\\change_bars", i)
3612 document.add_module("changebars")
3614 del document.header[i]
# Insert the new \postpone_fragile_content buffer param next to
# \output_changes, defaulting it to false for old documents (see #2154).
3617 def convert_postpone_fragile(document):
3618 " Adds false \\postpone_fragile_content buffer param "
3620 i = find_token(document.header, "\\output_changes", 0)
3622 document.warning("Malformed LyX document! Missing \\output_changes header.")
3624 # Set this to false for old documents (see #2154)
3625 document.header.insert(i, "\\postpone_fragile_content false")
# Remove the \postpone_fragile_content header line again on downgrade.
3628 def revert_postpone_fragile(document):
3629 " Remove \\postpone_fragile_content buffer param "
3631 i = find_token(document.header, "\\postpone_fragile_content", 0)
3633 document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
3636 del document.header[i]
# Strip the change="..." tracking attribute from <column .../> and <row .../>
# tags inside every Tabular inset (older formats cannot represent it).
3639 def revert_colrow_tracking(document):
3640 " Remove change tag from tabular columns/rows "
3643 i = find_token(document.body, "\\begin_inset Tabular", i+1)
3646 j = find_end_of_inset(document.body, i+1)
3648 document.warning("Malformed LyX document: Could not find end of tabular.")
3650 for k in range(i, j):
3651 m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
3653 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3654 m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
3656 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
# Upgrade \maintain_unincluded_children from the old boolean to the new
# tristate value: true -> "strict", otherwise "no".
3659 def convert_counter_maintenance(document):
3660 " Convert \\maintain_unincluded_children buffer param from boolean value to tristate "
3662 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3664 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3667 val = get_value(document.header, "\\maintain_unincluded_children", i)
3670 document.header[i] = "\\maintain_unincluded_children strict"
3672 document.header[i] = "\\maintain_unincluded_children no"
# Downgrade \maintain_unincluded_children from tristate back to the old
# boolean: "no" -> false, anything else -> true.
3675 def revert_counter_maintenance(document):
3676 " Revert \\maintain_unincluded_children buffer param to previous boolean value "
3678 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3680 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3683 val = get_value(document.header, "\\maintain_unincluded_children", i)
3686 document.header[i] = "\\maintain_unincluded_children false"
3688 document.header[i] = "\\maintain_unincluded_children true"
# Revert counter insets to ERT \setcounter/\addtocounter commands where a
# LaTeX equivalent exists; save/restore go through auxiliary LyXSave<name>
# counters, which are \newcounter'd in the preamble at the end.
3691 def revert_counter_inset(document):
3692 " Revert counter inset to ERT, where possible"
3694 needed_counters = {}
3696 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3699 j = find_end_of_inset(document.body, i)
3701 document.warning("Can't find end of counter inset at line %d!" % i)
# lyxonly insets only affect LyX's own numbering; just drop them.
3704 lyx = get_quoted_value(document.body, "lyxonly", i, j)
3706 # there is nothing we can do to affect the LyX counters
3707 document.body[i : j + 1] = []
3710 cnt = get_quoted_value(document.body, "counter", i, j)
3712 document.warning("No counter given for inset at line %d!" % i)
3716 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
# NOTE(review): the next line looks like a leftover debug trace — consider removing.
3717 document.warning(cmd)
3720 val = get_quoted_value(document.body, "value", i, j)
3722 document.warning("Can't convert counter inset at line %d!" % i)
3724 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3725 elif cmd == "addto":
3726 val = get_quoted_value(document.body, "value", i, j)
3728 document.warning("Can't convert counter inset at line %d!" % i)
3730 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3731 elif cmd == "reset":
3732 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
3734 needed_counters[cnt] = 1
3735 savecnt = "LyXSave" + cnt
3736 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3737 elif cmd == "restore":
3738 needed_counters[cnt] = 1
3739 savecnt = "LyXSave" + cnt
3740 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
# NOTE(review): message says "Unknown counter command" but formats `cnt`
# (the counter name) — presumably `cmd` was intended; confirm upstream.
3742 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
3745 document.body[i : j + 1] = ert
3750 for cnt in needed_counters:
3751 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3753 add_to_preamble(document, pretext)
# Conversion machinery tables: `supported_versions`, then the `convert` and
# `revert` step lists mapping each file-format number to its handler functions.
# NOTE(review): the `convert = [` opener (and several other lines) were dropped
# from this extraction — stale fused line numbers show the gaps.
3760 supported_versions = ["2.4.0", "2.4"]
3762 [545, [convert_lst_literalparam]],
3767 [550, [convert_fontenc]],
3774 [557, [convert_vcsinfo]],
3775 [558, [removeFrontMatterStyles]],
3778 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3782 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3783 [566, [convert_hebrew_parentheses]],
3789 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3790 [573, [convert_inputencoding_namechange]],
3791 [574, [convert_ruby_module, convert_utf8_japanese]],
3792 [575, [convert_lineno, convert_aaencoding]],
3794 [577, [convert_linggloss]],
3798 [581, [convert_osf]],
3799 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3800 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3802 [585, [convert_pagesizes]],
3804 [587, [convert_pagesizenames]],
3806 [589, [convert_totalheight]],
3807 [590, [convert_changebars]],
3808 [591, [convert_postpone_fragile]],
3810 [593, [convert_counter_maintenance]],
3814 revert = [[593, [revert_counter_inset]],
3815 [592, [revert_counter_maintenance]],
3816 [591, [revert_colrow_tracking]],
3817 [590, [revert_postpone_fragile]],
3818 [589, [revert_changebars]],
3819 [588, [revert_totalheight]],
3820 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3821 [586, [revert_pagesizenames]],
3822 [585, [revert_dupqualicites]],
3823 [584, [revert_pagesizes,revert_komafontsizes]],
3824 [583, [revert_vcsinfo_rev_abbrev]],
3825 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3826 [581, [revert_CantarellFont,revert_FiraFont]],
3827 [580, [revert_texfontopts,revert_osf]],
3828 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3829 [578, [revert_babelfont]],
3830 [577, [revert_drs]],
3831 [576, [revert_linggloss, revert_subexarg]],
3832 [575, [revert_new_languages]],
3833 [574, [revert_lineno, revert_aaencoding]],
3834 [573, [revert_ruby_module, revert_utf8_japanese]],
3835 [572, [revert_inputencoding_namechange]],
3836 [571, [revert_notoFonts]],
3837 [570, [revert_cmidruletrimming]],
3838 [569, [revert_bibfileencodings]],
3839 [568, [revert_tablestyle]],
3840 [567, [revert_soul]],
3841 [566, [revert_malayalam]],
3842 [565, [revert_hebrew_parentheses]],
3843 [564, [revert_AdobeFonts]],
3844 [563, [revert_lformatinfo]],
3845 [562, [revert_listpargs]],
3846 [561, [revert_l7ninfo]],
3847 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3848 [559, [revert_timeinfo, revert_namenoextinfo]],
3849 [558, [revert_dateinfo]],
3850 [557, [addFrontMatterStyles]],
3851 [556, [revert_vcsinfo]],
3852 [555, [revert_bibencoding]],
3853 [554, [revert_vcolumns]],
3854 [553, [revert_stretchcolumn]],
3855 [552, [revert_tuftecite]],
3856 [551, [revert_floatpclass, revert_floatalignment]],
3857 [550, [revert_nospellcheck]],
3858 [549, [revert_fontenc]],
3859 [548, []],# dummy format change
3860 [547, [revert_lscape]],
3861 [546, [revert_xcharter]],
3862 [545, [revert_paratype]],
3863 [544, [revert_lst_literalparam]]
3867 if __name__ == "__main__":