1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # NOTE(review): this listing is truncated — the loop header iterating over
    # the packages in `fontmap` (and the else-branch that clears `xoption`)
    # is missing; `pkg` below is otherwise unbound. Verify against upstream.
    # Build an "[opt1,opt2]" option string when the package has options.
    if len(fontmap[pkg]) > 0:
        xoption = "[" + ",".join(fontmap[pkg]) + "]"
    # Emit \usepackage[options]{package} into the user preamble.
    preamble = "\\usepackage%s{%s}" % (xoption, pkg)
    add_to_preamble(document, [preamble])
def createkey(pkg, options):
    # Key used by font2pkgmap/pkg2fontmap: "<package>:<opt1>-<opt2>-...".
    # NOTE(review): one source line between the def and the return is missing
    # from this listing — confirm nothing (e.g. an options.sort()) was dropped.
    return pkg + ':' + "-".join(options)
# NOTE(review): the enclosing `class fontinfo:` header and its `__init__`
# definition are missing from this listing; the initializers below are its
# body. Attributes `self.package` / `self.options` (referenced by the last
# line) are also absent here — verify against upstream lyx2lyx.
self.fontname = None # key into font2pkgmap
self.fonttype = None # roman,sans,typewriter,math
self.scaletype = None # None,sf,tt
self.scaleopt = None # None, 'scaled', 'scale'
self.pkgkey = None # key into pkg2fontmap
self.osfopt = None # None, string
self.osfdef = "false" # "false" or "true"
# NOTE(review): the line below belongs to a separate method whose `def` line
# (presumably create_pkgkey(self)) is missing from the listing.
self.pkgkey = createkey(self.package, self.options)
# NOTE(review): the enclosing class (the font-mapping container) and its
# `__init__` header are missing from this listing; these are the lookup
# tables it initializes.
self.font2pkgmap = dict()  # LyX font name -> fontinfo
self.pkg2fontmap = dict()  # pkgkey (see createkey()) -> LyX font name
self.pkginmap = dict() # defines, if a map for package exists
def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
    " Expand fontinfo mapping"
    # fontlist: list of fontnames, each element
    # may contain a ','-separated list of needed options
    # like e.g. 'IBMPlexSansCondensed,condensed'
    # font_type: one of 'roman', 'sans', 'typewriter', 'math'
    # scale_type: one of None, 'sf', 'tt'
    # pkg: package defining the font. Defaults to fontname if None
    # scaleopt: one of None, 'scale', 'scaled', or some other string
    # to be used in scale option (e.g. scaled=0.7)
    # osfopt: None or some other string to be used in osf option
    # osfdef: "true" if osf is default
    # NOTE(review): truncated listing — the loop over font_list, the creation
    # of `fe` (a fontinfo instance) and `font_name`, and the branches applying
    # osf/scale/pkg defaults are missing. Verify against upstream lyx2lyx.
    fe.fonttype = font_type
    fe.scaletype = scale_type
    fe.fontname = font_name
    fe.scaleopt = scaleopt
    # Empty/None pkg: the package name equals the font name.
    fe.package = font_name
    self.font2pkgmap[font_name] = fe
    if fe.pkgkey in self.pkg2fontmap:
        # Repeated the same entry? Check content
        if self.pkg2fontmap[fe.pkgkey] != font_name:
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
    self.pkg2fontmap[fe.pkgkey] = font_name
    self.pkginmap[fe.package] = 1
def getfontname(self, pkg, options):
    # Reverse lookup: map a (package, options) pair back to the LyX font name.
    # NOTE(review): truncated listing — the guard bodies and the
    # return statements (presumably `return None` on miss, `return fontname`
    # on the final key match) are missing. Verify against upstream lyx2lyx.
    pkgkey = createkey(pkg, options)
    if not pkgkey in self.pkg2fontmap:
    fontname = self.pkg2fontmap[pkgkey]
    # Consistency check between the two mapping tables.
    if not fontname in self.font2pkgmap:
        document.error("Something is wrong in pkgname+options <-> fontname mapping")
    if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    # * Would be more handy to parse latexFonts file,
    # but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    # NOTE(review): truncated listing — the creation of the returned mapping
    # object (`fm`), the leading `if font == 'DejaVu':` / `elif font == 'IBM':`
    # / Noto / Fira branch headers and the final `return fm` are missing.
    # Verify against upstream lyx2lyx.
    for font in fontlist:
        # (DejaVu branch — its `if` header is missing from this listing)
        fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
        fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
        fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
        # (IBMPlex branch — its `elif` header is missing from this listing)
        fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                              'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                              'IBMPlexSerifSemibold,semibold'],
                             "roman", None, "plex-serif")
        fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                              'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                              'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                             "sans", "sf", "plex-sans", "scale")
        fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                              'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                              'IBMPlexMonoSemibold,semibold'],
                             "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
        # (Noto branch — its `elif` header is missing from this listing)
        fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                              'NotoSerifThin,thin', 'NotoSerifLight,light',
                              'NotoSerifExtralight,extralight'],
                             "roman", None, "noto-serif", None, "osf")
        fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                              'NotoSansThin,thin', 'NotoSansLight,light',
                              'NotoSansExtralight,extralight'],
                             "sans", "sf", "noto-sans", "scaled")
        fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
        elif font == 'Chivo':
            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
                                  'Chivo,regular', 'ChivoMedium,medium'],
                                 "sans", "sf", "Chivo", "scale", "oldstyle")
        elif font == 'CrimsonPro':
            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
                                  'CrimsonProMedium,medium'],
                                 "roman", None, "CrimsonPro", None, "lf", "true")
        # (Fira branch — its `elif` header is missing from this listing)
        fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
                              'FiraSansThin,thin', 'FiraSansLight,light',
                              'FiraSansExtralight,extralight',
                              'FiraSansUltralight,ultralight'],
                             "sans", "sf", "FiraSans", "scaled", "lf", "true")
        fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
def convert_fonts(document, fm, osfoption = "osf"):
    " Handle font definition (LaTeX preamble -> native) "
    # NOTE(review): this listing is truncated — many lines and all indentation
    # were lost, so the original control-flow nesting is NOT reconstructed
    # below. Verify against upstream lyx2lyx before editing.
    # Matches \usepackage[opts]{pkg} preamble lines.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    # Matches scale/scaled options, capturing the scale factor.
    rscaleopt = re.compile(r'^scaled?=(.*)')
    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580
    # Scan the preamble for supported \usepackage lines.
    while i < len(document.preamble):
    i = find_re(document.preamble, rpkg, i+1)
    mo = rpkg.search(document.preamble[i])
    if mo == None or mo.group(2) == None:
    # Normalize the option list (strip blanks, split on commas).
    options = mo.group(2).replace(' ', '').split(",")
    while o < len(options):
    if options[o] == osfoption:
    mo = rscaleopt.search(options[o])
    if not pkg in fm.pkginmap:
    # Try with name-option combination first
    # (only one default option supported currently)
    while o < len(options):
    fn = fm.getfontname(pkg, [opt])
    fn = fm.getfontname(pkg, [])
    fn = fm.getfontname(pkg, options)
    # Known font: drop the preamble line and set native header values.
    del document.preamble[i]
    fontinfo = fm.font2pkgmap[fn]
    if fontinfo.scaletype == None:
    fontscale = "\\font_" + fontinfo.scaletype + "_scale"
    fontinfo.scaleval = oscale
    # Transfer old-style-figures setting to the \font_..._osf header.
    if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
    if fontinfo.osfopt == None:
    # NOTE(review): extend() with a string adds its individual characters
    # ('o','s','f') — presumably options.append(osfoption) was intended;
    # confirm against upstream before changing.
    options.extend(osfoption)
    osf = find_token(document.header, "\\font_osf false")
    osftag = "\\font_osf"
    if osf == -1 and fontinfo.fonttype != "math":
    # Try with newer format
    osftag = "\\font_" + fontinfo.fonttype + "_osf"
    osf = find_token(document.header, osftag + " false")
    document.header[osf] = osftag + " true"
    # Also drop the marker comment lyx2lyx itself adds before its lines.
    if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
    del document.preamble[i-1]
    # Write the scale value into the \font_sf/tt_scale header.
    if fontscale != None:
    j = find_token(document.header, fontscale, 0)
    val = get_value(document.header, fontscale, j)
    scale = "%03d" % int(float(oscale) * 100)
    document.header[j] = fontscale + " " + scale + " " + vals[1]
    # Replace the font name in the \font_<type> header line.
    ft = "\\font_" + fontinfo.fonttype
    j = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, j)
    words = val.split() # ! splits also values like '"DejaVu Sans"'
    words[0] = '"' + fn + '"'
    document.header[j] = ft + ' ' + ' '.join(words)
    # Store any remaining package options in \font_<type>_opts (format > 580).
    if haveFontOpts and fontinfo.fonttype != "math":
    fotag = "\\font_" + fontinfo.fonttype + "_opts"
    fo = find_token(document.header, fotag)
    document.header[fo] = fotag + " \"" + ",".join(options) + "\""
    # Sensible place to insert tag
    fo = find_token(document.header, "\\font_sf_scale")
    document.warning("Malformed LyX document! Missing \\font_sf_scale")
    document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    " Revert native font definition to LaTeX "
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    # NOTE(review): this listing is truncated — many lines and all indentation
    # were lost (e.g. the loop counter initialisation, -1 guards, the return
    # value), so nesting is NOT reconstructed below. Verify against upstream.
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    # Scan the header for \font_<type> lines.
    while i < len(document.header):
    i = find_re(document.header, rfontscale, i+1)
    mo = rfontscale.search(document.header[i])
    ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
    val = get_value(document.header, ft, i)
    words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
    font = words[0].strip('"') # TeX font name has no whitespace
    if not font in fm.font2pkgmap:
    fontinfo = fm.font2pkgmap[font]
    val = fontinfo.package
    if not val in fontmap:
    # Optionally transfer \font_<type>_opts values to the package options.
    if OnlyWithXOpts or WithXOpts:
    if ft == "\\font_math":
    regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
    if ft == "\\font_sans":
    regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
    elif ft == "\\font_typewriter":
    regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
    x = find_re(document.header, regexp, 0)
    if x == -1 and OnlyWithXOpts:
    # We need to use this regex since split() does not handle quote protection
    xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = xopts[1].strip('"').split(",")
    fontmap[val].extend(opts)
    del document.header[x]
    # Reset the header font to "default".
    words[0] = '"default"'
    document.header[i] = ft + ' ' + ' '.join(words)
    # Transfer the scale value to a scaled=/scale= package option.
    if fontinfo.scaleopt != None:
    xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
    mo = rscales.search(xval)
    # set correct scale option
    fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
    # Transfer the old-style-figures header to an osf package option.
    if fontinfo.osfopt != None:
    if fontinfo.osfdef == "true":
    osf = find_token(document.header, "\\font_osf " + oldval)
    if osf == -1 and ft != "\\font_math":
    # Try with newer format
    osftag = "\\font_roman_osf " + oldval
    if ft == "\\font_sans":
    osftag = "\\font_sans_osf " + oldval
    elif ft == "\\font_typewriter":
    osftag = "\\font_typewriter_osf " + oldval
    osf = find_token(document.header, osftag)
    fontmap[val].extend([fontinfo.osfopt])
    # Finally add the font's own fixed options (e.g. 'condensed').
    if len(fontinfo.options) > 0:
    fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Fix: guard against a missing \inputencoding header; without it, i == -1
    # would silently rewrite the last header line (negative indexing).
    if i == -1:
        return
    # "auto" -> "auto-legacy", "default" -> "auto-legacy-plain"
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Fix: guard against a missing \inputencoding header; without it, i == -1
    # would silently rewrite the last header line (negative indexing).
    if i == -1:
        return
    # "auto-legacy-plain" -> "default", "auto-legacy" -> "auto"
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only documents compiled with TeX fonts carry these preamble definitions.
    uses_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if not uses_tex_fonts:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap (package -> options) is filled by revert_fonts() and then
        # written out as \usepackage lines by add_preamble_fonts().
        # Fix: this dict was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Nothing to do unless the document is compiled with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap (package -> options) is filled by revert_fonts() and then
        # written out as \usepackage lines by add_preamble_fonts().
        # Fix: this dict was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Applies only when the document uses TeX fonts.
    tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if tex_fonts:
        convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap (package -> options) is filled by revert_fonts() and then
        # written out as \usepackage lines by add_preamble_fonts().
        # Fix: this dict was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"
    # NOTE(review): truncated listing — the scan loop (counter init, while,
    # -1 guards, continue statements, blank-line trimming body) is partially
    # missing; nesting is not reconstructed. Verify against upstream lyx2lyx.
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    # Extract the layout name after the '\begin_layout ' prefix.
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Also absorb trailing empty lines after the layout.
    while document.body[j+1].strip() == '':
    # Delete the whole Begin/EndFrontmatter paragraph.
    document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"
    # NOTE(review): truncated listing — the early return, parts of the inserted
    # note text, the scan loop and the tracking of the first matching layout
    # (`first`) are missing; nesting is not reconstructed. Verify upstream.
    if document.textclass != "elsarticle":

    # Insert a Begin/EndFrontmatter layout (with an explanatory note inset)
    # at the given body line, trimming surrounding blank lines.
    def insertFrontmatter(prefix, line):
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
                                      '\\begin_inset Note Note',
                                      '\\begin_layout Plain Layout',
                                      '\\end_inset', '', '',

    # Layouts that belong to the elsarticle frontmatter.
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Wrap the detected frontmatter span in Begin/EndFrontmatter.
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "
    # NOTE(review): truncated listing — the scan loop (counter init, while,
    # -1 guards, continue) is missing; nesting is not reconstructed.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
    # Skip to the end of the inset's parameter lines (first blank line).
    while i < j and document.body[i].strip() != '':
    # New in 2.4: include insets carry an explicit literal parameter.
    document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "
    # NOTE(review): truncated listing — the scan loop (counter init, while,
    # -1 guards, continue) is missing; nesting is not reconstructed.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
    # Drop the 2.4-only literal parameter from the inset.
    del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "
    # NOTE(review): truncated listing — -1 guards, try/except around the float
    # conversions, the i1/i2/i3 combined vs. per-font branches and several
    # else lines are missing; nesting is not reconstructed. Verify upstream.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
    # Header positions of the three ParaType family settings.
    i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
    i2 = find_token(document.header, "\\font_sans \"default\"", 0)
    i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
    j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
    sfval = find_token(document.header, "\\font_sf_scale", 0)
    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
    sfscale = document.header[sfval].split()
    document.header[sfval] = " ".join(sfscale)
    sf_scale = float(val)
    document.warning("Invalid font_sf_scale value: " + val)
    # NOTE(review): comparing a float to the *string* "100.0" is always True,
    # so a scaled= option would be emitted even at 100% — presumably the
    # numeric literal 100.0 was intended; confirm against upstream.
    if sf_scale != "100.0":
    sfoption = "scaled=" + str(sf_scale / 100.0)
    k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
    ttval = get_value(document.header, "\\font_tt_scale", 0)
    ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
    # All three families set: load the combined paratype package...
    if i1 != -1 and i2 != -1 and i3!= -1:
    add_to_preamble(document, ["\\usepackage{paratype}"])
    # ...otherwise load the individual family packages and reset headers.
    add_to_preamble(document, ["\\usepackage{PTSerif}"])
    document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
    add_to_preamble(document, ["\\usepackage{PTSans}"])
    document.header[j] = document.header[j].replace("PTSans-TLF", "default")
    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
    add_to_preamble(document, ["\\usepackage{PTMono}"])
    document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "
    # NOTE(review): truncated listing — the -1 guards, the construction of
    # `options` from the osf setting and a return are missing; nesting is not
    # reconstructed. Verify against upstream lyx2lyx.
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "
    # NOTE(review): truncated listing — the early return, scan-loop scaffolding
    # and the else branch separator are missing; nesting is not reconstructed.
    if not "landscape" in document.get_module_list():
    i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    # Floating variant wraps the environment in \afterpage{...}.
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
        document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{afterpage}"])
    # Non-floating variant (else branch — its header line is missing).
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
    document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    " Convert default fontenc setting "

    i = find_token(document.header, "\\fontencoding global", 0)
    # Fix: guard against a missing/differing \fontencoding header; without it,
    # i == -1 would silently rewrite the last header line (negative indexing).
    if i == -1:
        return

    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "

    i = find_token(document.header, "\\fontencoding auto", 0)
    # Fix: guard against a missing/differing \fontencoding header; without it,
    # i == -1 would silently rewrite the last header line (negative indexing).
    if i == -1:
        return

    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "
    # NOTE(review): truncated listing — the scan loop around this line
    # (counter init, while, -1 check and the deletion of the matched line)
    # is missing. Verify against upstream lyx2lyx.
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "
    # NOTE(review): truncated listing — the scan loop scaffolding and the
    # deletion of the matched placement lines are missing; nesting is not
    # reconstructed. Verify against upstream lyx2lyx.
    # Drop the header-level placement parameter.
    del_token(document.header, "\\float_placement class")
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    # The placement parameter sits within the first two lines of the inset.
    k = find_token(document.body, 'placement class', i, i + 2)
    k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "
    # NOTE(review): truncated listing — the scan loop scaffolding, -1 guards,
    # continue statements and the deletion of the alignment line are missing;
    # nesting is not reconstructed. Verify against upstream lyx2lyx.
    # Global default alignment, removed from the header in the same step.
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    k = find_token(document.body, 'alignment', i, i+4)
    alignment = get_value(document.body, "alignment", k)
    # "document" means: fall back to the global header setting.
    if alignment == "document":
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    # Map the alignment to the equivalent LaTeX command, inserted as ERT.
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "
    # NOTE(review): truncated listing — the scan loop scaffolding, -1 guards,
    # the cmd filter and the construction of `res` (the \cite command string)
    # are missing; nesting is not reconstructed. Verify against upstream.
    tufte = ["tufte-book", "tufte-handout"]
    # Only the tufte classes define the special cite commands reverted here.
    if document.textclass not in tufte:
    i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" %(i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
    cmd = get_value(document.body, "LatexCommand", k)
    # before/after texts become the optional arguments, key the mandatory one.
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" %(i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i:j+1] = put_cmd_in_ert([res])
def convert_aaencoding(document):
    " Convert default document option due to encoding change in aa class. "
    # NOTE(review): truncated listing — the early return, -1 guards and the
    # else separating the two \options branches are missing; nesting is not
    # reconstructed. Verify against upstream lyx2lyx.
    if document.textclass != "aa":
    i = find_token(document.header, "\\use_default_options true", i)
    j = find_token(document.header, "\\inputencoding", 0)
    document.warning("Malformed LyX Document! Missing \\inputencoding header.")
    val = get_value(document.header, "\\inputencoding", j)
    # The aa class switched its default encoding; keep latin9 explicitly.
    if val == "auto" or val == "latin9":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        # No \options header yet: insert one; otherwise append to it.
        document.header.insert(i, "\\options latin9")
        document.header[k] = document.header[k] + ",latin9"
def revert_aaencoding(document):
    " Revert default document option due to encoding change in aa class. "
    # NOTE(review): truncated listing — the early return, -1 guards, the
    # encoding-value condition and the else separating the two \options
    # branches are missing; nesting is not reconstructed. Verify upstream.
    if document.textclass != "aa":
    i = find_token(document.header, "\\use_default_options true", i)
    j = find_token(document.header, "\\inputencoding", 0)
    document.warning("Malformed LyX Document! Missing \\inputencoding header.")
    val = get_value(document.header, "\\inputencoding", j)
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
    # No \options header yet: insert one; otherwise append to it.
    document.header.insert(i, "\\options utf8")
    document.header[k] = document.header[k] + ",utf8"
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): truncated listing — the scan-loop scaffolding (counter
    # init, while, -1 break) is missing; nesting is not reconstructed.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Strip the varwidth attribute from every column line of this table.
    for k in range(i, j):
        if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
            document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
            document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): this listing is truncated — loop scaffolding, the
    # begcell/vcand/needarray/needvarwidth bookkeeping and several guard
    # bodies are missing, and all indentation was lost, so the original
    # control-flow nesting is NOT reconstructed below. Verify upstream.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    # Row/column counts are attributes on the <features ...> line after i.
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
    m = find_token(document.body, "<column", m)
    width = get_option_value(document.body[m], 'width')
    varwidth = get_option_value(document.body[m], 'varwidth')
    alignment = get_option_value(document.body[m], 'alignment')
    special = get_option_value(document.body[m], 'special')
    col_info.append([width, varwidth, alignment, special, m])
    # Walk every cell and decide which columns need a V{} special.
    for row in range(nrows):
    for col in range(ncols):
    m = find_token(document.body, "<cell", m)
    multicolumn = get_option_value(document.body[m], 'multicolumn')
    multirow = get_option_value(document.body[m], 'multirow')
    width = get_option_value(document.body[m], 'width')
    rotate = get_option_value(document.body[m], 'rotate')
    # Check for: linebreaks, multipars, non-standard environments
    endcell = find_token(document.body, "</cell>", begcell)
    if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
    elif count_pars_in_inset(document.body, begcell + 2) > 1:
    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
    if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
    if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
    alignment = col_info[col][2]
    col_line = col_info[col][4]
    # Translate the column alignment into an array-package prefix.
    if alignment == "center":
    vval = ">{\\centering}"
    elif alignment == "left":
    vval = ">{\\raggedright}"
    elif alignment == "right":
    vval = ">{\\raggedleft}"
    vval += "V{\\linewidth}"
    # Rewrite the column line with the computed special= value.
    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
    # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
    # with newlines, and we do not want that)
    endcell = find_token(document.body, "</cell>", begcell)
    nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
    nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
    nle = find_end_of_inset(document.body, nl)
    del(document.body[nle:nle+1])
    document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
    document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    # Load the support packages only if actually needed.
    if needarray == True:
    add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
    add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "
    # NOTE(review): this listing is truncated — -1 guards, the biblatex/bibtex
    # branch structure, the `encodings = {` dict opener, many dict entries and
    # the closing brace are missing, and all indentation was lost, so nesting
    # is NOT reconstructed below. Verify against upstream lyx2lyx.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # biblatex handles encoding via a package option; bibtex needs ERT groups.
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    # (dict literal — its `encodings = {` opener is missing from the listing)
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "cp1251" : "cp1251",
    "cp1252" : "cp1252",
    "cp1255" : "cp1255",
    "cp1256" : "cp1256",
    "cp1257" : "cp1257",
    "koi8-r" : "koi8-r",
    "koi8-u" : "koi8-u",
    "utf8-platex" : "utf8",
    # Scan for bibtex insets carrying an explicit encoding parameter.
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    del document.body[k]
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # biblatex: carry the encoding via the bibencoding biblio option.
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
    document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # bibtex: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT.
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "
    # NOTE(review): truncated listing — the `types = {` dict opener (and
    # possibly further entries, e.g. vcs-date), the scan-loop scaffolding,
    # -1 guards and the type filter are missing; nesting is not reconstructed.
    # Maps old buffer-inset args to the new vcs-inset args.
    "vcs-revision" : "revision",
    "vcs-tree-revision" : "tree-revision",
    "vcs-author" : "author",
    "vcs-time" : "time",
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in list(types.keys()):
    # Rewrite the inset as the dedicated "vcs" Info type.
    document.body[tp] = "type \"vcs\""
    document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "
    # NOTE(review): truncated listing — the scan-loop scaffolding, -1 guards
    # and the type filter are missing; nesting is not reconstructed.
    # Valid vcs-inset arguments that map back to buffer-inset "vcs-<arg>".
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in args:
        document.warning("Malformed Info inset. Invalid vcs arg.")
    # Rewrite the inset as the legacy "buffer" Info type.
    document.body[tp] = "type \"buffer\""
    document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "
    # NOTE(review): truncated listing — the scan-loop scaffolding, -1 guards
    # and the type filter are missing; nesting is not reconstructed.
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # Older formats have no abbreviated-revision arg; downgrade it.
    if( argv == "revision-abbrev" ):
        document.body[arg] = "arg \"revision\""
# Replace "date" Info insets with static text, formatted per the main
# document language's conventions.
1132 def revert_dateinfo(document):
1133 " Revert date info insets to static text. "
1135 # FIXME This currently only considers the main language and uses the system locale
1136 # Ideally, it should honor context languages and switch the locale accordingly.
1138 # The date formats for each language using strftime syntax:
1139 # long, short, loclong, locmedium, locshort
# NOTE(review): the dict's assignment header (e.g. "dateformats = {") and its
# closing brace are absent from this listing.
1141 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1142 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1143 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1144 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1145 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1146 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1147 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1148 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1149 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1150 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1151 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1152 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1153 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1154 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1155 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1156 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1157 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1158 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1159 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1160 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1161 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1162 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1163 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1164 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1165 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1166 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1167 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1168 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1169 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1170 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1171 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1172 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1173 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1174 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1175 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1176 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1177 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1178 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1179 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1180 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1181 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1182 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1183 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1184 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1185 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1186 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1187 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1188 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1189 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1190 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1191 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1192 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1193 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1194 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1195 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1196 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1197 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1198 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1199 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1200 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1201 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1202 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1203 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1204 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1205 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1206 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1207 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1208 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1209 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1210 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1211 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1212 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1213 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1214 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1215 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1216 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1217 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1218 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1219 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1220 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1221 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1222 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1223 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1224 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1225 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1226 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1227 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
# NOTE(review): "%de" in the next two entries renders as %d followed by a
# literal "e" (e.g. "12e") — looks like a typo for "de"; confirm upstream.
1228 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1229 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1230 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1231 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1232 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1233 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1234 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1235 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1236 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1237 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1238 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1239 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1240 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1241 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1242 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1245 types = ["date", "fixdate", "moddate" ]
1246 lang = get_value(document.header, "\\language")
1248 document.warning("Malformed LyX document! No \\language header found!")
# presumably an inset-scanning loop follows (scaffolding omitted from this
# listing); i advances past the previous hit
1253 i = find_token(document.body, "\\begin_inset Info", i+1)
1256 j = find_end_of_inset(document.body, i+1)
1258 document.warning("Malformed LyX document: Could not find end of Info inset.")
1260 tp = find_token(document.body, 'type', i, j)
1261 tpv = get_quoted_value(document.body, "type", tp)
1262 if tpv not in types:
1264 arg = find_token(document.body, 'arg', i, j)
1265 argv = get_quoted_value(document.body, "arg", arg)
# a fixed date is encoded as "<format>@<iso-date>"
1268 if tpv == "fixdate":
1269 datecomps = argv.split('@')
1270 if len(datecomps) > 1:
1272 isodate = datecomps[1]
1273 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1275 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1276 # FIXME if we had the path to the original document (not the one in the tmp dir),
1277 # we could use the mtime.
1278 # elif tpv == "moddate":
1279 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
# NOTE(review): datetime.date has isoformat(), not isodate() — this line
# would raise AttributeError when reached; confirm and fix upstream.
1282 result = dte.isodate()
1283 elif argv == "long":
1284 result = dte.strftime(dateformats[lang][0])
1285 elif argv == "short":
1286 result = dte.strftime(dateformats[lang][1])
1287 elif argv == "loclong":
1288 result = dte.strftime(dateformats[lang][2])
1289 elif argv == "locmedium":
1290 result = dte.strftime(dateformats[lang][3])
1291 elif argv == "locshort":
1292 result = dte.strftime(dateformats[lang][4])
# translate a Qt-style custom format to strftime
# NOTE(review): Qt "MMMM" is the *full* month name, yet it is mapped to
# "%b" (abbreviated) instead of "%B" — confirm intended.
1294 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1295 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1296 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1297 fmt = re.sub('[^\'%]d', '%d', fmt)
1298 fmt = fmt.replace("'", "")
1299 result = dte.strftime(fmt)
# Python 2 strftime returns a byte string; decode for the unicode body
1300 if sys.version_info < (3,0):
1301 # In Python 2, datetime module works with binary strings,
1302 # our dateformat strings are utf8-encoded:
1303 result = result.decode('utf-8')
# replace the whole inset with the rendered date (one body line)
1304 document.body[i : j+1] = [result]
# Replace "time" Info insets with static text, formatted per the main
# document language's conventions.
1307 def revert_timeinfo(document):
1308 " Revert time info insets to static text. "
1310 # FIXME This currently only considers the main language and uses the system locale
1311 # Ideally, it should honor context languages and switch the locale accordingly.
1312 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1315 # The time formats for each language using strftime syntax:
# NOTE(review): the dict's assignment header (e.g. "timeformats = {") and its
# closing brace are absent from this listing; entries are [long, short].
1318 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
1319 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
1320 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1321 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1322 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
1323 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1324 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1325 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
1326 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
1327 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1328 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
1329 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
1330 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1331 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
1332 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
1333 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
1334 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
1335 "breton" : ["%H:%M:%S %Z", "%H:%M"],
1336 "british" : ["%H:%M:%S %Z", "%H:%M"],
1337 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
1338 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1339 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
1340 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
1341 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
1342 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1343 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
1344 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
1345 "czech" : ["%H:%M:%S %Z", "%H:%M"],
1346 "danish" : ["%H.%M.%S %Z", "%H.%M"],
1347 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
1348 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
1349 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1350 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
1351 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
1352 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
1353 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
1354 "french" : ["%H:%M:%S %Z", "%H:%M"],
1355 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
1356 "galician" : ["%H:%M:%S %Z", "%H:%M"],
1357 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
1358 "german" : ["%H:%M:%S %Z", "%H:%M"],
1359 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
1360 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
1361 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1362 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
1363 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1364 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
1365 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
1366 "irish" : ["%H:%M:%S %Z", "%H:%M"],
1367 "italian" : ["%H:%M:%S %Z", "%H:%M"],
1368 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
1369 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
1370 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1371 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
1372 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1373 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1374 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
1375 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1376 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
1377 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1378 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1379 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1380 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1381 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1382 "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1383 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1384 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1385 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1386 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1387 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1388 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1389 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1390 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1391 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1392 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1393 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1394 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1395 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1396 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1397 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1398 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1399 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1400 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1401 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1402 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1403 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1404 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1405 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1406 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1407 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1408 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1409 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1410 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1411 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1412 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1413 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1414 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1415 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1416 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1417 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1418 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1419 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
1422 types = ["time", "fixtime", "modtime" ]
1424 i = find_token(document.header, "\\language", 0)
1426 # this should not happen
1427 document.warning("Malformed LyX document! No \\language header found!")
1429 lang = get_value(document.header, "\\language", i)
# presumably an inset-scanning loop follows (scaffolding omitted from this
# listing)
1433 i = find_token(document.body, "\\begin_inset Info", i+1)
1436 j = find_end_of_inset(document.body, i+1)
1438 document.warning("Malformed LyX document: Could not find end of Info inset.")
1440 tp = find_token(document.body, 'type', i, j)
1441 tpv = get_quoted_value(document.body, "type", tp)
1442 if tpv not in types:
1444 arg = find_token(document.body, 'arg', i, j)
1445 argv = get_quoted_value(document.body, "arg", arg)
1447 dtme = datetime.now()
# a fixed time is encoded as "<format>@<iso-time>"
1449 if tpv == "fixtime":
1450 timecomps = argv.split('@')
1451 if len(timecomps) > 1:
1453 isotime = timecomps[1]
1454 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1456 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
# fall back to HH:MM when no seconds are given
1458 m = re.search('(\d\d):(\d\d)', isotime)
1460 tme = time(int(m.group(1)), int(m.group(2)))
1461 # FIXME if we had the path to the original document (not the one in the tmp dir),
1462 # we could use the mtime.
1463 # elif tpv == "moddate":
1464 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1467 result = tme.isoformat()
1468 elif argv == "long":
1469 result = tme.strftime(timeformats[lang][0])
1470 elif argv == "short":
1471 result = tme.strftime(timeformats[lang][1])
# translate a Qt-style custom time format to strftime
# NOTE(review): after "HH" -> "%H", the subsequent replace("H", "%H") also
# rewrites the "H" inside "%H", producing "%%H" (a literal "%H" in the
# output) — confirm against the intended conversion.
1473 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1474 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1475 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1476 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1477 fmt = fmt.replace("'", "")
# NOTE(review): "dte" is not defined anywhere in this function — the other
# branches format "tme"; this looks like it should be tme.strftime(fmt).
1478 result = dte.strftime(fmt)
# NOTE(review): assigning a str to a list slice splices one character per
# body line; revert_dateinfo wraps the result in a list ([result]) — confirm.
1479 document.body[i : j+1] = result
# Fold the "name-noext" buffer Info argument back into plain "name".
1482 def revert_namenoextinfo(document):
1483 " Merge buffer Info inset type name-noext to name. "
# presumably scanned in a loop over all Info insets (scaffolding omitted
# from this listing)
1487 i = find_token(document.body, "\\begin_inset Info", i+1)
1490 j = find_end_of_inset(document.body, i+1)
1492 document.warning("Malformed LyX document: Could not find end of Info inset.")
1494 tp = find_token(document.body, 'type', i, j)
# NOTE(review): tpv is unused in the visible lines — presumably compared
# against "buffer" in an omitted guard; confirm.
1495 tpv = get_quoted_value(document.body, "type", tp)
1498 arg = find_token(document.body, 'arg', i, j)
1499 argv = get_quoted_value(document.body, "arg", arg)
1500 if argv != "name-noext":
1502 document.body[arg] = "arg \"name\""
# Replace localization ("l7n") Info insets with their plain-text argument,
# stripped of menu/accelerator markup.
1505 def revert_l7ninfo(document):
1506 " Revert l7n Info inset to text. "
# presumably scanned in a loop over all Info insets (scaffolding omitted
# from this listing)
1510 i = find_token(document.body, "\\begin_inset Info", i+1)
1513 j = find_end_of_inset(document.body, i+1)
1515 document.warning("Malformed LyX document: Could not find end of Info inset.")
1517 tp = find_token(document.body, 'type', i, j)
# NOTE(review): tpv is unused in the visible lines — presumably compared
# against "l7n" in an omitted guard; confirm.
1518 tpv = get_quoted_value(document.body, "type", tp)
1521 arg = find_token(document.body, 'arg', i, j)
1522 argv = get_quoted_value(document.body, "arg", arg)
1523 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
1524 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning a str to a list slice splices one character per
# body line; other converters wrap the replacement in a list — confirm.
1525 document.body[i : j+1] = argv
# Turn "listpreamble" Argument insets into an ERT inset ("{...}") placed at
# the start of the containing paragraph.
1528 def revert_listpargs(document):
1529 " Reverts listpreamble arguments to TeX-code "
# presumably scanned in a loop (scaffolding omitted from this listing)
1532 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1535 j = find_end_of_inset(document.body, i)
1536 # Find containing paragraph layout
1537 parent = get_containing_layout(document.body, i)
1539 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# extract the argument's Plain Layout content
1542 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1543 endPlain = find_end_of_layout(document.body, beginPlain)
1544 content = document.body[beginPlain + 1 : endPlain]
1545 del document.body[i:j+1]
1546 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1547 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
# NOTE(review): parbeg is not assigned in the visible lines — presumably the
# paragraph start taken from "parent" (e.g. parent[3]) in an omitted line.
1548 document.body[parbeg : parbeg] = subst
# Replace "lyxinfo"/"layoutformat" Info insets with the hardcoded layout
# format number.
1551 def revert_lformatinfo(document):
1552 " Revert layout format Info inset to text. "
# presumably scanned in a loop over all Info insets (scaffolding omitted
# from this listing)
1556 i = find_token(document.body, "\\begin_inset Info", i+1)
1559 j = find_end_of_inset(document.body, i+1)
1561 document.warning("Malformed LyX document: Could not find end of Info inset.")
1563 tp = find_token(document.body, 'type', i, j)
1564 tpv = get_quoted_value(document.body, "type", tp)
1565 if tpv != "lyxinfo":
1567 arg = find_token(document.body, 'arg', i, j)
1568 argv = get_quoted_value(document.body, "arg", arg)
1569 if argv != "layoutformat":
# "69" is the layout format frozen at this conversion step.
# NOTE(review): assigning the 2-char str splices it as two body lines
# ("6", "9"); confirm whether ["69"] was intended.
1572 document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    The active language is tracked with a stack: every ``\\begin_layout``
    inherits the language currently in effect, ``\\lang`` lines switch it,
    and ``\\end_layout`` restores the previous one.  Only plain text lines
    (not starting with a backslash) inside Hebrew context are rewritten.
    """
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Bug fix: str.lstrip('\\lang ') strips a *character set*
            # (backslash, 'l', 'a', 'n', 'g', space), which mangles
            # language names such as "ngerman" -> "erman".  Slice off the
            # fixed prefix instead.
            current_languages[-1] = line[len('\\lang '):].strip()
        elif line.startswith('\\begin_layout'):
            # a new paragraph starts in the language currently in effect
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # swap '(' and ')' using NUL as a temporary placeholder
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The parenthesis swap is an involution, so reverting simply reapplies
    the forward conversion; this wrapper exists only to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # Delegate to the shared language-revert helper (same lyx name and
    # babel name, no polyglossia name).
    revert_language(document, "malayalam", "", "malayalam")
# Replace the soul module's flex insets with raw LaTeX commands and add the
# required packages to the preamble.
1607 def revert_soul(document):
1608 " Revert soul module flex insets to ERT "
1610 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# NOTE(review): "flex" is used without a visible "for flex in flexes:" —
# the loop header appears to be omitted from this listing; soul is needed
# as soon as any of the insets occurs.
1613 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1615 add_to_preamble(document, ["\\usepackage{soul}"])
# Highlight additionally needs the color package
1617 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1619 add_to_preamble(document, ["\\usepackage{color}"])
# turn each flex inset into the corresponding soul command
1621 revert_flex_inset(document.body, "Spaceletters", "\\so")
1622 revert_flex_inset(document.body, "Strikethrough", "\\st")
1623 revert_flex_inset(document.body, "Underline", "\\ul")
1624 revert_flex_inset(document.body, "Highlight", "\\hl")
1625 revert_flex_inset(document.body, "Capitalize", "\\caps")
# Drop the \tablestyle header parameter (not understood by older formats).
1628 def revert_tablestyle(document):
1629 " Remove tablestyle params "
1632 i = find_token(document.header, "\\tablestyle")
# NOTE(review): deletion is presumably guarded by an omitted "if i != -1:" —
# as listed, a missing token would delete header[-1].
1634 del document.header[i]
# Revert per-bibliography-file encodings (Biblatex only): move each bib file
# into an \addbibresource preamble line (with bibencoding option where one
# was recorded), emit an ERT \printbibliography, and comment out the
# original bibtex inset inside a Note.
1637 def revert_bibfileencodings(document):
1638 " Revert individual Biblatex bibliography encodings "
1642 i = find_token(document.header, "\\cite_engine", 0)
1644 document.warning("Malformed document! Missing \\cite_engine")
1646 engine = get_value(document.header, "\\cite_engine", i)
# only biblatex engines support per-file encodings
1650 if engine in ["biblatex", "biblatex-natbib"]:
1653 # Map lyx to latex encoding names
# NOTE(review): the dict's assignment header and closing brace are absent
# from this listing (see gaps in the original line numbers).
1657 "armscii8" : "armscii8",
1658 "iso8859-1" : "latin1",
1659 "iso8859-2" : "latin2",
1660 "iso8859-3" : "latin3",
1661 "iso8859-4" : "latin4",
1662 "iso8859-5" : "iso88595",
1663 "iso8859-6" : "8859-6",
1664 "iso8859-7" : "iso-8859-7",
1665 "iso8859-8" : "8859-8",
1666 "iso8859-9" : "latin5",
1667 "iso8859-13" : "latin7",
1668 "iso8859-15" : "latin9",
1669 "iso8859-16" : "latin10",
1670 "applemac" : "applemac",
1672 "cp437de" : "cp437de",
1680 "cp1250" : "cp1250",
1681 "cp1251" : "cp1251",
1682 "cp1252" : "cp1252",
1683 "cp1255" : "cp1255",
1684 "cp1256" : "cp1256",
1685 "cp1257" : "cp1257",
1686 "koi8-r" : "koi8-r",
1687 "koi8-u" : "koi8-u",
1689 "utf8-platex" : "utf8",
# presumably scanned in a loop over all bibtex insets (scaffolding omitted
# from this listing)
1696 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1699 j = find_end_of_inset(document.body, i)
1701 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1703 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1707 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1708 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1709 if len(bibfiles) == 0:
1710 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1711 # remove encoding line
1712 k = find_token(document.body, "file_encodings", i, j)
1714 del document.body[k]
1715 # Re-find inset end line
1716 j = find_end_of_inset(document.body, i)
# file_encodings is a tab-separated list of "<bibfile> <encoding>" pairs;
# NOTE(review): the encmap initialization and the loop over enclist appear
# to be omitted from this listing.
1718 enclist = encodings.split("\t")
1721 ppp = pp.split(" ", 1)
1722 encmap[ppp[0]] = ppp[1]
1723 for bib in bibfiles:
1724 pr = "\\addbibresource"
1725 if bib in encmap.keys():
1726 pr += "[bibencoding=" + encmap[bib] + "]"
1727 pr += "{" + bib + "}"
1728 add_to_preamble(document, [pr])
1729 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1730 pcmd = "printbibliography"
1732 pcmd += "[" + opts + "]"
1733 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1734 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1735 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1736 "status open", "", "\\begin_layout Plain Layout" ]
1737 repl += document.body[i:j+1]
1738 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1739 document.body[i:j+1] = repl
# Strip \cmidrule trim attributes (toplinel/rtrim, bottomlinel/rtrim) from
# table cell tags; older formats do not know them.
1745 def revert_cmidruletrimming(document):
1746 " Remove \\cmidrule trimming "
1748 # FIXME: Revert to TeX code?
1751 # first, let's find out if we need to do anything
# presumably scanned in a loop over all <cell ...> lines (scaffolding
# omitted from this listing)
1752 i = find_token(document.body, '<cell ', i+1)
1755 j = document.body[i].find('trim="')
1758 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1759 # remove trim option
1760 document.body[i] = rgx.sub('', document.body[i])
1764 r'### Inserted by lyx2lyx (ruby inset) ###',
1765 r'InsetLayout Flex:Ruby',
1766 r' LyxType charstyle',
1767 r' LatexType command',
1771 r' HTMLInnerTag rb',
1772 r' HTMLInnerAttr ""',
1774 r' LabelString "Ruby"',
1775 r' Decoration Conglomerate',
1777 r' \ifdefined\kanjiskip',
1778 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1779 r' \else \ifdefined\luatexversion',
1780 r' \usepackage{luatexja-ruby}',
1781 r' \else \ifdefined\XeTeXversion',
1782 r' \usepackage{ruby}%',
1784 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1786 r' Argument post:1',
1787 r' LabelString "ruby text"',
1788 r' MenuString "Ruby Text|R"',
1789 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1790 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    # Add the module only when the local layout was actually present
    # (del_local_layout reports whether it removed anything).
    had_local_definition = document.del_local_layout(ruby_inset_def)
    if had_local_definition:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # Re-insert the local layout only when the module was actually present
    # (del_module reports whether it removed anything).
    module_was_used = document.del_module("ruby")
    if module_was_used:
        document.append_local_layout(ruby_inset_def)
# Normalize the Japanese-specific utf8 input encodings to generic utf8.
1813 def convert_utf8_japanese(document):
1814 " Use generic utf8 with Japanese documents."
1815 lang = get_value(document.header, "\\language")
# NOTE(review): this guard has no visible body — an early "return" for
# non-Japanese documents appears to be omitted from this listing.
1816 if not lang.startswith("japanese"):
1818 inputenc = get_value(document.header, "\\inputencoding")
# utf8-platex (pLaTeX) and utf8-cjk (CJK package) both map to plain utf8
1819 if ((lang == "japanese" and inputenc == "utf8-platex")
1820 or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
1821 document.set_parameter("inputencoding", "utf8")
# Restore the Japanese-specific utf8 input encoding variants.
1823 def revert_utf8_japanese(document):
1824 " Use Japanese utf8 variants with Japanese documents."
1825 inputenc = get_value(document.header, "\\inputencoding")
# NOTE(review): this guard has no visible body — an early "return" for
# non-utf8 documents appears to be omitted from this listing.
1826 if inputenc != "utf8":
1828 lang = get_value(document.header, "\\language")
# japanese -> pLaTeX variant, japanese-cjk -> CJK package variant
1829 if lang == "japanese":
1830 document.set_parameter("inputencoding", "utf8-platex")
1831 if lang == "japanese-cjk":
1832 document.set_parameter("inputencoding", "utf8-cjk")
# Replace the native \use_lineno / \lineno_options header settings with an
# explicit \usepackage{lineno} in the user preamble.
1835 def revert_lineno(document):
1836 " Replace lineno setting with user-preamble code."
# NOTE(review): both calls below are truncated mid-argument-list in this
# listing (continuation lines omitted).
1838 options = get_quoted_value(document.header, "\\lineno_options",
# the header token is consumed (delete=True) even when line numbering is off
1840 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1843 options = "[" + options + "]"
1844 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# Detect \usepackage{lineno} + \linenumbers in the user preamble and convert
# it to the native \use_lineno / \lineno_options header settings.
1847 def convert_lineno(document):
1848 " Replace user-preamble code with native lineno support."
1851 i = find_token(document.preamble, "\\linenumbers", 1)
# the \usepackage{lineno} line is expected immediately before \linenumbers
1853 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1856 options = usepkg.group(1).strip("[]")
1857 del(document.preamble[i-1:i+1])
1858 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# insert the new header settings before the \index entries
1860 k = find_token(document.header, "\\index ")
# NOTE(review): use_lineno is not assigned in the visible lines — its
# assignment (and the branch deciding between the two inserts) appears to
# be omitted from this listing.
1862 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1864 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1865 "\\lineno_options %s" % options]
# Emulate the newly-added languages on older formats via the shared
# language-revert helper.
1868 def revert_new_languages(document):
1869 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1870 and Russian (Petrine orthography)."""
1872 # lyxname: (babelname, polyglossianame)
1873 new_languages = {"azerbaijani": ("azerbaijani", ""),
1874 "bengali": ("", "bengali"),
1875 "churchslavonic": ("", "churchslavonic"),
1876 "oldrussian": ("", "russian"),
1877 "korean": ("", "korean"),
1879 used_languages = set()
1880 if document.language in new_languages:
1881 used_languages.add(document.language)
# presumably a loop scanning \lang switches in the body (scaffolding
# omitted from this listing)
1884 i = find_token(document.body, "\\lang", i+1)
1887 if document.body[i][6:].strip() in new_languages:
# NOTE(review): this adds document.language although the match at the
# previous line was on the language found in the body — possibly intended
# to add document.body[i][6:].strip(); confirm.
1888 used_languages.add(document.language)
1890 # Korean is already supported via CJK, so leave as-is for Babel
1891 if ("korean" in used_languages
1892 and get_bool_value(document.header, "\\use_non_tex_fonts")
1893 and get_value(document.header, "\\language_package") in ("default", "auto")):
1894 revert_language(document, "korean", "", "korean")
1895 used_languages.discard("korean")
1897 for lang in used_languages:
# NOTE(review): "revert" is not among the visible imports/definitions —
# presumably revert_language(document, lang, *new_languages[lang]); confirm.
1898 revert(lang, *new_languages[lang])
1902 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1903 r'InsetLayout Flex:Glosse',
1905 r' LabelString "Gloss (old version)"',
1906 r' MenuString "Gloss (old version)"',
1907 r' LatexType environment',
1908 r' LatexName linggloss',
1909 r' Decoration minimalistic',
1914 r' CustomPars false',
1915 r' ForcePlain true',
1916 r' ParbreakIsNewline true',
1917 r' FreeSpacing true',
1918 r' Requires covington',
1921 r' \@ifundefined{linggloss}{%',
1922 r' \newenvironment{linggloss}[2][]{',
1923 r' \def\glosstr{\glt #1}%',
1925 r' {\glosstr\glend}}{}',
1928 r' ResetsFont true',
1930 r' Decoration conglomerate',
1931 r' LabelString "Translation"',
1932 r' MenuString "Glosse Translation|s"',
1933 r' Tooltip "Add a translation for the glosse"',
# Local-layout definition for the deprecated "Tri-Glosse" linguistics inset;
# convert_linggloss() appends it to documents that still contain old
# "Flex Tri-Glosse" insets.
# NOTE(review): several list entries and the closing bracket are absent from
# this listing (see gaps in the original line numbers).
1938 glosss_inset_def = [
1939 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1940 r'InsetLayout Flex:Tri-Glosse',
1942 r' LabelString "Tri-Gloss (old version)"',
1943 r' MenuString "Tri-Gloss (old version)"',
1944 r' LatexType environment',
1945 r' LatexName lingglosss',
1946 r' Decoration minimalistic',
1951 r' CustomPars false',
1952 r' ForcePlain true',
1953 r' ParbreakIsNewline true',
1954 r' FreeSpacing true',
1956 r' Requires covington',
1959 r' \@ifundefined{lingglosss}{%',
1960 r' \newenvironment{lingglosss}[2][]{',
1961 r' \def\glosstr{\glt #1}%',
1963 r' {\glosstr\glend}}{}',
1965 r' ResetsFont true',
1967 r' Decoration conglomerate',
1968 r' LabelString "Translation"',
1969 r' MenuString "Glosse Translation|s"',
1970 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # For each deprecated gloss inset type that still occurs in the body,
    # append the matching local layout definition.
    inset_layout_pairs = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for inset_start, layout_def in inset_layout_pairs:
        if find_token(document.body, inset_start, 0) != -1:
            document.append_local_layout(layout_def)
1982 def revert_linggloss(document):
1983 " Revert to old ling gloss definitions "
# Reverts the new Interlinear Gloss insets (linguistics module) to ERT
# calling \gloss / \trigloss from the covington package.
# NOTE(review): this extract elides lines (the embedded original line numbers
# jump), so guard statements such as `return`/`continue` after the warnings
# and loop initializers are presumably missing — verify against the full file.
1984 if not "linguistics" in document.get_module_list():
# Remove the lyx2lyx-inserted local layouts again before reverting.
1986 document.del_local_layout(gloss_inset_def)
1987 document.del_local_layout(glosss_inset_def)
1990 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1991 for glosse in glosses:
1994 i = find_token(document.body, glosse, i+1)
1997 j = find_end_of_inset(document.body, i)
1999 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): collect its plain-layout content.
2002 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2003 endarg = find_end_of_inset(document.body, arg)
2006 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2007 if argbeginPlain == -1:
2008 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2010 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2011 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2013 # remove Arg insets and paragraph, if it only contains this inset
2014 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2015 del document.body[arg - 1 : endarg + 4]
2017 del document.body[arg : endarg + 1]
# Mandatory argument post:1 (first gloss line).
2019 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2020 endarg = find_end_of_inset(document.body, arg)
2023 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2024 if argbeginPlain == -1:
2025 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2027 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2028 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2030 # remove Arg insets and paragraph, if it only contains this inset
2031 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2032 del document.body[arg - 1 : endarg + 4]
2034 del document.body[arg : endarg + 1]
# Mandatory argument post:2 (second gloss line).
2036 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2037 endarg = find_end_of_inset(document.body, arg)
2040 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2041 if argbeginPlain == -1:
2042 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2044 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2045 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2047 # remove Arg insets and paragraph, if it only contains this inset
2048 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2049 del document.body[arg - 1 : endarg + 4]
2051 del document.body[arg : endarg + 1]
# Mandatory argument post:3 (third gloss line; only present for tri-glosses).
2053 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2054 endarg = find_end_of_inset(document.body, arg)
2057 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2058 if argbeginPlain == -1:
2059 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2061 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2062 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2064 # remove Arg insets and paragraph, if it only contains this inset
2065 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2066 del document.body[arg - 1 : endarg + 4]
2068 del document.body[arg : endarg + 1]
# Choose \trigloss for the three-line inset (cmd assignment elided here),
# then rebuild the inset content as ERT around the collected arguments.
2071 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2074 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2075 endInset = find_end_of_inset(document.body, i)
2076 endPlain = find_end_of_layout(document.body, beginPlain)
2077 precontent = put_cmd_in_ert(cmd)
2078 if len(optargcontent) > 0:
2079 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2080 precontent += put_cmd_in_ert("{")
2082 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2083 if cmd == "\\trigloss":
2084 postcontent += put_cmd_in_ert("}{") + marg3content
2085 postcontent += put_cmd_in_ert("}")
# Splice back to front so earlier indices stay valid.
2087 document.body[endPlain:endInset + 1] = postcontent
2088 document.body[beginPlain + 1:beginPlain] = precontent
2089 del document.body[i : beginPlain + 1]
2091 document.append_local_layout("Requires covington")
2096 def revert_subexarg(document):
2097 " Revert linguistic subexamples with argument to ERT "
# Turns consecutive Subexample layouts (with an optional argument) into a
# plain \begin{subexamples}[...] ... \end{subexamples} environment in ERT.
# NOTE(review): lines are elided in this extract (embedded numbering jumps);
# loop setup and early-exit guards are presumably missing — verify upstream.
2099 if not "linguistics" in document.get_module_list():
2105 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2108 j = find_end_of_layout(document.body, i)
2110 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2113 # check for consecutive layouts
2114 k = find_token(document.body, "\\begin_layout", j)
2115 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2117 j = find_end_of_layout(document.body, k)
2119 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Optional argument (Argument 1): its content becomes the environment option.
2122 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2126 endarg = find_end_of_inset(document.body, arg)
2128 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2129 if argbeginPlain == -1:
2130 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2132 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2133 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2135 # remove Arg insets and paragraph, if it only contains this inset
2136 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2137 del document.body[arg - 1 : endarg + 4]
2139 del document.body[arg : endarg + 1]
2141 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2143 # re-find end of layout
2144 j = find_end_of_layout(document.body, i)
2146 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2149 # check for consecutive layouts
2150 k = find_token(document.body, "\\begin_layout", j)
2151 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each subsequent Subexample becomes a Standard layout with an ERT \item.
2153 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2154 j = find_end_of_layout(document.body, k)
2156 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2159 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the last layout, then open it before the first.
2161 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2162 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2163 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2165 document.append_local_layout("Requires covington")
2169 def revert_drs(document):
2170 " Revert DRS insets (linguistics) to ERT "
# Reverts the Discourse Representation Structure insets of the linguistics
# module to ERT calling the corresponding commands from the drs package.
# NOTE(review): this extract elides lines (embedded numbering jumps): loop
# setup, `if j == -1:` guards, `continue`/`return` statements and the cmd
# assignments are presumably missing — verify against the full file.
2172 if not "linguistics" in document.get_module_list():
2176 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2177 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2178 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2179 "\\begin_inset Flex SDRS"]
2183 i = find_token(document.body, drs, i+1)
2186 j = find_end_of_inset(document.body, i)
2188 document.warning("Malformed LyX document: Can't find end of DRS inset")
2191 # Check for arguments
2192 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2193 endarg = find_end_of_inset(document.body, arg)
2196 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2197 if argbeginPlain == -1:
2198 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2200 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2201 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2203 # remove Arg insets and paragraph, if it only contains this inset
2204 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2205 del document.body[arg - 1 : endarg + 4]
2207 del document.body[arg : endarg + 1]
# End-of-inset index shifts after each deletion, so re-find it before each
# further argument extraction.
2210 j = find_end_of_inset(document.body, i)
2212 document.warning("Malformed LyX document: Can't find end of DRS inset")
2215 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2216 endarg = find_end_of_inset(document.body, arg)
2219 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2220 if argbeginPlain == -1:
2221 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2223 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2224 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2226 # remove Arg insets and paragraph, if it only contains this inset
2227 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2228 del document.body[arg - 1 : endarg + 4]
2230 del document.body[arg : endarg + 1]
2233 j = find_end_of_inset(document.body, i)
2235 document.warning("Malformed LyX document: Can't find end of DRS inset")
2238 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2239 endarg = find_end_of_inset(document.body, arg)
2240 postarg1content = []
2242 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2243 if argbeginPlain == -1:
2244 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2246 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2247 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2249 # remove Arg insets and paragraph, if it only contains this inset
2250 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2251 del document.body[arg - 1 : endarg + 4]
2253 del document.body[arg : endarg + 1]
2256 j = find_end_of_inset(document.body, i)
2258 document.warning("Malformed LyX document: Can't find end of DRS inset")
2261 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2262 endarg = find_end_of_inset(document.body, arg)
2263 postarg2content = []
2265 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2266 if argbeginPlain == -1:
2267 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2269 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2270 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2272 # remove Arg insets and paragraph, if it only contains this inset
2273 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2274 del document.body[arg - 1 : endarg + 4]
2276 del document.body[arg : endarg + 1]
2279 j = find_end_of_inset(document.body, i)
2281 document.warning("Malformed LyX document: Can't find end of DRS inset")
2284 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2285 endarg = find_end_of_inset(document.body, arg)
2286 postarg3content = []
2288 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2289 if argbeginPlain == -1:
2290 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2292 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2293 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2295 # remove Arg insets and paragraph, if it only contains this inset
2296 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2297 del document.body[arg - 1 : endarg + 4]
2299 del document.body[arg : endarg + 1]
2302 j = find_end_of_inset(document.body, i)
2304 document.warning("Malformed LyX document: Can't find end of DRS inset")
2307 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2308 endarg = find_end_of_inset(document.body, arg)
2309 postarg4content = []
2311 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2312 if argbeginPlain == -1:
2313 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2315 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2316 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2318 # remove Arg insets and paragraph, if it only contains this inset
2319 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2320 del document.body[arg - 1 : endarg + 4]
2322 del document.body[arg : endarg + 1]
2324 # The respective LaTeX command
# NOTE(review): the per-variant cmd assignments are elided between branches.
2326 if drs == "\\begin_inset Flex DRS*":
2328 elif drs == "\\begin_inset Flex IfThen-DRS":
2330 elif drs == "\\begin_inset Flex Cond-DRS":
2332 elif drs == "\\begin_inset Flex QDRS":
2334 elif drs == "\\begin_inset Flex NegDRS":
2336 elif drs == "\\begin_inset Flex SDRS":
2339 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2340 endInset = find_end_of_inset(document.body, i)
2341 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2342 precontent = put_cmd_in_ert(cmd)
2343 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2344 if drs == "\\begin_inset Flex SDRS":
2345 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2346 precontent += put_cmd_in_ert("{")
# The post-arguments only apply to some commands.
2349 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2350 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2351 if cmd == "\\condrs" or cmd == "\\qdrs":
2352 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2354 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2356 postcontent = put_cmd_in_ert("}")
# Splice back to front so earlier indices stay valid.
2358 document.body[endPlain:endInset + 1] = postcontent
2359 document.body[beginPlain + 1:beginPlain] = precontent
2360 del document.body[i : beginPlain + 1]
2362 document.append_local_layout("Provides covington 1")
2363 add_to_preamble(document, ["\\usepackage{drs,covington}"])
2369 def revert_babelfont(document):
2370 " Reverts the use of \\babelfont to user preamble "
# Only applies with non-TeX fonts and the babel language package; resets the
# font_* header settings to "default" and emits \babelfont preamble lines.
# NOTE(review): this extract elides lines (embedded numbering jumps):
# `if i == -1:` guards, `return` statements, try/except around float() and
# several pretext.append calls are presumably missing — verify upstream.
2372 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2374 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2376 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2378 i = find_token(document.header, '\\language_package', 0)
2380 document.warning("Malformed LyX document: Missing \\language_package.")
2382 if get_value(document.header, "\\language_package", 0) != "babel":
2385 # check font settings
2387 roman = sans = typew = "default"
2389 sf_scale = tt_scale = 100.0
2391 j = find_token(document.header, "\\font_roman", 0)
2393 document.warning("Malformed LyX document: Missing \\font_roman.")
2395 # We need to use this regex since split() does not handle quote protection
2396 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2397 roman = romanfont[2].strip('"')
2398 romanfont[2] = '"default"'
2399 document.header[j] = " ".join(romanfont)
2401 j = find_token(document.header, "\\font_sans", 0)
2403 document.warning("Malformed LyX document: Missing \\font_sans.")
2405 # We need to use this regex since split() does not handle quote protection
2406 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2407 sans = sansfont[2].strip('"')
2408 sansfont[2] = '"default"'
2409 document.header[j] = " ".join(sansfont)
2411 j = find_token(document.header, "\\font_typewriter", 0)
2413 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2415 # We need to use this regex since split() does not handle quote protection
2416 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2417 typew = ttfont[2].strip('"')
2418 ttfont[2] = '"default"'
2419 document.header[j] = " ".join(ttfont)
2421 i = find_token(document.header, "\\font_osf", 0)
2423 document.warning("Malformed LyX document: Missing \\font_osf.")
2425 osf = str2bool(get_value(document.header, "\\font_osf", i))
2427 j = find_token(document.header, "\\font_sf_scale", 0)
2429 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2431 sfscale = document.header[j].split()
2434 document.header[j] = " ".join(sfscale)
2437 sf_scale = float(val)
2439 document.warning("Invalid font_sf_scale value: " + val)
2441 j = find_token(document.header, "\\font_tt_scale", 0)
2443 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2445 ttscale = document.header[j].split()
2448 document.header[j] = " ".join(ttscale)
2451 tt_scale = float(val)
2453 document.warning("Invalid font_tt_scale value: " + val)
2455 # set preamble stuff
2456 pretext = ['%% This document must be processed with xelatex or lualatex!']
2457 pretext.append('\\AtBeginDocument{%')
2458 if roman != "default":
2459 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2460 if sans != "default":
2461 sf = '\\babelfont{sf}['
2462 if sf_scale != 100.0:
2463 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2464 sf += 'Mapping=tex-text]{' + sans + '}'
2466 if typew != "default":
2467 tw = '\\babelfont{tt}'
2468 if tt_scale != 100.0:
2469 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2470 tw += '{' + typew + '}'
2473 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2475 insert_to_preamble(document, pretext)
2478 def revert_minionpro(document):
2479 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies with TeX fonts; moves \font_roman "minionpro" plus any
# \font_roman_opts and old-style-figures setting into a \usepackage[...]{MinionPro}
# preamble line.
# NOTE(review): lines are elided here (embedded numbering jumps): guards,
# `return` statements and the option-string assembly between 2515 and 2519
# are presumably missing — verify against the full file.
2481 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2483 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2485 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2488 regexp = re.compile(r'(\\font_roman_opts)')
2489 x = find_re(document.header, regexp, 0)
2493 # We need to use this regex since split() does not handle quote protection
2494 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2495 opts = romanopts[1].strip('"')
2497 i = find_token(document.header, "\\font_roman", 0)
2499 document.warning("Malformed LyX document: Missing \\font_roman.")
2502 # We need to use this regex since split() does not handle quote protection
2503 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2504 roman = romanfont[1].strip('"')
2505 if roman != "minionpro":
2507 romanfont[1] = '"default"'
2508 document.header[i] = " ".join(romanfont)
2510 j = find_token(document.header, "\\font_osf true", 0)
2513 preamble = "\\usepackage["
2515 document.header[j] = "\\font_osf false"
2519 preamble += "]{MinionPro}"
2520 add_to_preamble(document, [preamble])
# Finally drop the now-reverted \font_roman_opts header line.
2521 del document.header[x]
2524 def revert_font_opts(document):
2525 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# For each of roman/sans/typewriter: if a \font_*_opts header line exists,
# remove it and emit the equivalent fontspec (\set...font) or babel
# (\babelfont) preamble command, resetting the header font to "default".
# NOTE(review): this extract elides lines (embedded numbering jumps):
# `if i == -1:` guards, `return`/else branches and option/scale handling
# between the visible lines are presumably missing — verify upstream.
2527 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2529 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2531 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2532 i = find_token(document.header, '\\language_package', 0)
2534 document.warning("Malformed LyX document: Missing \\language_package.")
2536 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
2539 regexp = re.compile(r'(\\font_roman_opts)')
2540 i = find_re(document.header, regexp, 0)
2542 # We need to use this regex since split() does not handle quote protection
2543 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2544 opts = romanopts[1].strip('"')
2545 del document.header[i]
2547 regexp = re.compile(r'(\\font_roman)')
2548 i = find_re(document.header, regexp, 0)
2550 # We need to use this regex since split() does not handle quote protection
2551 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2552 font = romanfont[2].strip('"')
2553 romanfont[2] = '"default"'
2554 document.header[i] = " ".join(romanfont)
2555 if font != "default":
# Babel documents use \babelfont, otherwise plain fontspec \setmainfont.
2557 preamble = "\\babelfont{rm}["
2559 preamble = "\\setmainfont["
2562 preamble += "Mapping=tex-text]{"
2565 add_to_preamble(document, [preamble])
# Sans serif.
2568 regexp = re.compile(r'(\\font_sans_opts)')
2569 i = find_re(document.header, regexp, 0)
2572 # We need to use this regex since split() does not handle quote protection
2573 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2574 opts = sfopts[1].strip('"')
2575 del document.header[i]
2577 regexp = re.compile(r'(\\font_sf_scale)')
2578 i = find_re(document.header, regexp, 0)
2580 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2581 regexp = re.compile(r'(\\font_sans)')
2582 i = find_re(document.header, regexp, 0)
2584 # We need to use this regex since split() does not handle quote protection
2585 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2586 font = sffont[2].strip('"')
2587 sffont[2] = '"default"'
2588 document.header[i] = " ".join(sffont)
2589 if font != "default":
2591 preamble = "\\babelfont{sf}["
2593 preamble = "\\setsansfont["
2597 preamble += "Scale=0."
2598 preamble += scaleval
2600 preamble += "Mapping=tex-text]{"
2603 add_to_preamble(document, [preamble])
# Typewriter.
2606 regexp = re.compile(r'(\\font_typewriter_opts)')
2607 i = find_re(document.header, regexp, 0)
2610 # We need to use this regex since split() does not handle quote protection
2611 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2612 opts = ttopts[1].strip('"')
2613 del document.header[i]
2615 regexp = re.compile(r'(\\font_tt_scale)')
2616 i = find_re(document.header, regexp, 0)
2618 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2619 regexp = re.compile(r'(\\font_typewriter)')
2620 i = find_re(document.header, regexp, 0)
2622 # We need to use this regex since split() does not handle quote protection
2623 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2624 font = ttfont[2].strip('"')
2625 ttfont[2] = '"default"'
2626 document.header[i] = " ".join(ttfont)
2627 if font != "default":
2629 preamble = "\\babelfont{tt}["
2631 preamble = "\\setmonofont["
2635 preamble += "Scale=0."
2636 preamble += scaleval
2638 preamble += "Mapping=tex-text]{"
2641 add_to_preamble(document, [preamble])
2644 def revert_plainNotoFonts_xopts(document):
2645 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# Only when roman/sans/typewriter together form the plain "noto" package
# combination: reverts to \usepackage[<opts>]{noto} in the preamble.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# guards, `return` statements, the osf flag handling between 2651 and 2674,
# and opts assembly are presumably missing — verify against the full file.
2647 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2649 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2651 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2655 y = find_token(document.header, "\\font_osf true", 0)
2659 regexp = re.compile(r'(\\font_roman_opts)')
2660 x = find_re(document.header, regexp, 0)
2661 if x == -1 and not osf:
2666 # We need to use this regex since split() does not handle quote protection
2667 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2668 opts = romanopts[1].strip('"')
2674 i = find_token(document.header, "\\font_roman", 0)
2678 # We need to use this regex since split() does not handle quote protection
2679 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2680 roman = romanfont[1].strip('"')
2681 if roman != "NotoSerif-TLF":
2684 j = find_token(document.header, "\\font_sans", 0)
2688 # We need to use this regex since split() does not handle quote protection
2689 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2690 sf = sffont[1].strip('"')
2694 j = find_token(document.header, "\\font_typewriter", 0)
2698 # We need to use this regex since split() does not handle quote protection
2699 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2700 tt = ttfont[1].strip('"')
2704 # So we have noto as "complete font"
2705 romanfont[1] = '"default"'
2706 document.header[i] = " ".join(romanfont)
2708 preamble = "\\usepackage["
2710 preamble += "]{noto}"
2711 add_to_preamble(document, [preamble])
2713 document.header[y] = "\\font_osf false"
2715 del document.header[x]
2718 def revert_notoFonts_xopts(document):
2719 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# Delegates to the generic font-mapping machinery for the Noto families.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# the `if i == -1:` guard, `return` statements and the `fontmap = dict()`
# initializer are presumably missing — verify against the full file.
2721 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2723 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2725 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2729 fm = createFontMapping(['Noto'])
2730 if revert_fonts(document, fm, fontmap, True):
2731 add_preamble_fonts(document, fontmap)
2734 def revert_IBMFonts_xopts(document):
2735 " Revert native IBM font definition (with extra options) to LaTeX "
# Delegates to the generic font-mapping machinery for the IBM Plex families.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# the `if i == -1:` guard, `return` statements and the `fontmap = dict()`
# initializer are presumably missing — verify against the full file.
2737 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2739 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2741 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2745 fm = createFontMapping(['IBM'])
2747 if revert_fonts(document, fm, fontmap, True):
2748 add_preamble_fonts(document, fontmap)
2751 def revert_AdobeFonts_xopts(document):
2752 " Revert native Adobe font definition (with extra options) to LaTeX "
# Delegates to the generic font-mapping machinery for the Adobe Source families.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# the `if i == -1:` guard, `return` statements and the `fontmap = dict()`
# initializer are presumably missing — verify against the full file.
2754 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2756 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2758 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2762 fm = createFontMapping(['Adobe'])
2764 if revert_fonts(document, fm, fontmap, True):
2765 add_preamble_fonts(document, fontmap)
2768 def convert_osf(document):
2769 " Convert \\font_osf param to new format "
# Splits the single \font_osf header switch into the per-family switches
# \font_roman_osf, \font_sans_osf and \font_typewriter_osf.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# guards, `return` statements and the branching between the NonTeXFonts and
# TeX-fonts code paths are presumably missing — verify against the full file.
2772 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2774 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2776 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2778 i = find_token(document.header, '\\font_osf', 0)
2780 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font names that are themselves old-style-figure variants.
2783 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2784 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2786 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2787 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2790 document.header.insert(i, "\\font_sans_osf false")
2791 document.header.insert(i + 1, "\\font_typewriter_osf false")
2795 x = find_token(document.header, "\\font_sans", 0)
2797 document.warning("Malformed LyX document: Missing \\font_sans.")
2799 # We need to use this regex since split() does not handle quote protection
2800 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2801 sf = sffont[1].strip('"')
2803 document.header.insert(i, "\\font_sans_osf true")
2805 document.header.insert(i, "\\font_sans_osf false")
2807 x = find_token(document.header, "\\font_typewriter", 0)
2809 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2811 # We need to use this regex since split() does not handle quote protection
2812 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2813 tt = ttfont[1].strip('"')
2815 document.header.insert(i + 1, "\\font_typewriter_osf true")
2817 document.header.insert(i + 1, "\\font_typewriter_osf false")
2820 document.header.insert(i, "\\font_sans_osf false")
2821 document.header.insert(i + 1, "\\font_typewriter_osf false")
2824 def revert_osf(document):
2825 " Revert \\font_*_osf params "
# Collapses the per-family \font_roman_osf / \font_sans_osf /
# \font_typewriter_osf switches back into the single old \font_osf switch.
# NOTE(review): lines are elided in this extract (embedded numbering jumps):
# guards, `return` statements and conditional accumulation of osfval are
# presumably missing — verify against the full file.
2828 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2830 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2832 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2834 i = find_token(document.header, '\\font_roman_osf', 0)
2836 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2839 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2840 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2842 i = find_token(document.header, '\\font_sans_osf', 0)
2844 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2847 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2848 del document.header[i]
2850 i = find_token(document.header, '\\font_typewriter_osf', 0)
2852 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Any family with OSF set turns the combined flag on.
2855 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2856 del document.header[i]
2859 i = find_token(document.header, '\\font_osf', 0)
2861 document.warning("Malformed LyX document: Missing \\font_osf.")
2863 document.header[i] = "\\font_osf true"
2866 def revert_texfontopts(document):
2867 " Revert native TeX font definitions (with extra options) to LaTeX "
# Only applies with TeX fonts. Handles the sans (biolinum) and a fixed list
# of roman fonts: moves header font settings plus their *_opts lines into
# \usepackage[...]{<package>} preamble lines, resetting fonts to "default".
# NOTE(review): this extract elides lines (embedded numbering jumps):
# guards, `return`/else branches, try/except around float(), package and osf
# assignments for several fonts are presumably missing — verify upstream.
2869 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2871 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2873 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman TeX fonts covered by this revert routine.
2876 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2878 # First the sf (biolinum only)
2879 regexp = re.compile(r'(\\font_sans_opts)')
2880 x = find_re(document.header, regexp, 0)
2882 # We need to use this regex since split() does not handle quote protection
2883 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2884 opts = sfopts[1].strip('"')
2885 i = find_token(document.header, "\\font_sans", 0)
2887 document.warning("Malformed LyX document: Missing \\font_sans.")
2889 # We need to use this regex since split() does not handle quote protection
2890 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2891 sans = sffont[1].strip('"')
2892 if sans == "biolinum":
2894 sffont[1] = '"default"'
2895 document.header[i] = " ".join(sffont)
2897 j = find_token(document.header, "\\font_sans_osf true", 0)
2900 k = find_token(document.header, "\\font_sf_scale", 0)
2902 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2904 sfscale = document.header[k].split()
2907 document.header[k] = " ".join(sfscale)
2910 sf_scale = float(val)
2912 document.warning("Invalid font_sf_scale value: " + val)
2913 preamble = "\\usepackage["
2915 document.header[j] = "\\font_sans_osf false"
2917 if sf_scale != 100.0:
2918 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2920 preamble += "]{biolinum}"
2921 add_to_preamble(document, [preamble])
2922 del document.header[x]
# Now the roman fonts.
2924 regexp = re.compile(r'(\\font_roman_opts)')
2925 x = find_re(document.header, regexp, 0)
2929 # We need to use this regex since split() does not handle quote protection
2930 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2931 opts = romanopts[1].strip('"')
2933 i = find_token(document.header, "\\font_roman", 0)
2935 document.warning("Malformed LyX document: Missing \\font_roman.")
2938 # We need to use this regex since split() does not handle quote protection
2939 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2940 roman = romanfont[1].strip('"')
2941 if not roman in rmfonts:
2943 romanfont[1] = '"default"'
2944 document.header[i] = " ".join(romanfont)
# Some LyX font names map to a differently named LaTeX package.
2946 if roman == "utopia":
2948 elif roman == "palatino":
2949 package = "mathpazo"
2950 elif roman == "times":
2951 package = "mathptmx"
2952 elif roman == "xcharter":
2953 package = "XCharter"
2955 j = find_token(document.header, "\\font_roman_osf true", 0)
# Old-style-figures option spelling differs per package.
2957 if roman == "cochineal":
2958 osf = "proportional,osf,"
2959 elif roman == "utopia":
2961 elif roman == "garamondx":
2963 elif roman == "libertine":
2965 elif roman == "palatino":
2967 elif roman == "xcharter":
2969 document.header[j] = "\\font_roman_osf false"
2970 k = find_token(document.header, "\\font_sc true", 0)
2972 if roman == "utopia":
2974 if roman == "palatino" and osf == "":
2976 document.header[k] = "\\font_sc false"
2977 preamble = "\\usepackage["
2980 preamble += "]{" + package + "}"
2981 add_to_preamble(document, [preamble])
2982 del document.header[x]
def convert_CantarellFont(document):
    """ Handle Cantarell font definition to LaTeX """
    # Nothing to do unless the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Cantarell'])
    convert_fonts(document, mapping, "oldstyle")
2992 def revert_CantarellFont(document):
2993 " Revert native Cantarell font definition to LaTeX "
# Delegates to the generic font-mapping machinery for Cantarell.
# NOTE(review): a line is elided here (embedded numbering jumps 2995→2997);
# the `fontmap = dict()` initializer is presumably missing — verify upstream.
2995 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2997 fm = createFontMapping(['Cantarell'])
2998 if revert_fonts(document, fm, fontmap, False, True):
2999 add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """ Handle Chivo font definition to LaTeX """
    # Nothing to do unless the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Chivo'])
    convert_fonts(document, mapping, "oldstyle")
def revert_ChivoFont(document):
    " Revert native Chivo font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collected package/option pairs; filled in by revert_fonts() and
        # flushed to the user preamble afterwards.
        fontmap = dict()
        fm = createFontMapping(['Chivo'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    " Handle Fira font definition to LaTeX "

    # Only relevant when the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Fira'])
    convert_fonts(document, mapping, "lf")
def revert_FiraFont(document):
    " Revert native Fira font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collected package/option pairs; filled in by revert_fonts() and
        # flushed to the user preamble afterwards.
        fontmap = dict()
        fm = createFontMapping(['Fira'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """ Move semibold options to extraopts

    The dedicated *Semibold font pseudo-names are replaced by the plain
    family name plus a "semibold" entry in \\font_*_opts (TeX fonts only).
    """

    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            if not NonTeXFonts:
                regexp = re.compile(r'(\\font_roman_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_roman_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if not NonTeXFonts:
                regexp = re.compile(r'(\\font_sans_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_sans_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if not NonTeXFonts:
                regexp = re.compile(r'(\\font_typewriter_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    # Bug fix: previously appended the *sans* opts (sfopts) here,
                    # clobbering existing typewriter options.
                    document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Skip documents typeset with non-TeX (system) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collected package/option pairs; filled in by revert_fonts() and
        # flushed to the user preamble afterwards.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # Bug fix: was textclass[:2] != "scr", which can never match the
    # 3-character prefix, so KOMA classes were always skipped.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry, the paper size is handled elsewhere.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Pass the size as a class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry, the paper size is handled elsewhere.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # Bug fix: was document.header[1] (a fixed header line) instead of
        # the found \use_geometry line at index i.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    # Pass the size as a KOMA fontsize= class option instead.
    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
def revert_dupqualicites(document):
    " Revert qualified citation list commands with duplicate keys to ERT "

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Only biblatex engines know qualified citation lists.
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # not a qualified list, nothing to do
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            key = "???"

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicate keys, nothing to do
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # Map each key to its (possibly multiple, tab-joined) pre/post notes.
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ppp[1] if len(ppp) > 1 else ""
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ppp[1] if len(ppp) > 1 else ""
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        # Parens in the global notes must be brace-protected.
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # a single paren argument would be read as the post note
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # pre note without post note needs an empty bracket
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    " Convert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        # drop the "paper" suffix (e.g. "a4paper" -> "a4")
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    " Revert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        # restore the "paper" suffix (e.g. "a4" -> "a4paper")
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Track whether the preamble actually needs the package.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    encommand = "\\pagenote"
    modules = document.get_module_list()
    # If an endnote module is also loaded, \endnote is the command to use.
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
def revert_totalheight(document):
    " Reverts graphics height parameter from totalheight to height "

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract an eventual "height=" entry from the special parameter.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:7] == "height=":
                    oldheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    # Merge the height value into the existing special line.
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    document.body.insert(kk, "\tspecial totalheight=" + val)
                # The dedicated height line is replaced or dropped.
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight

        i = j + 1
def convert_totalheight(document):
    " Converts graphics height parameter from totalheight to height "

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract an eventual "totalheight=" entry from the special parameter.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    # Merge the height value into the existing special line.
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val)
                # The dedicated height line is replaced or dropped.
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            document.body.insert(k, "\theight " + newheight)

        i = j + 1
# LyX versions whose file format this conversion chain targets.
supported_versions = ["2.4.0", "2.4"]
3645 [545, [convert_lst_literalparam]],
3650 [550, [convert_fontenc]],
3652 [552, [convert_aaencoding]],
3657 [557, [convert_vcsinfo]],
3658 [558, [removeFrontMatterStyles]],
3661 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3665 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3666 [566, [convert_hebrew_parentheses]],
3672 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3673 [573, [convert_inputencoding_namechange]],
3674 [574, [convert_ruby_module, convert_utf8_japanese]],
3675 [575, [convert_lineno]],
3677 [577, [convert_linggloss]],
3681 [581, [convert_osf]],
3682 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3683 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3685 [585, [convert_pagesizes]],
3687 [587, [convert_pagesizenames]],
3689 [589, [convert_totalheight]]
# Reversion chain: entry [N, [functions]] reverts file format N+1 back to N.
# Bug fix: the list literal was missing its closing bracket.
revert = [[588, [revert_totalheight]],
          [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
          [586, [revert_pagesizenames]],
          [585, [revert_dupqualicites]],
          [584, [revert_pagesizes,revert_komafontsizes]],
          [583, [revert_vcsinfo_rev_abbrev]],
          [582, [revert_ChivoFont,revert_CrimsonProFont]],
          [581, [revert_CantarellFont,revert_FiraFont]],
          [580, [revert_texfontopts,revert_osf]],
          [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
          [578, [revert_babelfont]],
          [577, [revert_drs]],
          [576, [revert_linggloss, revert_subexarg]],
          [575, [revert_new_languages]],
          [574, [revert_lineno]],
          [573, [revert_ruby_module, revert_utf8_japanese]],
          [572, [revert_inputencoding_namechange]],
          [571, [revert_notoFonts]],
          [570, [revert_cmidruletrimming]],
          [569, [revert_bibfileencodings]],
          [568, [revert_tablestyle]],
          [567, [revert_soul]],
          [566, [revert_malayalam]],
          [565, [revert_hebrew_parentheses]],
          [564, [revert_AdobeFonts]],
          [563, [revert_lformatinfo]],
          [562, [revert_listpargs]],
          [561, [revert_l7ninfo]],
          [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
          [559, [revert_timeinfo, revert_namenoextinfo]],
          [558, [revert_dateinfo]],
          [557, [addFrontMatterStyles]],
          [556, [revert_vcsinfo]],
          [555, [revert_bibencoding]],
          [554, [revert_vcolumns]],
          [553, [revert_stretchcolumn]],
          [552, [revert_tuftecite]],
          [551, [revert_floatpclass, revert_floatalignment, revert_aaencoding]],
          [550, [revert_nospellcheck]],
          [549, [revert_fontenc]],
          [548, []],# dummy format change
          [547, [revert_lscape]],
          [546, [revert_xcharter]],
          [545, [revert_paratype]],
          [544, [revert_lst_literalparam]]
         ]
3740 if __name__ == "__main__":