1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # fontmap: package name -> list of package options, as collected by
    # revert_fonts(). One \usepackage line is emitted per package.
    # NOTE: the mangled paste lost the loop header and the empty-options
    # branch; restored here so every collected package is emitted.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Build the 'pkgname:opt1-opt2-...' key used by the font maps."
    # NOTE(review): upstream may normalise the option order before joining
    # (a sort on a line not visible in this chunk) — verify against it.
    joined_opts = "-".join(options)
    return "%s:%s" % (pkg, joined_opts)
        # NOTE(review): attribute initialisers of the fontinfo record class;
        # the enclosing "class"/"def __init__(self):" lines are not visible
        # in this chunk. Some attributes referenced below (package, options)
        # are initialised on lines missing here — verify against upstream.
        self.fontname = None # key into font2pkgmap
        self.fonttype = None # roman,sans,typewriter,math
        self.scaletype = None # None,sf,tt
        self.scaleopt = None # None, 'scaled', 'scale'
        self.pkgkey = None # key into pkg2fontmap
        self.osfopt = None # None, string
        self.osfdef = "false" # "false" or "true"

        # NOTE(review): this statement belongs to a separate method whose
        # "def" line is missing from this chunk; it derives the reverse-map
        # key from the package name and its options via createkey().
        self.pkgkey = createkey(self.package, self.options)
        # NOTE(review): initialiser body of the fontmapping class; the
        # "class"/"def __init__" lines are not visible in this chunk.
        # Forward map: font name -> fontinfo entry.
        self.font2pkgmap = dict()
        # Reverse map: package key (createkey) -> font name.
        self.pkg2fontmap = dict()
        self.pkginmap = dict() # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
        " Expand fontinfo mapping"
        #
        # fontlist: list of fontnames, each element
        #     may contain a ','-separated list of needed options
        #     like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #     to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
        #
        # NOTE(review): the per-font loop header ("for ... in font_list:"),
        # the fontinfo() construction, the name/options split and the
        # pkg==None/else branch are missing from this chunk; the lines below
        # are the loop body — confirm structure against upstream.
        fe.fonttype = font_type
        fe.scaletype = scale_type
        fe.fontname = font_name
        fe.scaleopt = scaleopt
        # With no explicit package, the font name doubles as package name.
        fe.package = font_name
        self.font2pkgmap[font_name] = fe
        if fe.pkgkey in self.pkg2fontmap:
            # Repeated the same entry? Check content
            if self.pkg2fontmap[fe.pkgkey] != font_name:
                document.error("Something is wrong in pkgname+options <-> fontname mapping")
        self.pkg2fontmap[fe.pkgkey] = font_name
        self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        " Look up the LyX font name registered for pkg with these options."
        # NOTE(review): the early-return lines ("return None" / "return
        # fontname") are missing from this chunk — verify against upstream.
        pkgkey = createkey(pkg, options)
        if not pkgkey in self.pkg2fontmap:
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            # Reverse map points at a font with no forward entry: the two
            # maps are out of sync.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        # Only accept the match when the forward entry round-trips to the
        # same package key.
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    #   convert_latexFonts() and
    #   revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    #
    # NOTE(review): this chunk is missing the fontmapping() construction,
    # the "if font == 'DejaVu':" / "elif font == 'IBM':" /
    # "elif font == 'Noto':" / "elif font == 'Fira':" branch headers and
    # the final "return fm" — confirm against upstream before relying on
    # the branch structure shown below.
    for font in fontlist:
            # DejaVu family: roman unscaled, sans/mono accept "scaled=".
            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
            # IBM Plex family: weight variants map to package options.
            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                                  'IBMPlexSerifSemibold,semibold'],
                                 "roman", None, "plex-serif")
            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                                 "sans", "sf", "plex-sans", "scale")
            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                  'IBMPlexMonoSemibold,semibold'],
                                 "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            # Adobe Source family: "osf" is the old-style-figures option.
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
            # Noto family (branch header missing in this chunk).
            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
                                  'NotoSerifExtralight,extralight'],
                                 "roman", None, "noto-serif", None, "osf")
            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                                  'NotoSansThin,thin', 'NotoSansLight,light',
                                  'NotoSansExtralight,extralight'],
                                 "sans", "sf", "noto-sans", "scaled")
            fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
        elif font == 'Chivo':
            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
                                  'Chivo,regular', 'ChivoMedium,medium'],
                                 "sans", "sf", "Chivo", "scale", "oldstyle")
        elif font == 'CrimsonPro':
            # osfdef="true": old-style figures are this package's default.
            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
                                  'CrimsonProMedium,medium'],
                                 "roman", None, "CrimsonPro", None, "lf", "true")
            # Fira family (branch header missing in this chunk).
            fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
                                  'FiraSansThin,thin', 'FiraSansLight,light',
                                  'FiraSansExtralight,extralight',
                                  'FiraSansUltralight,ultralight'],
                                 "sans", "sf", "FiraSans", "scaled", "lf", "true")
            fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
def convert_fonts(document, fm, osfoption = "osf"):
    " Handle font definition (LaTeX preamble -> native) "
    # NOTE(review): this chunk is missing many interior lines (loop
    # initialisers, "if i == -1" guards, else-branches, early continues).
    # The indentation below reflects the intended structure; confirm the
    # control flow against upstream before editing.

    # Matches \usepackage[opts]{pkg} at the start of a preamble line.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    # Matches a "scale=..."/"scaled=..." package option.
    rscaleopt = re.compile(r'^scaled?=(.*)')

    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580

    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        if mo == None or mo.group(2) == None:
            # (else-branch body; the "else:" line is missing here)
            options = mo.group(2).replace(' ', '').split(",")
        # Scan options for the osf flag and a scale factor.
        while o < len(options):
            if options[o] == osfoption:
            mo = rscaleopt.search(options[o])

        # Skip packages this font map does not know about.
        if not pkg in fm.pkginmap:
        if haveFontOpts:
            # Try with name-option combination first
            # (only one default option supported currently)
            while o < len(options):
                fn = fm.getfontname(pkg, [opt])
            fn = fm.getfontname(pkg, [])
            # (pre-580 formats: match on the full option list instead)
            fn = fm.getfontname(pkg, options)
        # Known font: drop the \usepackage line and set native header tags.
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
            fontscale = "\\font_" + fontinfo.scaletype + "_scale"
            fontinfo.scaleval = oscale
        # Toggle the osf flag when the document differs from the package
        # default (osfdef).
        if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
            if fontinfo.osfopt == None:
                options.extend(osfoption)
            osf = find_token(document.header, "\\font_osf false")
            osftag = "\\font_osf"
            if osf == -1 and fontinfo.fonttype != "math":
                # Try with newer format
                osftag = "\\font_" + fontinfo.fonttype + "_osf"
                osf = find_token(document.header, osftag + " false")
            document.header[osf] = osftag + " true"
        # Remove the marker comment lyx2lyx left above the package line.
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # Scale is stored as an integer percentage, zero-padded.
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        # Point the \font_<type> header at the converted font name.
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
        # Newer formats carry remaining package options in \font_<type>_opts.
        if haveFontOpts and fontinfo.fonttype != "math":
            fotag = "\\font_" + fontinfo.fonttype + "_opts"
            fo = find_token(document.header, fotag)
            document.header[fo] = fotag + " \"" + ",".join(options) + "\""
            # Sensible place to insert tag
            fo = find_token(document.header, "\\font_sf_scale")
            document.warning("Malformed LyX document! Missing \\font_sf_scale")
            document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    " Revert native font definition to LaTeX "
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    # NOTE(review): interior lines (loop initialiser, "if i == -1" guard,
    # early continues/returns, fontmap[val] = [] initialiser) are missing
    # from this chunk — confirm structure against upstream.

    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    # Matches the two integers of a "\font_*_scale <cur> <old>" value.
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        # Optionally harvest the \font_<type>_opts header into the option
        # list for the reverted \usepackage line.
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:

                # We need to use this regex since split() does not handle quote protection
                xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                opts = xopts[1].strip('"').split(",")
                fontmap[val].extend(opts)
                del document.header[x]
        # Reset the header font to "default"; the package line re-creates it.
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
                    # set correct scale option
                    fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        if fontinfo.osfopt != None:
            # oldval is the header value that indicates a non-default osf
            # setting (its assignment lines are missing from this chunk).
            if fontinfo.osfdef == "true":
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
                fontmap[val].extend([fontinfo.osfopt])
        # Font-inherent options (e.g. weight variants) always go along.
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard restored: without it a missing \inputencoding line (i == -1)
    # would silently rewrite the *last* header line instead.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard restored: without it a missing \inputencoding line (i == -1)
    # would silently rewrite the *last* header line instead.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only act on documents compiled with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    noto_map = createFontMapping(['Noto'])
    convert_fonts(document, noto_map)
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collects package -> options while reverting; consumed by
        # add_preamble_fonts(). Restored: the mangled paste dropped this
        # initialiser, which would make the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Only act on documents compiled with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collects package -> options while reverting; consumed by
        # add_preamble_fonts(). Restored: the mangled paste dropped this
        # initialiser, which would make the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Only act on documents compiled with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    adobe_map = createFontMapping(['Adobe'])
    convert_fonts(document, adobe_map)
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collects package -> options while reverting; consumed by
        # add_preamble_fonts(). Restored: the mangled paste dropped this
        # initialiser, which would make the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"
    # NOTE(review): the scan-loop header ("while True:"), the "if i == -1"
    # exit and the early "continue"s are missing from this chunk.

    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Swallow trailing blank lines so no empty gap is left behind.
        while document.body[j+1].strip() == '':
        document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"
    # NOTE(review): several interior lines (loop/guard lines, parts of the
    # inserted inset body, the 'first' bookkeeping) are missing from this
    # chunk — confirm against upstream.

    if document.textclass != "elsarticle":

    # Insert a Begin/EndFrontmatter layout around 'line', eating the
    # surrounding blank lines.
    def insertFrontmatter(prefix, line):
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
                                      '\\begin_inset Note Note',
                                      '\\begin_layout Plain Layout',
                                      '\\end_inset', '', '',

    # Layouts that belong to the elsarticle frontmatter block.
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        k = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Wrap the whole frontmatter span (end first so 'first' stays valid).
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "
    # NOTE(review): loop header and "if == -1" guards missing in this chunk.

        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
        # Skip to the end of the parameter block (first blank line) and
        # insert the new parameter there.
        while i < j and document.body[i].strip() != '':
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "
    # NOTE(review): loop header and "if == -1" guards missing in this chunk.

        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
        # Drop the 'literal' parameter line within the inset bounds.
        del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "
    # NOTE(review): several interior lines (initialisers, else-branches,
    # try/except around float(), "if ... != -1" guards) are missing from
    # this chunk — confirm structure against upstream.

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)

        sfval = find_token(document.header, "\\font_sf_scale", 0)
            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
            # Reset the header scale to 100 and keep the old value for the
            # package option.
            sfscale = document.header[sfval].split()
            document.header[sfval] = " ".join(sfscale)
                sf_scale = float(val)
                document.warning("Invalid font_sf_scale value: " + val)
        # NOTE(review): sf_scale is a float here but is compared with the
        # *string* "100.0", so this condition is always true — looks like
        # an upstream bug; verify before changing.
        if sf_scale != "100.0":
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
            ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # All three families active: the combined 'paratype' package covers
        # them; otherwise load the individual packages with their options.
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
                add_to_preamble(document, ["\\usepackage{PTSerif}"])
                document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
                    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
                    add_to_preamble(document, ["\\usepackage{PTSans}"])
                document.header[j] = document.header[j].replace("PTSans-TLF", "default")
                    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
                    add_to_preamble(document, ["\\usepackage{PTMono}"])
                document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "
    # NOTE(review): the "if i == -1: return", the osf else-branch and the
    # options initialiser are missing from this chunk.

    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)

    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):

    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
        document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "
    # NOTE(review): loop header, "== -1" guards and the else-branch line
    # are missing from this chunk.

    if not "landscape" in document.get_module_list():

        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Landscape inset")

        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # Floating variant wraps the environment in \afterpage{...}.
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
            # (else-branch: plain landscape environment)
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")

        add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    " Convert default fontenc setting "

    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard restored: without it a missing header line (i == -1) would
    # rewrite the last header line instead of doing nothing.
    if i == -1:
        return

    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "

    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard restored: without it a missing header line (i == -1) would
    # rewrite the last header line instead of doing nothing.
    if i == -1:
        return

    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "
    # NOTE(review): the loop header, the "i == -1" exit and the deletion of
    # the found line are missing from this chunk.

        i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "
    # NOTE(review): loop header, guards and the "del document.body[k]"
    # statements are missing from this chunk.

    # Drop the header-level token first.
    del_token(document.header, "\\float_placement class")

        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
        # Placement parameter sits within the first two lines of the inset.
        k = find_token(document.body, 'placement class', i, i + 2)
            k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "
    # NOTE(review): loop header, "== -1" guards/continues and the deletion
    # of the alignment line are missing from this chunk.

    # Global default alignment; removed from the header as it is read.
    galignment = get_value(document.header, "\\float_alignment", delete=True)

        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
        # Per-float alignment parameter, if any, in the first inset lines.
        k = find_token(document.body, 'alignment', i, i+4)
        alignment = get_value(document.body, "alignment", k)
        if alignment == "document":
            alignment = galignment
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
            document.warning("Can't find float layout!")
        # Translate the alignment into the equivalent LaTeX command in ERT.
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    r" Revert \cite commands in tufte classes "
    # NOTE(review): loop header, guards/continues and parts of the ERT
    # string assembly are missing from this chunk.

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:

        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of citation inset at line %d!!" %(i))
        k = find_token(document.body, "LatexCommand", i, j)
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
        cmd = get_value(document.body, "LatexCommand", k)
        # Extract the optional pre/post notes and the citation key.
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
            document.warning("Citation inset at line %d does not have a key!" %(i))
        # Replace command with ERT
            res += "[" + pre + "]"
            res += "[" + post + "]"
        res += "{" + key + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): loop header, initialiser and "== -1" exit are missing
    # from this chunk.
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of tabular.")
        # Strip the varwidth flag from every <column ...> tag in the table.
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): this chunk is missing many interior lines (the
    # try/finally frame, loop headers, initialisers, guards and several
    # assignments). Indentation below reflects the intended nesting;
    # confirm against upstream before editing.

            i = find_token(document.body, "\\begin_inset Tabular", i+1)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Could not find end of tabular.")

            # Collect necessary column information
            # Row/column counts come from the <features ...> tag attributes.
            nrows = int(document.body[i+1].split('"')[3])
            ncols = int(document.body[i+1].split('"')[5])
            for k in range(ncols):
                m = find_token(document.body, "<column", m)
                width = get_option_value(document.body[m], 'width')
                varwidth = get_option_value(document.body[m], 'varwidth')
                alignment = get_option_value(document.body[m], 'alignment')
                special = get_option_value(document.body[m], 'special')
                col_info.append([width, varwidth, alignment, special, m])

            # Walk every cell of the table.
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multicolumn = get_option_value(document.body[m], 'multicolumn')
                    multirow = get_option_value(document.body[m], 'multirow')
                    width = get_option_value(document.body[m], 'width')
                    rotate = get_option_value(document.body[m], 'rotate')
                    # Check for: linebreaks, multipars, non-standard environments
                    endcell = find_token(document.body, "</cell>", begcell)
                    if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
                    elif count_pars_in_inset(document.body, begcell + 2) > 1:
                    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                    # Candidate cells get a V{\linewidth} (varwidth) column.
                    if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                        if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                            alignment = col_info[col][2]
                            col_line = col_info[col][4]
                            if alignment == "center":
                                vval = ">{\\centering}"
                            elif alignment == "left":
                                vval = ">{\\raggedright}"
                            elif alignment == "right":
                                vval = ">{\\raggedleft}"
                            vval += "V{\\linewidth}"

                            document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                            # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                            # with newlines, and we do not want that)
                            endcell = find_token(document.body, "</cell>", begcell)
                                nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                                    nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                                nle = find_end_of_inset(document.body, nl)
                                del(document.body[nle:nle+1])
                                    document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                                    document.body[nl:nl+1] = put_cmd_in_ert("\\\\")

    # Emit the preamble requirements discovered above.
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "
    # NOTE(review): this chunk is missing interior lines (initialisers,
    # loop headers, guards, the biblatex flag, and many entries of the
    # encodings dict) — confirm against upstream.

    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
        engine = get_value(document.header, "\\cite_engine", i)

    # biblatex engines get the encoding as a package option rather than ERT.
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
    # (dict opener and many entries are missing from this chunk)
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "utf8-platex" : "utf8",

        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encoding = get_quoted_value(document.body, "encoding", i, j)
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        if encoding == "default":
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
            # biblatex branch: record the encoding in \biblio_options.
            h = find_token(document.header, "\\biblio_options", 0)
                biblio_options = get_value(document.header, "\\biblio_options", h)
                if not "bibencoding" in biblio_options:
                    document.header[h] += ",bibencoding=%s" % encodings[encoding]
                bs = find_token(document.header, "\\biblatex_bibstyle", 0)
                    # this should not happen
                    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
                    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
            # non-biblatex branch: wrap the inset in an \inputencoding group.
            document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
            document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "
    # NOTE(review): the dict opener/closer, loop header, guards and
    # continues are missing from this chunk.

    # buffer-inset argument -> vcs-inset argument
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        "vcs-time" : "time",

        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in list(types.keys()):
        # Rewrite the inset as a vcs Info inset with the mapped argument.
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "
    # NOTE(review): loop header, guards and continues are missing from
    # this chunk.

    args = ["revision", "tree-revision", "author", "time", "date" ]
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
        # Fold the vcs inset back into a buffer inset with a vcs- prefix.
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "
    # NOTE(review): loop header, guards and continues are missing from
    # this chunk.

        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Older formats have no abbreviated variant; fall back to the full
        # revision argument.
        if( argv == "revision-abbrev" ):
            document.body[arg] = "arg \"revision\""
1082 def revert_dateinfo(document):
1083 " Revert date info insets to static text. "
# NOTE(review): this excerpt elides a number of control-flow lines (the
# enclosing loop, -1 guards, and the dict opener); retained lines are
# kept byte-identical below.
1085 # FIXME This currently only considers the main language and uses the system locale
1086 # Ideally, it should honor context languages and switch the locale accordingly.
1088 # The date formats for each language using strftime syntax:
1089 # long, short, loclong, locmedium, locshort
1091 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1092 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1093 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1094 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1095 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1096 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1097 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1098 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1099 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1100 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1101 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1102 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1103 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1104 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1105 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1106 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1107 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1108 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1109 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1110 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1111 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1112 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1113 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1114 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1115 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1116 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1117 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1118 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1119 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1120 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1121 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1122 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1123 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1124 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1125 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1126 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1127 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1128 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1129 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1130 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1131 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1132 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1133 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1134 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1135 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1136 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1137 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1138 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1139 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1140 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1141 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1142 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1143 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1144 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1145 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1146 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1147 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1148 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1149 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1150 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1151 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1152 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1153 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1154 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1155 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1156 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1157 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1158 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1159 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1160 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1161 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1162 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1163 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1164 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1165 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1166 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1167 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1168 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1169 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1170 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1171 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1172 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1173 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1174 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1175 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1176 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1177 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1178 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1179 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1180 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1181 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1182 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1183 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1184 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1185 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1186 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1187 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1188 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1189 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1190 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1191 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1192 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
# Info inset types that resolve to a date.
1195 types = ["date", "fixdate", "moddate" ]
1196 lang = get_value(document.header, "\\language")
1198 document.warning("Malformed LyX document! No \\language header found!")
# Iterate over all Info insets in the body (loop header elided in this excerpt).
1203 i = find_token(document.body, "\\begin_inset Info", i+1)
1206 j = find_end_of_inset(document.body, i+1)
1208 document.warning("Malformed LyX document: Could not find end of Info inset.")
1210 tp = find_token(document.body, 'type', i, j)
1211 tpv = get_quoted_value(document.body, "type", tp)
1212 if tpv not in types:
1214 arg = find_token(document.body, 'arg', i, j)
1215 argv = get_quoted_value(document.body, "arg", arg)
# "fixdate" carries "format@YYYY-MM-DD"; parse out the fixed date if present.
1218 if tpv == "fixdate":
1219 datecomps = argv.split('@')
1220 if len(datecomps) > 1:
1222 isodate = datecomps[1]
1223 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1225 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1226 # FIXME if we had the path to the original document (not the one in the tmp dir),
1227 # we could use the mtime.
1228 # elif tpv == "moddate":
1229 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
# NOTE(review): datetime.date has no isodate() method — this line would raise
# AttributeError at runtime; isoformat() is presumably intended. Confirm.
1232 result = dte.isodate()
1233 elif argv == "long":
1234 result = dte.strftime(dateformats[lang][0])
1235 elif argv == "short":
1236 result = dte.strftime(dateformats[lang][1])
1237 elif argv == "loclong":
1238 result = dte.strftime(dateformats[lang][2])
1239 elif argv == "locmedium":
1240 result = dte.strftime(dateformats[lang][3])
1241 elif argv == "locshort":
1242 result = dte.strftime(dateformats[lang][4])
# Otherwise argv is a Qt-style date format string; translate it to strftime.
1244 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1245 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1246 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1247 fmt = re.sub('[^\'%]d', '%d', fmt)
1248 fmt = fmt.replace("'", "")
1249 result = dte.strftime(fmt)
1250 if sys.version_info < (3,0):
1251 # In Python 2, datetime module works with binary strings,
1252 # our dateformat strings are utf8-encoded:
1253 result = result.decode('utf-8')
# Replace the whole inset with the formatted static date text.
1254 document.body[i : j+1] = [result]
1257 def revert_timeinfo(document):
1258 " Revert time info insets to static text. "
# NOTE(review): this excerpt elides a number of control-flow lines (dict opener,
# loop headers, -1 guards); retained lines are kept byte-identical below.
1260 # FIXME This currently only considers the main language and uses the system locale
1261 # Ideally, it should honor context languages and switch the locale accordingly.
1262 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1265 # The time formats for each language using strftime syntax:
1268 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
1269 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
1270 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1271 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1272 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
1273 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1274 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1275 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
1276 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
1277 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1278 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
1279 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
1280 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1281 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
1282 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
1283 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
1284 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
1285 "breton" : ["%H:%M:%S %Z", "%H:%M"],
1286 "british" : ["%H:%M:%S %Z", "%H:%M"],
1287 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
1288 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1289 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
1290 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
1291 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
1292 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1293 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
1294 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
1295 "czech" : ["%H:%M:%S %Z", "%H:%M"],
1296 "danish" : ["%H.%M.%S %Z", "%H.%M"],
1297 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
1298 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
1299 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1300 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
1301 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
1302 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
1303 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
1304 "french" : ["%H:%M:%S %Z", "%H:%M"],
1305 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
1306 "galician" : ["%H:%M:%S %Z", "%H:%M"],
1307 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
1308 "german" : ["%H:%M:%S %Z", "%H:%M"],
1309 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
1310 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
1311 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1312 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
1313 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1314 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
1315 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
1316 "irish" : ["%H:%M:%S %Z", "%H:%M"],
1317 "italian" : ["%H:%M:%S %Z", "%H:%M"],
1318 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
1319 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
1320 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1321 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
1322 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1323 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1324 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
1325 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1326 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
1327 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1328 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1329 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1330 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1331 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1332 "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1333 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1334 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1335 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1336 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1337 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1338 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1339 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1340 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1341 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1342 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1343 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1344 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1345 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1346 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1347 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1348 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1349 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1350 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1351 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1352 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1353 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1354 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1355 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1356 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1357 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1358 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1359 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1360 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1361 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1362 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1363 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1364 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1365 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1366 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1367 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1368 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1369 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
# Info inset types that resolve to a time.
1372 types = ["time", "fixtime", "modtime" ]
1374 i = find_token(document.header, "\\language", 0)
1376 # this should not happen
1377 document.warning("Malformed LyX document! No \\language header found!")
1379 lang = get_value(document.header, "\\language", i)
# Iterate over all Info insets in the body (loop header elided in this excerpt).
1383 i = find_token(document.body, "\\begin_inset Info", i+1)
1386 j = find_end_of_inset(document.body, i+1)
1388 document.warning("Malformed LyX document: Could not find end of Info inset.")
1390 tp = find_token(document.body, 'type', i, j)
1391 tpv = get_quoted_value(document.body, "type", tp)
1392 if tpv not in types:
1394 arg = find_token(document.body, 'arg', i, j)
1395 argv = get_quoted_value(document.body, "arg", arg)
# NOTE(review): 'tme' (used below) is presumably derived from dtme in an
# elided line, e.g. tme = dtme.time() — confirm against the full file.
1397 dtme = datetime.now()
# "fixtime" carries "format@HH:MM[:SS]"; parse out the fixed time if present.
1399 if tpv == "fixtime":
1400 timecomps = argv.split('@')
1401 if len(timecomps) > 1:
1403 isotime = timecomps[1]
1404 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1406 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1408 m = re.search('(\d\d):(\d\d)', isotime)
1410 tme = time(int(m.group(1)), int(m.group(2)))
1411 # FIXME if we had the path to the original document (not the one in the tmp dir),
1412 # we could use the mtime.
1413 # elif tpv == "moddate":
1414 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1417 result = tme.isoformat()
1418 elif argv == "long":
1419 result = tme.strftime(timeformats[lang][0])
1420 elif argv == "short":
1421 result = tme.strftime(timeformats[lang][1])
# Otherwise argv is a Qt-style time format string; translate it to strftime.
1423 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1424 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1425 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1426 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1427 fmt = fmt.replace("'", "")
# NOTE(review): 'dte' is not defined anywhere in this function ('tme' is
# presumably intended here) — confirm.
1428 result = dte.strftime(fmt)
# NOTE(review): assigning a str to a list slice splits it into single
# characters; revert_dateinfo uses [result] for the same operation — confirm.
1429 document.body[i : j+1] = result
1432 def revert_namenoextinfo(document):
1433 " Merge buffer Info inset type name-noext to name. "
# Iterate over all Info insets (loop header and -1 guards elided in this excerpt).
1437 i = find_token(document.body, "\\begin_inset Info", i+1)
1440 j = find_end_of_inset(document.body, i+1)
1442 document.warning("Malformed LyX document: Could not find end of Info inset.")
1444 tp = find_token(document.body, 'type', i, j)
1445 tpv = get_quoted_value(document.body, "type", tp)
1448 arg = find_token(document.body, 'arg', i, j)
1449 argv = get_quoted_value(document.body, "arg", arg)
1450 if argv != "name-noext":
# Rewrite the argument line: "name-noext" is merged into plain "name".
1452 document.body[arg] = "arg \"name\""
1455 def revert_l7ninfo(document):
1456 " Revert l7n Info inset to text. "
# Iterate over all Info insets (loop header and -1 guards elided in this excerpt).
1460 i = find_token(document.body, "\\begin_inset Info", i+1)
1463 j = find_end_of_inset(document.body, i+1)
1465 document.warning("Malformed LyX document: Could not find end of Info inset.")
1467 tp = find_token(document.body, 'type', i, j)
1468 tpv = get_quoted_value(document.body, "type", tp)
1471 arg = find_token(document.body, 'arg', i, j)
1472 argv = get_quoted_value(document.body, "arg", arg)
1473 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
1474 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning a str to a list slice splits it into single
# characters (one body line per character) — [argv] may be intended; confirm.
1475 document.body[i : j+1] = argv
1478 def revert_listpargs(document):
1479 " Reverts listpreamble arguments to TeX-code "
# Iterate over all listpreamble Argument insets (loop header and guards
# elided in this excerpt).
1482 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1485 j = find_end_of_inset(document.body, i)
1486 # Find containing paragraph layout
1487 parent = get_containing_layout(document.body, i)
1489 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# NOTE(review): 'parbeg' (used below) is presumably extracted from 'parent'
# in an elided line — confirm against the full file.
1492 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1493 endPlain = find_end_of_layout(document.body, beginPlain)
1494 content = document.body[beginPlain + 1 : endPlain]
1495 del document.body[i:j+1]
# Re-insert the argument contents as a braced ERT group at paragraph start.
1496 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1497 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1498 document.body[parbeg : parbeg] = subst
1501 def revert_lformatinfo(document):
1502 " Revert layout format Info inset to text. "
# Iterate over all Info insets (loop header and -1 guards elided in this excerpt).
1506 i = find_token(document.body, "\\begin_inset Info", i+1)
1509 j = find_end_of_inset(document.body, i+1)
1511 document.warning("Malformed LyX document: Could not find end of Info inset.")
1513 tp = find_token(document.body, 'type', i, j)
1514 tpv = get_quoted_value(document.body, "type", tp)
1515 if tpv != "lyxinfo":
1517 arg = find_token(document.body, 'arg', i, j)
1518 argv = get_quoted_value(document.body, "arg", arg)
1519 if argv != "layoutformat":
# Replace the inset with the literal layout format number.
# NOTE(review): assigning the 2-char str "69" to a list slice inserts two
# one-character lines; ["69"] may be intended — confirm.
1522 document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    Walks the body, tracking the current language per layout
    nesting level, and swaps the parentheses on every non-command
    line whose current language is Hebrew.
    """
    # Stack of languages, one entry per open layout; the tail is the
    # language in effect for the current line.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: the original used line.lstrip('\\lang '), but lstrip()
            # strips a *set* of characters {\, l, a, n, g, space}, mangling
            # names such as "ngerman" -> "erman" or "newzealand" ->
            # "ewzealand".  Slice off the literal prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # Nested layouts inherit the language of the enclosing one.
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' using NUL as a temporary placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    Swapping parentheses is its own inverse, so this simply delegates
    to the convert routine; the wrapper exists only to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # Delegate to the generic language-revert helper: no babel name,
    # polyglossia name "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
1557 def revert_soul(document):
1558 " Revert soul module flex insets to ERT "
1560 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# Load the soul package (and color for Highlight) if any such inset occurs
# (loop header and -1 guards elided in this excerpt).
1563 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1565 add_to_preamble(document, ["\\usepackage{soul}"])
1567 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1569 add_to_preamble(document, ["\\usepackage{color}"])
# Turn each flex inset into the corresponding soul command as ERT.
1571 revert_flex_inset(document.body, "Spaceletters", "\\so")
1572 revert_flex_inset(document.body, "Strikethrough", "\\st")
1573 revert_flex_inset(document.body, "Underline", "\\ul")
1574 revert_flex_inset(document.body, "Highlight", "\\hl")
1575 revert_flex_inset(document.body, "Capitalize", "\\caps")
1578 def revert_tablestyle(document):
1579 " Remove tablestyle params "
# Drop the \tablestyle header line if present (the -1 guard between these
# two lines is elided in this excerpt).
1582 i = find_token(document.header, "\\tablestyle")
1584 del document.header[i]
1587 def revert_bibfileencodings(document):
1588 " Revert individual Biblatex bibliography encodings "
# NOTE(review): this excerpt elides several control-flow lines (guards,
# loop headers, the encoding-dict opener); retained lines are unchanged.
1592 i = find_token(document.header, "\\cite_engine", 0)
1594 document.warning("Malformed document! Missing \\cite_engine")
1596 engine = get_value(document.header, "\\cite_engine", i)
# Only biblatex engines support per-file encodings.
1600 if engine in ["biblatex", "biblatex-natbib"]:
1603 # Map lyx to latex encoding names
1607 "armscii8" : "armscii8",
1608 "iso8859-1" : "latin1",
1609 "iso8859-2" : "latin2",
1610 "iso8859-3" : "latin3",
1611 "iso8859-4" : "latin4",
1612 "iso8859-5" : "iso88595",
1613 "iso8859-6" : "8859-6",
1614 "iso8859-7" : "iso-8859-7",
1615 "iso8859-8" : "8859-8",
1616 "iso8859-9" : "latin5",
1617 "iso8859-13" : "latin7",
1618 "iso8859-15" : "latin9",
1619 "iso8859-16" : "latin10",
1620 "applemac" : "applemac",
1622 "cp437de" : "cp437de",
1630 "cp1250" : "cp1250",
1631 "cp1251" : "cp1251",
1632 "cp1252" : "cp1252",
1633 "cp1255" : "cp1255",
1634 "cp1256" : "cp1256",
1635 "cp1257" : "cp1257",
1636 "koi8-r" : "koi8-r",
1637 "koi8-u" : "koi8-u",
1639 "utf8-platex" : "utf8",
# Iterate over all bibtex insets (loop header elided in this excerpt).
1646 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1649 j = find_end_of_inset(document.body, i)
1651 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1653 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1657 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1658 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1659 if len(bibfiles) == 0:
1660 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1661 # remove encoding line
1662 k = find_token(document.body, "file_encodings", i, j)
1664 del document.body[k]
1665 # Re-find inset end line
1666 j = find_end_of_inset(document.body, i)
# file_encodings is a tab-separated list of "<bibfile> <encoding>" pairs.
1668 enclist = encodings.split("\t")
1671 ppp = pp.split(" ", 1)
1672 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per file, with bibencoding where specified.
1673 for bib in bibfiles:
1674 pr = "\\addbibresource"
1675 if bib in encmap.keys():
1676 pr += "[bibencoding=" + encmap[bib] + "]"
1677 pr += "{" + bib + "}"
1678 add_to_preamble(document, [pr])
1679 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1680 pcmd = "printbibliography"
1682 pcmd += "[" + opts + "]"
1683 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1684 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1685 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1686 "status open", "", "\\begin_layout Plain Layout" ]
1687 repl += document.body[i:j+1]
1688 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1689 document.body[i:j+1] = repl
1695 def revert_cmidruletrimming(document):
1696 " Remove \\cmidrule trimming "
1698 # FIXME: Revert to TeX code?
# Iterate over all table cells (loop header and guards elided in this excerpt).
1701 # first, let's find out if we need to do anything
1702 i = find_token(document.body, '<cell ', i+1)
1705 j = document.body[i].find('trim="')
# Strip the (bottom|top)line[lr]trim attributes from the cell tag.
1708 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1709 # remove trim option
1710 document.body[i] = rgx.sub('', document.body[i])
1714 r'### Inserted by lyx2lyx (ruby inset) ###',
1715 r'InsetLayout Flex:Ruby',
1716 r' LyxType charstyle',
1717 r' LatexType command',
1721 r' HTMLInnerTag rb',
1722 r' HTMLInnerAttr ""',
1724 r' LabelString "Ruby"',
1725 r' Decoration Conglomerate',
1727 r' \ifdefined\kanjiskip',
1728 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1729 r' \else \ifdefined\luatexversion',
1730 r' \usepackage{luatexja-ruby}',
1731 r' \else \ifdefined\XeTeXversion',
1732 r' \usepackage{ruby}%',
1734 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1736 r' Argument post:1',
1737 r' LabelString "ruby text"',
1738 r' MenuString "Ruby Text|R"',
1739 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1740 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    # del_local_layout() reports whether the local Ruby inset definition
    # was present (and removed); only then switch to the bundled module.
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # Only documents that actually loaded the module need the local layout.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
1763 def convert_utf8_japanese(document):
1764 " Use generic utf8 with Japanese documents."
1765 lang = get_value(document.header, "\\language")
1766 if not lang.startswith("japanese"):
# (early-return body of this guard is elided in this excerpt)
1768 inputenc = get_value(document.header, "\\inputencoding")
# Both the platex- and CJK-specific utf8 variants map to plain utf8.
1769 if ((lang == "japanese" and inputenc == "utf8-platex")
1770 or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
1771 document.set_parameter("inputencoding", "utf8")
1773 def revert_utf8_japanese(document):
1774 " Use Japanese utf8 variants with Japanese documents."
1775 inputenc = get_value(document.header, "\\inputencoding")
1776 if inputenc != "utf8":
# (early-return body of this guard is elided in this excerpt)
1778 lang = get_value(document.header, "\\language")
# Map plain utf8 back to the engine-specific variant per language.
1779 if lang == "japanese":
1780 document.set_parameter("inputencoding", "utf8-platex")
1781 if lang == "japanese-cjk":
1782 document.set_parameter("inputencoding", "utf8-cjk")
1785 def revert_lineno(document):
1786 " Replace lineno setting with user-preamble code."
# (continuation of this call and several guard lines are elided in this excerpt)
1788 options = get_quoted_value(document.header, "\\lineno_options",
1790 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1793 options = "[" + options + "]"
# Emulate native line numbering by loading the lineno package explicitly.
1794 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
1797 def convert_lineno(document):
1798 " Replace user-preamble code with native lineno support."
# (guards and the definition of 'use_lineno' are elided in this excerpt)
1801 i = find_token(document.preamble, "\\linenumbers", 1)
1803 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1806 options = usepkg.group(1).strip("[]")
# Remove the preamble lines and the lyx2lyx marker comment.
1807 del(document.preamble[i-1:i+1])
1808 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the native header settings before \index.
1810 k = find_token(document.header, "\\index ")
1812 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1814 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1815 "\\lineno_options %s" % options]
1818 def convert_aaencoding(document):
1819 " Convert default document option due to encoding change in aa class. "
# (early-return bodies and -1 guards are elided in this excerpt)
1821 if document.textclass != "aa":
1825 i = find_token(document.header, "\\use_default_options true", i)
1828 j = find_token(document.header, "\\inputencoding", 0)
1830 document.warning("Malformed LyX Document! Missing \\inputencoding header.")
1832 val = get_value(document.header, "\\inputencoding", j)
# aa's default encoding changed; make latin9 explicit in \options instead
# of relying on the class default.
1833 if val == "auto-legacy" or val == "latin9":
1834 document.header[i] = "\\use_default_options false"
1835 k = find_token(document.header, "\\options", 0)
1837 document.header.insert(i, "\\options latin9")
1839 document.header[k] = document.header[k] + ",latin9"
1842 def revert_aaencoding(document):
1843 " Revert default document option due to encoding change in aa class. "
# (early-return bodies and -1 guards are elided in this excerpt)
1845 if document.textclass != "aa":
1849 i = find_token(document.header, "\\use_default_options true", i)
1852 j = find_token(document.header, "\\inputencoding", 0)
1854 document.warning("Malformed LyX Document! Missing \\inputencoding header.")
1856 val = get_value(document.header, "\\inputencoding", j)
# Make utf8 explicit in \options instead of relying on the class default.
1858 document.header[i] = "\\use_default_options false"
1859 k = find_token(document.header, "\\options", 0)
1861 document.header.insert(i, "\\options utf8")
1863 document.header[k] = document.header[k] + ",utf8"
1866 def revert_new_languages(document):
1867 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1868 and Russian (Petrine orthography)."""
1870 # lyxname: (babelname, polyglossianame)
1871 new_languages = {"azerbaijani": ("azerbaijani", ""),
1872 "bengali": ("", "bengali"),
1873 "churchslavonic": ("", "churchslavonic"),
1874 "oldrussian": ("", "russian"),
1875 "korean": ("", "korean"),
1877 used_languages = set()
1878 if document.language in new_languages:
1879 used_languages.add(document.language)
# Collect every \lang switch in the body that names a new language
# (loop header and -1 guard elided in this excerpt).
1882 i = find_token(document.body, "\\lang", i+1)
1885 val = get_value(document.body, "\\lang", i)
1886 if val in new_languages:
1887 used_languages.add(val)
1889 # Korean is already supported via CJK, so leave as-is for Babel
1890 if ("korean" in used_languages
1891 and get_bool_value(document.header, "\\use_non_tex_fonts")
1892 and get_value(document.header, "\\language_package") in ("default", "auto")):
1893 revert_language(document, "korean", "", "korean")
1894 used_languages.discard("korean")
# Revert each remaining new language to its babel/polyglossia emulation.
1896 for lang in used_languages:
1897 revert_language(document, lang, new_languages[lang][0], new_languages[lang][1])
1901 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1902 r'InsetLayout Flex:Glosse',
1904 r' LabelString "Gloss (old version)"',
1905 r' MenuString "Gloss (old version)"',
1906 r' LatexType environment',
1907 r' LatexName linggloss',
1908 r' Decoration minimalistic',
1913 r' CustomPars false',
1914 r' ForcePlain true',
1915 r' ParbreakIsNewline true',
1916 r' FreeSpacing true',
1917 r' Requires covington',
1920 r' \@ifundefined{linggloss}{%',
1921 r' \newenvironment{linggloss}[2][]{',
1922 r' \def\glosstr{\glt #1}%',
1924 r' {\glosstr\glend}}{}',
1927 r' ResetsFont true',
1929 r' Decoration conglomerate',
1930 r' LabelString "Translation"',
1931 r' MenuString "Glosse Translation|s"',
1932 r' Tooltip "Add a translation for the glosse"',
# Local layout emulating the deprecated Tri-Glosse inset via the covington
# package (closing bracket of this list is elided in this excerpt).
1937 glosss_inset_def = [
1938 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1939 r'InsetLayout Flex:Tri-Glosse',
1941 r' LabelString "Tri-Gloss (old version)"',
1942 r' MenuString "Tri-Gloss (old version)"',
1943 r' LatexType environment',
1944 r' LatexName lingglosss',
1945 r' Decoration minimalistic',
1950 r' CustomPars false',
1951 r' ForcePlain true',
1952 r' ParbreakIsNewline true',
1953 r' FreeSpacing true',
1955 r' Requires covington',
1958 r' \@ifundefined{lingglosss}{%',
1959 r' \newenvironment{lingglosss}[2][]{',
1960 r' \def\glosstr{\glt #1}%',
1962 r' {\glosstr\glend}}{}',
1964 r' ResetsFont true',
1966 r' Decoration conglomerate',
1967 r' LabelString "Translation"',
1968 r' MenuString "Glosse Translation|s"',
1969 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    body = document.body
    # For each deprecated gloss inset still present in the document,
    # append the matching local layout definition.
    if find_token(body, '\\begin_inset Flex Glosse', 0) != -1:
        document.append_local_layout(gloss_inset_def)
    if find_token(body, '\\begin_inset Flex Tri-Glosse', 0) != -1:
        document.append_local_layout(glosss_inset_def)
# Revert the new "Interlinear Gloss" flex insets to raw LaTeX (covington's
# gloss commands), deleting the local layouts added by convert_linggloss().
# Skips documents that do not load the "linguistics" module.
# NOTE(review): this view is elided -- loop headers, `if j == -1:` guards,
# `continue` lines and the assignment of `cmd` are not visible here; all
# visible tokens kept byte-identical.
1981 def revert_linggloss(document):
1982 " Revert to old ling gloss definitions "
1983 if not "linguistics" in document.get_module_list():
1985 document.del_local_layout(gloss_inset_def)
1986 document.del_local_layout(glosss_inset_def)
1989 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1990 for glosse in glosses:
1993 i = find_token(document.body, glosse, i+1)
1996 j = find_end_of_inset(document.body, i)
1998 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (bracketed gloss header).
2001 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2002 endarg = find_end_of_inset(document.body, arg)
2005 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2006 if argbeginPlain == -1:
2007 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2009 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2010 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2012 # remove Arg insets and paragraph, if it only contains this inset
2013 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2014 del document.body[arg - 1 : endarg + 4]
2016 del document.body[arg : endarg + 1]
# Mandatory post-arguments 1-3: the gloss lines (post:3 only used by the
# three-line variant, see the \trigloss check further down).
2018 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2019 endarg = find_end_of_inset(document.body, arg)
2022 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2023 if argbeginPlain == -1:
2024 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2026 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2027 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2029 # remove Arg insets and paragraph, if it only contains this inset
2030 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2031 del document.body[arg - 1 : endarg + 4]
2033 del document.body[arg : endarg + 1]
2035 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2036 endarg = find_end_of_inset(document.body, arg)
2039 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2040 if argbeginPlain == -1:
2041 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2043 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2044 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2046 # remove Arg insets and paragraph, if it only contains this inset
2047 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2048 del document.body[arg - 1 : endarg + 4]
2050 del document.body[arg : endarg + 1]
2052 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2053 endarg = find_end_of_inset(document.body, arg)
2056 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2057 if argbeginPlain == -1:
2058 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2060 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2061 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2063 # remove Arg insets and paragraph, if it only contains this inset
2064 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2065 del document.body[arg - 1 : endarg + 4]
2067 del document.body[arg : endarg + 1]
# Rebuild the inset as ERT: \cmd[opt]{arg1}{arg2}[{arg3}].
# NOTE(review): `cmd` is assigned on lines not visible in this view
# (presumably per-glosse around line 1991/2071 -- verify upstream).
2070 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2073 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2074 endInset = find_end_of_inset(document.body, i)
2075 endPlain = find_end_of_layout(document.body, beginPlain)
2076 precontent = put_cmd_in_ert(cmd)
2077 if len(optargcontent) > 0:
2078 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2079 precontent += put_cmd_in_ert("{")
2081 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2082 if cmd == "\\trigloss":
2083 postcontent += put_cmd_in_ert("}{") + marg3content
2084 postcontent += put_cmd_in_ert("}")
# Splice the ERT in and drop the original inset frame.
2086 document.body[endPlain:endInset + 1] = postcontent
2087 document.body[beginPlain + 1:beginPlain] = precontent
2088 del document.body[i : beginPlain + 1]
2090 document.append_local_layout("Requires covington")
# Revert Subexample layouts that carry an optional argument: wrap the
# consecutive Subexample paragraphs in an ERT
# \begin{subexamples}[opt] ... \end{subexamples} block (covington).
# NOTE(review): this view is elided -- the enclosing loop, `if i/j == -1:`
# guards and `break`/`continue` lines are not visible here; all visible
# tokens kept byte-identical.
2095 def revert_subexarg(document):
2096 " Revert linguistic subexamples with argument to ERT "
2098 if not "linguistics" in document.get_module_list():
2104 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2107 j = find_end_of_layout(document.body, i)
2109 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2112 # check for consecutive layouts
2113 k = find_token(document.body, "\\begin_layout", j)
2114 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2116 j = find_end_of_layout(document.body, k)
2118 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument and convert its content to LaTeX.
2121 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2125 endarg = find_end_of_inset(document.body, arg)
2127 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2128 if argbeginPlain == -1:
2129 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2131 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2132 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2134 # remove Arg insets and paragraph, if it only contains this inset
2135 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2136 del document.body[arg - 1 : endarg + 4]
2138 del document.body[arg : endarg + 1]
2140 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2142 # re-find end of layout
2143 j = find_end_of_layout(document.body, i)
2145 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2148 # check for consecutive layouts
2149 k = find_token(document.body, "\\begin_layout", j)
2150 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Turn each following Subexample paragraph into Standard + ERT \item.
2152 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2153 j = find_end_of_layout(document.body, k)
2155 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2158 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the last paragraph, then open it (with the
# optional argument) in place of the first one.
2160 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2161 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2162 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2164 document.append_local_layout("Requires covington")
# Revert the Discourse Representation Structure (DRS) flex insets of the
# linguistics module to raw LaTeX using the `drs` package commands.
# For each DRS variant: collect up to two pre-arguments and up to four
# post-arguments, delete the argument insets, then rebuild the inset body
# as ERT \cmd{...}{...}.
# NOTE(review): this view is elided -- loop headers, `if == -1` guards,
# `continue` lines and every `cmd = "..."` assignment in the dispatch
# chain are not visible here; all visible tokens kept byte-identical.
2168 def revert_drs(document):
2169 " Revert DRS insets (linguistics) to ERT "
2171 if not "linguistics" in document.get_module_list():
2175 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2176 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2177 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2178 "\\begin_inset Flex SDRS"]
2182 i = find_token(document.body, drs, i+1)
2185 j = find_end_of_inset(document.body, i)
2187 document.warning("Malformed LyX document: Can't find end of DRS inset")
2190 # Check for arguments
2191 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2192 endarg = find_end_of_inset(document.body, arg)
2195 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2196 if argbeginPlain == -1:
2197 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2199 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2200 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2202 # remove Arg insets and paragraph, if it only contains this inset
2203 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2204 del document.body[arg - 1 : endarg + 4]
2206 del document.body[arg : endarg + 1]
# Each deletion shifts body indices, so the inset end is re-found before
# every subsequent argument lookup.
2209 j = find_end_of_inset(document.body, i)
2211 document.warning("Malformed LyX document: Can't find end of DRS inset")
2214 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2215 endarg = find_end_of_inset(document.body, arg)
2218 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2219 if argbeginPlain == -1:
2220 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2222 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2223 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2225 # remove Arg insets and paragraph, if it only contains this inset
2226 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2227 del document.body[arg - 1 : endarg + 4]
2229 del document.body[arg : endarg + 1]
2232 j = find_end_of_inset(document.body, i)
2234 document.warning("Malformed LyX document: Can't find end of DRS inset")
2237 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2238 endarg = find_end_of_inset(document.body, arg)
2239 postarg1content = []
2241 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2242 if argbeginPlain == -1:
2243 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2245 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2246 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2248 # remove Arg insets and paragraph, if it only contains this inset
2249 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2250 del document.body[arg - 1 : endarg + 4]
2252 del document.body[arg : endarg + 1]
2255 j = find_end_of_inset(document.body, i)
2257 document.warning("Malformed LyX document: Can't find end of DRS inset")
2260 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2261 endarg = find_end_of_inset(document.body, arg)
2262 postarg2content = []
2264 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2265 if argbeginPlain == -1:
2266 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2268 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2269 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2271 # remove Arg insets and paragraph, if it only contains this inset
2272 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2273 del document.body[arg - 1 : endarg + 4]
2275 del document.body[arg : endarg + 1]
2278 j = find_end_of_inset(document.body, i)
2280 document.warning("Malformed LyX document: Can't find end of DRS inset")
2283 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2284 endarg = find_end_of_inset(document.body, arg)
2285 postarg3content = []
2287 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2288 if argbeginPlain == -1:
2289 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2291 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2292 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2294 # remove Arg insets and paragraph, if it only contains this inset
2295 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2296 del document.body[arg - 1 : endarg + 4]
2298 del document.body[arg : endarg + 1]
2301 j = find_end_of_inset(document.body, i)
2303 document.warning("Malformed LyX document: Can't find end of DRS inset")
2306 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2307 endarg = find_end_of_inset(document.body, arg)
2308 postarg4content = []
2310 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2311 if argbeginPlain == -1:
2312 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2314 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2315 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2317 # remove Arg insets and paragraph, if it only contains this inset
2318 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2319 del document.body[arg - 1 : endarg + 4]
2321 del document.body[arg : endarg + 1]
2323 # The respective LaTeX command
# NOTE(review): the `cmd = ...` assignments for each branch (and the
# default before the chain) live on lines elided from this view.
2325 if drs == "\\begin_inset Flex DRS*":
2327 elif drs == "\\begin_inset Flex IfThen-DRS":
2329 elif drs == "\\begin_inset Flex Cond-DRS":
2331 elif drs == "\\begin_inset Flex QDRS":
2333 elif drs == "\\begin_inset Flex NegDRS":
2335 elif drs == "\\begin_inset Flex SDRS":
2338 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2339 endInset = find_end_of_inset(document.body, i)
2340 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
# Build \cmd{prearg1}[{prearg2}]{ ... } around the inset body.
2341 precontent = put_cmd_in_ert(cmd)
2342 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2343 if drs == "\\begin_inset Flex SDRS":
2344 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2345 precontent += put_cmd_in_ert("{")
2348 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2349 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2350 if cmd == "\\condrs" or cmd == "\\qdrs":
2351 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2353 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2355 postcontent = put_cmd_in_ert("}")
2357 document.body[endPlain:endInset + 1] = postcontent
2358 document.body[beginPlain + 1:beginPlain] = precontent
2359 del document.body[i : beginPlain + 1]
2361 document.append_local_layout("Provides covington 1")
2362 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Move \babelfont-based font selection (non-TeX fonts + babel language
# package) out of the header and into equivalent user-preamble code.
# Resets the header's roman/sans/typewriter fonts to "default".
# NOTE(review): this view is elided -- `if == -1: return` guards,
# early `return`s and several `else:`/append lines are not visible here;
# all visible tokens kept byte-identical.
2368 def revert_babelfont(document):
2369 " Reverts the use of \\babelfont to user preamble "
# Only applies when non-TeX fonts are in use AND babel is the language
# package; otherwise nothing to revert.
2371 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2373 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2375 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2377 i = find_token(document.header, '\\language_package', 0)
2379 document.warning("Malformed LyX document: Missing \\language_package.")
2381 if get_value(document.header, "\\language_package", 0) != "babel":
2384 # check font settings
2386 roman = sans = typew = "default"
2388 sf_scale = tt_scale = 100.0
# Extract each font family name and reset the header entry to default.
2390 j = find_token(document.header, "\\font_roman", 0)
2392 document.warning("Malformed LyX document: Missing \\font_roman.")
2394 # We need to use this regex since split() does not handle quote protection
2395 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2396 roman = romanfont[2].strip('"')
2397 romanfont[2] = '"default"'
2398 document.header[j] = " ".join(romanfont)
2400 j = find_token(document.header, "\\font_sans", 0)
2402 document.warning("Malformed LyX document: Missing \\font_sans.")
2404 # We need to use this regex since split() does not handle quote protection
2405 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2406 sans = sansfont[2].strip('"')
2407 sansfont[2] = '"default"'
2408 document.header[j] = " ".join(sansfont)
2410 j = find_token(document.header, "\\font_typewriter", 0)
2412 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2414 # We need to use this regex since split() does not handle quote protection
2415 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2416 typew = ttfont[2].strip('"')
2417 ttfont[2] = '"default"'
2418 document.header[j] = " ".join(ttfont)
2420 i = find_token(document.header, "\\font_osf", 0)
2422 document.warning("Malformed LyX document: Missing \\font_osf.")
2424 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Scale values are stored as percentages; 100.0 means "no Scale option".
2426 j = find_token(document.header, "\\font_sf_scale", 0)
2428 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2430 sfscale = document.header[j].split()
2433 document.header[j] = " ".join(sfscale)
2436 sf_scale = float(val)
2438 document.warning("Invalid font_sf_scale value: " + val)
2440 j = find_token(document.header, "\\font_tt_scale", 0)
2442 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2444 ttscale = document.header[j].split()
2447 document.header[j] = " ".join(ttscale)
2450 tt_scale = float(val)
2452 document.warning("Invalid font_tt_scale value: " + val)
2454 # set preamble stuff
2455 pretext = ['%% This document must be processed with xelatex or lualatex!']
2456 pretext.append('\\AtBeginDocument{%')
2457 if roman != "default":
2458 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2459 if sans != "default":
2460 sf = '\\babelfont{sf}['
2461 if sf_scale != 100.0:
2462 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2463 sf += 'Mapping=tex-text]{' + sans + '}'
2465 if typew != "default":
2466 tw = '\\babelfont{tt}'
2467 if tt_scale != 100.0:
2468 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2469 tw += '{' + typew + '}'
2472 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2474 insert_to_preamble(document, pretext)
# Revert the native MinionPro roman-font setting (when extra options are
# present in \font_roman_opts) to a \usepackage[...]{MinionPro} preamble
# line, resetting the header's roman font to "default".
# NOTE(review): this view is elided -- `if == -1: return` guards and the
# lines folding osf/opts into `preamble` are not visible here; all visible
# tokens kept byte-identical.
2477 def revert_minionpro(document):
2478 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only TeX-font documents are affected.
2480 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2482 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2484 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2487 regexp = re.compile(r'(\\font_roman_opts)')
2488 x = find_re(document.header, regexp, 0)
2492 # We need to use this regex since split() does not handle quote protection
2493 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2494 opts = romanopts[1].strip('"')
2496 i = find_token(document.header, "\\font_roman", 0)
2498 document.warning("Malformed LyX document: Missing \\font_roman.")
2501 # We need to use this regex since split() does not handle quote protection
2502 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2503 roman = romanfont[1].strip('"')
2504 if roman != "minionpro":
2506 romanfont[1] = '"default"'
2507 document.header[i] = " ".join(romanfont)
# Old-style figures become a package option; the header flag is cleared.
2509 j = find_token(document.header, "\\font_osf true", 0)
2512 preamble = "\\usepackage["
2514 document.header[j] = "\\font_osf false"
2518 preamble += "]{MinionPro}"
2519 add_to_preamble(document, [preamble])
2520 del document.header[x]
# Revert the new \font_*_opts header params: emit \setmainfont /
# \setsansfont / \setmonofont (or \babelfont{rm|sf|tt} when babel is the
# language package) to the preamble and reset the header fonts to default.
# Handles roman, sans and typewriter families in three parallel passes.
# NOTE(review): this view is elided -- `if i == -1` guards, `if NonTeXFonts:`
# / `else:` branch headers and option-concatenation lines are not visible
# here; all visible tokens kept byte-identical.
2523 def revert_font_opts(document):
2524 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2526 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2528 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2530 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2531 i = find_token(document.header, '\\language_package', 0)
2533 document.warning("Malformed LyX document: Missing \\language_package.")
2535 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman family ---
2538 regexp = re.compile(r'(\\font_roman_opts)')
2539 i = find_re(document.header, regexp, 0)
2541 # We need to use this regex since split() does not handle quote protection
2542 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2543 opts = romanopts[1].strip('"')
2544 del document.header[i]
2546 regexp = re.compile(r'(\\font_roman)')
2547 i = find_re(document.header, regexp, 0)
2549 # We need to use this regex since split() does not handle quote protection
2550 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2551 font = romanfont[2].strip('"')
2552 romanfont[2] = '"default"'
2553 document.header[i] = " ".join(romanfont)
2554 if font != "default":
2556 preamble = "\\babelfont{rm}["
2558 preamble = "\\setmainfont["
2561 preamble += "Mapping=tex-text]{"
2564 add_to_preamble(document, [preamble])
# --- sans family (with optional Scale=0.<percent>) ---
2567 regexp = re.compile(r'(\\font_sans_opts)')
2568 i = find_re(document.header, regexp, 0)
2571 # We need to use this regex since split() does not handle quote protection
2572 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2573 opts = sfopts[1].strip('"')
2574 del document.header[i]
2576 regexp = re.compile(r'(\\font_sf_scale)')
2577 i = find_re(document.header, regexp, 0)
2579 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2580 regexp = re.compile(r'(\\font_sans)')
2581 i = find_re(document.header, regexp, 0)
2583 # We need to use this regex since split() does not handle quote protection
2584 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2585 font = sffont[2].strip('"')
2586 sffont[2] = '"default"'
2587 document.header[i] = " ".join(sffont)
2588 if font != "default":
2590 preamble = "\\babelfont{sf}["
2592 preamble = "\\setsansfont["
2596 preamble += "Scale=0."
2597 preamble += scaleval
2599 preamble += "Mapping=tex-text]{"
2602 add_to_preamble(document, [preamble])
# --- typewriter family (same pattern as sans) ---
2605 regexp = re.compile(r'(\\font_typewriter_opts)')
2606 i = find_re(document.header, regexp, 0)
2609 # We need to use this regex since split() does not handle quote protection
2610 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2611 opts = ttopts[1].strip('"')
2612 del document.header[i]
2614 regexp = re.compile(r'(\\font_tt_scale)')
2615 i = find_re(document.header, regexp, 0)
2617 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2618 regexp = re.compile(r'(\\font_typewriter)')
2619 i = find_re(document.header, regexp, 0)
2621 # We need to use this regex since split() does not handle quote protection
2622 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2623 font = ttfont[2].strip('"')
2624 ttfont[2] = '"default"'
2625 document.header[i] = " ".join(ttfont)
2626 if font != "default":
2628 preamble = "\\babelfont{tt}["
2630 preamble = "\\setmonofont["
2634 preamble += "Scale=0."
2635 preamble += scaleval
2637 preamble += "Mapping=tex-text]{"
2640 add_to_preamble(document, [preamble])
# Revert the "complete" Noto TeX-font setup (roman NotoSerif-TLF with sans
# and typewriter left at default) plus extra \font_roman_opts to a single
# \usepackage[...]{noto} preamble line.
# NOTE(review): this view is elided -- `osf` assignment, `if == -1` /
# `return` guards and option-concatenation lines are not visible here;
# all visible tokens kept byte-identical.
2643 def revert_plainNotoFonts_xopts(document):
2644 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2646 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2648 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2650 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2654 y = find_token(document.header, "\\font_osf true", 0)
2658 regexp = re.compile(r'(\\font_roman_opts)')
2659 x = find_re(document.header, regexp, 0)
# Nothing to revert without extra options or old-style figures.
2660 if x == -1 and not osf:
2665 # We need to use this regex since split() does not handle quote protection
2666 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2667 opts = romanopts[1].strip('"')
2673 i = find_token(document.header, "\\font_roman", 0)
2677 # We need to use this regex since split() does not handle quote protection
2678 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2679 roman = romanfont[1].strip('"')
2680 if roman != "NotoSerif-TLF":
# sans and typewriter must still be "default" for the plain-noto case.
2683 j = find_token(document.header, "\\font_sans", 0)
2687 # We need to use this regex since split() does not handle quote protection
2688 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2689 sf = sffont[1].strip('"')
2693 j = find_token(document.header, "\\font_typewriter", 0)
2697 # We need to use this regex since split() does not handle quote protection
2698 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2699 tt = ttfont[1].strip('"')
2703 # So we have noto as "complete font"
2704 romanfont[1] = '"default"'
2705 document.header[i] = " ".join(romanfont)
2707 preamble = "\\usepackage["
2709 preamble += "]{noto}"
2710 add_to_preamble(document, [preamble])
2712 document.header[y] = "\\font_osf false"
2714 del document.header[x]
# Revert the extended Noto font variants (with extra options) to preamble
# code via the shared font-mapping machinery; only for TeX-font documents.
# NOTE(review): this view is elided -- the `fontmap` initialization and
# `if == -1:`/`return` guard lines are not visible here; all visible
# tokens kept byte-identical.
2717 def revert_notoFonts_xopts(document):
2718 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2720 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2722 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2724 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2728 fm = createFontMapping(['Noto'])
2729 if revert_fonts(document, fm, fontmap, True):
2730 add_preamble_fonts(document, fontmap)
# Revert native IBM Plex fonts (with extra options) to preamble code via
# the shared font-mapping machinery; only for TeX-font documents.
# NOTE(review): this view is elided -- the `fontmap` initialization and
# guard lines are not visible here; all visible tokens kept byte-identical.
2733 def revert_IBMFonts_xopts(document):
2734 " Revert native IBM font definition (with extra options) to LaTeX "
2736 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2738 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2740 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2744 fm = createFontMapping(['IBM'])
2746 if revert_fonts(document, fm, fontmap, True):
2747 add_preamble_fonts(document, fontmap)
# Revert native Adobe Source fonts (with extra options) to preamble code
# via the shared font-mapping machinery; only for TeX-font documents.
# NOTE(review): this view is elided -- the `fontmap` initialization and
# guard lines are not visible here; all visible tokens kept byte-identical.
2750 def revert_AdobeFonts_xopts(document):
2751 " Revert native Adobe font definition (with extra options) to LaTeX "
2753 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2755 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2757 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2761 fm = createFontMapping(['Adobe'])
2763 if revert_fonts(document, fm, fontmap, True):
2764 add_preamble_fonts(document, fontmap)
# Split the single \font_osf header flag into the new per-family flags
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf.  For TeX fonts
# the sans/typewriter flags depend on whether the selected font is in the
# osfsf/osftt whitelists.
# NOTE(review): this view is elided -- `if NonTeXFonts:`/`else:` headers,
# guards and several branch lines are not visible here; all visible
# tokens kept byte-identical.
2767 def convert_osf(document):
2768 " Convert \\font_osf param to new format "
2771 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2773 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2775 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2777 i = find_token(document.header, '\\font_osf', 0)
2779 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans/typewriter variants honor old-style figures.
2782 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2783 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2785 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2786 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2789 document.header.insert(i, "\\font_sans_osf false")
2790 document.header.insert(i + 1, "\\font_typewriter_osf false")
2794 x = find_token(document.header, "\\font_sans", 0)
2796 document.warning("Malformed LyX document: Missing \\font_sans.")
2798 # We need to use this regex since split() does not handle quote protection
2799 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2800 sf = sffont[1].strip('"')
2802 document.header.insert(i, "\\font_sans_osf true")
2804 document.header.insert(i, "\\font_sans_osf false")
2806 x = find_token(document.header, "\\font_typewriter", 0)
2808 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2810 # We need to use this regex since split() does not handle quote protection
2811 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2812 tt = ttfont[1].strip('"')
2814 document.header.insert(i + 1, "\\font_typewriter_osf true")
2816 document.header.insert(i + 1, "\\font_typewriter_osf false")
2819 document.header.insert(i, "\\font_sans_osf false")
2820 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapse the per-family \font_roman_osf / \font_sans_osf /
# \font_typewriter_osf flags back into the single legacy \font_osf flag,
# deleting the sans/typewriter entries from the header.
# NOTE(review): this view is elided -- `if == -1: return` guards and the
# NonTeXFonts branch structure are not visible here; all visible tokens
# kept byte-identical.
2823 def revert_osf(document):
2824 " Revert \\font_*_osf params "
2827 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2829 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2831 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2833 i = find_token(document.header, '\\font_roman_osf', 0)
2835 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2838 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2839 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2841 i = find_token(document.header, '\\font_sans_osf', 0)
2843 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
# NOTE(review): plain `=` here vs `|=` for the typewriter flag below --
# verify against the full source whether an elided branch explains the
# asymmetry (otherwise the roman value captured above would be lost).
2846 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2847 del document.header[i]
2849 i = find_token(document.header, '\\font_typewriter_osf', 0)
2851 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2854 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2855 del document.header[i]
2858 i = find_token(document.header, '\\font_osf', 0)
2860 document.warning("Malformed LyX document: Missing \\font_osf.")
2862 document.header[i] = "\\font_osf true"
# Revert native TeX-font selections that carry extra options
# (\font_sans_opts / \font_roman_opts) to explicit \usepackage lines in
# the preamble.  Sans handles biolinum only; roman handles the fonts in
# `rmfonts`, mapping some to their math companion packages.
# NOTE(review): this view is elided -- `if == -1` guards, `return`s,
# default `package`/`osf` assignments and most osf option strings are not
# visible here; all visible tokens kept byte-identical.
2865 def revert_texfontopts(document):
2866 " Revert native TeX font definitions (with extra options) to LaTeX "
2868 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2870 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2872 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2875 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2877 # First the sf (biolinum only)
2878 regexp = re.compile(r'(\\font_sans_opts)')
2879 x = find_re(document.header, regexp, 0)
2881 # We need to use this regex since split() does not handle quote protection
2882 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2883 opts = sfopts[1].strip('"')
2884 i = find_token(document.header, "\\font_sans", 0)
2886 document.warning("Malformed LyX document: Missing \\font_sans.")
2888 # We need to use this regex since split() does not handle quote protection
2889 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2890 sans = sffont[1].strip('"')
2891 if sans == "biolinum":
2893 sffont[1] = '"default"'
2894 document.header[i] = " ".join(sffont)
2896 j = find_token(document.header, "\\font_sans_osf true", 0)
2899 k = find_token(document.header, "\\font_sf_scale", 0)
2901 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2903 sfscale = document.header[k].split()
2906 document.header[k] = " ".join(sfscale)
2909 sf_scale = float(val)
2911 document.warning("Invalid font_sf_scale value: " + val)
2912 preamble = "\\usepackage["
2914 document.header[j] = "\\font_sans_osf false"
# biolinum's scaling is passed as the `scaled=` package option.
2916 if sf_scale != 100.0:
2917 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2919 preamble += "]{biolinum}"
2920 add_to_preamble(document, [preamble])
2921 del document.header[x]
# Now the roman fonts.
2923 regexp = re.compile(r'(\\font_roman_opts)')
2924 x = find_re(document.header, regexp, 0)
2928 # We need to use this regex since split() does not handle quote protection
2929 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2930 opts = romanopts[1].strip('"')
2932 i = find_token(document.header, "\\font_roman", 0)
2934 document.warning("Malformed LyX document: Missing \\font_roman.")
2937 # We need to use this regex since split() does not handle quote protection
2938 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2939 roman = romanfont[1].strip('"')
2940 if not roman in rmfonts:
2942 romanfont[1] = '"default"'
2943 document.header[i] = " ".join(romanfont)
# Some LyX font names map to a differently named LaTeX package.
2945 if roman == "utopia":
2947 elif roman == "palatino":
2948 package = "mathpazo"
2949 elif roman == "times":
2950 package = "mathptmx"
2951 elif roman == "xcharter":
2952 package = "XCharter"
# Per-font package options for old-style figures.
2954 j = find_token(document.header, "\\font_roman_osf true", 0)
2956 if roman == "cochineal":
2957 osf = "proportional,osf,"
2958 elif roman == "utopia":
2960 elif roman == "garamondx":
2962 elif roman == "libertine":
2964 elif roman == "palatino":
2966 elif roman == "xcharter":
2968 document.header[j] = "\\font_roman_osf false"
2969 k = find_token(document.header, "\\font_sc true", 0)
2971 if roman == "utopia":
2973 if roman == "palatino" and osf == "":
2975 document.header[k] = "\\font_sc false"
2976 preamble = "\\usepackage["
2979 preamble += "]{" + package + "}"
2980 add_to_preamble(document, [preamble])
2981 del document.header[x]
def convert_CantarellFont(document):
    " Handle Cantarell font definition to LaTeX "
    # Only documents using TeX fonts carry a convertible Cantarell setting.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Cantarell'])
    convert_fonts(document, mapping, "oldstyle")
# Revert the native Cantarell font setting to preamble code via the
# shared font-mapping machinery; only for TeX-font documents.
# NOTE(review): this view is elided -- the `fontmap` initialization is
# not visible here; all visible tokens kept byte-identical.
2991 def revert_CantarellFont(document):
2992 " Revert native Cantarell font definition to LaTeX "
2994 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2996 fm = createFontMapping(['Cantarell'])
2997 if revert_fonts(document, fm, fontmap, False, True):
2998 add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    " Handle Chivo font definition to LaTeX "
    # Only documents using TeX fonts carry a convertible Chivo setting.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Chivo'])
    convert_fonts(document, mapping, "oldstyle")
def revert_ChivoFont(document):
    " Revert native Chivo font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # BUGFIX: fontmap was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['Chivo'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    " Handle Fira font definition to LaTeX "

    # Only documents typeset with TeX fonts carry the LaTeX font mapping.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Fira']), "lf")
def revert_FiraFont(document):
    " Revert native Fira font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # BUGFIX: fontmap was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['Fira'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    " Move semibold options to extraopts "

    # The dedicated IBM Plex *Semibold faces are gone: select the base
    # family instead and carry "semibold" in \font_*_opts (TeX fonts only).
    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_roman_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_roman_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_sans_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_sans_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_typewriter_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    # BUGFIX: the original wrote sfopts[1] (the sans-serif
                    # options) into \font_typewriter_opts here.
                    document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    # The -TLF variants of the Noto families are unified into the single
    # *Regular entries for roman, sans and typewriter.
    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Only documents typeset with TeX fonts carry the LaTeX font mapping.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['CrimsonPro']), "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # BUGFIX: fontmap was referenced without being initialized.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # BUGFIX: was textclass[:2] != "scr" — a two-char slice can never equal
    # the three-char "scr", so KOMA classes were always skipped.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active the page size is handled there; nothing to do.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes these classes already supported; only the new ones need reverting.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Pass the size on as a class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # Geometry already active: nothing to convert.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes natively supported before; only other values need geometry.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # BUGFIX: was document.header[1], which clobbered an unrelated
        # fixed header line instead of the \use_geometry line just found.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes all classes support; only the KOMA-specific ones need reverting.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    # Pass the size on as the KOMA fontsize=<val> class option instead.
    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
# Reverts biblatex qualified citation lists whose key list contains
# duplicates into raw ERT (\cites-style commands), since LyX 2.3 requires
# unique keys in such insets.
# NOTE(review): this excerpt elides many original lines (guards such as
# "if i == -1", loop scaffolding, else branches, dict initializers); the
# code lines below are kept verbatim as found.
3270 def revert_dupqualicites(document):
3271 " Revert qualified citation list commands with duplicate keys to ERT "
3273 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3274 # we need to revert those with multiple uses of the same key.
# Only biblatex(-natbib) documents have qualified lists; others are skipped.
3278 i = find_token(document.header, "\\cite_engine", 0)
3280 document.warning("Malformed document! Missing \\cite_engine")
3282 engine = get_value(document.header, "\\cite_engine", i)
3284 if not engine in ["biblatex", "biblatex-natbib"]:
3287 # Citation insets that support qualified lists, with their LaTeX code
# Maps the LyX citation command to the corresponding biblatex multicite macro.
3291 "citet" : "textcites",
3292 "Citet" : "Textcites",
3293 "citep" : "parencites",
3294 "Citep" : "Parencites",
3295 "Footcite" : "Smartcites",
3296 "footcite" : "smartcites",
3297 "Autocite" : "Autocites",
3298 "autocite" : "autocites",
# Walk all citation insets in the body.
3303 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3306 j = find_end_of_inset(document.body, i)
3308 document.warning("Can't find end of citation inset at line %d!!" %(i))
3312 k = find_token(document.body, "LatexCommand", i, j)
3314 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3318 cmd = get_value(document.body, "LatexCommand", k)
3319 if not cmd in list(ql_citations.keys()):
# Only insets that actually carry pre-/posttext lists are affected.
3323 pres = find_token(document.body, "pretextlist", i, j)
3324 posts = find_token(document.body, "posttextlist", i, j)
3325 if pres == -1 and posts == -1:
3330 key = get_quoted_value(document.body, "key", i, j)
3332 document.warning("Citation inset at line %d does not have a key!" %(i))
# Duplicate detection: if all keys are unique, the inset can stay native.
3336 keys = key.split(",")
3337 ukeys = list(set(keys))
3338 if len(keys) == len(ukeys):
3343 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3344 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3346 pre = get_quoted_value(document.body, "before", i, j)
3347 post = get_quoted_value(document.body, "after", i, j)
# Build per-key pretext map; repeated keys accumulate tab-separated values.
3348 prelist = pretexts.split("\t")
3351 ppp = pp.split(" ", 1)
3357 if ppp[0] in premap:
3358 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3360 premap[ppp[0]] = val
# Same accumulation for the posttext list.
3361 postlist = posttexts.split("\t")
3365 ppp = pp.split(" ", 1)
3371 if ppp[0] in postmap:
3372 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3374 postmap[ppp[0]] = val
3375 # Replace known new commands with ERT
# Parentheses inside the global pre/post note must be braced, as the
# multicite syntax itself uses (...) delimiters.
3376 if "(" in pre or ")" in pre:
3377 pre = "{" + pre + "}"
3378 if "(" in post or ")" in post:
3379 post = "{" + post + "}"
3380 res = "\\" + ql_citations[cmd]
3382 res += "(" + pre + ")"
3384 res += "(" + post + ")"
# Emit one [prenote][postnote]{key} group per key, consuming the queued
# values so repeated keys get their own notes in order.
3388 if premap.get(kk, "") != "":
3389 akeys = premap[kk].split("\t", 1)
3392 res += "[" + akey + "]"
3394 premap[kk] = "\t".join(akeys[1:])
3397 if postmap.get(kk, "") != "":
3398 akeys = postmap[kk].split("\t", 1)
3401 res += "[" + akey + "]"
3403 postmap[kk] = "\t".join(akeys[1:])
3406 elif premap.get(kk, "") != "":
3408 res += "{" + kk + "}"
# Replace the whole inset with the assembled ERT command.
3409 document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    " Convert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # Old names carry a "paper" suffix; the new names drop it.
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    # Docstring fixed: this is the revert direction (said "Convert" before).
    " Revert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # New names are bare; the old names carry a "paper" suffix.
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    # Replace every endnote FloatList inset with raw \theendnotes ERT.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Track whether the document actually uses endnotes, so the package
    # is only loaded when needed.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    # If an endnotes module is also loaded, its \endnote command takes
    # precedence over memoir's own \pagenote.
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        # The starred list prints all notes, the plain one only new ones.
        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
# Reverts the graphics "totalheight" parameter to the pre-2.4 scheme where
# total height lived in the "special" line and "height" was a plain param.
# NOTE(review): this excerpt elides the loop scaffolding and most guard/
# else lines; code lines below are kept verbatim as found.
3525 def revert_totalheight(document):
3526 " Reverts graphics height parameter from totalheight to height "
# Iterate over all graphics insets in the body.
3530 i = find_token(document.body, "\\begin_inset Graphics", i)
3533 j = find_end_of_inset(document.body, i)
3535 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Pull any height= component out of the inset's "special" parameter.
3539 rx = re.compile(r'\s*special\s*(\S+)$')
3540 k = find_re(document.body, rx, i, j)
3544 m = rx.match(document.body[k])
3546 special = m.group(1)
3547 mspecial = special.split(',')
3548 for spc in mspecial:
3549 if spc[:7] == "height=":
3550 oldheight = spc.split('=')[1]
3551 mspecial.remove(spc)
3553 if len(mspecial) > 0:
3554 special = ",".join(mspecial)
# Find the native "height" line; its value moves into special totalheight=.
3558 rx = re.compile(r'(\s*height\s*)(\S+)$')
3559 kk = find_re(document.body, rx, i, j)
3561 m = rx.match(document.body[kk])
3567 val = val + "," + special
3568 document.body[k] = "\tspecial " + "totalheight=" + val
3570 document.body.insert(kk, "\tspecial totalheight=" + val)
# Either restore a plain height line from the old special height= value,
# or drop the now-empty height line.
3572 document.body[kk] = m.group(1) + oldheight
3574 del document.body[kk]
3575 elif oldheight != "":
3577 document.body[k] = "\tspecial " + special
3578 document.body.insert(k, "\theight " + oldheight)
3580 document.body[k] = "\theight " + oldheight
# Forward direction of revert_totalheight: moves totalheight= from the
# "special" parameter into the native height mechanism.
# NOTE(review): this excerpt elides the loop scaffolding and most guard/
# else lines; code lines below are kept verbatim as found.
3584 def convert_totalheight(document):
3585 " Converts graphics height parameter from totalheight to height "
# Iterate over all graphics insets in the body.
3589 i = find_token(document.body, "\\begin_inset Graphics", i)
3592 j = find_end_of_inset(document.body, i)
3594 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Pull any totalheight= component out of the "special" parameter.
3598 rx = re.compile(r'\s*special\s*(\S+)$')
3599 k = find_re(document.body, rx, i, j)
3603 m = rx.match(document.body[k])
3605 special = m.group(1)
3606 mspecial = special.split(',')
3607 for spc in mspecial:
3608 if spc[:12] == "totalheight=":
3609 newheight = spc.split('=')[1]
3610 mspecial.remove(spc)
3612 if len(mspecial) > 0:
3613 special = ",".join(mspecial)
# Existing "height" line: its value is demoted into special height=.
3617 rx = re.compile(r'(\s*height\s*)(\S+)$')
3618 kk = find_re(document.body, rx, i, j)
3620 m = rx.match(document.body[kk])
3626 val = val + "," + special
3627 document.body[k] = "\tspecial " + "height=" + val
3629 document.body.insert(kk + 1, "\tspecial height=" + val)
# The native height line takes over the former totalheight value, or is
# removed when there is none.
3631 document.body[kk] = m.group(1) + newheight
3633 del document.body[kk]
3634 elif newheight != "":
3635 document.body.insert(k, "\theight " + newheight)
def convert_changebars(document):
    " Converts the changebars module to native solution "

    if not "changebars" in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        # Still drop the module so the document stays loadable.
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    " Converts native changebar param to module "

    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)

    if val == "true":
        document.add_module("changebars")

    # The native header line has no pre-2.4 equivalent; remove it.
    del document.header[i]
def convert_postpone_fragile(document):
    " Adds false \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    " Remove \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    " Remove change tag from tabular columns/rows "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            # Strip the change="..." attribute from <column .../> tags.
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
            # Likewise for <row ...> tags.
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
# Conversion machinery tables: each entry is [target format number,
# [functions applied when converting to / reverting from that format]].
# NOTE(review): this excerpt elides many table rows and the "convert = ["
# opener itself; the entries below are kept verbatim as found.
3715 supported_versions = ["2.4.0", "2.4"]
3717 [545, [convert_lst_literalparam]],
3722 [550, [convert_fontenc]],
3729 [557, [convert_vcsinfo]],
3730 [558, [removeFrontMatterStyles]],
3733 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3737 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3738 [566, [convert_hebrew_parentheses]],
3744 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3745 [573, [convert_inputencoding_namechange]],
3746 [574, [convert_ruby_module, convert_utf8_japanese]],
3747 [575, [convert_lineno, convert_aaencoding]],
3749 [577, [convert_linggloss]],
3753 [581, [convert_osf]],
3754 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3755 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3757 [585, [convert_pagesizes]],
3759 [587, [convert_pagesizenames]],
3761 [589, [convert_totalheight]],
3762 [590, [convert_changebars]],
3763 [591, [convert_postpone_fragile]],
# Revert chain: walked downward from the newest format when exporting to
# an older LyX version; each step undoes the matching convert step above.
3767 revert = [[591, [revert_colrow_tracking]],
3768 [590, [revert_postpone_fragile]],
3769 [589, [revert_changebars]],
3770 [588, [revert_totalheight]],
3771 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3772 [586, [revert_pagesizenames]],
3773 [585, [revert_dupqualicites]],
3774 [584, [revert_pagesizes,revert_komafontsizes]],
3775 [583, [revert_vcsinfo_rev_abbrev]],
3776 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3777 [581, [revert_CantarellFont,revert_FiraFont]],
3778 [580, [revert_texfontopts,revert_osf]],
3779 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3780 [578, [revert_babelfont]],
3781 [577, [revert_drs]],
3782 [576, [revert_linggloss, revert_subexarg]],
3783 [575, [revert_new_languages]],
3784 [574, [revert_lineno, revert_aaencoding]],
3785 [573, [revert_ruby_module, revert_utf8_japanese]],
3786 [572, [revert_inputencoding_namechange]],
3787 [571, [revert_notoFonts]],
3788 [570, [revert_cmidruletrimming]],
3789 [569, [revert_bibfileencodings]],
3790 [568, [revert_tablestyle]],
3791 [567, [revert_soul]],
3792 [566, [revert_malayalam]],
3793 [565, [revert_hebrew_parentheses]],
3794 [564, [revert_AdobeFonts]],
3795 [563, [revert_lformatinfo]],
3796 [562, [revert_listpargs]],
3797 [561, [revert_l7ninfo]],
3798 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3799 [559, [revert_timeinfo, revert_namenoextinfo]],
3800 [558, [revert_dateinfo]],
3801 [557, [addFrontMatterStyles]],
3802 [556, [revert_vcsinfo]],
3803 [555, [revert_bibencoding]],
3804 [554, [revert_vcolumns]],
3805 [553, [revert_stretchcolumn]],
3806 [552, [revert_tuftecite]],
3807 [551, [revert_floatpclass, revert_floatalignment]],
3808 [550, [revert_nospellcheck]],
3809 [549, [revert_fontenc]],
3810 [548, []],# dummy format change
3811 [547, [revert_lscape]],
3812 [546, [revert_xcharter]],
3813 [545, [revert_paratype]],
3814 [544, [revert_lst_literalparam]]
3818 if __name__ == "__main__":