1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # fontmap: dict mapping LaTeX package name -> list of collected options.
    # NOTE(review): `pkg` is never bound in the visible code — an enclosing
    # `for pkg in fontmap:` loop (and an else-branch setting an empty
    # xoption) appears to be missing from this excerpt; confirm upstream.
    if len(fontmap[pkg]) > 0:
        # Wrap the collected options in brackets for \usepackage.
        xoption = "[" + ",".join(fontmap[pkg]) + "]"
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Get a unique key for a package plus its option list "
    # Key shape: '<package>:<opt1>-<opt2>-...'; an empty option list
    # yields '<package>:'.
    joined_opts = "-".join(options)
    return "%s:%s" % (pkg, joined_opts)
        # NOTE(review): the enclosing `class fontinfo:` / `def __init__(self):`
        # lines are not visible in this excerpt; `self.package` and
        # `self.options` are referenced below but their initialization is
        # also missing — confirm upstream.
        self.fontname = None    # key into font2pkgmap
        self.fonttype = None    # roman,sans,typewriter,math
        self.scaletype = None   # None,sf,tt
        self.scaleopt = None    # None, 'scaled', 'scale'
        self.pkgkey = None      # key into pkg2fontmap
        self.osfopt = None      # None, string: option to enable old-style figures
        self.osfdef = "false"   # "false" or "true": whether osf is the default
        # Derive the package-lookup key from package name and options
        # (in the upstream file this line belongs to a computePkgkey() method).
        self.pkgkey = createkey(self.package, self.options)
        # NOTE(review): the enclosing class and __init__ header lines are not
        # visible in this excerpt; confirm upstream.
        self.font2pkgmap = dict()  # fontname -> fontinfo
        self.pkg2fontmap = dict()  # package key (see createkey) -> fontname
        self.pkginmap = dict()     # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
        " Expand fontinfo mapping"
        # fontlist: list of fontnames, each element
        # may contain a ','-separated list of needed options
        # like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
        #
        # NOTE(review): the loop over font_list, the creation of the fontinfo
        # instance `fe`, the splitting of the list element into font_name and
        # per-font options, and the osfopt/osfdef assignments are missing from
        # this excerpt; indentation below is a best-effort reconstruction.
        fe.fonttype = font_type
        fe.scaletype = scale_type
        fe.fontname = font_name
        fe.scaleopt = scaleopt
        # NOTE(review): upstream this assignment is the `pkg == None` fallback.
        fe.package = font_name
        self.font2pkgmap[font_name] = fe
        if fe.pkgkey in self.pkg2fontmap:
            # Repeated the same entry? Check content
            if self.pkg2fontmap[fe.pkgkey] != font_name:
                document.error("Something is wrong in pkgname+options <-> fontname mapping")
        self.pkg2fontmap[fe.pkgkey] = font_name
        self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        # Map a (package, options) pair back to the LyX font name.
        # NOTE(review): the early `return None` statements and the final
        # `return fontname` are missing from this excerpt.
        pkgkey = createkey(pkg, options)
        if not pkgkey in self.pkg2fontmap:
            # (missing: return None)
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            # Inconsistent forward/backward maps — internal invariant broken.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
            # (missing: return fontname)
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    #
    # NOTE(review): the `fm = fontmapping()` construction, the first branch
    # headers (e.g. `if font == 'DejaVu':`, `elif font == 'IBM':`,
    # `elif font == 'Noto':`, `elif font == 'Fira':`) and the final
    # `return fm` are missing from this excerpt; the branch grouping below
    # is a best-effort reconstruction.
    for font in fontlist:
        fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
        fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
        fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
        fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                              'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                              'IBMPlexSerifSemibold,semibold'],
                             "roman", None, "plex-serif")
        fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                              'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                              'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                             "sans", "sf", "plex-sans", "scale")
        fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                              'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                              'IBMPlexMonoSemibold,semibold'],
                             "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
        fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                              'NotoSerifThin,thin', 'NotoSerifLight,light',
                              'NotoSerifExtralight,extralight'],
                             "roman", None, "noto-serif", None, "osf")
        fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                              'NotoSansThin,thin', 'NotoSansLight,light',
                              'NotoSansExtralight,extralight'],
                             "sans", "sf", "noto-sans", "scaled")
        fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
        elif font == 'Chivo':
            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
                                  'Chivo,regular', 'ChivoMedium,medium'],
                                 "sans", "sf", "Chivo", "scale", "oldstyle")
        elif font == 'CrimsonPro':
            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
                                  'CrimsonProMedium,medium'],
                                 "roman", None, "CrimsonPro", None, "lf", "true")
        fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
                              'FiraSansThin,thin', 'FiraSansLight,light',
                              'FiraSansExtralight,extralight',
                              'FiraSansUltralight,ultralight'],
                             "sans", "sf", "FiraSans", "scaled", "lf", "true")
        fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
def convert_fonts(document, fm, osfoption = "osf"):
    " Handle font definition (LaTeX preamble -> native) "
    # fm: fontmapping with the known fonts; osfoption: package option that
    # selects old-style figures.
    #
    # NOTE(review): numerous control-flow lines (loop initializations,
    # `if ... == -1: break/continue` guards, else-branches, and assignments
    # such as `pkg`, `opt`, `oscale`, `has_osf`, `vals`) are missing from
    # this excerpt; the indentation below is a best-effort reconstruction —
    # confirm against upstream before relying on it.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    rscaleopt = re.compile(r'^scaled?=(.*)')
    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580
    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        if mo == None or mo.group(2) == None:
        options = mo.group(2).replace(' ', '').split(",")
        while o < len(options):
            if options[o] == osfoption:
            mo = rscaleopt.search(options[o])
        if not pkg in fm.pkginmap:
        # Try with name-option combination first
        # (only one default option supported currently)
        while o < len(options):
            fn = fm.getfontname(pkg, [opt])
        fn = fm.getfontname(pkg, [])
        fn = fm.getfontname(pkg, options)
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
        fontscale = "\\font_" + fontinfo.scaletype + "_scale"
        fontinfo.scaleval = oscale
        if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
            if fontinfo.osfopt == None:
                # NOTE(review): extend() with a string appends individual
                # characters, not the whole option — looks like append() was
                # intended; confirm against upstream.
                options.extend(osfoption)
            osf = find_token(document.header, "\\font_osf false")
            osftag = "\\font_osf"
            if osf == -1 and fontinfo.fonttype != "math":
                # Try with newer format
                osftag = "\\font_" + fontinfo.fonttype + "_osf"
                osf = find_token(document.header, osftag + " false")
            document.header[osf] = osftag + " true"
        # Drop the marker comment lyx2lyx left in the preamble, if present.
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # Scale is stored as an integer percentage in the header.
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
        if haveFontOpts and fontinfo.fonttype != "math":
            fotag = "\\font_" + fontinfo.fonttype + "_opts"
            fo = find_token(document.header, fotag)
            document.header[fo] = fotag + " \"" + ",".join(options) + "\""
            # Sensible place to insert tag
            fo = find_token(document.header, "\\font_sf_scale")
            document.warning("Malformed LyX document! Missing \\font_sf_scale")
            document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    " Revert native font definition to LaTeX "
    # fontlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    #
    # NOTE(review): loop initialization, several `== -1` guards/break lines,
    # and assignments such as `oldval` and `xval1` are missing from this
    # excerpt; indentation below is a best-effort reconstruction.
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # Reset the header line to the default font.
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        if fontinfo.osfopt != None:
            if fontinfo.osfdef == "true":
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    # auto -> auto-legacy, default -> auto-legacy-plain.
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): the `if i == -1: return` guard is not visible in this excerpt.
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    # Inverse of convert_inputencoding_namechange: auto-legacy-plain ->
    # default, auto-legacy -> auto. Order matters: the longer name first.
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): the `if i == -1: return` guard is not visible in this excerpt.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Nothing to do for documents using non-TeX (system) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    noto_map = createFontMapping(['Noto'])
    convert_fonts(document, noto_map)
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "
    # NOTE(review): the `fontmap = dict()` initialization is not visible in
    # this excerpt; confirm upstream.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Nothing to do for documents using non-TeX (system) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "
    # NOTE(review): the `fontmap = dict()` initialization is not visible in
    # this excerpt; confirm upstream.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Nothing to do for documents using non-TeX (system) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    adobe_map = createFontMapping(['Adobe'])
    convert_fonts(document, adobe_map)
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "
    # NOTE(review): the `fontmap = dict()` initialization is not visible in
    # this excerpt; confirm upstream.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"

    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the loop setup (`i = 0` / `while True:`), the
    # `if i == -1: return` guard, the `continue` after the membership test,
    # and the `j += 1` loop body are missing from this excerpt.
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Also swallow trailing blank lines after the removed layout.
    while document.body[j+1].strip() == '':
    document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"
    # NOTE(review): numerous lines are missing from this excerpt: the early
    # `return`, `above`/`below` initialization inside insertFrontmatter, the
    # note text lines in the inserted layout, the outer loop setup, the
    # `first` bookkeeping, and the guards — indentation below is a
    # best-effort reconstruction.
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Insert a Begin/EndFrontmatter layout (with an explanatory note
        # inset) around `line`, trimming surrounding blank lines.
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
            '\\begin_inset Note Note',
            '\\begin_layout Plain Layout',
            '\\end_inset', '', '',

    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "
    # NOTE(review): the loop setup (`i = 0` / `while True:`), the `break`
    # guards, and the `i += 1` advancing the header scan are missing from
    # this excerpt.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
    # Skip to the end of the inset parameter block (first blank line),
    # then insert the new parameter there.
    while i < j and document.body[i].strip() != '':
    document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "
    # NOTE(review): the loop setup and `break` guards are missing from this
    # excerpt.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
    # Drop the 'literal' parameter line within the inset bounds.
    del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "
    # FIX: the original compared the float `sf_scale` against the string
    # "100.0", which is always unequal in Python 3, so a scaled= option was
    # emitted even for the default 100% scale. Compare against the float.
    #
    # NOTE(review): several guard lines, else-branches and assignments
    # (e.g. the `val = sfscale[1]` extraction and the sfoption/ttoption
    # defaults) are missing from this excerpt; indentation below is a
    # best-effort reconstruction — confirm against upstream.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        # The scale line carries two values (document scale, GUI scale).
        sfscale = document.header[sfval].split()
        document.header[sfval] = " ".join(sfscale)
        sf_scale = float(val)
        document.warning("Invalid font_sf_scale value: " + val)
        if sf_scale != 100.0:
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        if i1 != -1 and i2 != -1 and i3!= -1:
            # All three families are ParaType: load the umbrella package.
            add_to_preamble(document, ["\\usepackage{paratype}"])
        add_to_preamble(document, ["\\usepackage{PTSerif}"])
        document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
        add_to_preamble(document, ["\\usepackage{PTSans}"])
        document.header[j] = document.header[j].replace("PTSans-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
        add_to_preamble(document, ["\\usepackage{PTMono}"])
        document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "
    # NOTE(review): the `if i == -1: return` guard, the `return` after the
    # system-fonts check, the `options` assembly, and related lines are
    # missing from this excerpt.
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "
    # NOTE(review): the early `return`, the loop setup, the `break` guards
    # and the `else:` before the non-floating branch are missing from this
    # excerpt; indentation is a best-effort reconstruction.
    if not "landscape" in document.get_module_list():
    i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
        # Floating variant: wrap in afterpage so the rotation starts on a
        # fresh page.
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
        document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{afterpage}"])
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
    document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    " Convert default fontenc setting "
    # 'global' was renamed to 'auto' in the 2.4 file format.
    i = find_token(document.header, "\\fontencoding global", 0)
    # NOTE(review): the `if i == -1: return` guard is not visible in this excerpt.
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "
    # Inverse of convert_fontenc: 'auto' back to 'global'.
    i = find_token(document.header, "\\fontencoding auto", 0)
    # NOTE(review): the `if i == -1: return` guard is not visible in this excerpt.
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "
    # NOTE(review): the loop setup, the `break` guard and the deletion of the
    # found token are missing from this excerpt.
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "
    del_token(document.header, "\\float_placement class")
    # NOTE(review): the loop setup, the `break` guard, and the handling of
    # the found 'placement class'/'placement document' lines are missing
    # from this excerpt.
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    k = find_token(document.body, 'placement class', i, i + 2)
    k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "
    # Document-wide alignment default (deleted from the header here).
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    # NOTE(review): the loop setup, several `== -1` guards with break, the
    # deletion of the per-float alignment line, and the `else` fallback for
    # `alcmd` are missing from this excerpt.
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    k = find_token(document.body, 'alignment', i, i+4)
    alignment = get_value(document.body, "alignment", k)
    if alignment == "document":
        # 'document' means: inherit the document-wide default.
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    # Insert the alignment command right after the float's layout line.
    document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "
    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    # NOTE(review): the early `return`, the loop setup, `continue`/`break`
    # guards, the `cmd` check, and the `res = "\\\\" + cmd` initialization
    # are missing from this excerpt.
    i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" %(i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
    cmd = get_value(document.body, "LatexCommand", k)
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" %(i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): the loop setup and `break` guards are missing from this
    # excerpt.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    for k in range(i, j):
        if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
            document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
            document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): the initializations (`i`, `needvarwidth`, `needarray`),
    # the outer `while True:` loop, several `== -1` guards, `m += 1`
    # advances, and the needvarwidth/needarray flag updates are missing from
    # this excerpt; indentation is a best-effort reconstruction.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
        m = find_token(document.body, "<column", m)
        width = get_option_value(document.body[m], 'width')
        varwidth = get_option_value(document.body[m], 'varwidth')
        alignment = get_option_value(document.body[m], 'alignment')
        special = get_option_value(document.body[m], 'special')
        col_info.append([width, varwidth, alignment, special, m])
    # Walk every cell and check whether it needs the varwidth treatment.
    for row in range(nrows):
        for col in range(ncols):
            m = find_token(document.body, "<cell", m)
            multicolumn = get_option_value(document.body[m], 'multicolumn')
            multirow = get_option_value(document.body[m], 'multirow')
            width = get_option_value(document.body[m], 'width')
            rotate = get_option_value(document.body[m], 'rotate')
            # Check for: linebreaks, multipars, non-standard environments
            endcell = find_token(document.body, "</cell>", begcell)
            if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
            elif count_pars_in_inset(document.body, begcell + 2) > 1:
            elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
            if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                    alignment = col_info[col][2]
                    col_line = col_info[col][4]
                    if alignment == "center":
                        vval = ">{\\centering}"
                    elif alignment == "left":
                        vval = ">{\\raggedright}"
                    elif alignment == "right":
                        vval = ">{\\raggedleft}"
                    vval += "V{\\linewidth}"
                    # Rewrite the column tag with the special= V-column spec.
                    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                # with newlines, and we do not want that)
                endcell = find_token(document.body, "</cell>", begcell)
                nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                nle = find_end_of_inset(document.body, nl)
                del(document.body[nle:nle+1])
                document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "
    # NOTE(review): several guards, the `encodings = {` dict header, many
    # dict entries, the loop setup, and `continue`/`break` lines are missing
    # from this excerpt; indentation is a best-effort reconstruction.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "utf8-platex" : "utf8",
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # For biblatex, the encoding goes into the biblio options instead of ERT.
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
        document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # For bibtex, wrap the inset in an \inputencoding group via ERT.
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "
    # NOTE(review): the `types = {` dict header and closing brace, the loop
    # setup, and the `continue`/guard lines are missing from this excerpt.
    # Maps buffer-info argument -> vcs-info argument.
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        "vcs-time" : "time",
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in list(types.keys()):
    # Rewrite the inset as a dedicated vcs-type Info inset.
    document.body[tp] = "type \"vcs\""
    document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "
    # NOTE(review): the loop setup and `continue`/guard lines are missing
    # from this excerpt.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in args:
        document.warning("Malformed Info inset. Invalid vcs arg.")
    # Fold the vcs inset back into a buffer-type Info inset.
    document.body[tp] = "type \"buffer\""
    document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "
    # NOTE(review): the loop setup and `continue`/guard lines are missing
    # from this excerpt.
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if( argv == "revision-abbrev" ):
        # Older formats do not know abbreviated revisions; fall back.
        document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    " Revert date info insets to static text. "

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # fix: the loclong format contained a stray "%de" ("%d %b %Y" column was fine);
        # "%de" would emit the day number followed by a literal "e".
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        # fix: same stray "%de" in the long format.
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    }

    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
    if lang == "":
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # robustness fix: fall back to English formats instead of raising
        # KeyError below for languages missing from the table.
        document.warning("No date formats for language %s. Using English." % lang)
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args look like "<format>@<iso date>"
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #    dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # fix: datetime.date has no isodate() method; isoformat() is the
            # correct API for YYYY-MM-DD output.
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate Qt date-format letters to strftime syntax.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            fmt = re.sub('[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        # Replace the whole inset with one line of static text.
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    " Revert time info insets to static text. "

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # hence expand to an empty string).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # robustness fix: fall back to English instead of a KeyError below.
        document.warning("No time formats for language %s. Using English." % lang)
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #    tme = datetime.fromtimestamp(os.path.getmtime(document.dir)).time()
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt time-format letters to strftime syntax.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # fix: this function formats a time object; `dte` (the date
            # object used in revert_dateinfo) is undefined here.
            result = tme.strftime(fmt)
        # fix: assigning a plain string to a list slice splices it in
        # character by character; the replacement must be a one-element list.
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    " Merge buffer Info inset type name-noext to name. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # Only buffer-type Info insets carry a name-noext argument.
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    " Revert l7n Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # fix: assigning a bare string to a list slice splices it in one
        # character per line; the replacement must be a one-element list.
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    " Reverts listpreamble arguments to TeX-code "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        # Prepend the argument content as ERT at the paragraph start.
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    " Revert layout format Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # layoutformat can only be 69 in 2.4.
        # fix: a bare string spliced into the slice would become ['6', '9'];
        # the replacement must be a one-element list.
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # fix: lstrip('\\lang ') strips any of the characters
            # {'\\', 'l', 'a', 'n', 'g', ' '} and therefore mangles
            # language names such as "ngerman" -> "erman" or
            # "galician" -> "ician"; slice off the fixed prefix instead.
            current_languages[-1] = line[6:].strip()
        elif line.startswith('\\begin_layout'):
            # Nested layouts inherit the current language.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' via a NUL placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The parenthesis swap is its own inverse, so reverting simply runs
    the conversion again; this wrapper exists only to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "

    # Delegate to the generic language-revert helper with Malayalam as
    # both babel and polyglossia target.
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    " Revert soul module flex insets to ERT "

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # Load the soul package if any of its insets is used.
    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlighting additionally needs the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    " Remove tablestyle params "

    i = find_token(document.header, "\\tablestyle")
    if i != -1:
        del document.header[i]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # Map lyx to latex encoding names
    # NOTE(review): this table is shadowed by the per-inset `encodings`
    # string read in the loop below and thus appears unused here —
    # confirm against the original revision whether encmap values were
    # meant to be translated through it.
    encodings = {
        "utf8" : "utf8",
        "utf8x" : "utf8x",
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437" : "cp437",
        "cp437de" : "cp437de",
        "cp850" : "cp850",
        "cp852" : "cp852",
        "cp855" : "cp855",
        "cp858" : "cp858",
        "cp862" : "cp862",
        "cp865" : "cp865",
        "cp866" : "cp866",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "pt154" : "pt154",
        "utf8-platex" : "utf8",
        "ascii" : "ascii"
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            continue
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        if not encodings:
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # The stored value is "<file> <enc>" pairs joined by tabs.
            enclist = encodings.split("\t")
            encmap = dict()
            for pp in enclist:
                ppp = pp.split(" ", 1)
                # robustness fix: skip malformed entries without an
                # encoding part instead of raising IndexError.
                if len(ppp) < 2:
                    continue
                encmap[ppp[0]] = ppp[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    " Remove \\cmidrule trimming "

    # FIXME: Revert to TeX code?
    # Hoisted out of the loop: the pattern is loop-invariant.
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# Local layout emulating the Ruby inset for documents written before the
# ruby module existed.
# NOTE(review): several lines of this constant were reconstructed; the
# exact strings (including internal whitespace) should be verified against
# the original revision, since convert/revert_ruby_module match the local
# layout content against them.
ruby_inset_def = [
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r'  LyxType       charstyle',
    r'  LatexType     command',
    r'  LatexName     ruby',
    r'  HTMLTag       ruby',
    r'  HTMLAttr      ""',
    r'  HTMLInnerTag  rb',
    r'  HTMLInnerAttr ""',
    r'  BgColor       none',
    r'  LabelString   "Ruby"',
    r'  Decoration    Conglomerate',
    r'  Preamble',
    r'    \ifdefined\kanjiskip',
    r'      \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r'    \else \ifdefined\luatexversion',
    r'      \usepackage{luatexja-ruby}',
    r'    \else \ifdefined\XeTeXversion',
    r'      \usepackage{ruby}%',
    r'    \fi\fi\fi',
    r'    \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r'  EndPreamble',
    r'  Argument  post:1',
    r'    LabelString  "ruby text"',
    r'    MenuString  "Ruby Text|R"',
    r'    Tooltip    "Reading aid (ruby, furigana) for Chinese characters."',
    r'    Decoration  Conglomerate',
    r'    Font',
    r'      Size    tiny',
    r'    EndFont',
    r'    LabelFont',
    r'      Size    tiny',
    r'    EndFont',
    r'    Mandatory  1',
    r'  EndArgument',
    'End',
]
def convert_ruby_module(document):
    " Use ruby module instead of local module definition "
    # Only add the module if the local definition was actually present.
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    " Replace ruby module with local module definition "
    # Only append the local layout if the module was actually in use.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    " Use generic utf8 with Japanese documents."
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    # Collapse the two Japanese-specific utf8 variants to plain utf8.
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    " Use Japanese utf8 variants with Japanese documents."
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."

    # Read (and drop) the options first so the header token is always
    # removed, even when line numbering is off.
    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."
    use_lineno = 0
    options = ""
    # Look for the preamble snippet written by revert_lineno.
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Insert the new header settings before \index.
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    " Convert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true", 0)
    if i == -1:
        return
    j = find_token(document.header, "\\inputencoding", 0)
    if j == -1:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    val = get_value(document.header, "\\inputencoding", j)
    if val == "auto-legacy" or val == "latin9":
        # Keep the old latin9 behavior explicitly via a class option.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            document.header.insert(i, "\\options latin9")
        else:
            document.header[k] = document.header[k] + ",latin9"
def revert_aaencoding(document):
    " Revert default document option due to encoding change in aa class. "

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true", 0)
    if i == -1:
        return
    j = find_token(document.header, "\\inputencoding", 0)
    if j == -1:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    val = get_value(document.header, "\\inputencoding", j)
    if val == "utf8":
        # Keep utf8 explicitly via a class option for the old aa class.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            document.header.insert(i, "\\options utf8")
        else:
            document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    #                 lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        # fix: record the language named on this line; the original added
        # the (constant) main document language instead.
        val = document.body[i][6:].strip()
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
    used_languages.discard("korean")

    for lang in used_languages:
        # fix: was `revert(...)` (undefined name) and missed the document arg.
        revert_language(document, lang, *new_languages[lang])
# Local layout emulating the pre-2.4 linguistics Gloss inset.
# NOTE(review): several lines of this constant were reconstructed; verify
# the exact strings against the original revision, since revert_linggloss
# deletes the local layout by matching this content.
gloss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r'  LyXType               custom',
    r'  LabelString           "Gloss (old version)"',
    r'  MenuString            "Gloss (old version)"',
    r'  LatexType             environment',
    r'  LatexName             linggloss',
    r'  Decoration            minimalistic',
    r'  LabelFont',
    r'    Size                Small',
    r'  EndFont',
    r'  MultiPar              true',
    r'  CustomPars            false',
    r'  ForcePlain            true',
    r'  ParbreakIsNewline     true',
    r'  FreeSpacing           true',
    r'  Requires              covington',
    r'  Preamble',
    r'          \def\glosstr{}',
    r'          \@ifundefined{linggloss}{%',
    r'          \newenvironment{linggloss}[2][]{',
    r'             \def\glosstr{\glt #1}%',
    r'             \gll #2}',
    r'          {\glosstr\glend}}{}',
    r'  EndPreamble',
    r'  InToc                 true',
    r'  ResetsFont            true',
    r'  Argument 1',
    r'          Decoration    conglomerate',
    r'          LabelString   "Translation"',
    r'          MenuString    "Glosse Translation|s"',
    r'          Tooltip       "Add a translation for the glosse"',
    r'  EndArgument',
    r'End'
]
# Local layout emulating the pre-2.4 linguistics Tri-Gloss inset.
# NOTE(review): several lines of this constant were reconstructed; verify
# the exact strings against the original revision, since revert_linggloss
# deletes the local layout by matching this content.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r'  LyXType               custom',
    r'  LabelString           "Tri-Gloss (old version)"',
    r'  MenuString            "Tri-Gloss (old version)"',
    r'  LatexType             environment',
    r'  LatexName             lingglosss',
    r'  Decoration            minimalistic',
    r'  LabelFont',
    r'    Size                Small',
    r'  EndFont',
    r'  MultiPar              true',
    r'  CustomPars            false',
    r'  ForcePlain            true',
    r'  ParbreakIsNewline     true',
    r'  FreeSpacing           true',
    r'  InToc                 true',
    r'  Requires              covington',
    r'  Preamble',
    r'          \def\glosstr{}',
    r'          \@ifundefined{lingglosss}{%',
    r'          \newenvironment{lingglosss}[2][]{',
    r'             \def\glosstr{\glt #1}%',
    r'             \glll #2}',
    r'          {\glosstr\glend}}{}',
    r'  EndPreamble',
    r'  ResetsFont            true',
    r'  Argument 1',
    r'          Decoration    conglomerate',
    r'          LabelString   "Translation"',
    r'          MenuString    "Glosse Translation|s"',
    r'          Tooltip       "Add a translation for the glosse"',
    r'  EndArgument',
    r'End'
]
def convert_linggloss(document):
    " Move old ling glosses to local layout "
    # Append the matching legacy layout for each gloss inset type found.
    for token, layout in (('\\begin_inset Flex Glosse', gloss_inset_def),
                          ('\\begin_inset Flex Tri-Glosse', glosss_inset_def)):
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout)
# Revert the new "Interlinear Gloss (2/3 Lines)" flex insets back to raw
# LaTeX (ERT) covington commands, removing the local layouts added by
# convert_linggloss.
# NOTE(review): residual line numbers jump throughout this function — early
# `return`s, loop setup (`i = -1`), the `cmd` assignment and several guard
# branches are among the elided lines; the visible code is not complete.
1980 def revert_linggloss(document):
1981 " Revert to old ling gloss definitions "
1982 if not "linguistics" in document.get_module_list():
1984 document.del_local_layout(gloss_inset_def)
1985 document.del_local_layout(glosss_inset_def)
1988 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1989 for glosse in glosses:
1992 i = find_token(document.body, glosse, i+1)
1995 j = find_end_of_inset(document.body, i)
1997 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): extract its plain-layout content.
2000 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2001 endarg = find_end_of_inset(document.body, arg)
2004 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2005 if argbeginPlain == -1:
2006 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2008 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2009 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2011 # remove Arg insets and paragraph, if it only contains this inset
2012 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2013 del document.body[arg - 1 : endarg + 4]
2015 del document.body[arg : endarg + 1]
# First mandatory gloss line (post:1).
2017 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2018 endarg = find_end_of_inset(document.body, arg)
2021 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2022 if argbeginPlain == -1:
2023 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2025 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2026 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2028 # remove Arg insets and paragraph, if it only contains this inset
2029 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2030 del document.body[arg - 1 : endarg + 4]
2032 del document.body[arg : endarg + 1]
# Second mandatory gloss line (post:2).
2034 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2035 endarg = find_end_of_inset(document.body, arg)
2038 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2039 if argbeginPlain == -1:
2040 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2042 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2043 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2045 # remove Arg insets and paragraph, if it only contains this inset
2046 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2047 del document.body[arg - 1 : endarg + 4]
2049 del document.body[arg : endarg + 1]
# Third gloss line (post:3) — only used by the 3-line variant below.
2051 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2052 endarg = find_end_of_inset(document.body, arg)
2055 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2056 if argbeginPlain == -1:
2057 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2059 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2060 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2062 # remove Arg insets and paragraph, if it only contains this inset
2063 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2064 del document.body[arg - 1 : endarg + 4]
2066 del document.body[arg : endarg + 1]
# Select the LaTeX command; presumably `cmd` defaults to "\gloss" and the
# 3-line inset switches it to "\trigloss" — the assignments are elided here.
2069 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2072 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2073 endInset = find_end_of_inset(document.body, i)
2074 endPlain = find_end_of_layout(document.body, beginPlain)
2075 precontent = put_cmd_in_ert(cmd)
2076 if len(optargcontent) > 0:
2077 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2078 precontent += put_cmd_in_ert("{")
2080 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2081 if cmd == "\\trigloss":
2082 postcontent += put_cmd_in_ert("}{") + marg3content
2083 postcontent += put_cmd_in_ert("}")
# Splice the ERT around the original inset content, then drop the inset shell.
2085 document.body[endPlain:endInset + 1] = postcontent
2086 document.body[beginPlain + 1:beginPlain] = precontent
2087 del document.body[i : beginPlain + 1]
2089 document.append_local_layout("Requires covington")
# Revert Subexample layouts carrying an Argument inset to an explicit ERT
# \begin{subexamples}[...] ... \end{subexamples} environment (covington).
# NOTE(review): residual line numbers jump — early returns, loop setup and
# several guard/continue branches are elided from this view.
2094 def revert_subexarg(document):
2095 " Revert linguistic subexamples with argument to ERT "
2097 if not "linguistics" in document.get_module_list():
2103 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2106 j = find_end_of_layout(document.body, i)
2108 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extend j over a run of consecutive Subexample paragraphs so the whole
# group is wrapped in a single environment.
2111 # check for consecutive layouts
2112 k = find_token(document.body, "\\begin_layout", j)
2113 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2115 j = find_end_of_layout(document.body, k)
2117 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2120 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2124 endarg = find_end_of_inset(document.body, arg)
2126 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2127 if argbeginPlain == -1:
2128 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2130 argendPlain = find_end_of_inset(document.body, argbeginPlain)
# Argument content is converted to LaTeX since it ends up inside [...].
2131 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2133 # remove Arg insets and paragraph, if it only contains this inset
2134 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2135 del document.body[arg - 1 : endarg + 4]
2137 del document.body[arg : endarg + 1]
2139 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2141 # re-find end of layout
2142 j = find_end_of_layout(document.body, i)
2144 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2147 # check for consecutive layouts
2148 k = find_token(document.body, "\\begin_layout", j)
2149 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each subexample paragraph becomes a Standard paragraph with an ERT \item.
2151 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2152 j = find_end_of_layout(document.body, k)
2154 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2157 endev = put_cmd_in_ert("\\end{subexamples}")
2159 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2160 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2161 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2163 document.append_local_layout("Requires covington")
# Revert the linguistics DRS (Discourse Representation Structure) flex
# insets to ERT commands from the drs/covington packages.
# NOTE(review): residual line numbers jump — early returns, loop setup,
# `cmd` default/assignments in the elif-chain and several guards are elided.
2167 def revert_drs(document):
2168 " Revert DRS insets (linguistics) to ERT "
2170 if not "linguistics" in document.get_module_list():
2174 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2175 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2176 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2177 "\\begin_inset Flex SDRS"]
2181 i = find_token(document.body, drs, i+1)
2184 j = find_end_of_inset(document.body, i)
2186 document.warning("Malformed LyX document: Can't find end of DRS inset")
2189 # Check for arguments
# Argument 1: first pre-argument (referents); stored in prearg1content.
2190 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2191 endarg = find_end_of_inset(document.body, arg)
2194 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2195 if argbeginPlain == -1:
2196 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2198 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2199 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2201 # remove Arg insets and paragraph, if it only contains this inset
2202 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2203 del document.body[arg - 1 : endarg + 4]
2205 del document.body[arg : endarg + 1]
# j is re-computed after each deletion because indices shifted.
2208 j = find_end_of_inset(document.body, i)
2210 document.warning("Malformed LyX document: Can't find end of DRS inset")
2213 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2214 endarg = find_end_of_inset(document.body, arg)
2217 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2218 if argbeginPlain == -1:
2219 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2221 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2222 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2224 # remove Arg insets and paragraph, if it only contains this inset
2225 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2226 del document.body[arg - 1 : endarg + 4]
2228 del document.body[arg : endarg + 1]
2231 j = find_end_of_inset(document.body, i)
2233 document.warning("Malformed LyX document: Can't find end of DRS inset")
# post:1 .. post:4 are the trailing arguments; each defaults to [] so the
# assembly code below works when the argument is absent.
2236 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2237 endarg = find_end_of_inset(document.body, arg)
2238 postarg1content = []
2240 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2241 if argbeginPlain == -1:
2242 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2244 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2245 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2247 # remove Arg insets and paragraph, if it only contains this inset
2248 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2249 del document.body[arg - 1 : endarg + 4]
2251 del document.body[arg : endarg + 1]
2254 j = find_end_of_inset(document.body, i)
2256 document.warning("Malformed LyX document: Can't find end of DRS inset")
2259 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2260 endarg = find_end_of_inset(document.body, arg)
2261 postarg2content = []
2263 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2264 if argbeginPlain == -1:
2265 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2267 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2268 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2270 # remove Arg insets and paragraph, if it only contains this inset
2271 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2272 del document.body[arg - 1 : endarg + 4]
2274 del document.body[arg : endarg + 1]
2277 j = find_end_of_inset(document.body, i)
2279 document.warning("Malformed LyX document: Can't find end of DRS inset")
2282 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2283 endarg = find_end_of_inset(document.body, arg)
2284 postarg3content = []
2286 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2287 if argbeginPlain == -1:
2288 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2290 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2291 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2293 # remove Arg insets and paragraph, if it only contains this inset
2294 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2295 del document.body[arg - 1 : endarg + 4]
2297 del document.body[arg : endarg + 1]
2300 j = find_end_of_inset(document.body, i)
2302 document.warning("Malformed LyX document: Can't find end of DRS inset")
2305 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2306 endarg = find_end_of_inset(document.body, arg)
2307 postarg4content = []
2309 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2310 if argbeginPlain == -1:
2311 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2313 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2314 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2316 # remove Arg insets and paragraph, if it only contains this inset
2317 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2318 del document.body[arg - 1 : endarg + 4]
2320 del document.body[arg : endarg + 1]
2322 # The respective LaTeX command
# The command assignments inside each branch are elided in this view; the
# later comparisons show at least "\qdrs", "\condrs" and "\ifdrs" exist.
2324 if drs == "\\begin_inset Flex DRS*":
2326 elif drs == "\\begin_inset Flex IfThen-DRS":
2328 elif drs == "\\begin_inset Flex Cond-DRS":
2330 elif drs == "\\begin_inset Flex QDRS":
2332 elif drs == "\\begin_inset Flex NegDRS":
2334 elif drs == "\\begin_inset Flex SDRS":
2337 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2338 endInset = find_end_of_inset(document.body, i)
2339 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2340 precontent = put_cmd_in_ert(cmd)
2341 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2342 if drs == "\\begin_inset Flex SDRS":
2343 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2344 precontent += put_cmd_in_ert("{")
2347 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2348 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2349 if cmd == "\\condrs" or cmd == "\\qdrs":
2350 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2352 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2354 postcontent = put_cmd_in_ert("}")
2356 document.body[endPlain:endInset + 1] = postcontent
2357 document.body[beginPlain + 1:beginPlain] = precontent
2358 del document.body[i : beginPlain + 1]
2360 document.append_local_layout("Provides covington 1")
2361 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Revert the \babelfont mechanism: reset all three header font settings to
# "default" and emit equivalent \babelfont preamble code (XeTeX/LuaTeX only,
# babel language package only).
# NOTE(review): residual line numbers jump — early `return`s after the
# warnings/guards and several try/except lines around float() are elided.
2367 def revert_babelfont(document):
2368 " Reverts the use of \\babelfont to user preamble "
2370 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2372 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2374 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2376 i = find_token(document.header, '\\language_package', 0)
2378 document.warning("Malformed LyX document: Missing \\language_package.")
2380 if get_value(document.header, "\\language_package", 0) != "babel":
2383 # check font settings
2385 roman = sans = typew = "default"
2387 sf_scale = tt_scale = 100.0
2389 j = find_token(document.header, "\\font_roman", 0)
2391 document.warning("Malformed LyX document: Missing \\font_roman.")
2393 # We need to use this regex since split() does not handle quote protection
2394 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2395 roman = romanfont[2].strip('"')
2396 romanfont[2] = '"default"'
2397 document.header[j] = " ".join(romanfont)
2399 j = find_token(document.header, "\\font_sans", 0)
2401 document.warning("Malformed LyX document: Missing \\font_sans.")
2403 # We need to use this regex since split() does not handle quote protection
2404 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2405 sans = sansfont[2].strip('"')
2406 sansfont[2] = '"default"'
2407 document.header[j] = " ".join(sansfont)
2409 j = find_token(document.header, "\\font_typewriter", 0)
2411 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2413 # We need to use this regex since split() does not handle quote protection
2414 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2415 typew = ttfont[2].strip('"')
2416 ttfont[2] = '"default"'
2417 document.header[j] = " ".join(ttfont)
2419 i = find_token(document.header, "\\font_osf", 0)
2421 document.warning("Malformed LyX document: Missing \\font_osf.")
2423 osf = str2bool(get_value(document.header, "\\font_osf", i))
2425 j = find_token(document.header, "\\font_sf_scale", 0)
2427 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2429 sfscale = document.header[j].split()
2432 document.header[j] = " ".join(sfscale)
2435 sf_scale = float(val)
2437 document.warning("Invalid font_sf_scale value: " + val)
2439 j = find_token(document.header, "\\font_tt_scale", 0)
2441 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2443 ttscale = document.header[j].split()
2446 document.header[j] = " ".join(ttscale)
2449 tt_scale = float(val)
2451 document.warning("Invalid font_tt_scale value: " + val)
2453 # set preamble stuff
2454 pretext = ['%% This document must be processed with xelatex or lualatex!']
2455 pretext.append('\\AtBeginDocument{%')
2456 if roman != "default":
2457 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2458 if sans != "default":
2459 sf = '\\babelfont{sf}['
2460 if sf_scale != 100.0:
2461 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2462 sf += 'Mapping=tex-text]{' + sans + '}'
2464 if typew != "default":
2465 tw = '\\babelfont{tt}'
2466 if tt_scale != 100.0:
2467 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2468 tw += '{' + typew + '}'
2471 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2473 insert_to_preamble(document, pretext)
# Revert the native MinionPro font selection (with \font_roman_opts) to a
# plain \usepackage[...]{MinionPro} in the preamble (TeX fonts only).
# NOTE(review): residual line numbers jump — early `return`s and the lines
# that merge `opts`/osf into `preamble` are elided from this view.
2476 def revert_minionpro(document):
2477 " Revert native MinionPro font definition (with extra options) to LaTeX "
2479 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2481 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2483 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2486 regexp = re.compile(r'(\\font_roman_opts)')
2487 x = find_re(document.header, regexp, 0)
2491 # We need to use this regex since split() does not handle quote protection
2492 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2493 opts = romanopts[1].strip('"')
2495 i = find_token(document.header, "\\font_roman", 0)
2497 document.warning("Malformed LyX document: Missing \\font_roman.")
2500 # We need to use this regex since split() does not handle quote protection
2501 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2502 roman = romanfont[1].strip('"')
2503 if roman != "minionpro":
2505 romanfont[1] = '"default"'
2506 document.header[i] = " ".join(romanfont)
2508 j = find_token(document.header, "\\font_osf true", 0)
2511 preamble = "\\usepackage["
2513 document.header[j] = "\\font_osf false"
2517 preamble += "]{MinionPro}"
2518 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2519 del document.header[x]
# Revert \font_*_opts headers by emitting \setxxxfont (fontspec) or
# \babelfont preamble commands for roman, sans and typewriter families.
# NOTE(review): residual line numbers jump — guards (`if i != -1:`),
# branch selection between babel/fontspec and the lines appending `opts`
# and closing braces to `preamble` are elided from this view.
2522 def revert_font_opts(document):
2523 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2525 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2527 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2529 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2530 i = find_token(document.header, '\\language_package', 0)
2532 document.warning("Malformed LyX document: Missing \\language_package.")
2534 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman family ---
2537 regexp = re.compile(r'(\\font_roman_opts)')
2538 i = find_re(document.header, regexp, 0)
2540 # We need to use this regex since split() does not handle quote protection
2541 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2542 opts = romanopts[1].strip('"')
2543 del document.header[i]
2545 regexp = re.compile(r'(\\font_roman)')
2546 i = find_re(document.header, regexp, 0)
2548 # We need to use this regex since split() does not handle quote protection
2549 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2550 font = romanfont[2].strip('"')
2551 romanfont[2] = '"default"'
2552 document.header[i] = " ".join(romanfont)
2553 if font != "default":
2555 preamble = "\\babelfont{rm}["
2557 preamble = "\\setmainfont["
2560 preamble += "Mapping=tex-text]{"
2563 add_to_preamble(document, [preamble])
# --- sans family ---
2566 regexp = re.compile(r'(\\font_sans_opts)')
2567 i = find_re(document.header, regexp, 0)
2570 # We need to use this regex since split() does not handle quote protection
2571 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2572 opts = sfopts[1].strip('"')
2573 del document.header[i]
2575 regexp = re.compile(r'(\\font_sf_scale)')
2576 i = find_re(document.header, regexp, 0)
2578 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2579 regexp = re.compile(r'(\\font_sans)')
2580 i = find_re(document.header, regexp, 0)
2582 # We need to use this regex since split() does not handle quote protection
2583 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2584 font = sffont[2].strip('"')
2585 sffont[2] = '"default"'
2586 document.header[i] = " ".join(sffont)
2587 if font != "default":
2589 preamble = "\\babelfont{sf}["
2591 preamble = "\\setsansfont["
# "Scale=0." + scaleval relies on scaleval being the percentage digits.
2595 preamble += "Scale=0."
2596 preamble += scaleval
2598 preamble += "Mapping=tex-text]{"
2601 add_to_preamble(document, [preamble])
# --- typewriter family ---
2604 regexp = re.compile(r'(\\font_typewriter_opts)')
2605 i = find_re(document.header, regexp, 0)
2608 # We need to use this regex since split() does not handle quote protection
2609 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2610 opts = ttopts[1].strip('"')
2611 del document.header[i]
2613 regexp = re.compile(r'(\\font_tt_scale)')
2614 i = find_re(document.header, regexp, 0)
2616 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2617 regexp = re.compile(r'(\\font_typewriter)')
2618 i = find_re(document.header, regexp, 0)
2620 # We need to use this regex since split() does not handle quote protection
2621 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2622 font = ttfont[2].strip('"')
2623 ttfont[2] = '"default"'
2624 document.header[i] = " ".join(ttfont)
2625 if font != "default":
2627 preamble = "\\babelfont{tt}["
2629 preamble = "\\setmonofont["
2633 preamble += "Scale=0."
2634 preamble += scaleval
2636 preamble += "Mapping=tex-text]{"
2639 add_to_preamble(document, [preamble])
# Revert the "complete" Noto TeX-font setup (roman NotoSerif-TLF with all
# three families set to Noto) plus extra options to \usepackage[...]{noto}.
# NOTE(review): residual line numbers jump — early `return`s, the `osf`
# computation and the option-merging around `preamble` are elided.
2642 def revert_plainNotoFonts_xopts(document):
2643 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2645 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2647 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2649 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2653 y = find_token(document.header, "\\font_osf true", 0)
2657 regexp = re.compile(r'(\\font_roman_opts)')
2658 x = find_re(document.header, regexp, 0)
2659 if x == -1 and not osf:
2664 # We need to use this regex since split() does not handle quote protection
2665 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2666 opts = romanopts[1].strip('"')
2672 i = find_token(document.header, "\\font_roman", 0)
2676 # We need to use this regex since split() does not handle quote protection
2677 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2678 roman = romanfont[1].strip('"')
2679 if roman != "NotoSerif-TLF":
2682 j = find_token(document.header, "\\font_sans", 0)
2686 # We need to use this regex since split() does not handle quote protection
2687 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2688 sf = sffont[1].strip('"')
2692 j = find_token(document.header, "\\font_typewriter", 0)
2696 # We need to use this regex since split() does not handle quote protection
2697 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2698 tt = ttfont[1].strip('"')
2702 # So we have noto as "complete font"
2703 romanfont[1] = '"default"'
2704 document.header[i] = " ".join(romanfont)
2706 preamble = "\\usepackage["
2708 preamble += "]{noto}"
2709 add_to_preamble(document, [preamble])
2711 document.header[y] = "\\font_osf false"
2713 del document.header[x]
# Revert the extended Noto TeX-font variants via the generic font-mapping
# machinery; collected packages/options go to the preamble.
# NOTE(review): the early `return`s and the `fontmap = dict()` initialisation
# are among the elided lines (residual numbers jump 2721->2723->2727).
2716 def revert_notoFonts_xopts(document):
2717 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2719 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2721 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2723 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2727 fm = createFontMapping(['Noto'])
2728 if revert_fonts(document, fm, fontmap, True):
2729 add_preamble_fonts(document, fontmap)
# Same pattern as revert_notoFonts_xopts, for the IBM Plex families.
# NOTE(review): early `return`s and the `fontmap` initialisation are elided.
2732 def revert_IBMFonts_xopts(document):
2733 " Revert native IBM font definition (with extra options) to LaTeX "
2735 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2737 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2739 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2743 fm = createFontMapping(['IBM'])
2745 if revert_fonts(document, fm, fontmap, True):
2746 add_preamble_fonts(document, fontmap)
# Same pattern as revert_notoFonts_xopts, for the Adobe Source families.
# NOTE(review): early `return`s and the `fontmap` initialisation are elided.
2749 def revert_AdobeFonts_xopts(document):
2750 " Revert native Adobe font definition (with extra options) to LaTeX "
2752 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2754 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2756 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2760 fm = createFontMapping(['Adobe'])
2762 if revert_fonts(document, fm, fontmap, True):
2763 add_preamble_fonts(document, fontmap)
# Split the single \font_osf header into the per-family
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf parameters.
# NOTE(review): residual line numbers jump — early `return`s and the
# branch structure around the NonTeXFonts case are elided from this view.
2766 def convert_osf(document):
2767 " Convert \\font_osf param to new format "
2770 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2772 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2774 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2776 i = find_token(document.header, '\\font_osf', 0)
2778 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font names that already imply old-style figures for sans/typewriter.
2781 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2782 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2784 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2785 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2788 document.header.insert(i, "\\font_sans_osf false")
2789 document.header.insert(i + 1, "\\font_typewriter_osf false")
2793 x = find_token(document.header, "\\font_sans", 0)
2795 document.warning("Malformed LyX document: Missing \\font_sans.")
2797 # We need to use this regex since split() does not handle quote protection
2798 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2799 sf = sffont[1].strip('"')
2801 document.header.insert(i, "\\font_sans_osf true")
2803 document.header.insert(i, "\\font_sans_osf false")
2805 x = find_token(document.header, "\\font_typewriter", 0)
2807 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2809 # We need to use this regex since split() does not handle quote protection
2810 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2811 tt = ttfont[1].strip('"')
2813 document.header.insert(i + 1, "\\font_typewriter_osf true")
2815 document.header.insert(i + 1, "\\font_typewriter_osf false")
2818 document.header.insert(i, "\\font_sans_osf false")
2819 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapse the per-family \font_*_osf parameters back into the single
# \font_osf header (OR-ing the three values when non-TeX fonts are used).
# NOTE(review): residual line numbers jump — early `return`s and the
# final guard around rewriting \font_osf are elided from this view.
2822 def revert_osf(document):
2823 " Revert \\font_*_osf params "
2826 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2828 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2830 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2832 i = find_token(document.header, '\\font_roman_osf', 0)
2834 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2837 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2838 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2840 i = find_token(document.header, '\\font_sans_osf', 0)
2842 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2845 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2846 del document.header[i]
2848 i = find_token(document.header, '\\font_typewriter_osf', 0)
2850 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# |= accumulates: any family with OSF turns the legacy flag on.
2853 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2854 del document.header[i]
2857 i = find_token(document.header, '\\font_osf', 0)
2859 document.warning("Malformed LyX document: Missing \\font_osf.")
2861 document.header[i] = "\\font_osf true"
# Revert TeX fonts that carry \font_sans_opts / \font_roman_opts headers to
# explicit \usepackage[...]{...} preamble lines (biolinum for sans; a list
# of roman packages incl. mathpazo/mathptmx/XCharter).
# NOTE(review): residual line numbers jump — early `return`s, the default
# `package = roman` assignment, osf strings for several fonts, and the
# option merging into `preamble` are elided from this view.
2864 def revert_texfontopts(document):
2865 " Revert native TeX font definitions (with extra options) to LaTeX "
2867 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2869 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2871 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2874 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2876 # First the sf (biolinum only)
2877 regexp = re.compile(r'(\\font_sans_opts)')
2878 x = find_re(document.header, regexp, 0)
2880 # We need to use this regex since split() does not handle quote protection
2881 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2882 opts = sfopts[1].strip('"')
2883 i = find_token(document.header, "\\font_sans", 0)
2885 document.warning("Malformed LyX document: Missing \\font_sans.")
2887 # We need to use this regex since split() does not handle quote protection
2888 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2889 sans = sffont[1].strip('"')
2890 if sans == "biolinum":
2892 sffont[1] = '"default"'
2893 document.header[i] = " ".join(sffont)
2895 j = find_token(document.header, "\\font_sans_osf true", 0)
2898 k = find_token(document.header, "\\font_sf_scale", 0)
2900 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2902 sfscale = document.header[k].split()
2905 document.header[k] = " ".join(sfscale)
2908 sf_scale = float(val)
2910 document.warning("Invalid font_sf_scale value: " + val)
2911 preamble = "\\usepackage["
2913 document.header[j] = "\\font_sans_osf false"
2915 if sf_scale != 100.0:
2916 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2918 preamble += "]{biolinum}"
2919 add_to_preamble(document, [preamble])
2920 del document.header[x]
# Now the roman family.
2922 regexp = re.compile(r'(\\font_roman_opts)')
2923 x = find_re(document.header, regexp, 0)
2927 # We need to use this regex since split() does not handle quote protection
2928 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2929 opts = romanopts[1].strip('"')
2931 i = find_token(document.header, "\\font_roman", 0)
2933 document.warning("Malformed LyX document: Missing \\font_roman.")
2936 # We need to use this regex since split() does not handle quote protection
2937 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2938 roman = romanfont[1].strip('"')
2939 if not roman in rmfonts:
2941 romanfont[1] = '"default"'
2942 document.header[i] = " ".join(romanfont)
# Some LyX font names map to differently-named LaTeX packages.
2944 if roman == "utopia":
2946 elif roman == "palatino":
2947 package = "mathpazo"
2948 elif roman == "times":
2949 package = "mathptmx"
2950 elif roman == "xcharter":
2951 package = "XCharter"
2953 j = find_token(document.header, "\\font_roman_osf true", 0)
2955 if roman == "cochineal":
2956 osf = "proportional,osf,"
2957 elif roman == "utopia":
2959 elif roman == "garamondx":
2961 elif roman == "libertine":
2963 elif roman == "palatino":
2965 elif roman == "xcharter":
2967 document.header[j] = "\\font_roman_osf false"
2968 k = find_token(document.header, "\\font_sc true", 0)
2970 if roman == "utopia":
2972 if roman == "palatino" and osf == "":
2974 document.header[k] = "\\font_sc false"
2975 preamble = "\\usepackage["
2978 preamble += "]{" + package + "}"
2979 add_to_preamble(document, [preamble])
2980 del document.header[x]
def convert_CantarellFont(document):
    " Handle Cantarell font definition to LaTeX "
    # Only documents typeset with TeX fonts carry this font setting.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Cantarell']), "oldstyle")
# Revert the native Cantarell TeX-font setting via the font-mapping
# machinery; collected packages/options go to the preamble.
# NOTE(review): the `fontmap` initialisation (residual line 2994) is elided.
2990 def revert_CantarellFont(document):
2991 " Revert native Cantarell font definition to LaTeX "
2993 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2995 fm = createFontMapping(['Cantarell'])
2996 if revert_fonts(document, fm, fontmap, False, True):
2997 add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    " Handle Chivo font definition to LaTeX "
    # Only documents typeset with TeX fonts carry this font setting.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Chivo']), "oldstyle")
# Revert the native Chivo TeX-font setting via the font-mapping machinery.
# NOTE(review): the `fontmap` initialisation (residual line 3010) is elided.
3006 def revert_ChivoFont(document):
3007 " Revert native Chivo font definition to LaTeX "
3009 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
3011 fm = createFontMapping(['Chivo'])
3012 if revert_fonts(document, fm, fontmap, False, True):
3013 add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    " Handle Fira font definition to LaTeX "

    # Nothing to do unless the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Fira'])
    convert_fonts(document, mapping, "lf")
def revert_FiraFont(document):
    " Revert native Fira font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collects package -> options for the fonts reverted below.
        # BUGFIX: fontmap was referenced without being initialized (NameError).
        fontmap = dict()
        fm = createFontMapping(['Fira'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def _convert_semibold(document, family, sbname, regname, optstoken, scaletoken, nontexfonts):
    """Replace the pseudo 'Semibold' face of one font family by its regular face
    plus a 'semibold' entry in the respective \\font_*_opts header tag.

    family:     "roman", "sans" or "typewriter" (completes the \\font_ token)
    sbname:     LyX name of the semibold pseudo-font (e.g. "IBMPlexSerifSemibold")
    regname:    LyX name of the regular font it maps to
    optstoken:  header tag carrying the font options (e.g. "font_roman_opts")
    scaletoken: header tag before which a new opts tag is inserted
    nontexfonts: True if the document uses non-TeX fonts (no opts handling then)
    """
    i = find_token(document.header, "\\font_" + family, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_%s." % family)
        return
    # We need to use this regex since split() does not handle quote protection
    fontinfo = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if fontinfo[1].strip('"') != sbname:
        return
    fontinfo[1] = '"' + regname + '"'
    document.header[i] = " ".join(fontinfo)

    if nontexfonts:
        # Options only apply with TeX fonts.
        return
    x = find_re(document.header, re.compile(r'(\\' + optstoken + ')'), 0)
    if x == -1:
        # Sensible place to insert tag
        fo = find_token(document.header, "\\" + scaletoken)
        if fo == -1:
            document.warning("Malformed LyX document! Missing \\" + scaletoken)
        else:
            document.header.insert(fo, "\\" + optstoken + " \"semibold\"")
    else:
        # We need to use this regex since split() does not handle quote protection
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = "\\" + optstoken + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    " Move semibold options to extraopts "

    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    _convert_semibold(document, "roman", "IBMPlexSerifSemibold", "IBMPlexSerif",
                      "font_roman_opts", "font_sf_scale", NonTeXFonts)
    _convert_semibold(document, "sans", "IBMPlexSansSemibold", "IBMPlexSans",
                      "font_sans_opts", "font_sf_scale", NonTeXFonts)
    # BUGFIX: the original typewriter branch built the new opts line from
    # `sfopts` (the *sans* options) instead of the typewriter options.
    _convert_semibold(document, "typewriter", "IBMPlexMonoSemibold", "IBMPlexMono",
                      "font_typewriter_opts", "font_tt_scale", NonTeXFonts)
def convert_NotoRegulars(document):
    " Merge diverse Noto regular fonts "

    # Same rename for all three families: (header token, old LyX name, new LyX name).
    for token, oldname, newname in (
            ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
            ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
            ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular")):
        i = find_token(document.header, token, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % token)
            continue
        # We need to use this regex since split() does not handle quote protection
        font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if font[1].strip('"') == oldname:
            font[1] = '"' + newname + '"'
            document.header[i] = " ".join(font)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Nothing to do unless the document is typeset with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collects package -> options for the fonts reverted below.
        # BUGFIX: fontmap was referenced without being initialized (NameError).
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # Only memoir and the KOMA-script classes (prefix "scr") know these sizes.
    # BUGFIX: the original compared textclass[:2] != "scr" — a two-character
    # slice can never equal "scr", so KOMA classes were never handled.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry, the size is handled by the geometry package.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Pass the size as a class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
    else:
        document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    # Only memoir and the KOMA-script classes (prefix "scr") know these sizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # BUGFIX: the original wrote to document.header[1] (a fixed index),
        # clobbering an unrelated header line instead of the line just found.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    # Only KOMA-script classes (prefix "scr") are concerned.
    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes LyX 2.3 already supports; anything else is KOMA-specific.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    # Pass the size as a class option instead.
    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
    else:
        document.header[i] = document.header[i] + "," + fsize
def revert_dupqualicites(document):
    " Revert qualified citation list commands with duplicate keys to ERT "

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Get the cite engine; only the biblatex engines have these commands.
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # not a qualified list: nothing to do.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            key = "???"

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicate keys: LyX 2.3 can handle this inset.
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)

        # Build maps key -> tab-separated list of pre-/postnotes (a key may
        # occur several times, hence the "\t"-joined accumulation).
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val

        # Replace known new commands with ERT
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # Need an empty global postnote to disambiguate the prenote.
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # An empty postnote is needed to disambiguate the prenote.
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    " Convert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        # Drop the "paper" suffix ("a4paper" -> "a4" etc.).
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    " Revert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        # Re-append the "paper" suffix ("a4" -> "a4paper" etc.).
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Track whether the preamble actually needs the package.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    # memoir's own command is \pagenote; with one of the endnote modules
    # loaded, the Flex inset maps to \endnote instead.
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
def revert_totalheight(document):
    " Reverts graphics height parameter from totalheight to height "

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract an existing "height=" entry from the special param, if any.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:7] == "height=":
                    oldheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        # The height param now means totalheight: move it into special.
        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    document.body.insert(kk, "\tspecial totalheight=" + val)
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1
def convert_totalheight(document):
    " Converts graphics height parameter from totalheight to height "

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract an existing "totalheight=" entry from the special param.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        # The old height param moves into special; totalheight becomes height.
        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val)
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            document.body.insert(k, "\theight " + newheight)
        i = j + 1
supported_versions = ["2.4.0", "2.4"]

# Conversion chain: one entry per file-format step. Steps with no content
# change carry an empty routine list so the chain stays contiguous.
convert = [
           [545, [convert_lst_literalparam]],
           [546, []],
           [547, []],
           [548, []],
           [549, []],
           [550, [convert_fontenc]],
           [551, []],
           [552, []],
           [553, []],
           [554, []],
           [555, []],
           [556, []],
           [557, [convert_vcsinfo]],
           [558, [removeFrontMatterStyles]],
           [559, []],
           [560, []],
           [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
           [562, []],
           [563, []],
           [564, []],
           [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
           [566, [convert_hebrew_parentheses]],
           [567, []],
           [568, []],
           [569, []],
           [570, []],
           [571, []],
           [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
           [573, [convert_inputencoding_namechange]],
           [574, [convert_ruby_module, convert_utf8_japanese]],
           [575, [convert_lineno, convert_aaencoding]],
           [576, []],
           [577, [convert_linggloss]],
           [578, []],
           [579, []],
           [580, []],
           [581, [convert_osf]],
           [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
           [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
           [584, []],
           [585, [convert_pagesizes]],
           [586, []],
           [587, [convert_pagesizenames]],
           [588, []],
           [589, [convert_totalheight]]
          ]

# Reversion chain: walked from the newest format downwards.
revert =  [[588, [revert_totalheight]],
           [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
           [586, [revert_pagesizenames]],
           [585, [revert_dupqualicites]],
           [584, [revert_pagesizes,revert_komafontsizes]],
           [583, [revert_vcsinfo_rev_abbrev]],
           [582, [revert_ChivoFont,revert_CrimsonProFont]],
           [581, [revert_CantarellFont,revert_FiraFont]],
           [580, [revert_texfontopts,revert_osf]],
           [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
           [578, [revert_babelfont]],
           [577, [revert_drs]],
           [576, [revert_linggloss, revert_subexarg]],
           [575, [revert_new_languages]],
           [574, [revert_lineno, revert_aaencoding]],
           [573, [revert_ruby_module, revert_utf8_japanese]],
           [572, [revert_inputencoding_namechange]],
           [571, [revert_notoFonts]],
           [570, [revert_cmidruletrimming]],
           [569, [revert_bibfileencodings]],
           [568, [revert_tablestyle]],
           [567, [revert_soul]],
           [566, [revert_malayalam]],
           [565, [revert_hebrew_parentheses]],
           [564, [revert_AdobeFonts]],
           [563, [revert_lformatinfo]],
           [562, [revert_listpargs]],
           [561, [revert_l7ninfo]],
           [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
           [559, [revert_timeinfo, revert_namenoextinfo]],
           [558, [revert_dateinfo]],
           [557, [addFrontMatterStyles]],
           [556, [revert_vcsinfo]],
           [555, [revert_bibencoding]],
           [554, [revert_vcolumns]],
           [553, [revert_stretchcolumn]],
           [552, [revert_tuftecite]],
           [551, [revert_floatpclass, revert_floatalignment]],
           [550, [revert_nospellcheck]],
           [549, [revert_fontenc]],
           [548, []],# dummy format change
           [547, [revert_lscape]],
           [546, [revert_xcharter]],
           [545, [revert_paratype]],
           [544, [revert_lst_literalparam]]
          ]
3738 if __name__ == "__main__":