1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # fontmap: dict mapping LaTeX package name -> list of package options
    # (as filled by revert_fonts()).
    # Fix: the excerpt referenced `pkg` without defining it and left
    # `xoption` unbound when a package had no options.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            # Emit \usepackage[opt1,opt2]{pkg}
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            # No options collected: plain \usepackage{pkg}
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Return a dictionary key built from package name and option list "
    # e.g. createkey("plex-sans", ["scale"]) -> "plex-sans:scale";
    # used for both font2pkgmap and pkg2fontmap lookups.
    return pkg + ':' + "-".join(options)
# NOTE(review): fragment of the fontinfo class initializer — the enclosing
# class/def header and several lines are missing from this excerpt (the
# source numbering jumps 70->74 and 76->79); confirm against the full file.
# Each instance records the mapping data for one TeX font.
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Presumably recomputes the package key; self.package and self.options
# are assigned on lines not visible here — TODO confirm.
79 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): fragment of the fontmapping class initializer (the
# enclosing class/def header falls on lines missing from this excerpt).
# font name -> fontinfo (filled by expandFontMapping below)
83 self.font2pkgmap = dict()
# package key (see createkey) -> font name
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# NOTE(review): non-contiguous excerpt — the per-font loop that creates
# the fontinfo instance `fe` and splits 'Name,option' entries (orig.
# lines 100-115, partially) is missing; confirm against the full file.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 " Expand fontinfo mapping"
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
# NOTE(review): `document` is not a parameter of this method — this
# call would raise NameError if ever reached; confirm intent.
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# Look up the font name registered for (pkg, options); the early-return
# and final return statements fall on lines missing from this excerpt.
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
# NOTE(review): `document` is not in scope here — would raise NameError
# if reached; confirm against the full file.
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Sanity check: the round-trip through both maps must agree.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# NOTE(review): non-contiguous excerpt — the creation of `fm` (a
# fontmapping instance), the `if/elif font == '...':` branch headers for
# 'DejaVu'/'IBM'/'Noto'/'Fira', and the final `return fm` fall on lines
# missing from this view; confirm against the full file.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# NOTE(review): non-contiguous excerpt — loop initialisation, several
# guards and intermediate assignments (e.g. of `pkg`, `o`, `opt`,
# `oscale`, `has_osf`, `fontscale`, `vals`) fall on missing lines;
# confirm against the full file before editing control flow.
199 def convert_fonts(document, fm, osfoption = "osf"):
200 " Handle font definition (LaTeX preamble -> native) "
202 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
202 rpkg matches \usepackage[options]{package} lines in the preamble.
203 rscaleopt = re.compile(r'^scaled?=(.*)')
205 # Check whether we go beyond font option feature introduction
206 haveFontOpts = document.end_format > 580
209 while i < len(document.preamble):
210 i = find_re(document.preamble, rpkg, i+1)
213 mo = rpkg.search(document.preamble[i])
214 if mo == None or mo.group(2) == None:
# group(2) is the bracketed option string; normalise to a list.
217 options = mo.group(2).replace(' ', '').split(",")
222 while o < len(options):
223 if options[o] == osfoption:
227 mo = rscaleopt.search(options[o])
235 if not pkg in fm.pkginmap:
240 # Try with name-option combination first
241 # (only one default option supported currently)
243 while o < len(options):
245 fn = fm.getfontname(pkg, [opt])
252 fn = fm.getfontname(pkg, [])
254 fn = fm.getfontname(pkg, options)
257 del document.preamble[i]
258 fontinfo = fm.font2pkgmap[fn]
259 if fontinfo.scaletype == None:
262 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
263 fontinfo.scaleval = oscale
264 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
265 if fontinfo.osfopt == None:
# NOTE(review): list.extend() of a string appends each character
# ('o','s','f') — append() looks intended; confirm.
266 options.extend(osfoption)
268 osf = find_token(document.header, "\\font_osf false")
269 osftag = "\\font_osf"
270 if osf == -1 and fontinfo.fonttype != "math":
271 # Try with newer format
272 osftag = "\\font_" + fontinfo.fonttype + "_osf"
273 osf = find_token(document.header, osftag + " false")
275 document.header[osf] = osftag + " true"
276 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
277 del document.preamble[i-1]
279 if fontscale != None:
280 j = find_token(document.header, fontscale, 0)
282 val = get_value(document.header, fontscale, j)
286 scale = "%03d" % int(float(oscale) * 100)
287 document.header[j] = fontscale + " " + scale + " " + vals[1]
288 ft = "\\font_" + fontinfo.fonttype
289 j = find_token(document.header, ft, 0)
291 val = get_value(document.header, ft, j)
292 words = val.split() # ! splits also values like '"DejaVu Sans"'
293 words[0] = '"' + fn + '"'
294 document.header[j] = ft + ' ' + ' '.join(words)
# \font_<type>_opts headers presumably exist only in formats > 580
# (see haveFontOpts above) — TODO confirm.
295 if haveFontOpts and fontinfo.fonttype != "math":
296 fotag = "\\font_" + fontinfo.fonttype + "_opts"
297 fo = find_token(document.header, fotag)
299 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
301 # Sensible place to insert tag
302 fo = find_token(document.header, "\\font_sf_scale")
304 document.warning("Malformed LyX document! Missing \\font_sf_scale")
306 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# NOTE(review): non-contiguous excerpt — loop initialisation, guards and
# assignments of `oldval`, `xval1` etc. fall on missing lines; confirm
# against the full file before editing control flow.
310 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
311 " Revert native font definition to LaTeX "
312 # fonlist := list of fonts created from the same package
313 # Empty package means that the font-name is the same as the package-name
314 # fontmap (key = package, val += found options) will be filled
315 # and used later in add_preamble_fonts() to be added to user-preamble
317 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
318 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
320 while i < len(document.header):
321 i = find_re(document.header, rfontscale, i+1)
324 mo = rfontscale.search(document.header[i])
# group(1) is the full header tag, e.g. "\font_roman" (see regex above).
327 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
328 val = get_value(document.header, ft, i)
329 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
330 font = words[0].strip('"') # TeX font name has no whitespace
331 if not font in fm.font2pkgmap:
333 fontinfo = fm.font2pkgmap[font]
334 val = fontinfo.package
335 if not val in fontmap:
338 if OnlyWithXOpts or WithXOpts:
339 if ft == "\\font_math":
341 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
342 if ft == "\\font_sans":
343 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
344 elif ft == "\\font_typewriter":
345 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
346 x = find_re(document.header, regexp, 0)
347 if x == -1 and OnlyWithXOpts:
351 # We need to use this regex since split() does not handle quote protection
352 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
353 opts = xopts[1].strip('"').split(",")
354 fontmap[val].extend(opts)
355 del document.header[x]
# Reset the header line to the default font.
356 words[0] = '"default"'
357 document.header[i] = ft + ' ' + ' '.join(words)
358 if fontinfo.scaleopt != None:
359 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
360 mo = rscales.search(xval)
365 # set correct scale option
366 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
367 if fontinfo.osfopt != None:
369 if fontinfo.osfdef == "true":
371 osf = find_token(document.header, "\\font_osf " + oldval)
372 if osf == -1 and ft != "\\font_math":
373 # Try with newer format
374 osftag = "\\font_roman_osf " + oldval
375 if ft == "\\font_sans":
376 osftag = "\\font_sans_osf " + oldval
377 elif ft == "\\font_typewriter":
378 osftag = "\\font_typewriter_osf " + oldval
379 osf = find_token(document.header, osftag)
381 fontmap[val].extend([fontinfo.osfopt])
382 if len(fontinfo.options) > 0:
383 fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding header: nothing to rename.
        # (Without this guard, header[-1] — the last line — would be mangled.)
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding header: nothing to revert.
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only with TeX fonts (\use_non_tex_fonts false) does the preamble
    # carry \usepackage lines that can become native font settings.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Noto'])
        convert_fonts(document, fm)
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: fontmap was used without being defined in the excerpt.
        # package -> list of options, filled by revert_fonts().
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Only applicable when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['DejaVu', 'IBM'])
        convert_fonts(document, fm)
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: fontmap was used without being defined in the excerpt.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Only applicable when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Adobe'])
        convert_fonts(document, fm)
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: fontmap was used without being defined in the excerpt.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# NOTE(review): non-contiguous excerpt — the scan loop header, the
# -1 guards after find_* calls and the trailing-blank-line handling
# fall on missing lines; confirm against the full file.
456 def removeFrontMatterStyles(document):
457 " Remove styles Begin/EndFrontmatter"
459 layouts = ['BeginFrontmatter', 'EndFrontmatter']
459 Layout names to strip from the body.
460 tokenend = len('\\begin_layout ')
463 i = find_token_exact(document.body, '\\begin_layout ', i+1)
466 layout = document.body[i][tokenend:].strip()
467 if layout not in layouts:
469 j = find_end_of_layout(document.body, i)
471 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Also swallow blank lines following the layout before deleting.
473 while document.body[j+1].strip() == '':
475 document.body[i:j+1] = []
# NOTE(review): non-contiguous excerpt — the early return, the bodies of
# the inserted note text, `first` tracking and several guards fall on
# missing lines; confirm against the full file.
477 def addFrontMatterStyles(document):
478 " Use styles Begin/EndFrontmatter for elsarticle"
480 if document.textclass != "elsarticle":
# Local helper: wraps the frontmatter region found below in
# Begin/EndFrontmatter layouts, trimming surrounding blank lines.
483 def insertFrontmatter(prefix, line):
485 while above > 0 and document.body[above-1].strip() == '':
488 while document.body[below].strip() == '':
490 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
491 '\\begin_inset Note Note',
493 '\\begin_layout Plain Layout',
496 '\\end_inset', '', '',
# elsarticle layouts that belong to the frontmatter.
499 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
500 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
501 tokenend = len('\\begin_layout ')
505 i = find_token_exact(document.body, '\\begin_layout ', i+1)
508 layout = document.body[i][tokenend:].strip()
509 if layout not in layouts:
511 k = find_end_of_layout(document.body, i)
513 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so the earlier index (`first`) stays valid.
520 insertFrontmatter('End', k+1)
521 insertFrontmatter('Begin', first)
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards after find_* calls fall on missing lines.
524 def convert_lst_literalparam(document):
525 " Add param literal to include inset "
529 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
532 j = find_end_of_inset(document.body, i)
534 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip to the first blank line inside the inset, then insert the
# new parameter there.
536 while i < j and document.body[i].strip() != '':
538 document.body.insert(i, 'literal "true"')
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards after find_* calls fall on missing lines.
541 def revert_lst_literalparam(document):
542 " Remove param literal from include inset "
546 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
549 j = find_end_of_inset(document.body, i)
551 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
# Drop the 'literal' parameter line within the inset bounds.
553 del_token(document.body, 'literal', i, j)
# NOTE(review): non-contiguous excerpt — initialisation of `sfoption`,
# `ttoption`, `val`, several guards and the try/except around float()
# fall on missing lines; confirm against the full file.
556 def revert_paratype(document):
557 " Revert ParaType font definitions to LaTeX "
559 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
561 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
562 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
563 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
564 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
567 sfval = find_token(document.header, "\\font_sf_scale", 0)
569 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
571 sfscale = document.header[sfval].split()
574 document.header[sfval] = " ".join(sfscale)
577 sf_scale = float(val)
579 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float (line 577) compared against the
# string "100.0" — this is always True; confirm intended comparison.
582 if sf_scale != "100.0":
583 sfoption = "scaled=" + str(sf_scale / 100.0)
584 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
585 ttval = get_value(document.header, "\\font_tt_scale", 0)
590 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: one combined paratype package suffices.
591 if i1 != -1 and i2 != -1 and i3!= -1:
592 add_to_preamble(document, ["\\usepackage{paratype}"])
595 add_to_preamble(document, ["\\usepackage{PTSerif}"])
596 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
599 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
601 add_to_preamble(document, ["\\usepackage{PTSans}"])
602 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
605 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
607 add_to_preamble(document, ["\\usepackage{PTMono}"])
608 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# NOTE(review): non-contiguous excerpt — the -1 guards, the return for
# non-TeX fonts, and the assignment of `options` fall on missing lines.
611 def revert_xcharter(document):
612 " Revert XCharter font definitions to LaTeX "
614 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
618 # replace unsupported font setting
619 document.header[i] = document.header[i].replace("xcharter", "default")
620 # no need for preamble code with system fonts
621 if get_bool_value(document.header, "\\use_non_tex_fonts"):
624 # transfer old style figures setting to package options
625 j = find_token(document.header, "\\font_osf true")
628 document.header[j] = "\\font_osf false"
632 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards after find_* calls fall on missing lines.
635 def revert_lscape(document):
636 " Reverts the landscape environment (Landscape module) to TeX-code "
638 if not "landscape" in document.get_module_list():
643 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
646 j = find_end_of_inset(document.body, i)
648 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant wraps the environment in \afterpage{...}.
651 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
652 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
653 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
654 add_to_preamble(document, ["\\usepackage{afterpage}"])
656 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
657 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
659 add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    " Convert default fontenc setting "

    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        # No global fontencoding header: nothing to convert.
        # (Without this guard, header[-1] would be rewritten.)
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "

    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        # No auto fontencoding header: nothing to revert.
        return
    document.header[i] = document.header[i].replace("auto", "global")
# NOTE(review): almost the whole body of this function falls on lines
# missing from this excerpt (the scan loop, the -1 guard and the
# deletion of the found token); confirm against the full file.
682 def revert_nospellcheck(document):
683 " Remove nospellcheck font info param "
687 i = find_token(document.body, '\\nospellcheck', i)
# NOTE(review): non-contiguous excerpt — the scan loop header, guards
# and the actual deletion of the placement lines fall on missing lines.
693 def revert_floatpclass(document):
694 " Remove float placement params 'document' and 'class' "
# Strip the header-level setting first.
696 del_token(document.header, "\\float_placement class")
700 i = find_token(document.body, '\\begin_inset Float', i+1)
703 j = find_end_of_inset(document.body, i)
# Placement parameter sits within the first two lines of the inset.
704 k = find_token(document.body, 'placement class', i, i + 2)
706 k = find_token(document.body, 'placement document', i, i + 2)
# NOTE(review): non-contiguous excerpt — loop header, -1 guards and the
# deletion of the alignment line fall on missing lines.
713 def revert_floatalignment(document):
714 " Remove float alignment params "
# Global default alignment; delete=True removes the header line.
716 galignment = get_value(document.header, "\\float_alignment", delete=True)
720 i = find_token(document.body, '\\begin_inset Float', i+1)
723 j = find_end_of_inset(document.body, i)
725 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
727 k = find_token(document.body, 'alignment', i, i+4)
731 alignment = get_value(document.body, "alignment", k)
# 'document' means: fall back to the document-wide default.
732 if alignment == "document":
733 alignment = galignment
735 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
737 document.warning("Can't find float layout!")
740 if alignment == "left":
741 alcmd = put_cmd_in_ert("\\raggedright{}")
742 elif alignment == "center":
743 alcmd = put_cmd_in_ert("\\centering{}")
744 elif alignment == "right":
745 alcmd = put_cmd_in_ert("\\raggedleft{}")
# Insert the ERT right after the float's Plain Layout line.
747 document.body[l+1:l+1] = alcmd
# NOTE(review): non-contiguous excerpt — loop header, guards, the
# construction of `res` from `cmd` and the pre/post emptiness checks
# fall on missing lines.
750 def revert_tuftecite(document):
751 " Revert \cite commands in tufte classes "
753 tufte = ["tufte-book", "tufte-handout"]
# Only the tufte classes need this reversion.
754 if document.textclass not in tufte:
759 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
762 j = find_end_of_inset(document.body, i)
764 document.warning("Can't find end of citation inset at line %d!!" %(i))
766 k = find_token(document.body, "LatexCommand", i, j)
768 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
771 cmd = get_value(document.body, "LatexCommand", k)
775 pre = get_quoted_value(document.body, "before", i, j)
776 post = get_quoted_value(document.body, "after", i, j)
777 key = get_quoted_value(document.body, "key", i, j)
779 document.warning("Citation inset at line %d does not have a key!" %(i))
781 # Replace command with ERT
784 res += "[" + pre + "]"
786 res += "[" + post + "]"
789 res += "{" + key + "}"
790 document.body[i:j+1] = put_cmd_in_ert([res])
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards fall on missing lines.
794 def revert_stretchcolumn(document):
795 " We remove the column varwidth flags or everything else will become a mess. "
798 i = find_token(document.body, "\\begin_inset Tabular", i+1)
801 j = find_end_of_inset(document.body, i+1)
803 document.warning("Malformed LyX document: Could not find end of tabular.")
# Strip varwidth="true" from every <column ...> tag inside the tabular.
805 for k in range(i, j):
806 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
807 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
808 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# NOTE(review): non-contiguous excerpt — initialisation of `needarray`,
# `needvarwidth`, `col_info`, `m`, `begcell`, `vcand` and several loop
# headers/guards fall on missing lines; the statement order here is
# intricate — confirm against the full file before touching logic.
811 def revert_vcolumns(document):
812 " Revert standard columns with line breaks etc. "
818 i = find_token(document.body, "\\begin_inset Tabular", i+1)
821 j = find_end_of_inset(document.body, i)
823 document.warning("Malformed LyX document: Could not find end of tabular.")
826 # Collect necessary column information
# Row/column counts are parsed from the <features ...> line quotes.
828 nrows = int(document.body[i+1].split('"')[3])
829 ncols = int(document.body[i+1].split('"')[5])
831 for k in range(ncols):
832 m = find_token(document.body, "<column", m)
833 width = get_option_value(document.body[m], 'width')
834 varwidth = get_option_value(document.body[m], 'varwidth')
835 alignment = get_option_value(document.body[m], 'alignment')
836 special = get_option_value(document.body[m], 'special')
837 col_info.append([width, varwidth, alignment, special, m])
# Walk every cell and decide whether it needs a varwidth column.
842 for row in range(nrows):
843 for col in range(ncols):
844 m = find_token(document.body, "<cell", m)
845 multicolumn = get_option_value(document.body[m], 'multicolumn')
846 multirow = get_option_value(document.body[m], 'multirow')
847 width = get_option_value(document.body[m], 'width')
848 rotate = get_option_value(document.body[m], 'rotate')
849 # Check for: linebreaks, multipars, non-standard environments
851 endcell = find_token(document.body, "</cell>", begcell)
853 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
855 elif count_pars_in_inset(document.body, begcell + 2) > 1:
857 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
859 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
# Only plain columns (no width/varwidth/special) are converted.
860 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
862 alignment = col_info[col][2]
863 col_line = col_info[col][4]
865 if alignment == "center":
866 vval = ">{\\centering}"
867 elif alignment == "left":
868 vval = ">{\\raggedright}"
869 elif alignment == "right":
870 vval = ">{\\raggedleft}"
873 vval += "V{\\linewidth}"
875 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
876 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
877 # with newlines, and we do not want that)
879 endcell = find_token(document.body, "</cell>", begcell)
881 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
883 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
887 nle = find_end_of_inset(document.body, nl)
888 del(document.body[nle:nle+1])
890 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
892 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
# Emit required LaTeX packages only if actually needed.
898 if needarray == True:
899 add_to_preamble(document, ["\\usepackage{array}"])
900 if needvarwidth == True:
901 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): non-contiguous excerpt — guards, the dict literal's
# opening `encodings = {`, most encoding entries and several loop
# headers fall on missing lines; confirm against the full file.
904 def revert_bibencoding(document):
905 " Revert bibliography encoding "
909 i = find_token(document.header, "\\cite_engine", 0)
911 document.warning("Malformed document! Missing \\cite_engine")
913 engine = get_value(document.header, "\\cite_engine", i)
# biblatex uses the bibencoding package option instead of inputenc ERT.
917 if engine in ["biblatex", "biblatex-natbib"]:
920 # Map lyx to latex encoding names
924 "armscii8" : "armscii8",
925 "iso8859-1" : "latin1",
926 "iso8859-2" : "latin2",
927 "iso8859-3" : "latin3",
928 "iso8859-4" : "latin4",
929 "iso8859-5" : "iso88595",
930 "iso8859-6" : "8859-6",
931 "iso8859-7" : "iso-8859-7",
932 "iso8859-8" : "8859-8",
933 "iso8859-9" : "latin5",
934 "iso8859-13" : "latin7",
935 "iso8859-15" : "latin9",
936 "iso8859-16" : "latin10",
937 "applemac" : "applemac",
939 "cp437de" : "cp437de",
956 "utf8-platex" : "utf8",
963 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
966 j = find_end_of_inset(document.body, i)
968 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
970 encoding = get_quoted_value(document.body, "encoding", i, j)
973 # remove encoding line
974 k = find_token(document.body, "encoding", i, j)
977 if encoding == "default":
979 # Re-find inset end line
980 j = find_end_of_inset(document.body, i)
983 h = find_token(document.header, "\\biblio_options", 0)
985 biblio_options = get_value(document.header, "\\biblio_options", h)
986 if not "bibencoding" in biblio_options:
987 document.header[h] += ",bibencoding=%s" % encodings[encoding]
989 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
991 # this should not happen
992 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# No \biblio_options yet: insert one just before \biblatex_bibstyle.
994 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex path: wrap the inset in \bgroup...\egroup with inputenc.
996 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
997 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# NOTE(review): non-contiguous excerpt — the opening of the `types`
# dict, the scan loop header and the -1 guards fall on missing lines.
1003 def convert_vcsinfo(document):
1004 " Separate vcs Info inset from buffer Info inset. "
# Mapping of old buffer-inset args to new vcs-inset args.
1007 "vcs-revision" : "revision",
1008 "vcs-tree-revision" : "tree-revision",
1009 "vcs-author" : "author",
1010 "vcs-time" : "time",
1015 i = find_token(document.body, "\\begin_inset Info", i+1)
1018 j = find_end_of_inset(document.body, i+1)
1020 document.warning("Malformed LyX document: Could not find end of Info inset.")
1022 tp = find_token(document.body, 'type', i, j)
1023 tpv = get_quoted_value(document.body, "type", tp)
1026 arg = find_token(document.body, 'arg', i, j)
1027 argv = get_quoted_value(document.body, "arg", arg)
1028 if argv not in list(types.keys()):
# Rewrite the inset in place: new type plus translated argument.
1030 document.body[tp] = "type \"vcs\""
1031 document.body[arg] = "arg \"" + types[argv] + "\""
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards fall on missing lines.
1034 def revert_vcsinfo(document):
1035 " Merge vcs Info inset to buffer Info inset. "
# Valid vcs-inset arguments; reverted by re-prefixing with "vcs-".
1037 args = ["revision", "tree-revision", "author", "time", "date" ]
1040 i = find_token(document.body, "\\begin_inset Info", i+1)
1043 j = find_end_of_inset(document.body, i+1)
1045 document.warning("Malformed LyX document: Could not find end of Info inset.")
1047 tp = find_token(document.body, 'type', i, j)
1048 tpv = get_quoted_value(document.body, "type", tp)
1051 arg = find_token(document.body, 'arg', i, j)
1052 argv = get_quoted_value(document.body, "arg", arg)
1053 if argv not in args:
1054 document.warning("Malformed Info inset. Invalid vcs arg.")
1056 document.body[tp] = "type \"buffer\""
1057 document.body[arg] = "arg \"vcs-" + argv + "\""
# NOTE(review): non-contiguous excerpt — the scan loop header and the
# -1 guards fall on missing lines.
1059 def revert_vcsinfo_rev_abbrev(document):
1060 " Convert abbreviated revisions to regular revisions. "
1064 i = find_token(document.body, "\\begin_inset Info", i+1)
1067 j = find_end_of_inset(document.body, i+1)
1069 document.warning("Malformed LyX document: Could not find end of Info inset.")
1071 tp = find_token(document.body, 'type', i, j)
1072 tpv = get_quoted_value(document.body, "type", tp)
1075 arg = find_token(document.body, 'arg', i, j)
1076 argv = get_quoted_value(document.body, "arg", arg)
# Older format knew only "revision"; downgrade the abbreviated form.
1077 if( argv == "revision-abbrev" ):
1078 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    " Revert date info insets to static text. "

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # NOTE(review): "%de" in the loclong entry below looks like a typo for "de"
        # (compare spanish-mexico's loclong) — confirm against babel's Spanish strings.
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
        # NOTE(review): same suspected "%de" typo in the long entry below.
        "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],

    # The argument of the Info inset selects one of these format kinds.
    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
        document.warning("Malformed LyX document! No \\language header found!")

    # Scan the body for date Info insets and replace each with static text.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)

        # "fixdate" carries the fixed ISO date after an '@' separator.
        if tpv == "fixdate":
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                isodate = datecomps[1]
                m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

            # NOTE(review): datetime.date has isoformat(), not isodate() —
            # this line would raise AttributeError; confirm intended method.
            result = dte.isodate()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
            # Custom format: translate Qt-style date tokens to strftime tokens.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            fmt = re.sub('[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        # Replace the whole inset (inclusive) with the rendered text.
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    " Revert time info insets to static text. "

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will

    # The time formats for each language using strftime syntax:
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]

    # The argument of the Info inset selects one of these format kinds.
    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
    lang = get_value(document.header, "\\language", i)

    # Scan the body for time Info insets and replace each with static text.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)

        dtme = datetime.now()
        # "fixtime" carries the fixed ISO time after an '@' separator.
        if tpv == "fixtime":
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                isotime = timecomps[1]
                m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                    # Fall back to HH:MM when no seconds are given.
                    m = re.search('(\d\d):(\d\d)', isotime)
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
            # Custom format: translate Qt-style time tokens to strftime tokens.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # NOTE(review): 'dte' is not defined in this function — the time
            # object here is 'tme'; likely should be tme.strftime(fmt). Confirm.
            result = dte.strftime(fmt)
        # NOTE(review): assigning a plain string to a list slice splices it
        # character by character; revert_dateinfo uses [result] — confirm.
        document.body[i : j+1] = result
def revert_namenoextinfo(document):
    " Merge buffer Info inset type name-noext to name. "

    # Scan all Info insets; rewrite the "name-noext" argument to "name".
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
        # Replace the whole "arg" line of the inset in place.
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    " Revert l7n Info inset to text. "

    # Scan all Info insets and replace l7n (localization) insets with the
    # bare (de-accelerated) string.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # NOTE(review): assigning a plain string to a list slice splices it
        # character by character; other reverts use a one-element list — confirm.
        document.body[i : j+1] = argv
def revert_listpargs(document):
    " Reverts listpreamble arguments to TeX-code "

    # Replace each listpreamble Argument inset by an ERT inset holding the
    # same content in braces, placed at the start of the parent paragraph.
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        # parbeg is the paragraph-start index taken from the containing
        # layout tuple (assignment not visible here) — insert the ERT there.
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    " Revert layout format Info inset to text. "

    # Scan all Info insets; replace "lyxinfo layoutformat" with the static
    # layout format number.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
        # NOTE(review): assigning a plain string to a list slice splices it
        # character by character ("6", "9") — a one-element list seems intended.
        document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    A stack of languages tracks nested layouts; every non-command line
    whose current language is Hebrew gets its parentheses swapped.
    """
    # Outermost entry is the document language.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Fix: the previous lstrip('\\lang ') stripped a *character set*
            # (latent bug mangling names like "latin" -> "tin"); slice off
            # the literal prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # New layout inherits the currently active language.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap ( and ) via a NUL placeholder that cannot occur in LyX source.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    " Store parentheses in Hebrew text reversed"
    # This only exists to keep the convert/revert naming convention
    # (the parenthesis swap is its own inverse, so converting again reverts).
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "
    # Delegate to the generic language revert helper: drop "malayalam" as a
    # LyX language and re-add it via preamble/babel code.
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    " Revert soul module flex insets to ERT "

    # Flex insets provided by the soul module.
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # If any soul flex inset is used, load the soul package in the preamble.
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
            add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlight additionally needs the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
        add_to_preamble(document, ["\\usepackage{color}"])

    # Replace each flex inset with the corresponding soul command in ERT.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    " Remove tablestyle params "
    # Older formats do not know \tablestyle; simply drop the header line.
    i = find_token(document.header, "\\tablestyle")
        del document.header[i]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "

    # Only relevant for Biblatex; check the cite engine first.
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)

    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",

    # Process every bibtex inset that carries per-file encodings.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # file_encodings is a tab-separated list of "<file> <encoding>" pairs.
        enclist = encodings.split("\t")
            ppp = pp.split(" ", 1)
            encmap[ppp[0]] = ppp[1]
        # Emit one \addbibresource per file, with bibencoding where known.
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
            pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    " Remove \\cmidrule trimming "

    # FIXME: Revert to TeX code?
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        j = document.body[i].find('trim="')
        # Matches e.g. ' bottomlineltrim="true"' / ' toplinertrim="true"'.
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
    # Local InsetLayout definition for the Ruby flex inset (raw layout lines;
    # the LaTeX preamble part falls back to a \shortstack-based \ruby macro
    # when no engine-specific ruby package is available).
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Switch from a local Ruby inset layout to the 'ruby' module."""
    # Only add the module if the local layout was actually present (and removed).
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Re-insert the local Ruby inset layout in place of the 'ruby' module."""
    # Nothing to do unless the document actually used the module.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    # Each Japanese flavour has its own dedicated utf8 input-encoding variant.
    variant_for = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    inputenc = get_value(document.header, "\\inputencoding")
    if variant_for.get(lang) == inputenc:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    # Map the generic utf8 back to the flavour-specific variant.
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    elif lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."

    # Pull (and delete) the native header settings, then emit equivalent
    # \usepackage{lineno} preamble code.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."

    # Look for \linenumbers in the preamble, preceded by its \usepackage line.
    i = find_token(document.preamble, "\\linenumbers", 1)
    usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        options = usepkg.group(1).strip("[]")
        # Remove the two preamble lines and the lyx2lyx marker comment.
        del(document.preamble[i-1:i+1])
        del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Insert the native header settings before \index.
    k = find_token(document.header, "\\index ")
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),

    # Collect which of the new languages the document actually uses.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
        i = find_token(document.body, "\\lang", i+1)
        if document.body[i][6:].strip() in new_languages:
            # NOTE(review): this adds the *document* language, not the \lang
            # value just matched — looks like it should add the scanned
            # language instead; confirm.
            used_languages.add(document.language)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
    used_languages.discard("korean")

    for lang in used_languages:
        # NOTE(review): 'revert' is not defined in this module; the helper
        # used elsewhere is revert_language(document, lang, ...) — confirm.
        revert(lang, *new_languages[lang])
    # Local InsetLayout definition for the deprecated two-line ling gloss
    # inset (raw layout lines; defines a fallback linggloss environment
    # unless covington already provides one).
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Local InsetLayout definition for the deprecated three-line (Tri-Gloss)
# ling gloss inset. The triple-s name and the "lingglosss" environment are
# intentional: they mirror the old layout's naming.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Add local layout definitions for the deprecated ling gloss insets.

    The old Glosse / Tri-Glosse flex insets no longer ship with the
    linguistics module, so documents still using them get the legacy
    InsetLayout blocks appended to their local layout.
    """
    legacy_insets = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for token, layout_def in legacy_insets:
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout_def)
# NOTE(review): this extract has the upstream file's line numbers fused into
# each line, indentation stripped, and intermediate lines (loop header, -1
# guards, `cmd = ...` assignments) missing — it is not runnable as-is.
# Purpose (from visible lines): revert the "Interlinear Gloss (2/3 Lines)"
# Flex insets of the linguistics module back to raw \gloss/\trigloss ERT.
1930 def revert_linggloss(document):
1931 " Revert to old ling gloss definitions "
1932 if not "linguistics" in document.get_module_list():
# Drop the local layout definitions that the forward conversion added.
1934 document.del_local_layout(gloss_inset_def)
1935 document.del_local_layout(glosss_inset_def)
1938 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1939 for glosse in glosses:
# Scan the body for each gloss inset occurrence.
1942 i = find_token(document.body, glosse, i+1)
1945 j = find_end_of_inset(document.body, i)
1947 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional "Argument 1" inset: its plain-layout content becomes the
# [...] optional argument of the LaTeX command.
1950 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1951 endarg = find_end_of_inset(document.body, arg)
1954 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1955 if argbeginPlain == -1:
1956 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1958 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1959 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1961 # remove Arg insets and paragraph, if it only contains this inset
1962 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1963 del document.body[arg - 1 : endarg + 4]
1965 del document.body[arg : endarg + 1]
# "Argument post:1" holds the first mandatory gloss line.
1967 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1968 endarg = find_end_of_inset(document.body, arg)
1971 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1972 if argbeginPlain == -1:
1973 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1975 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1976 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1978 # remove Arg insets and paragraph, if it only contains this inset
1979 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1980 del document.body[arg - 1 : endarg + 4]
1982 del document.body[arg : endarg + 1]
# "Argument post:2" holds the second mandatory gloss line.
1984 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1985 endarg = find_end_of_inset(document.body, arg)
1988 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1989 if argbeginPlain == -1:
1990 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1992 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1993 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1995 # remove Arg insets and paragraph, if it only contains this inset
1996 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1997 del document.body[arg - 1 : endarg + 4]
1999 del document.body[arg : endarg + 1]
# "Argument post:3" holds the third line (tri-glosses only).
2001 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2002 endarg = find_end_of_inset(document.body, arg)
2005 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2006 if argbeginPlain == -1:
2007 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2009 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2010 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2012 # remove Arg insets and paragraph, if it only contains this inset
2013 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2014 del document.body[arg - 1 : endarg + 4]
2016 del document.body[arg : endarg + 1]
# The 3-line variant maps to \trigloss; the 2-line `cmd` assignment is
# in the lines missing from this extract — presumably \gloss (TODO confirm
# against the upstream file).
2019 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Replace the inset with ERT: cmd, optional [..], then {..}{..}(..){..}.
2022 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2023 endInset = find_end_of_inset(document.body, i)
2024 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2025 precontent = put_cmd_in_ert(cmd)
2026 if len(optargcontent) > 0:
2027 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2028 precontent += put_cmd_in_ert("{")
2030 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2031 if cmd == "\\trigloss":
2032 postcontent += put_cmd_in_ert("}{") + marg3content
2033 postcontent += put_cmd_in_ert("}")
# Splice: replace tail first so earlier indices stay valid.
2035 document.body[endPlain:endInset + 1] = postcontent
2036 document.body[beginPlain + 1:beginPlain] = precontent
2037 del document.body[i : beginPlain + 1]
2039 document.append_local_layout("Requires covington")
# NOTE(review): extract only — upstream line numbers are fused into each line,
# indentation is stripped, and several lines (loop header, guards) are missing.
# Purpose (from visible lines): rewrite "Subexample" layouts that carry an
# Argument 1 inset into a raw ERT subexamples environment
# (\begin{subexamples}[opt] ... \item ... \end{subexamples}).
2044 def revert_subexarg(document):
2045 " Revert linguistic subexamples with argument to ERT "
2047 if not "linguistics" in document.get_module_list():
2053 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2056 j = find_end_of_layout(document.body, i)
2058 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2061 # check for consecutive layouts
2062 k = find_token(document.body, "\\begin_layout", j)
2063 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2065 j = find_end_of_layout(document.body, k)
2067 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# The Argument 1 inset content (converted to LaTeX) becomes the
# optional argument of the environment.
2070 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2074 endarg = find_end_of_inset(document.body, arg)
2076 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2077 if argbeginPlain == -1:
2078 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2080 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2081 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2083 # remove Arg insets and paragraph, if it only contains this inset
2084 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2085 del document.body[arg - 1 : endarg + 4]
2087 del document.body[arg : endarg + 1]
2089 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2091 # re-find end of layout
2092 j = find_end_of_layout(document.body, i)
2094 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2097 # check for consecutive layouts
2098 k = find_token(document.body, "\\begin_layout", j)
2099 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample paragraph becomes a Standard paragraph
# that starts with an ERT \item.
2101 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2102 j = find_end_of_layout(document.body, k)
2104 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2107 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the last Subexample, then open it (with
# the first \item) in place of the first one.
2109 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2110 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2111 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2113 document.append_local_layout("Requires covington")
# NOTE(review): extract only — upstream line numbers are fused into each line,
# indentation is stripped, and intermediate lines (loop header, -1 guards,
# the `cmd = ...` assignments for each DRS variant) are missing.
# Purpose (from visible lines): replace the linguistics DRS Flex insets
# (DRS, DRS*, IfThen-DRS, Cond-DRS, QDRS, NegDRS, SDRS) with the equivalent
# raw LaTeX commands from the `drs` package, wrapped in ERT.
2117 def revert_drs(document):
2118 " Revert DRS insets (linguistics) to ERT "
2120 if not "linguistics" in document.get_module_list():
2124 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2125 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2126 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2127 "\\begin_inset Flex SDRS"]
2131 i = find_token(document.body, drs, i+1)
2134 j = find_end_of_inset(document.body, i)
2136 document.warning("Malformed LyX document: Can't find end of DRS inset")
2139 # Check for arguments
# Argument 1: first pre-argument ({referents}).
2140 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2141 endarg = find_end_of_inset(document.body, arg)
2144 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2145 if argbeginPlain == -1:
2146 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2148 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2149 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2151 # remove Arg insets and paragraph, if it only contains this inset
2152 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2153 del document.body[arg - 1 : endarg + 4]
2155 del document.body[arg : endarg + 1]
# End-of-inset index is re-found after each deletion because the body
# list has shifted.
2158 j = find_end_of_inset(document.body, i)
2160 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2: second pre-argument (used by SDRS, per the splice below).
2163 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2164 endarg = find_end_of_inset(document.body, arg)
2167 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2168 if argbeginPlain == -1:
2169 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2171 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2172 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2174 # remove Arg insets and paragraph, if it only contains this inset
2175 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2176 del document.body[arg - 1 : endarg + 4]
2178 del document.body[arg : endarg + 1]
2181 j = find_end_of_inset(document.body, i)
2183 document.warning("Malformed LyX document: Can't find end of DRS inset")
# post:1 .. post:4: trailing arguments; each defaults to [] when absent.
2186 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2187 endarg = find_end_of_inset(document.body, arg)
2188 postarg1content = []
2190 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2191 if argbeginPlain == -1:
2192 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2194 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2195 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2197 # remove Arg insets and paragraph, if it only contains this inset
2198 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2199 del document.body[arg - 1 : endarg + 4]
2201 del document.body[arg : endarg + 1]
2204 j = find_end_of_inset(document.body, i)
2206 document.warning("Malformed LyX document: Can't find end of DRS inset")
2209 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2210 endarg = find_end_of_inset(document.body, arg)
2211 postarg2content = []
2213 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2214 if argbeginPlain == -1:
2215 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2217 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2218 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2220 # remove Arg insets and paragraph, if it only contains this inset
2221 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2222 del document.body[arg - 1 : endarg + 4]
2224 del document.body[arg : endarg + 1]
2227 j = find_end_of_inset(document.body, i)
2229 document.warning("Malformed LyX document: Can't find end of DRS inset")
2232 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2233 endarg = find_end_of_inset(document.body, arg)
2234 postarg3content = []
2236 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2237 if argbeginPlain == -1:
2238 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2240 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2241 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2243 # remove Arg insets and paragraph, if it only contains this inset
2244 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2245 del document.body[arg - 1 : endarg + 4]
2247 del document.body[arg : endarg + 1]
2250 j = find_end_of_inset(document.body, i)
2252 document.warning("Malformed LyX document: Can't find end of DRS inset")
2255 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2256 endarg = find_end_of_inset(document.body, arg)
2257 postarg4content = []
2259 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2260 if argbeginPlain == -1:
2261 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2263 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2264 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2266 # remove Arg insets and paragraph, if it only contains this inset
2267 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2268 del document.body[arg - 1 : endarg + 4]
2270 del document.body[arg : endarg + 1]
2272 # The respective LaTeX command
# The actual `cmd = ...` assignments for each branch are among the lines
# missing from this extract; the branch structure below only shows which
# inset types are distinguished.
2274 if drs == "\\begin_inset Flex DRS*":
2276 elif drs == "\\begin_inset Flex IfThen-DRS":
2278 elif drs == "\\begin_inset Flex Cond-DRS":
2280 elif drs == "\\begin_inset Flex QDRS":
2282 elif drs == "\\begin_inset Flex NegDRS":
2284 elif drs == "\\begin_inset Flex SDRS":
# Splice the ERT command around the inset's main Plain Layout content.
2287 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2288 endInset = find_end_of_inset(document.body, i)
2289 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2290 precontent = put_cmd_in_ert(cmd)
2291 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2292 if drs == "\\begin_inset Flex SDRS":
2293 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2294 precontent += put_cmd_in_ert("{")
2297 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2298 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2299 if cmd == "\\condrs" or cmd == "\\qdrs":
2300 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2302 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2304 postcontent = put_cmd_in_ert("}")
# Replace tail first so the earlier indices remain valid.
2306 document.body[endPlain:endInset + 1] = postcontent
2307 document.body[beginPlain + 1:beginPlain] = precontent
2308 del document.body[i : beginPlain + 1]
2310 document.append_local_layout("Provides covington 1")
2311 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# NOTE(review): extract only — upstream line numbers are fused into each line,
# indentation is stripped, and guard/return lines are missing.
# Purpose (from visible lines): for documents using non-TeX fonts with the
# babel language package, move the roman/sans/typewriter font selections out
# of the header (resetting them to "default") and emit equivalent
# \babelfont{rm|sf|tt}[...]{...} calls into the user preamble.
2317 def revert_babelfont(document):
2318 " Reverts the use of \\babelfont to user preamble "
2320 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2322 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2324 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2326 i = find_token(document.header, '\\language_package', 0)
2328 document.warning("Malformed LyX document: Missing \\language_package.")
2330 if get_value(document.header, "\\language_package", 0) != "babel":
2333 # check font settings
2335 roman = sans = typew = "default"
2337 sf_scale = tt_scale = 100.0
2339 j = find_token(document.header, "\\font_roman", 0)
2341 document.warning("Malformed LyX document: Missing \\font_roman.")
2343 # We need to use this regex since split() does not handle quote protection
2344 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2345 roman = romanfont[2].strip('"')
2346 romanfont[2] = '"default"'
2347 document.header[j] = " ".join(romanfont)
2349 j = find_token(document.header, "\\font_sans", 0)
2351 document.warning("Malformed LyX document: Missing \\font_sans.")
2353 # We need to use this regex since split() does not handle quote protection
2354 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2355 sans = sansfont[2].strip('"')
2356 sansfont[2] = '"default"'
2357 document.header[j] = " ".join(sansfont)
2359 j = find_token(document.header, "\\font_typewriter", 0)
2361 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2363 # We need to use this regex since split() does not handle quote protection
2364 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2365 typew = ttfont[2].strip('"')
2366 ttfont[2] = '"default"'
2367 document.header[j] = " ".join(ttfont)
2369 i = find_token(document.header, "\\font_osf", 0)
2371 document.warning("Malformed LyX document: Missing \\font_osf.")
2373 osf = str2bool(get_value(document.header, "\\font_osf", i))
2375 j = find_token(document.header, "\\font_sf_scale", 0)
2377 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2379 sfscale = document.header[j].split()
2382 document.header[j] = " ".join(sfscale)
2385 sf_scale = float(val)
2387 document.warning("Invalid font_sf_scale value: " + val)
2389 j = find_token(document.header, "\\font_tt_scale", 0)
2391 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2393 ttscale = document.header[j].split()
2396 document.header[j] = " ".join(ttscale)
2399 tt_scale = float(val)
2401 document.warning("Invalid font_tt_scale value: " + val)
2403 # set preamble stuff
2404 pretext = ['%% This document must be processed with xelatex or lualatex!']
2405 pretext.append('\\AtBeginDocument{%')
2406 if roman != "default":
2407 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2408 if sans != "default":
2409 sf = '\\babelfont{sf}['
# Scale is emitted as a fraction (e.g. 90 -> Scale=0.9).
2410 if sf_scale != 100.0:
2411 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2412 sf += 'Mapping=tex-text]{' + sans + '}'
2414 if typew != "default":
2415 tw = '\\babelfont{tt}'
2416 if tt_scale != 100.0:
2417 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2418 tw += '{' + typew + '}'
2421 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2423 insert_to_preamble(document, pretext)
# NOTE(review): extract only — upstream line numbers are fused into each line
# and several guard/else lines are missing.
# Purpose (from visible lines): for TeX-font documents whose roman font is
# "minionpro" with extra \font_roman_opts, reset the header to default and
# emit \usepackage[...]{MinionPro} (adding an osf-related option when
# \font_osf is true) into the preamble.
2426 def revert_minionpro(document):
2427 " Revert native MinionPro font definition (with extra options) to LaTeX "
2429 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2431 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only applies when TeX fonts are in use.
2433 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2436 regexp = re.compile(r'(\\font_roman_opts)')
2437 x = find_re(document.header, regexp, 0)
2441 # We need to use this regex since split() does not handle quote protection
2442 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2443 opts = romanopts[1].strip('"')
2445 i = find_token(document.header, "\\font_roman", 0)
2447 document.warning("Malformed LyX document: Missing \\font_roman.")
2450 # We need to use this regex since split() does not handle quote protection
2451 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2452 roman = romanfont[1].strip('"')
2453 if roman != "minionpro":
2455 romanfont[1] = '"default"'
2456 document.header[i] = " ".join(romanfont)
2458 j = find_token(document.header, "\\font_osf true", 0)
2461 preamble = "\\usepackage["
2463 document.header[j] = "\\font_osf false"
2467 preamble += "]{MinionPro}"
2468 add_to_preamble(document, [preamble])
# Remove the now-reverted \font_roman_opts header line.
2469 del document.header[x]
# NOTE(review): extract only — upstream line numbers are fused into each line
# and guard/else lines are missing (e.g. the branches choosing between
# \babelfont and \setxxxfont, and the option-joining lines).
# Purpose (from visible lines): strip \font_*_opts header lines and revert
# the roman/sans/typewriter font choices to "default", emitting equivalent
# \setmainfont/\setsansfont/\setmonofont (or \babelfont{rm|sf|tt} when the
# babel language package is active) calls into the preamble, including
# Scale= options derived from \font_sf_scale / \font_tt_scale.
2472 def revert_font_opts(document):
2473 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2475 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2477 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2479 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2480 i = find_token(document.header, '\\language_package', 0)
2482 document.warning("Malformed LyX document: Missing \\language_package.")
2484 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman ---
2487 regexp = re.compile(r'(\\font_roman_opts)')
2488 i = find_re(document.header, regexp, 0)
2490 # We need to use this regex since split() does not handle quote protection
2491 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2492 opts = romanopts[1].strip('"')
2493 del document.header[i]
2495 regexp = re.compile(r'(\\font_roman)')
2496 i = find_re(document.header, regexp, 0)
2498 # We need to use this regex since split() does not handle quote protection
2499 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2500 font = romanfont[2].strip('"')
2501 romanfont[2] = '"default"'
2502 document.header[i] = " ".join(romanfont)
2503 if font != "default":
2505 preamble = "\\babelfont{rm}["
2507 preamble = "\\setmainfont["
2510 preamble += "Mapping=tex-text]{"
2513 add_to_preamble(document, [preamble])
# --- sans ---
2516 regexp = re.compile(r'(\\font_sans_opts)')
2517 i = find_re(document.header, regexp, 0)
2520 # We need to use this regex since split() does not handle quote protection
2521 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2522 opts = sfopts[1].strip('"')
2523 del document.header[i]
2525 regexp = re.compile(r'(\\font_sf_scale)')
2526 i = find_re(document.header, regexp, 0)
# Second whitespace-separated field of \font_sf_scale is the percentage.
2528 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2529 regexp = re.compile(r'(\\font_sans)')
2530 i = find_re(document.header, regexp, 0)
2532 # We need to use this regex since split() does not handle quote protection
2533 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2534 font = sffont[2].strip('"')
2535 sffont[2] = '"default"'
2536 document.header[i] = " ".join(sffont)
2537 if font != "default":
2539 preamble = "\\babelfont{sf}["
2541 preamble = "\\setsansfont["
# "Scale=0." + scaleval encodes e.g. 90% as Scale=0.90 — assumes the
# stored value is a two-digit percentage (TODO confirm upstream guard).
2545 preamble += "Scale=0."
2546 preamble += scaleval
2548 preamble += "Mapping=tex-text]{"
2551 add_to_preamble(document, [preamble])
# --- typewriter ---
2554 regexp = re.compile(r'(\\font_typewriter_opts)')
2555 i = find_re(document.header, regexp, 0)
2558 # We need to use this regex since split() does not handle quote protection
2559 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2560 opts = ttopts[1].strip('"')
2561 del document.header[i]
2563 regexp = re.compile(r'(\\font_tt_scale)')
2564 i = find_re(document.header, regexp, 0)
2566 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2567 regexp = re.compile(r'(\\font_typewriter)')
2568 i = find_re(document.header, regexp, 0)
2570 # We need to use this regex since split() does not handle quote protection
2571 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2572 font = ttfont[2].strip('"')
2573 ttfont[2] = '"default"'
2574 document.header[i] = " ".join(ttfont)
2575 if font != "default":
2577 preamble = "\\babelfont{tt}["
2579 preamble = "\\setmonofont["
2583 preamble += "Scale=0."
2584 preamble += scaleval
2586 preamble += "Mapping=tex-text]{"
2589 add_to_preamble(document, [preamble])
# NOTE(review): extract only — upstream line numbers are fused into each line
# and guard/return lines are missing.
# Purpose (from visible lines): when the document uses the plain Noto TeX
# font set (roman NotoSerif-TLF, and sans/typewriter checked below) with
# extra roman options and/or old-style figures, revert the header to default
# and emit \usepackage[...]{noto} into the preamble.
2592 def revert_plainNotoFonts_xopts(document):
2593 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2595 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2597 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only applies when TeX fonts are in use.
2599 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2603 y = find_token(document.header, "\\font_osf true", 0)
2607 regexp = re.compile(r'(\\font_roman_opts)')
2608 x = find_re(document.header, regexp, 0)
# Nothing to do when there are neither extra options nor OSF.
2609 if x == -1 and not osf:
2614 # We need to use this regex since split() does not handle quote protection
2615 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2616 opts = romanopts[1].strip('"')
2622 i = find_token(document.header, "\\font_roman", 0)
2626 # We need to use this regex since split() does not handle quote protection
2627 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2628 roman = romanfont[1].strip('"')
2629 if roman != "NotoSerif-TLF":
2632 j = find_token(document.header, "\\font_sans", 0)
2636 # We need to use this regex since split() does not handle quote protection
2637 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2638 sf = sffont[1].strip('"')
2642 j = find_token(document.header, "\\font_typewriter", 0)
2646 # We need to use this regex since split() does not handle quote protection
2647 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2648 tt = ttfont[1].strip('"')
2652 # So we have noto as "complete font"
2653 romanfont[1] = '"default"'
2654 document.header[i] = " ".join(romanfont)
2656 preamble = "\\usepackage["
2658 preamble += "]{noto}"
2659 add_to_preamble(document, [preamble])
2661 document.header[y] = "\\font_osf false"
2663 del document.header[x]
# NOTE(review): extract only — upstream line numbers fused into each line;
# the guard/return and `fontmap` initialization lines are missing.
# Purpose: for TeX-font documents, revert the extended Noto font variants via
# the shared createFontMapping/revert_fonts machinery (with xopts=True) and
# add the collected packages to the preamble.
2666 def revert_notoFonts_xopts(document):
2667 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2669 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2671 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2673 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2677 fm = createFontMapping(['Noto'])
2678 if revert_fonts(document, fm, fontmap, True):
2679 add_preamble_fonts(document, fontmap)
# NOTE(review): extract only — upstream line numbers fused into each line;
# the guard/return and `fontmap` initialization lines are missing.
# Purpose: for TeX-font documents, revert the IBM Plex font variants via the
# shared createFontMapping/revert_fonts machinery (with xopts=True) and add
# the collected packages to the preamble.  Mirrors revert_notoFonts_xopts.
2682 def revert_IBMFonts_xopts(document):
2683 " Revert native IBM font definition (with extra options) to LaTeX "
2685 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2687 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2689 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2693 fm = createFontMapping(['IBM'])
2695 if revert_fonts(document, fm, fontmap, True):
2696 add_preamble_fonts(document, fontmap)
# NOTE(review): extract only — upstream line numbers fused into each line;
# the guard/return and `fontmap` initialization lines are missing.
# Purpose: for TeX-font documents, revert the Adobe Source font variants via
# the shared createFontMapping/revert_fonts machinery (with xopts=True) and
# add the collected packages to the preamble.  Mirrors revert_IBMFonts_xopts.
2699 def revert_AdobeFonts_xopts(document):
2700 " Revert native Adobe font definition (with extra options) to LaTeX "
2702 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2704 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2706 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2710 fm = createFontMapping(['Adobe'])
2712 if revert_fonts(document, fm, fontmap, True):
2713 add_preamble_fonts(document, fontmap)
# NOTE(review): extract only — upstream line numbers fused into each line;
# guard/return/else lines are missing.
# Purpose (from visible lines): split the single \font_osf header flag into
# the new per-family flags \font_roman_osf / \font_sans_osf /
# \font_typewriter_osf.  For non-TeX fonts the sans/typewriter flags default
# to false; for TeX fonts they are derived from whether the configured
# sans/typewriter font appears in the osfsf/osftt name lists.
2716 def convert_osf(document):
2717 " Convert \\font_osf param to new format "
2720 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2722 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2724 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2726 i = find_token(document.header, '\\font_osf', 0)
2728 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font families that carry OSF variants in their LyX names.
2731 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2732 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2734 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2735 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2738 document.header.insert(i, "\\font_sans_osf false")
2739 document.header.insert(i + 1, "\\font_typewriter_osf false")
2743 x = find_token(document.header, "\\font_sans", 0)
2745 document.warning("Malformed LyX document: Missing \\font_sans.")
2747 # We need to use this regex since split() does not handle quote protection
2748 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2749 sf = sffont[1].strip('"')
2751 document.header.insert(i, "\\font_sans_osf true")
2753 document.header.insert(i, "\\font_sans_osf false")
2755 x = find_token(document.header, "\\font_typewriter", 0)
2757 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2759 # We need to use this regex since split() does not handle quote protection
2760 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2761 tt = ttfont[1].strip('"')
2763 document.header.insert(i + 1, "\\font_typewriter_osf true")
2765 document.header.insert(i + 1, "\\font_typewriter_osf false")
2768 document.header.insert(i, "\\font_sans_osf false")
2769 document.header.insert(i + 1, "\\font_typewriter_osf false")
# NOTE(review): extract only — upstream line numbers fused into each line;
# guard/return lines are missing.
# Purpose (from visible lines): collapse the per-family
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf flags back into a
# single \font_osf flag (OR-ing the individual values), deleting the sans
# and typewriter lines from the header.
2772 def revert_osf(document):
2773 " Revert \\font_*_osf params "
2776 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2778 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2780 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2782 i = find_token(document.header, '\\font_roman_osf', 0)
2784 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2787 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
# The roman flag line is renamed in place to the legacy key.
2788 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2790 i = find_token(document.header, '\\font_sans_osf', 0)
2792 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2795 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2796 del document.header[i]
2798 i = find_token(document.header, '\\font_typewriter_osf', 0)
2800 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Any family with OSF set forces the legacy flag on.
2803 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2804 del document.header[i]
2807 i = find_token(document.header, '\\font_osf', 0)
2809 document.warning("Malformed LyX document: Missing \\font_osf.")
2811 document.header[i] = "\\font_osf true"
# NOTE(review): extract only — upstream line numbers fused into each line;
# guard/return/else lines and several assignments (e.g. `package = ...`,
# the osf option strings) are missing from this extract.
# Purpose (from visible lines): for TeX-font documents using the listed
# native roman fonts (or biolinum sans) with extra \font_*_opts, reset the
# header fonts to default and emit the equivalent \usepackage[options]{pkg}
# lines (biolinum, mathdesign/mathpazo/mathptmx/XCharter etc.) into the
# preamble, folding in osf/sc and scale options.
2814 def revert_texfontopts(document):
2815 " Revert native TeX font definitions (with extra options) to LaTeX "
2817 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2819 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2821 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman fonts this reversion knows how to map to a LaTeX package.
2824 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2826 # First the sf (biolinum only)
2827 regexp = re.compile(r'(\\font_sans_opts)')
2828 x = find_re(document.header, regexp, 0)
2830 # We need to use this regex since split() does not handle quote protection
2831 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2832 opts = sfopts[1].strip('"')
2833 i = find_token(document.header, "\\font_sans", 0)
2835 document.warning("Malformed LyX document: Missing \\font_sans.")
2837 # We need to use this regex since split() does not handle quote protection
2838 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2839 sans = sffont[1].strip('"')
2840 if sans == "biolinum":
2842 sffont[1] = '"default"'
2843 document.header[i] = " ".join(sffont)
2845 j = find_token(document.header, "\\font_sans_osf true", 0)
2848 k = find_token(document.header, "\\font_sf_scale", 0)
2850 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2852 sfscale = document.header[k].split()
2855 document.header[k] = " ".join(sfscale)
2858 sf_scale = float(val)
2860 document.warning("Invalid font_sf_scale value: " + val)
2861 preamble = "\\usepackage["
2863 document.header[j] = "\\font_sans_osf false"
2865 if sf_scale != 100.0:
# biolinum uses a scaled= fraction option (e.g. 90 -> scaled=0.9).
2866 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2868 preamble += "]{biolinum}"
2869 add_to_preamble(document, [preamble])
2870 del document.header[x]
# Now the roman font with extra options.
2872 regexp = re.compile(r'(\\font_roman_opts)')
2873 x = find_re(document.header, regexp, 0)
2877 # We need to use this regex since split() does not handle quote protection
2878 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2879 opts = romanopts[1].strip('"')
2881 i = find_token(document.header, "\\font_roman", 0)
2883 document.warning("Malformed LyX document: Missing \\font_roman.")
2886 # We need to use this regex since split() does not handle quote protection
2887 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2888 roman = romanfont[1].strip('"')
2889 if not roman in rmfonts:
2891 romanfont[1] = '"default"'
2892 document.header[i] = " ".join(romanfont)
# Some LyX font names differ from their LaTeX package name; the default
# `package = roman` assignment is among the missing lines (TODO confirm).
2894 if roman == "utopia":
2896 elif roman == "palatino":
2897 package = "mathpazo"
2898 elif roman == "times":
2899 package = "mathptmx"
2900 elif roman == "xcharter":
2901 package = "XCharter"
2903 j = find_token(document.header, "\\font_roman_osf true", 0)
# Per-font old-style-figure option strings (assignments partly missing).
2905 if roman == "cochineal":
2906 osf = "proportional,osf,"
2907 elif roman == "utopia":
2909 elif roman == "garamondx":
2911 elif roman == "libertine":
2913 elif roman == "palatino":
2915 elif roman == "xcharter":
2917 document.header[j] = "\\font_roman_osf false"
2918 k = find_token(document.header, "\\font_sc true", 0)
2920 if roman == "utopia":
2922 if roman == "palatino" and osf == "":
2924 document.header[k] = "\\font_sc false"
2925 preamble = "\\usepackage["
2928 preamble += "]{" + package + "}"
2929 add_to_preamble(document, [preamble])
2930 del document.header[x]
# NOTE(review): extract only — upstream line numbers fused into each line.
# Purpose: when TeX fonts are in use, convert Cantarell font declarations to
# the native representation via the shared convert_fonts machinery; the
# "oldstyle" argument selects the OSF handling variant.
2933 def convert_CantarellFont(document):
2934 " Handle Cantarell font definition to LaTeX "
2936 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2937 fm = createFontMapping(['Cantarell'])
2938 convert_fonts(document, fm, "oldstyle")
# NOTE(review): extract only — upstream line numbers fused into each line;
# the `fontmap` initialization line is missing.
# Purpose: when TeX fonts are in use, revert native Cantarell font settings
# to LaTeX packages via the shared revert_fonts machinery and add the
# collected packages (with their options) to the preamble.
2940 def revert_CantarellFont(document):
2941 " Revert native Cantarell font definition to LaTeX "
2943 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2945 fm = createFontMapping(['Cantarell'])
2946 if revert_fonts(document, fm, fontmap, False, True):
2947 add_preamble_fonts(document, fontmap)
# NOTE(review): extract only — upstream line numbers fused into each line.
# Purpose: when TeX fonts are in use, convert Chivo font declarations to the
# native representation ("oldstyle" OSF variant).  Mirrors
# convert_CantarellFont.
2949 def convert_ChivoFont(document):
2950 " Handle Chivo font definition to LaTeX "
2952 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2953 fm = createFontMapping(['Chivo'])
2954 convert_fonts(document, fm, "oldstyle")
# NOTE(review): extract only — upstream line numbers fused into each line;
# the `fontmap` initialization line is missing.
# Purpose: when TeX fonts are in use, revert native Chivo font settings to
# LaTeX packages and add them to the preamble.  Mirrors revert_CantarellFont.
2956 def revert_ChivoFont(document):
2957 " Revert native Chivo font definition to LaTeX "
2959 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2961 fm = createFontMapping(['Chivo'])
2962 if revert_fonts(document, fm, fontmap, False, True):
2963 add_preamble_fonts(document, fontmap)
# NOTE(review): extract only — upstream line numbers fused into each line.
# Purpose: when TeX fonts are in use, convert Fira font declarations to the
# native representation; the "lf" argument selects the lining-figures
# variant (unlike the "oldstyle" used for Cantarell/Chivo).
2966 def convert_FiraFont(document):
2967 " Handle Fira font definition to LaTeX "
2969 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2970 fm = createFontMapping(['Fira'])
2971 convert_fonts(document, fm, "lf")
# NOTE(review): extract only — upstream line numbers fused into each line;
# the `fontmap` initialization line is missing.
# Purpose: when TeX fonts are in use, revert native Fira font settings to
# LaTeX packages and add them to the preamble.  Mirrors revert_ChivoFont.
2973 def revert_FiraFont(document):
2974 " Revert native Fira font definition to LaTeX "
2976 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2978 fm = createFontMapping(['Fira'])
2979 if revert_fonts(document, fm, fontmap, False, True):
2980 add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """Move the IBM Plex 'Semibold' font variants to font options.

    For each of roman/sans/typewriter: if the *Semibold variant of an IBM
    Plex family is selected, switch to the plain family and record the
    "semibold" option in \\font_*_opts (TeX fonts only).
    """

    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_roman_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_roman_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_sans_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_sans_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_typewriter_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    # BUGFIX: was using sfopts (sans options) here; the
                    # typewriter options must come from this header line.
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    """Merge diverse Noto regular fonts.

    Maps the old TLF-suffixed Noto family names (roman/sans/typewriter)
    to the new *Regular names in the document header.
    """

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Nothing to do unless the document is set up for TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    # Only relevant when the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap collects LaTeX package -> options pairs for the preamble.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options.

    Non-standard paper sizes supported natively by memoir/KOMA-script
    are moved back into the \\options line; \\papersize becomes default.
    """

    # BUGFIX: was textclass[:2] != "scr" — a 2-char slice can never equal
    # the 3-char "scr", so KOMA classes were always (wrongly) skipped.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active, the paper size is handled by geometry instead.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes that LyX 2.3 already understands; nothing to revert for these.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No \options line yet: create one just before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA.

    If the class supports the paper size natively, keep \\papersize and
    make sure geometry stays enabled when it was previously in use.
    """

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active, nothing changes.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes that need no conversion.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # BUGFIX: was document.header[1] (a fixed line) instead of the
        # found index i, clobbering an unrelated header line.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA to class options.

    Font sizes other than default/10/11/12 are only understood natively
    by LyX 2.4; move them into the \\options line as fontsize=<val>.
    """

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes LyX 2.3 already supports; nothing to revert for these.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No \options line yet: create one just before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
def revert_dupqualicites(document):
    """Revert qualified citation list commands with duplicate keys to ERT.

    LyX 2.3 only supports qualified citation lists with unique keys, so
    insets that cite the same key more than once are rewritten as raw
    biblatex multicite commands (e.g. \\textcites(pre)(post)[a][b]{key}).
    """

    # Get the cite engine; only biblatex engines have these commands.
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # No qualified list data: nothing to revert.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            key = "???"

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # All keys unique: LyX 2.3 can represent this inset.
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # pretextlist/posttextlist are tab-separated "<key> <text>" pairs;
        # collect the texts per key (again tab-separated for duplicates).
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        # Parentheses in pre/post notes must be brace-protected in the
        # (pre)(post) optional arguments of multicite commands.
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                # Consume the first queued pre-note for this key.
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                # Consume the first queued post-note for this key.
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # A pre-note without post-note needs an empty second bracket.
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    " Convert LyX page size names (drop the LaTeX-style 'paper' suffix) "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    " Revert LyX page size names (restore the LaTeX-style 'paper' suffix) "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
# LyX document-format versions this conversion module produces/accepts.
supported_versions = ["2.4.0", "2.4"]
3400 [545, [convert_lst_literalparam]],
3405 [550, [convert_fontenc]],
3412 [557, [convert_vcsinfo]],
3413 [558, [removeFrontMatterStyles]],
3416 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3420 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3421 [566, [convert_hebrew_parentheses]],
3427 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3428 [573, [convert_inputencoding_namechange]],
3429 [574, [convert_ruby_module, convert_utf8_japanese]],
3430 [575, [convert_lineno]],
3432 [577, [convert_linggloss]],
3436 [581, [convert_osf]],
3437 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3438 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3440 [585, [convert_pagesizes]],
3442 [587, [convert_pagesizenames]]
3445 revert = [[586, [revert_pagesizenames]],
3446 [585, [revert_dupqualicites]],
3447 [584, [revert_pagesizes,revert_komafontsizes]],
3448 [583, [revert_vcsinfo_rev_abbrev]],
3449 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3450 [581, [revert_CantarellFont,revert_FiraFont]],
3451 [580, [revert_texfontopts,revert_osf]],
3452 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3453 [578, [revert_babelfont]],
3454 [577, [revert_drs]],
3455 [576, [revert_linggloss, revert_subexarg]],
3456 [575, [revert_new_languages]],
3457 [574, [revert_lineno]],
3458 [573, [revert_ruby_module, revert_utf8_japanese]],
3459 [572, [revert_inputencoding_namechange]],
3460 [571, [revert_notoFonts]],
3461 [570, [revert_cmidruletrimming]],
3462 [569, [revert_bibfileencodings]],
3463 [568, [revert_tablestyle]],
3464 [567, [revert_soul]],
3465 [566, [revert_malayalam]],
3466 [565, [revert_hebrew_parentheses]],
3467 [564, [revert_AdobeFonts]],
3468 [563, [revert_lformatinfo]],
3469 [562, [revert_listpargs]],
3470 [561, [revert_l7ninfo]],
3471 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3472 [559, [revert_timeinfo, revert_namenoextinfo]],
3473 [558, [revert_dateinfo]],
3474 [557, [addFrontMatterStyles]],
3475 [556, [revert_vcsinfo]],
3476 [555, [revert_bibencoding]],
3477 [554, [revert_vcolumns]],
3478 [553, [revert_stretchcolumn]],
3479 [552, [revert_tuftecite]],
3480 [551, [revert_floatpclass, revert_floatalignment]],
3481 [550, [revert_nospellcheck]],
3482 [549, [revert_fontenc]],
3483 [548, []],# dummy format change
3484 [547, [revert_lscape]],
3485 [546, [revert_xcharter]],
3486 [545, [revert_paratype]],
3487 [544, [revert_lst_literalparam]]
3491 if __name__ == "__main__":