1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
49 def add_preamble_fonts(document, fontmap):
50 """Add collected font-packages with their option to user-preamble"""
# NOTE(review): this listing is truncated -- the loop over fontmap's package
# keys (and the branch taken when a package has no options) is missing here;
# 'pkg' below is the loop variable of that missing loop.  Confirm against the
# full source before editing.
53 if len(fontmap[pkg]) > 0:
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
# Emits e.g. \usepackage[opt1,opt2]{pkg} into the user preamble.
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the "<package>:<opt1>-<opt2>-..." key used in pkg2fontmap."""
    joined_options = "-".join(options)
    return "%s:%s" % (pkg, joined_options)
# NOTE(review): truncated listing -- the enclosing `class fontinfo:` header,
# its `def __init__(self):` line, and the `def addkey(self):` header (which
# owns the last assignment below) are missing from this view.
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# addkey(): derive the pkg2fontmap key from package name + options.
79 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): truncated listing -- the enclosing `class fontmapping:` header
# and its `def __init__(self):` line are missing from this view.
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 """Expand fontinfo mapping"""
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
# NOTE(review): truncated listing -- the per-font loop header, the
# `fe = fontinfo()` construction, option splitting, and the addkey() call are
# missing between the lines below; 'fe' and 'font_name' come from that
# missing code.
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
# NOTE(review): 'document' is not a parameter of this method; this call
# looks unreachable-by-design error reporting -- verify in the full source.
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
124 def getfontname(self, pkg, options):
# Reverse lookup: package name + options -> LyX font name (or None).
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
# NOTE(review): truncated listing -- the early `return None` for this
# branch and the final return statements are missing from this view.
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
# 'document' is not in scope here; see matching note in expandFontMapping.
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Sanity check: the forward map must agree with the reverse map.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
# NOTE(review): truncated listing -- the `fm = fontmapping()` construction,
# the `if font == 'DejaVu':` / `elif font == 'IBM':` / `elif font == 'Noto':`
# / Fira branch headers, and the final `return fm` are missing from this view.
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
199 def convert_fonts(document, fm, osfoption = "osf"):
200 """Handle font definition (LaTeX preamble -> native)"""
# NOTE(review): truncated listing -- the scan loop header, several guards
# (`if i == -1: break` etc.), option-bookkeeping lines and `continue`s are
# missing between the lines below.  Keep byte-identical; do not run as-is.
201 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
202 rscaleopt = re.compile(r'^scaled?=(.*)')
204 # Check whether we go beyond font option feature introduction
205 haveFontOpts = document.end_format > 580
# Scan the LaTeX preamble for \usepackage[opts]{pkg} lines.
209 i = find_re(document.preamble, rpkg, i+1)
212 mo = rpkg.search(document.preamble[i])
213 if mo == None or mo.group(2) == None:
216 options = mo.group(2).replace(' ', '').split(",")
# Pull the osf and scaled=/scale= options out of the option list.
221 while o < len(options):
222 if options[o] == osfoption:
226 mo = rscaleopt.search(options[o])
234 if not pkg in fm.pkginmap:
239 # Try with name-option combination first
240 # (only one default option supported currently)
242 while o < len(options):
244 fn = fm.getfontname(pkg, [opt])
251 fn = fm.getfontname(pkg, [])
253 fn = fm.getfontname(pkg, options)
# A known font package was matched: drop the preamble line and update header.
256 del document.preamble[i]
257 fontinfo = fm.font2pkgmap[fn]
258 if fontinfo.scaletype == None:
261 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
262 fontinfo.scaleval = oscale
263 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
264 if fontinfo.osfopt == None:
# NOTE(review): extend() with a string appends single characters --
# looks suspicious; presumably options.append(osfoption) was meant. Verify.
265 options.extend(osfoption)
267 osf = find_token(document.header, "\\font_osf false")
268 osftag = "\\font_osf"
269 if osf == -1 and fontinfo.fonttype != "math":
270 # Try with newer format
271 osftag = "\\font_" + fontinfo.fonttype + "_osf"
272 osf = find_token(document.header, osftag + " false")
274 document.header[osf] = osftag + " true"
275 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
276 del document.preamble[i-1]
# Transfer the scale factor into the \font_*_scale header line.
278 if fontscale != None:
279 j = find_token(document.header, fontscale, 0)
281 val = get_value(document.header, fontscale, j)
285 scale = "%03d" % int(float(oscale) * 100)
286 document.header[j] = fontscale + " " + scale + " " + vals[1]
# Replace the font name in the \font_roman/sans/typewriter header line.
287 ft = "\\font_" + fontinfo.fonttype
288 j = find_token(document.header, ft, 0)
290 val = get_value(document.header, ft, j)
291 words = val.split() # ! splits also values like '"DejaVu Sans"'
292 words[0] = '"' + fn + '"'
293 document.header[j] = ft + ' ' + ' '.join(words)
# Newer formats store remaining package options in \font_*_opts.
294 if haveFontOpts and fontinfo.fonttype != "math":
295 fotag = "\\font_" + fontinfo.fonttype + "_opts"
296 fo = find_token(document.header, fotag)
298 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
300 # Sensible place to insert tag
301 fo = find_token(document.header, "\\font_sf_scale")
303 document.warning("Malformed LyX document! Missing \\font_sf_scale")
305 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
308 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
309 """Revert native font definition to LaTeX"""
310 # fonlist := list of fonts created from the same package
311 # Empty package means that the font-name is the same as the package-name
312 # fontmap (key = package, val += found options) will be filled
313 # and used later in add_preamble_fonts() to be added to user-preamble
# NOTE(review): truncated listing -- loop guards, `continue` lines, the
# fontmap initialization per key, the xval1/xval2 extraction and the final
# return value are missing between the lines below.
315 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
316 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
# Walk all \font_roman/sans/typewriter/math header lines.
318 while i < len(document.header):
319 i = find_re(document.header, rfontscale, i+1)
322 mo = rfontscale.search(document.header[i])
325 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
326 val = get_value(document.header, ft, i)
327 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
328 font = words[0].strip('"') # TeX font name has no whitespace
329 if not font in fm.font2pkgmap:
331 fontinfo = fm.font2pkgmap[font]
332 val = fontinfo.package
333 if not val in fontmap:
# Optionally transfer \font_*_opts values into the package option list.
336 if OnlyWithXOpts or WithXOpts:
337 if ft == "\\font_math":
339 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
340 if ft == "\\font_sans":
341 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
342 elif ft == "\\font_typewriter":
343 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
344 x = find_re(document.header, regexp, 0)
345 if x == -1 and OnlyWithXOpts:
349 # We need to use this regex since split() does not handle quote protection
350 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
351 opts = xopts[1].strip('"').split(",")
352 fontmap[val].extend(opts)
353 del document.header[x]
# Reset the header font to "default" and collect scale/osf options.
354 words[0] = '"default"'
355 document.header[i] = ft + ' ' + ' '.join(words)
356 if fontinfo.scaleopt != None:
357 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
358 mo = rscales.search(xval)
363 # set correct scale option
364 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
365 if fontinfo.osfopt != None:
367 if fontinfo.osfdef == "true":
369 osf = find_token(document.header, "\\font_osf " + oldval)
370 if osf == -1 and ft != "\\font_math":
371 # Try with newer format
372 osftag = "\\font_roman_osf " + oldval
373 if ft == "\\font_sans":
374 osftag = "\\font_sans_osf " + oldval
375 elif ft == "\\font_typewriter":
376 osftag = "\\font_typewriter_osf " + oldval
377 osf = find_token(document.header, osftag)
379 fontmap[val].extend([fontinfo.osfopt])
380 if len(fontinfo.options) > 0:
381 fontmap[val].extend(fontinfo.options)
384 ###############################################################################
386 ### Conversion and reversion routines
388 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    "auto" becomes "auto-legacy" and "default" becomes
    "auto-legacy-plain" in the \\inputencoding header line.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing header line: without it, indexing with -1
    # would silently clobber the last header entry.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back.

    "auto-legacy-plain" becomes "default" and "auto-legacy" becomes
    "auto" in the \\inputencoding header line (inverse of
    convert_inputencoding_namechange).
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing header line: index -1 would overwrite the
    # last header entry otherwise.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    # Documents using non-TeX (system) fonts carry no preamble font
    # packages, so there is nothing to convert.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Noto'])
    convert_fonts(document, mapping)
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: fontmap was referenced without being initialized, which
        # raises NameError on every run of this branch.
        fontmap = dict()  # package name -> list of collected options
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    # Nothing to do for documents set to non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: fontmap was referenced without being initialized, which
        # raises NameError on every run of this branch.
        fontmap = dict()  # package name -> list of collected options
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    # Nothing to do for documents set to non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Adobe'])
    convert_fonts(document, mapping)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: fontmap was referenced without being initialized, which
        # raises NameError on every run of this branch.
        fontmap = dict()  # package name -> list of collected options
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
454 def removeFrontMatterStyles(document):
455 """Remove styles Begin/EndFrontmatter"""
# NOTE(review): truncated listing -- the scan loop header, the `if i == -1`
# exit, the `continue` for non-matching layouts and the blank-line counter
# are missing between the lines below.
457 layouts = ['BeginFrontmatter', 'EndFrontmatter']
458 tokenend = len('\\begin_layout ')
461 i = find_token_exact(document.body, '\\begin_layout ', i+1)
464 layout = document.body[i][tokenend:].strip()
465 if layout not in layouts:
467 j = find_end_of_layout(document.body, i)
469 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Also swallow trailing blank lines after the removed layout.
471 while document.body[j+1].strip() == '':
473 document.body[i:j+1] = []
475 def addFrontMatterStyles(document):
476 """Use styles Begin/EndFrontmatter for elsarticle"""
# NOTE(review): truncated listing -- the early return for other text
# classes, parts of the inserted note body, the scan loop header and the
# bookkeeping of 'first'/'k' are missing between the lines below.
478 if document.textclass != "elsarticle":
# Nested helper: wrap the [above, below) blank region around 'line' with a
# Begin/EndFrontmatter layout plus an explanatory note inset.
481 def insertFrontmatter(prefix, line):
483 while above > 0 and document.body[above-1].strip() == '':
486 while document.body[below].strip() == '':
488 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
489 '\\begin_inset Note Note',
491 '\\begin_layout Plain Layout',
494 '\\end_inset', '', '',
497 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
498 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
499 tokenend = len('\\begin_layout ')
503 i = find_token_exact(document.body, '\\begin_layout ', i+1)
506 layout = document.body[i][tokenend:].strip()
507 if layout not in layouts:
509 k = find_end_of_layout(document.body, i)
511 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End after the last frontmatter layout, then Begin before the first
# (End first so 'first' stays a valid index).
518 insertFrontmatter('End', k+1)
519 insertFrontmatter('Begin', first)
522 def convert_lst_literalparam(document):
523 """Add param literal to include inset"""
# NOTE(review): truncated listing -- the scan loop header, the `if i == -1`
# exit and the index advance inside the while are missing below.
527 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
530 j = find_end_of_inset(document.body, i)
532 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip to the first blank line inside the inset, then insert the new param.
534 while i < j and document.body[i].strip() != '':
536 document.body.insert(i, 'literal "true"')
539 def revert_lst_literalparam(document):
540 """Remove param literal from include inset"""
# NOTE(review): truncated listing -- the scan loop header and the
# `if i == -1` / `if j == -1` exits are missing below.
544 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
547 j = find_end_of_inset(document.body, i)
549 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
# Drop the 'literal' parameter line from the inset body.
551 del_token(document.body, 'literal', i, j)
554 def revert_paratype(document):
555 """Revert ParaType font definitions to LaTeX"""
# NOTE(review): truncated listing -- several guards (-1 checks), the
# sf_scale default, the try/except around float(), and the 'val' assignment
# are missing between the lines below.
557 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
559 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
560 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
561 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
562 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
565 sfval = find_token(document.header, "\\font_sf_scale", 0)
567 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
569 sfscale = document.header[sfval].split()
572 document.header[sfval] = " ".join(sfscale)
575 sf_scale = float(val)
577 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float but is compared against the string
# "100.0" -- this condition is always true.  Likely should be the float
# 100.0; confirm against the full source before changing.
580 if sf_scale != "100.0":
581 sfoption = "scaled=" + str(sf_scale / 100.0)
582 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
583 ttval = get_value(document.header, "\\font_tt_scale", 0)
588 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: one \usepackage{paratype} covers everything.
589 if i1 != -1 and i2 != -1 and i3!= -1:
590 add_to_preamble(document, ["\\usepackage{paratype}"])
593 add_to_preamble(document, ["\\usepackage{PTSerif}"])
594 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
597 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
599 add_to_preamble(document, ["\\usepackage{PTSans}"])
600 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
603 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
605 add_to_preamble(document, ["\\usepackage{PTMono}"])
606 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
609 def revert_xcharter(document):
610 """Revert XCharter font definitions to LaTeX"""
# NOTE(review): truncated listing -- the `if i == -1: return` guard, the
# options-string construction and the osf handling lines are missing below.
612 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
616 # replace unsupported font setting
617 document.header[i] = document.header[i].replace("xcharter", "default")
618 # no need for preamble code with system fonts
619 if get_bool_value(document.header, "\\use_non_tex_fonts"):
622 # transfer old style figures setting to package options
623 j = find_token(document.header, "\\font_osf true")
626 document.header[j] = "\\font_osf false"
630 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
633 def revert_lscape(document):
634 """Reverts the landscape environment (Landscape module) to TeX-code"""
# NOTE(review): truncated listing -- the scan loop header, the `if i == -1`
# exit and the `else:` that separates the two replacement branches are
# missing between the lines below.
636 if not "landscape" in document.get_module_list():
641 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
644 j = find_end_of_inset(document.body, i)
646 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant: wrap in afterpage{...} so the page break is deferred.
649 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
650 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
651 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
652 add_to_preamble(document, ["\\usepackage{afterpage}"])
654 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
655 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
657 add_to_preamble(document, ["\\usepackage{pdflscape}"])
658 document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ("global" -> "auto")."""

    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard against a missing header line: index -1 would rewrite the
    # last header entry otherwise.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ("auto" -> "global")."""

    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard against a missing header line: index -1 would rewrite the
    # last header entry otherwise.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
681 def revert_nospellcheck(document):
682 """Remove nospellcheck font info param"""
# NOTE(review): truncated listing -- the loop around this find_token call,
# its exit condition, and the deletion of the found line are all missing
# from this view.
686 i = find_token(document.body, '\\nospellcheck', i)
692 def revert_floatpclass(document):
693 """Remove float placement params 'document' and 'class'"""
# NOTE(review): truncated listing -- the scan loop header, the `if i == -1`
# exit and the deletion/rewrite of the found placement lines are missing
# between the lines below.
695 del_token(document.header, "\\float_placement class")
699 i = find_token(document.body, '\\begin_inset Float', i + 1)
702 j = find_end_of_inset(document.body, i)
703 k = find_token(document.body, 'placement class', i, j)
705 k = find_token(document.body, 'placement document', i, j)
712 def revert_floatalignment(document):
713 """Remove float alignment params"""
# NOTE(review): truncated listing -- the scan loop header, -1 guards, the
# deletion of the alignment line and the index advance are missing between
# the lines below.
715 galignment = get_value(document.header, "\\float_alignment", delete=True)
719 i = find_token(document.body, '\\begin_inset Float', i + 1)
722 j = find_end_of_inset(document.body, i)
724 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
726 k = find_token(document.body, 'alignment', i, j)
730 alignment = get_value(document.body, "alignment", k)
# "document" means: inherit the global \float_alignment setting.
731 if alignment == "document":
732 alignment = galignment
734 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
736 document.warning("Can't find float layout!")
# Emit the equivalent alignment macro as ERT at the top of the float.
739 if alignment == "left":
740 alcmd = put_cmd_in_ert("\\raggedright{}")
741 elif alignment == "center":
742 alcmd = put_cmd_in_ert("\\centering{}")
743 elif alignment == "right":
744 alcmd = put_cmd_in_ert("\\raggedleft{}")
746 document.body[l+1:l+1] = alcmd
747 # There might be subfloats, so we do not want to move past
748 # the end of the inset.
751 def revert_tuftecite(document):
752 """Revert \cite commands in tufte classes"""
# NOTE(review): truncated listing -- the scan loop header, -1 guards, the
# filter on which cite commands are reverted, and the initial `res` string
# construction are missing between the lines below.
754 tufte = ["tufte-book", "tufte-handout"]
755 if document.textclass not in tufte:
760 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
763 j = find_end_of_inset(document.body, i)
765 document.warning("Can't find end of citation inset at line %d!!" %(i))
767 k = find_token(document.body, "LatexCommand", i, j)
769 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
772 cmd = get_value(document.body, "LatexCommand", k)
776 pre = get_quoted_value(document.body, "before", i, j)
777 post = get_quoted_value(document.body, "after", i, j)
778 key = get_quoted_value(document.body, "key", i, j)
780 document.warning("Citation inset at line %d does not have a key!" %(i))
782 # Replace command with ERT
# Optional prenote/postnote arguments, then the mandatory key argument.
785 res += "[" + pre + "]"
787 res += "[" + post + "]"
790 res += "{" + key + "}"
791 document.body[i:j+1] = put_cmd_in_ert([res])
796 def revert_stretchcolumn(document):
797 """We remove the column varwidth flags or everything else will become a mess."""
# NOTE(review): truncated listing -- the scan loop header and the
# `if i == -1` / `if j == -1` exits are missing between the lines below.
800 i = find_token(document.body, "\\begin_inset Tabular", i+1)
803 j = find_end_of_inset(document.body, i+1)
805 document.warning("Malformed LyX document: Could not find end of tabular.")
# Strip the varwidth attribute from every <column ...> tag in the inset.
807 for k in range(i, j):
808 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
809 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
810 document.body[k] = document.body[k].replace(' varwidth="true"', '')
813 def revert_vcolumns(document):
814 """Revert standard columns with line breaks etc."""
# NOTE(review): truncated listing -- the needarray/needvarwidth flags'
# initialization, the scan loop header, several guards, `vcand` setup, the
# `m` advances and multiple else branches are missing between the lines
# below.  Do not treat as runnable; keep byte-identical.
820 i = find_token(document.body, "\\begin_inset Tabular", i+1)
823 j = find_end_of_inset(document.body, i)
825 document.warning("Malformed LyX document: Could not find end of tabular.")
828 # Collect necessary column information
830 nrows = int(document.body[i+1].split('"')[3])
831 ncols = int(document.body[i+1].split('"')[5])
833 for k in range(ncols):
834 m = find_token(document.body, "<column", m)
835 width = get_option_value(document.body[m], 'width')
836 varwidth = get_option_value(document.body[m], 'varwidth')
837 alignment = get_option_value(document.body[m], 'alignment')
838 special = get_option_value(document.body[m], 'special')
839 col_info.append([width, varwidth, alignment, special, m])
# Pass 1: decide per cell whether its column must become a V{} column.
844 for row in range(nrows):
845 for col in range(ncols):
846 m = find_token(document.body, "<cell", m)
847 multicolumn = get_option_value(document.body[m], 'multicolumn')
848 multirow = get_option_value(document.body[m], 'multirow')
849 width = get_option_value(document.body[m], 'width')
850 rotate = get_option_value(document.body[m], 'rotate')
851 # Check for: linebreaks, multipars, non-standard environments
853 endcell = find_token(document.body, "</cell>", begcell)
855 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
857 elif count_pars_in_inset(document.body, begcell + 2) > 1:
859 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
861 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
862 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
864 alignment = col_info[col][2]
865 col_line = col_info[col][4]
# Build the special column preamble, e.g. >{\centering}V{\linewidth}.
867 if alignment == "center":
868 vval = ">{\\centering}"
869 elif alignment == "left":
870 vval = ">{\\raggedright}"
871 elif alignment == "right":
872 vval = ">{\\raggedleft}"
875 vval += "V{\\linewidth}"
877 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
878 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
879 # with newlines, and we do not want that)
# Pass 2: replace Newline insets inside cells by raw ERT equivalents.
881 endcell = find_token(document.body, "</cell>", begcell)
883 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
885 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
889 nle = find_end_of_inset(document.body, nl)
890 del(document.body[nle:nle+1])
892 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
894 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
# Pull in the LaTeX packages required by the generated column types.
900 if needarray == True:
901 add_to_preamble(document, ["\\usepackage{array}"])
902 if needvarwidth == True:
903 add_to_preamble(document, ["\\usepackage{varwidth}"])
906 def revert_bibencoding(document):
907 """Revert bibliography encoding"""
# NOTE(review): truncated listing -- guards, the `biblatex` flag, most of
# the encodings dict entries, the scan loop header, deletions and several
# else branches are missing between the lines below.  Keep byte-identical.
911 i = find_token(document.header, "\\cite_engine", 0)
913 document.warning("Malformed document! Missing \\cite_engine")
915 engine = get_value(document.header, "\\cite_engine", i)
# biblatex engines store the encoding as a bibencoding package option;
# plain BibTeX gets an \inputencoding group around the inset instead.
919 if engine in ["biblatex", "biblatex-natbib"]:
922 # Map lyx to latex encoding names
926 "armscii8" : "armscii8",
927 "iso8859-1" : "latin1",
928 "iso8859-2" : "latin2",
929 "iso8859-3" : "latin3",
930 "iso8859-4" : "latin4",
931 "iso8859-5" : "iso88595",
932 "iso8859-6" : "8859-6",
933 "iso8859-7" : "iso-8859-7",
934 "iso8859-8" : "8859-8",
935 "iso8859-9" : "latin5",
936 "iso8859-13" : "latin7",
937 "iso8859-15" : "latin9",
938 "iso8859-16" : "latin10",
939 "applemac" : "applemac",
941 "cp437de" : "cp437de",
958 "utf8-platex" : "utf8",
965 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
968 j = find_end_of_inset(document.body, i)
970 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
972 encoding = get_quoted_value(document.body, "encoding", i, j)
975 # remove encoding line
976 k = find_token(document.body, "encoding", i, j)
979 if encoding == "default":
981 # Re-find inset end line
982 j = find_end_of_inset(document.body, i)
985 h = find_token(document.header, "\\biblio_options", 0)
987 biblio_options = get_value(document.header, "\\biblio_options", h)
988 if not "bibencoding" in biblio_options:
989 document.header[h] += ",bibencoding=%s" % encodings[encoding]
991 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
993 # this should not happen
994 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
996 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex: bracket the inset with \bgroup\inputencoding{...} ... \egroup.
998 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
999 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
1005 def convert_vcsinfo(document):
1006 """Separate vcs Info inset from buffer Info inset."""
# NOTE(review): truncated listing -- the dict literal's opening (and its
# vcs-date entry and closing brace), the scan loop header, -1 guards and
# the check that the inset type is "buffer" are missing below.
1009 "vcs-revision" : "revision",
1010 "vcs-tree-revision" : "tree-revision",
1011 "vcs-author" : "author",
1012 "vcs-time" : "time",
1017 i = find_token(document.body, "\\begin_inset Info", i+1)
1020 j = find_end_of_inset(document.body, i+1)
1022 document.warning("Malformed LyX document: Could not find end of Info inset.")
1024 tp = find_token(document.body, 'type', i, j)
1025 tpv = get_quoted_value(document.body, "type", tp)
1028 arg = find_token(document.body, 'arg', i, j)
1029 argv = get_quoted_value(document.body, "arg", arg)
1030 if argv not in list(types.keys()):
# Rewrite buffer/vcs-* insets into the new dedicated vcs type.
1032 document.body[tp] = "type \"vcs\""
1033 document.body[arg] = "arg \"" + types[argv] + "\""
1036 def revert_vcsinfo(document):
1037 """Merge vcs Info inset to buffer Info inset."""
# NOTE(review): truncated listing -- the scan loop header, -1 guards and
# the check that the inset type is "vcs" are missing between the lines
# below.
1039 args = ["revision", "tree-revision", "author", "time", "date" ]
1042 i = find_token(document.body, "\\begin_inset Info", i+1)
1045 j = find_end_of_inset(document.body, i+1)
1047 document.warning("Malformed LyX document: Could not find end of Info inset.")
1049 tp = find_token(document.body, 'type', i, j)
1050 tpv = get_quoted_value(document.body, "type", tp)
1053 arg = find_token(document.body, 'arg', i, j)
1054 argv = get_quoted_value(document.body, "arg", arg)
1055 if argv not in args:
1056 document.warning("Malformed Info inset. Invalid vcs arg.")
# Fold the vcs inset back into the generic buffer inset with vcs- prefix.
1058 document.body[tp] = "type \"buffer\""
1059 document.body[arg] = "arg \"vcs-" + argv + "\""
1061 def revert_vcsinfo_rev_abbrev(document):
1062 " Convert abbreviated revisions to regular revisions. "
# NOTE(review): truncated listing -- the scan loop header, -1 guards and
# the check that the inset type is "vcs" are missing between the lines
# below.
1066 i = find_token(document.body, "\\begin_inset Info", i+1)
1069 j = find_end_of_inset(document.body, i+1)
1071 document.warning("Malformed LyX document: Could not find end of Info inset.")
1073 tp = find_token(document.body, 'type', i, j)
1074 tpv = get_quoted_value(document.body, "type", tp)
1077 arg = find_token(document.body, 'arg', i, j)
1078 argv = get_quoted_value(document.body, "arg", arg)
# Older format knew no abbreviated revision; fall back to the full one.
1079 if( argv == "revision-abbrev" ):
1080 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Replaces every Info inset of type "date", "fixdate" or "moddate" in
    document.body with the date formatted as plain text, using per-language
    strftime patterns.

    NOTE(review): the source listing of this function had gaps (missing
    control-flow lines); guards and loop scaffolding were reconstructed and
    should be verified against the complete file.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # BUGFIX: the loclong format contained a stray "%de" ("%d de %B %de %Y").
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        # BUGFIX: the long format contained a stray "%de" ("%A, %d de %B %de %Y").
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        }

    types = ["date", "fixdate", "moddate"]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # Fall back to English formats for languages without an entry.
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # fixdate args look like "<format>@<iso date>"
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

        result = ""
        if argv == "ISO":
            # BUGFIX: datetime.date has no isodate() method; isoformat()
            # is the correct name (yields YYYY-MM-DD).
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate a Qt date format string to strftime syntax.
            # BUGFIX: Qt "MMMM" is the *long* month name, i.e. %B (not %b).
            fmt = argv.replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # BUGFIX: keep the character preceding a bare "d"; the original
            # pattern '[^\'%]d' replaced that character along with the "d".
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3, 0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Replaces every Info inset of type "time", "fixtime" or "modtime" in
    document.body with the time formatted as plain text, using per-language
    strftime patterns.

    NOTE(review): the source listing of this function had gaps (missing
    control-flow lines); guards and loop scaffolding were reconstructed and
    should be verified against the complete file.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime"]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # Fall back to English formats for languages without an entry.
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))

        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate a Qt time format string to strftime syntax.
            # BUGFIX: replacing "HH" and then "H" re-matched the "H" inside
            # the freshly inserted "%H" and produced "%%H"; a single regex
            # pass handles both "HH" and "H" safely.
            fmt = re.sub("H{1,2}", "%H", argv)
            fmt = fmt.replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # BUGFIX: the original called dte.strftime(fmt), but the time
            # value in this function is named tme (NameError at runtime).
            result = tme.strftime(fmt)
        if sys.version_info < (3, 0):
            # Consistency with revert_dateinfo: Python 2 strftime returns
            # utf8-encoded binary strings.
            result = result.decode('utf-8')
        # BUGFIX: wrap in a list — assigning a bare string to a list slice
        # splices in one line per character.
        document.body[i : j+1] = [result]
# Reverts Info insets of type "name-noext" back to the plain "name" type by
# rewriting the inset's arg line, so older LyX versions can read the file.
# NOTE(review): the embedded original line numbers jump (1432 -> 1436, ...),
# so loop headers and guard/continue lines are missing from this listing;
# verify the control flow against the complete file.
1431 def revert_namenoextinfo(document):
1432     """Merge buffer Info inset type name-noext to name."""
1436         i = find_token(document.body, "\\begin_inset Info", i+1)
1439         j = find_end_of_inset(document.body, i+1)
1441             document.warning("Malformed LyX document: Could not find end of Info inset.")
1443         tp = find_token(document.body, 'type', i, j)
1444         tpv = get_quoted_value(document.body, "type", tp)
# Only insets whose arg is exactly "name-noext" are rewritten.
1447         arg = find_token(document.body, 'arg', i, j)
1448         argv = get_quoted_value(document.body, "arg", arg)
1449         if argv != "name-noext":
# Rewrite the arg line in place; the inset itself is kept.
1451             document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info insets to plain text.

    The inset argument is the raw UI string; trailing colons, the menu
    accelerator suffix ("|x") and Qt accelerator markers ("&") are stripped,
    while a literal " & " is preserved.

    NOTE(review): the source listing had gaps (missing loop/guard lines);
    the scaffolding was reconstructed and should be verified against the
    complete file.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # BUGFIX: wrap in a list — assigning a bare string to a list slice
        # splices in one line per character.
        document.body[i : j+1] = [argv]
# Converts "listpreamble" Argument insets back to a TeX-code (ERT) inset
# containing the argument's content wrapped in braces, inserted at the start
# of the containing paragraph.
# NOTE(review): the embedded original line numbers jump, so the loop header,
# guard lines and the line defining `parbeg` (presumably taken from the
# `parent` tuple returned by get_containing_layout) are missing from this
# listing; verify against the complete file.
1477 def revert_listpargs(document):
1478     """Reverts listpreamble arguments to TeX-code"""
1481         i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1484         j = find_end_of_inset(document.body, i)
1485         # Find containing paragraph layout
1486         parent = get_containing_layout(document.body, i)
1488             document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the inset's Plain Layout content, then delete the whole inset.
1491         beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1492         endPlain = find_end_of_layout(document.body, beginPlain)
1493         content = document.body[beginPlain + 1 : endPlain]
1494         del document.body[i:j+1]
# Re-insert the content as an ERT inset wrapped in braces at the paragraph
# start (parbeg).
1495         subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1496                  "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1497         document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert "lyxinfo layoutformat" Info insets to static text.

    The layout format number is hardcoded ("69", the format this module
    converts from).

    NOTE(review): the source listing had gaps (missing loop/guard lines);
    the scaffolding was reconstructed and should be verified against the
    complete file.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # BUGFIX: wrap in a list — assigning the bare string "69" to a list
        # slice would splice "6" and "9" in as two separate lines.
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    A stack of languages is maintained across nested layouts; only content
    lines (lines not starting with a backslash) in Hebrew context are
    rewritten in place.
    """
    # print("convert hebrew parentheses")
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: lstrip('\\lang ') strips a *character set* and also
            # eats leading letters of the language name itself (e.g.
            # "\lang ngerman" -> "erman"); slice off the prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # Nested layout inherits the current language.
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap via a NUL placeholder so "(" and ")" exchange cleanly.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The parenthesis swap is an involution, so reverting simply applies the
    forward conversion again; this wrapper exists only to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output.

    Delegates to the generic language-reversion helper with no babel name
    and "malayalam" as the polyglossia name.
    """
    revert_language(document, "malayalam", "", "malayalam")
# Replaces the soul-module flex insets (letter spacing, strikeout,
# underline, highlight, capitalize) by raw LaTeX commands and loads the
# required packages in the preamble.
# NOTE(review): the embedded original line numbers jump, so the loop header
# iterating `flex` over `flexes` and the `if i != -1:` guards are missing
# from this listing; verify against the complete file.
1556 def revert_soul(document):
1557     """Revert soul module flex insets to ERT"""
1559     flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# soul.sty is needed as soon as any of the flex insets occurs.
1562         i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1564             add_to_preamble(document, ["\\usepackage{soul}"])
# \hl additionally requires the color package.
1566     i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1568         add_to_preamble(document, ["\\usepackage{color}"])
# Map each flex inset to its soul LaTeX command.
1570     revert_flex_inset(document.body, "Spaceletters", "\\so")
1571     revert_flex_inset(document.body, "Strikethrough", "\\st")
1572     revert_flex_inset(document.body, "Underline", "\\ul")
1573     revert_flex_inset(document.body, "Highlight", "\\hl")
1574     revert_flex_inset(document.body, "Capitalize", "\\caps")
# Deletes the \tablestyle header line, which older formats do not know.
# NOTE(review): the guard line between the find_token call and the delete
# (presumably `if i != -1:`) is missing from this listing.
1577 def revert_tablestyle(document):
1578     """Remove tablestyle params"""
1580     i = find_token(document.header, "\\tablestyle")
1582         del document.header[i]
# Reverts per-bibliography-file encodings (Biblatex only): the
# "file_encodings" parameter is removed from bibtex insets, each bibfile is
# re-registered in the preamble via \addbibresource[bibencoding=...], a raw
# \printbibliography ERT is inserted, and the original inset is preserved
# inside a Note inset.
# NOTE(review): the embedded original line numbers jump throughout, so loop
# headers, guards and early returns are missing from this listing; verify
# the control flow against the complete file.
1585 def revert_bibfileencodings(document):
1586     """Revert individual Biblatex bibliography encodings"""
1590     i = find_token(document.header, "\\cite_engine", 0)
1592         document.warning("Malformed document! Missing \\cite_engine")
1594         engine = get_value(document.header, "\\cite_engine", i)
# Only Biblatex engines support per-file encodings.
1598     if engine in ["biblatex", "biblatex-natbib"]:
1601     # Map lyx to latex encoding names
1605         "armscii8" : "armscii8",
1606         "iso8859-1" : "latin1",
1607         "iso8859-2" : "latin2",
1608         "iso8859-3" : "latin3",
1609         "iso8859-4" : "latin4",
1610         "iso8859-5" : "iso88595",
1611         "iso8859-6" : "8859-6",
1612         "iso8859-7" : "iso-8859-7",
1613         "iso8859-8" : "8859-8",
1614         "iso8859-9" : "latin5",
1615         "iso8859-13" : "latin7",
1616         "iso8859-15" : "latin9",
1617         "iso8859-16" : "latin10",
1618         "applemac" : "applemac",
1620         "cp437de" : "cp437de",
1628         "cp1250" : "cp1250",
1629         "cp1251" : "cp1251",
1630         "cp1252" : "cp1252",
1631         "cp1255" : "cp1255",
1632         "cp1256" : "cp1256",
1633         "cp1257" : "cp1257",
1634         "koi8-r" : "koi8-r",
1635         "koi8-u" : "koi8-u",
1637         "utf8-platex" : "utf8",
# Process every bibtex inset in the body.
1644         i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1647         j = find_end_of_inset(document.body, i)
1649             document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1651         encodings = get_quoted_value(document.body, "file_encodings", i, j)
1655         bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1656         opts = get_quoted_value(document.body, "biblatexopts", i, j)
1657         if len(bibfiles) == 0:
1658             document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1659         # remove encoding line
1660         k = find_token(document.body, "file_encodings", i, j)
1662             del document.body[k]
1663         # Re-find inset end line
1664         j = find_end_of_inset(document.body, i)
# "file_encodings" is a tab-separated list of "<bibfile> <encoding>" pairs;
# build a bibfile -> encoding map from it.
1666         enclist = encodings.split("\t")
1669             ppp = pp.split(" ", 1)
1670             encmap[ppp[0]] = ppp[1]
1671         for bib in bibfiles:
1672             pr = "\\addbibresource"
1673             if bib in encmap.keys():
1674                 pr += "[bibencoding=" + encmap[bib] + "]"
1675             pr += "{" + bib + "}"
1676             add_to_preamble(document, [pr])
1677         # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1678         pcmd = "printbibliography"
1680             pcmd += "[" + opts + "]"
1681         repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1682                 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1683                 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1684                 "status open", "", "\\begin_layout Plain Layout" ]
1685         repl += document.body[i:j+1]
1686         repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1687         document.body[i:j+1] = repl
# Strips the bottomline/topline l/r trim attributes from table <cell ...>
# lines, which older formats do not understand.
# NOTE(review): the loop header and the guards around the find calls are
# missing from this listing; verify against the complete file.
1693 def revert_cmidruletrimming(document):
1694     """Remove \\cmidrule trimming"""
1696     # FIXME: Revert to TeX code?
1699         # first, let's find out if we need to do anything
1700         i = find_token(document.body, '<cell ', i+1)
1703         j = document.body[i].find('trim="')
# Drop e.g. bottomlineltrim="true" / toplinertrim="true" attributes.
1706         rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1707         # remove trim option
1708         document.body[i] = rgx.sub('', document.body[i])
# NOTE(review): these lines are the interior of the module-level
# `ruby_inset_def` list (a local InsetLayout definition for Flex:Ruby used
# by convert/revert_ruby_module below); the opening assignment line and the
# closing bracket fall outside this listing.
1712     r'### Inserted by lyx2lyx (ruby inset) ###',
1713     r'InsetLayout Flex:Ruby',
1714     r'    LyxType       charstyle',
1715     r'    LatexType     command',
1719     r'    HTMLInnerTag  rb',
1720     r'    HTMLInnerAttr ""',
1722     r'    LabelString  "Ruby"',
1723     r'    Decoration    Conglomerate',
# Preamble chooses a \ruby implementation depending on the TeX engine.
1725     r'      \ifdefined\kanjiskip',
1726     r'        \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1727     r'      \else \ifdefined\luatexversion',
1728     r'        \usepackage{luatexja-ruby}',
1729     r'      \else \ifdefined\XeTeXversion',
1730     r'        \usepackage{ruby}%',
1732     r'      \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1734     r'    Argument  post:1',
1735     r'      LabelString  "ruby text"',
1736     r'      MenuString  "Ruby Text|R"',
1737     r'      Tooltip    "Reading aid (ruby, furigana) for Chinese characters."',
1738     r'      Decoration  Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition.

    If the local ruby inset layout is present, it is removed and replaced
    by the equivalent "ruby" module; otherwise nothing happens.
    """
    if not document.del_local_layout(ruby_inset_def):
        # No local ruby layout in this document: nothing to convert.
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition.

    If the "ruby" module is in use, it is removed and the equivalent local
    inset layout is appended instead; otherwise nothing happens.
    """
    if not document.del_module("ruby"):
        # Module not used by this document: nothing to revert.
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    Documents whose language flavor still uses its dedicated utf8 variant
    (utf8-platex for japanese, utf8-cjk for japanese-cjk) are switched to
    plain utf8.
    """
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    japanese_variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    expected = japanese_variants.get(lang)
    if expected is not None and get_value(document.header, "\\inputencoding") == expected:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    Documents in generic utf8 are switched back to the dedicated encoding
    variant of their Japanese language flavor (utf8-platex for japanese,
    utf8-cjk for japanese-cjk).
    """
    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    japanese_variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    lang = get_value(document.header, "\\language")
    if lang in japanese_variants:
        document.set_parameter("inputencoding", japanese_variants[lang])
# Removes the native \use_lineno / \lineno_options header settings and
# emulates them by loading lineno.sty (with the stored options) in the
# user preamble.
# NOTE(review): the embedded original line numbers jump, so the
# continuation of the get_quoted_value call, the early return, the guard
# around the options bracketing and the tail of the add_to_preamble call
# are missing from this listing; verify against the complete file.
1786 def revert_lineno(document):
1787     " Replace lineno setting with user-preamble code."
1789     options = get_quoted_value(document.header, "\\lineno_options",
# If line numbering is off, only the header lines need deleting.
1791     if not get_bool_value(document.header, "\\use_lineno", delete=True):
1794         options = "[" + options + "]"
1795     add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# Detects lineno.sty usage in the user preamble (a \usepackage{lineno}
# line directly followed by \linenumbers), removes it, and writes the
# native \use_lineno (and \lineno_options, if any) header settings instead.
# NOTE(review): the embedded original line numbers jump, so guards, the
# definition of `use_lineno`, and the if/else around the header insertion
# are missing from this listing; verify against the complete file.
1798 def convert_lineno(document):
1799     " Replace user-preamble code with native lineno support."
1802     i = find_token(document.preamble, "\\linenumbers", 1)
# The \usepackage line is expected immediately before \linenumbers.
1804     usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1807     options = usepkg.group(1).strip("[]")
1808     del(document.preamble[i-1:i+1])
1809     del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the new header settings just before the \index header.
1811     k = find_token(document.header, "\\index ")
1813         document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1815         document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1816                                 "\\lineno_options %s" % options]
# For the "aa" (Astronomy & Astrophysics) class: if the document relied on
# the class default options with a latin9-compatible input encoding, switch
# off default options and add "latin9" to \options explicitly, compensating
# for the class's encoding default change.
# NOTE(review): the embedded original line numbers jump, so early returns
# and the if/else guard around the \options handling are missing from this
# listing; verify against the complete file.
1819 def convert_aaencoding(document):
1820     " Convert default document option due to encoding change in aa class. "
1822     if document.textclass != "aa":
1825     i = find_token(document.header, "\\use_default_options true")
1828     val = get_value(document.header, "\\inputencoding")
1830         document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
1832     if val == "auto-legacy" or val == "latin9":
1833         document.header[i] = "\\use_default_options false"
# Add latin9 to an existing \options line, or create one.
1834         k = find_token(document.header, "\\options")
1836             document.header.insert(i, "\\options latin9")
1838             document.header[k] += ",latin9"
# Counterpart of convert_aaencoding for the "aa" class: disables default
# options and adds "utf8" to \options (creating the line if necessary).
# NOTE(review): the embedded original line numbers jump, so early returns
# and the guards around the \inputencoding check and the \options handling
# are missing from this listing; verify against the complete file.
1841 def revert_aaencoding(document):
1842     " Revert default document option due to encoding change in aa class. "
1844     if document.textclass != "aa":
1847     i = find_token(document.header, "\\use_default_options true")
1850     val = get_value(document.header, "\\inputencoding")
1852         document.warning("Malformed LyX Document! Missing \\inputencoding header.")
1855         document.header[i] = "\\use_default_options false"
# Add utf8 to an existing \options line, or create one.
1856         k = find_token(document.header, "\\options", 0)
1858             document.header.insert(i, "\\options utf8")
1860             document.header[k] = document.header[k] + ",utf8"
# Emulates languages added in LyX 2.4 (Azerbaijani, Bengali, Church
# Slavonic, Russian Petrine orthography, Korean) for older versions by
# collecting every such language used in the document and delegating to
# revert_language with the matching babel/polyglossia names.
# NOTE(review): the embedded original line numbers jump, so the dict
# closing brace, the if/else around used_languages initialization, and the
# body-scan loop header/guards are missing from this listing; verify
# against the complete file.
1863 def revert_new_languages(document):
1864     """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1865     and Russian (Petrine orthography)."""
1867     # lyxname: (babelname, polyglossianame)
1868     new_languages = {"azerbaijani": ("azerbaijani", ""),
1869                      "bengali": ("", "bengali"),
1870                      "churchslavonic": ("", "churchslavonic"),
1871                      "oldrussian": ("", "russian"),
1872                      "korean": ("", "korean"),
# Seed with the main document language if it is one of the new ones.
1874     if document.language in new_languages:
1875         used_languages = set((document.language, ))
1877         used_languages = set()
# Scan the body for \lang switches to any of the new languages.
1880         i = find_token(document.body, "\\lang", i+1)
1883         val = get_value(document.body, "\\lang", i)
1884         if val in new_languages:
1885             used_languages.add(val)
1887     # Korean is already supported via CJK, so leave as-is for Babel
1888     if ("korean" in used_languages
1889         and (not get_bool_value(document.header, "\\use_non_tex_fonts")
1890              or get_value(document.header, "\\language_package") == "babel")):
1891         used_languages.discard("korean")
1893     for lang in used_languages:
1894         revert_language(document, lang, *new_languages[lang])
# NOTE(review): these lines are the interior of the module-level
# `gloss_inset_def` list (local InsetLayout definition for the deprecated
# Flex:Glosse inset, used by convert/revert_linggloss below); the opening
# assignment line and the closing bracket fall outside this listing.
1898     r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1899     r'InsetLayout Flex:Glosse',
1901     r'  LabelString "Gloss (old version)"',
1902     r'  MenuString "Gloss (old version)"',
1903     r'  LatexType environment',
1904     r'  LatexName linggloss',
1905     r'  Decoration minimalistic',
1910     r'  CustomPars false',
1911     r'  ForcePlain true',
1912     r'  ParbreakIsNewline true',
1913     r'  FreeSpacing true',
1914     r'  Requires covington',
# Preamble defines the linggloss environment unless already provided.
1917     r'    \@ifundefined{linggloss}{%',
1918     r'      \newenvironment{linggloss}[2][]{',
1919     r'         \def\glosstr{\glt #1}%',
1921     r'         {\glosstr\glend}}{}',
1924     r'  ResetsFont true',
1926     r'    Decoration conglomerate',
1927     r'    LabelString "Translation"',
1928     r'    MenuString "Glosse Translation|s"',
1929     r'    Tooltip "Add a translation for the glosse"',
# Local InsetLayout definition for the deprecated Flex:Tri-Glosse inset
# (three-line interlinear gloss), used by convert/revert_linggloss below.
# NOTE(review): the closing bracket of this list falls outside this
# listing (the embedded numbering jumps from 1966 to 1971).
1934 glosss_inset_def = [
1935     r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1936     r'InsetLayout Flex:Tri-Glosse',
1938     r'  LabelString "Tri-Gloss (old version)"',
1939     r'  MenuString "Tri-Gloss (old version)"',
1940     r'  LatexType environment',
1941     r'  LatexName lingglosss',
1942     r'  Decoration minimalistic',
1947     r'  CustomPars false',
1948     r'  ForcePlain true',
1949     r'  ParbreakIsNewline true',
1950     r'  FreeSpacing true',
1952     r'  Requires covington',
# Preamble defines the lingglosss environment unless already provided.
1955     r'    \@ifundefined{lingglosss}{%',
1956     r'      \newenvironment{lingglosss}[2][]{',
1957     r'         \def\glosstr{\glt #1}%',
1959     r'         {\glosstr\glend}}{}',
1961     r'  ResetsFont true',
1963     r'    Decoration conglomerate',
1964     r'    LabelString "Translation"',
1965     r'    MenuString "Glosse Translation|s"',
1966     r'    Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout.

    If the document uses one of the deprecated gloss flex insets, the
    corresponding local InsetLayout definition is appended so the inset
    keeps working.
    """
    deprecated_glosses = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for token, layout_def in deprecated_glosses:
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout_def)
# Revert the new Interlinear Gloss insets (2/3 lines) to raw LaTeX covington
# commands (\gll / \glll / \trigloss) in ERT, extracting the optional
# argument and the post: arguments as the command's brace groups.
# NOTE(review): gaps in the embedded line numbering (e.g. 1981, 1984-1985,
# 1988-1989, 1991-1992, 1994, 1996-1997, 2000-2001, 2005, 2008, 2012,
# 2065-2069, 2077, 2082, 2086) show that `return`, `break`/`continue`
# guards, `else:` branches of the del-fallbacks, and the cmd assignments
# are missing from this excerpt -- compare with upstream lyx_2_4.py.
1978 def revert_linggloss(document):
1979 " Revert to old ling gloss definitions "
1980 if not "linguistics" in document.get_module_list():
1982 document.del_local_layout(gloss_inset_def)
1983 document.del_local_layout(glosss_inset_def)
1986 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1987 for glosse in glosses:
1990 i = find_token(document.body, glosse, i+1)
1993 j = find_end_of_inset(document.body, i)
1995 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# --- optional Argument 1 (becomes the [...] option of the command) ---
1998 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1999 endarg = find_end_of_inset(document.body, arg)
2002 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2003 if argbeginPlain == -1:
2004 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2006 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2007 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2009 # remove Arg insets and paragraph, if it only contains this inset
2010 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2011 del document.body[arg - 1 : endarg + 4]
2013 del document.body[arg : endarg + 1]
# --- Argument post:1 (first gloss line) ---
2015 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2016 endarg = find_end_of_inset(document.body, arg)
2019 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2020 if argbeginPlain == -1:
2021 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2023 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2024 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2026 # remove Arg insets and paragraph, if it only contains this inset
2027 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2028 del document.body[arg - 1 : endarg + 4]
2030 del document.body[arg : endarg + 1]
# --- Argument post:2 (second gloss line) ---
2032 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2033 endarg = find_end_of_inset(document.body, arg)
2036 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2037 if argbeginPlain == -1:
2038 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2040 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2041 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2043 # remove Arg insets and paragraph, if it only contains this inset
2044 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2045 del document.body[arg - 1 : endarg + 4]
2047 del document.body[arg : endarg + 1]
# --- Argument post:3 (third gloss line, 3-line glosses only) ---
2049 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2050 endarg = find_end_of_inset(document.body, arg)
2053 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2054 if argbeginPlain == -1:
2055 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2057 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2058 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2060 # remove Arg insets and paragraph, if it only contains this inset
2061 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2062 del document.body[arg - 1 : endarg + 4]
2064 del document.body[arg : endarg + 1]
# --- rebuild the inset content as ERT around the extracted arguments ---
2067 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2070 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2071 endInset = find_end_of_inset(document.body, i)
2072 endPlain = find_end_of_layout(document.body, beginPlain)
2073 precontent = put_cmd_in_ert(cmd)
2074 if len(optargcontent) > 0:
2075 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2076 precontent += put_cmd_in_ert("{")
2078 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2079 if cmd == "\\trigloss":
2080 postcontent += put_cmd_in_ert("}{") + marg3content
2081 postcontent += put_cmd_in_ert("}")
2083 document.body[endPlain:endInset + 1] = postcontent
2084 document.body[beginPlain + 1:beginPlain] = precontent
2085 del document.body[i : beginPlain + 1]
2087 document.append_local_layout("Requires covington")
# Revert Subexample layouts that carry an Argument inset to a raw
# covington \begin{subexamples}[...] ... \end{subexamples} block in ERT,
# turning each consecutive Subexample paragraph into an \item.
# NOTE(review): numbering gaps (2094, 2096-2100, 2102-2103, 2105, 2107-2108,
# 2112, 2114, 2116-2117, 2119-2121, 2123, 2127, 2130, 2134, 2136, 2138,
# 2141, 2143-2144, 2148, 2151, 2153-2154, 2156, 2160) show `return`,
# `break` guards, `else:` branches and loop scaffolding are missing from
# this excerpt -- compare with upstream lyx_2_4.py.
2092 def revert_subexarg(document):
2093 " Revert linguistic subexamples with argument to ERT "
2095 if not "linguistics" in document.get_module_list():
2101 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2104 j = find_end_of_layout(document.body, i)
2106 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2109 # check for consecutive layouts
2110 k = find_token(document.body, "\\begin_layout", j)
2111 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2113 j = find_end_of_layout(document.body, k)
2115 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2118 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2122 endarg = find_end_of_inset(document.body, arg)
2124 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2125 if argbeginPlain == -1:
2126 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2128 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2129 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2131 # remove Arg insets and paragraph, if it only contains this inset
2132 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2133 del document.body[arg - 1 : endarg + 4]
2135 del document.body[arg : endarg + 1]
2137 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2139 # re-find end of layout
2140 j = find_end_of_layout(document.body, i)
2142 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2145 # check for consecutive layouts
2146 k = find_token(document.body, "\\begin_layout", j)
2147 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2149 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2150 j = find_end_of_layout(document.body, k)
2152 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2155 endev = put_cmd_in_ert("\\end{subexamples}")
2157 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2158 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2159 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2161 document.append_local_layout("Requires covington")
# Revert the Discourse Representation Structure flex insets of the
# linguistics module to raw \drs* commands (drs.sty / covington) in ERT.
# For each DRS inset: extract Argument 1/2 and Argument post:1..post:4,
# delete the Argument insets, then wrap the remaining content in the
# matching LaTeX command with the arguments as brace groups.
# NOTE(review): numbering gaps (e.g. 2167, 2169-2171, 2176-2178, 2180-2181,
# 2183, 2185-2186, 2190-2191, 2195, 2198, 2202, 2204-2205, 2207, 2209-2210,
# 2213-2214, 2218, 2221, 2225, 2227-2228, 2230, 2232-2233, 2237, 2241,
# 2244, 2248, 2250-2251, 2253, 2255-2256, 2260, 2264, 2267, 2271, 2273-2274,
# 2276, 2278-2279, 2283, 2287, 2290, 2294, 2296-2297, 2299, 2301-2302,
# 2306, 2310, 2313, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331,
# 2333-2334, 2343-2344, 2349, 2351, 2353, 2357) show that `return`/`break`
# guards, `else:` branches, the loop scaffolding, and the per-inset `cmd`
# assignments are missing from this excerpt -- compare with upstream
# lyx_2_4.py before editing.
2165 def revert_drs(document):
2166 " Revert DRS insets (linguistics) to ERT "
2168 if not "linguistics" in document.get_module_list():
2172 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2173 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2174 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2175 "\\begin_inset Flex SDRS"]
2179 i = find_token(document.body, drs, i+1)
2182 j = find_end_of_inset(document.body, i)
2184 document.warning("Malformed LyX document: Can't find end of DRS inset")
2187 # Check for arguments
2188 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2189 endarg = find_end_of_inset(document.body, arg)
2192 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2193 if argbeginPlain == -1:
2194 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2196 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2197 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2199 # remove Arg insets and paragraph, if it only contains this inset
2200 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2201 del document.body[arg - 1 : endarg + 4]
2203 del document.body[arg : endarg + 1]
# end of inset may have shifted after the deletion above; re-find it
2206 j = find_end_of_inset(document.body, i)
2208 document.warning("Malformed LyX document: Can't find end of DRS inset")
2211 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2212 endarg = find_end_of_inset(document.body, arg)
2215 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2216 if argbeginPlain == -1:
2217 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2219 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2220 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2222 # remove Arg insets and paragraph, if it only contains this inset
2223 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2224 del document.body[arg - 1 : endarg + 4]
2226 del document.body[arg : endarg + 1]
2229 j = find_end_of_inset(document.body, i)
2231 document.warning("Malformed LyX document: Can't find end of DRS inset")
2234 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2235 endarg = find_end_of_inset(document.body, arg)
2236 postarg1content = []
2238 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2239 if argbeginPlain == -1:
2240 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2242 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2243 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2245 # remove Arg insets and paragraph, if it only contains this inset
2246 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2247 del document.body[arg - 1 : endarg + 4]
2249 del document.body[arg : endarg + 1]
2252 j = find_end_of_inset(document.body, i)
2254 document.warning("Malformed LyX document: Can't find end of DRS inset")
2257 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2258 endarg = find_end_of_inset(document.body, arg)
2259 postarg2content = []
2261 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2262 if argbeginPlain == -1:
2263 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2265 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2266 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2268 # remove Arg insets and paragraph, if it only contains this inset
2269 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2270 del document.body[arg - 1 : endarg + 4]
2272 del document.body[arg : endarg + 1]
2275 j = find_end_of_inset(document.body, i)
2277 document.warning("Malformed LyX document: Can't find end of DRS inset")
2280 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2281 endarg = find_end_of_inset(document.body, arg)
2282 postarg3content = []
2284 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2285 if argbeginPlain == -1:
2286 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2288 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2289 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2291 # remove Arg insets and paragraph, if it only contains this inset
2292 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2293 del document.body[arg - 1 : endarg + 4]
2295 del document.body[arg : endarg + 1]
2298 j = find_end_of_inset(document.body, i)
2300 document.warning("Malformed LyX document: Can't find end of DRS inset")
2303 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2304 endarg = find_end_of_inset(document.body, arg)
2305 postarg4content = []
2307 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2308 if argbeginPlain == -1:
2309 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2311 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2312 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2314 # remove Arg insets and paragraph, if it only contains this inset
2315 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2316 del document.body[arg - 1 : endarg + 4]
2318 del document.body[arg : endarg + 1]
2320 # The respective LaTeX command
2322 if drs == "\\begin_inset Flex DRS*":
2324 elif drs == "\\begin_inset Flex IfThen-DRS":
2326 elif drs == "\\begin_inset Flex Cond-DRS":
2328 elif drs == "\\begin_inset Flex QDRS":
2330 elif drs == "\\begin_inset Flex NegDRS":
2332 elif drs == "\\begin_inset Flex SDRS":
2335 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2336 endInset = find_end_of_inset(document.body, i)
2337 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2338 precontent = put_cmd_in_ert(cmd)
2339 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2340 if drs == "\\begin_inset Flex SDRS":
2341 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2342 precontent += put_cmd_in_ert("{")
2345 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2346 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2347 if cmd == "\\condrs" or cmd == "\\qdrs":
2348 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2350 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2352 postcontent = put_cmd_in_ert("}")
2354 document.body[endPlain:endInset + 1] = postcontent
2355 document.body[beginPlain + 1:beginPlain] = precontent
2356 del document.body[i : beginPlain + 1]
2358 document.append_local_layout("Provides covington 1")
2359 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Move non-TeX font selections into user-preamble \babelfont calls when
# babel is the language package: reads roman/sans/typewriter fonts (plus
# osf and scale settings) from the header, resets them to "default", and
# emits the matching \babelfont{rm|sf|tt}[...] lines.
# NOTE(review): numbering gaps (2367, 2369-2370, 2372, 2374, 2376-2377,
# 2379, 2381, 2383, 2385, 2387, 2393, 2395, 2397, 2403, 2405, 2407, 2413,
# 2415, 2417, 2419, 2421, 2423, 2425-2426, 2428-2429, 2431, 2433, 2435,
# 2437, 2439-2440, 2442-2443, 2445, 2447, 2458, 2464-2465, 2467) show that
# `return` guards, `if j == -1:` checks, try/except around float(), and
# some pretext.append calls are missing from this excerpt -- compare with
# upstream lyx_2_4.py.
2365 def revert_babelfont(document):
2366 " Reverts the use of \\babelfont to user preamble "
2368 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2371 i = find_token(document.header, '\\language_package', 0)
2373 document.warning("Malformed LyX document: Missing \\language_package.")
2375 if get_value(document.header, "\\language_package", 0) != "babel":
2378 # check font settings
2380 roman = sans = typew = "default"
2382 sf_scale = tt_scale = 100.0
2384 j = find_token(document.header, "\\font_roman", 0)
2386 document.warning("Malformed LyX document: Missing \\font_roman.")
2388 # We need to use this regex since split() does not handle quote protection
2389 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2390 roman = romanfont[2].strip('"')
2391 romanfont[2] = '"default"'
2392 document.header[j] = " ".join(romanfont)
2394 j = find_token(document.header, "\\font_sans", 0)
2396 document.warning("Malformed LyX document: Missing \\font_sans.")
2398 # We need to use this regex since split() does not handle quote protection
2399 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2400 sans = sansfont[2].strip('"')
2401 sansfont[2] = '"default"'
2402 document.header[j] = " ".join(sansfont)
2404 j = find_token(document.header, "\\font_typewriter", 0)
2406 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2408 # We need to use this regex since split() does not handle quote protection
2409 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2410 typew = ttfont[2].strip('"')
2411 ttfont[2] = '"default"'
2412 document.header[j] = " ".join(ttfont)
2414 i = find_token(document.header, "\\font_osf", 0)
2416 document.warning("Malformed LyX document: Missing \\font_osf.")
2418 osf = str2bool(get_value(document.header, "\\font_osf", i))
2420 j = find_token(document.header, "\\font_sf_scale", 0)
2422 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2424 sfscale = document.header[j].split()
2427 document.header[j] = " ".join(sfscale)
2430 sf_scale = float(val)
2432 document.warning("Invalid font_sf_scale value: " + val)
2434 j = find_token(document.header, "\\font_tt_scale", 0)
2436 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2438 ttscale = document.header[j].split()
2441 document.header[j] = " ".join(ttscale)
2444 tt_scale = float(val)
2446 document.warning("Invalid font_tt_scale value: " + val)
2448 # set preamble stuff
2449 pretext = ['%% This document must be processed with xelatex or lualatex!']
2450 pretext.append('\\AtBeginDocument{%')
2451 if roman != "default":
2452 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2453 if sans != "default":
2454 sf = '\\babelfont{sf}['
2455 if sf_scale != 100.0:
2456 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2457 sf += 'Mapping=tex-text]{' + sans + '}'
2459 if typew != "default":
2460 tw = '\\babelfont{tt}'
2461 if tt_scale != 100.0:
2462 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2463 tw += '{' + typew + '}'
2466 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2468 insert_to_preamble(document, pretext)
# Revert a native MinionPro roman-font selection that carries extra
# \font_roman_opts to a \usepackage[<opts>]{MinionPro} preamble line and
# reset the header's roman font to "default".
# NOTE(review): numbering gaps (2473, 2475-2476, 2479-2481, 2485, 2487,
# 2489-2490, 2495, 2498, 2500-2501, 2503, 2505-2507) show `return` guards,
# `if x == -1: return` checks and the osf-option handling are missing from
# this excerpt -- compare with upstream lyx_2_4.py.
2471 def revert_minionpro(document):
2472 " Revert native MinionPro font definition (with extra options) to LaTeX "
2474 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2477 regexp = re.compile(r'(\\font_roman_opts)')
2478 x = find_re(document.header, regexp, 0)
2482 # We need to use this regex since split() does not handle quote protection
2483 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2484 opts = romanopts[1].strip('"')
2486 i = find_token(document.header, "\\font_roman", 0)
2488 document.warning("Malformed LyX document: Missing \\font_roman.")
2491 # We need to use this regex since split() does not handle quote protection
2492 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2493 roman = romanfont[1].strip('"')
2494 if roman != "minionpro":
2496 romanfont[1] = '"default"'
2497 document.header[i] = " ".join(romanfont)
2499 j = find_token(document.header, "\\font_osf true", 0)
2502 preamble = "\\usepackage["
2504 document.header[j] = "\\font_osf false"
2508 preamble += "]{MinionPro}"
2509 add_to_preamble(document, [preamble])
2510 del document.header[x]
# Revert the \font_*_opts header params: for each of roman/sans/typewriter,
# strip the opts line from the header, reset the non-TeX font to "default",
# and emit \setmainfont/\setsansfont/\setmonofont (or \babelfont with
# babel) preamble code carrying the options and scale.
# NOTE(review): numbering gaps (2515, 2518-2519, 2522, 2527, 2530, 2537,
# 2539, 2541-2542, 2544-2545, 2547-2548, 2551-2552, 2557, 2560, 2564,
# 2571, 2573, 2575-2577, 2580, 2582-2583, 2585-2586, 2589-2590, 2595,
# 2598, 2602, 2609, 2611, 2613-2615, 2618, 2620-2621) show the `opts = ""`
# initializations, `if i != -1:` guards, Babel/NonTeXFonts branching and
# scale-condition lines are missing from this excerpt -- compare with
# upstream lyx_2_4.py.
2513 def revert_font_opts(document):
2514 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2516 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2517 Babel = (get_value(document.header, "\\language_package") == "babel")
# 1. Roman
2520 regexp = re.compile(r'(\\font_roman_opts)')
2521 i = find_re(document.header, regexp, 0)
2523 # We need to use this regex since split() does not handle quote protection
2524 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2525 opts = romanopts[1].strip('"')
2526 del document.header[i]
2528 regexp = re.compile(r'(\\font_roman)')
2529 i = find_re(document.header, regexp, 0)
2531 # We need to use this regex since split() does not handle quote protection
2532 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2533 font = romanfont[2].strip('"')
2534 romanfont[2] = '"default"'
2535 document.header[i] = " ".join(romanfont)
2536 if font != "default":
2538 preamble = "\\babelfont{rm}["
2540 preamble = "\\setmainfont["
2543 preamble += "Mapping=tex-text]{"
2546 add_to_preamble(document, [preamble])
# 2. Sans
2549 regexp = re.compile(r'(\\font_sans_opts)')
2550 i = find_re(document.header, regexp, 0)
2553 # We need to use this regex since split() does not handle quote protection
2554 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2555 opts = sfopts[1].strip('"')
2556 del document.header[i]
2558 regexp = re.compile(r'(\\font_sf_scale)')
2559 i = find_re(document.header, regexp, 0)
2561 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2562 regexp = re.compile(r'(\\font_sans)')
2563 i = find_re(document.header, regexp, 0)
2565 # We need to use this regex since split() does not handle quote protection
2566 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2567 font = sffont[2].strip('"')
2568 sffont[2] = '"default"'
2569 document.header[i] = " ".join(sffont)
2570 if font != "default":
2572 preamble = "\\babelfont{sf}["
2574 preamble = "\\setsansfont["
2578 preamble += "Scale=0."
2579 preamble += scaleval
2581 preamble += "Mapping=tex-text]{"
2584 add_to_preamble(document, [preamble])
# 3. Typewriter
2587 regexp = re.compile(r'(\\font_typewriter_opts)')
2588 i = find_re(document.header, regexp, 0)
2591 # We need to use this regex since split() does not handle quote protection
2592 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2593 opts = ttopts[1].strip('"')
2594 del document.header[i]
2596 regexp = re.compile(r'(\\font_tt_scale)')
2597 i = find_re(document.header, regexp, 0)
2599 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2600 regexp = re.compile(r'(\\font_typewriter)')
2601 i = find_re(document.header, regexp, 0)
2603 # We need to use this regex since split() does not handle quote protection
2604 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2605 font = ttfont[2].strip('"')
2606 ttfont[2] = '"default"'
2607 document.header[i] = " ".join(ttfont)
2608 if font != "default":
2610 preamble = "\\babelfont{tt}["
2612 preamble = "\\setmonofont["
2616 preamble += "Scale=0."
2617 preamble += scaleval
2619 preamble += "Mapping=tex-text]{"
2622 add_to_preamble(document, [preamble])
# Revert the "complete" Noto TeX-font combination (NotoSerif-TLF roman +
# matching sans/tt) that carries extra \font_roman_opts into a single
# \usepackage[<opts>]{noto} preamble line.
# NOTE(review): numbering gaps (2627, 2629-2631, 2633-2635, 2639-2642,
# 2646-2650, 2652-2654, 2659-2660, 2662-2664, 2668-2670, 2672-2674,
# 2678-2680, 2684, 2686, 2689, 2691) show `return` guards, the `osf`
# bookkeeping and the option-assembly lines are missing from this
# excerpt -- compare with upstream lyx_2_4.py.
2625 def revert_plainNotoFonts_xopts(document):
2626 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2628 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2632 y = find_token(document.header, "\\font_osf true", 0)
2636 regexp = re.compile(r'(\\font_roman_opts)')
2637 x = find_re(document.header, regexp, 0)
2638 if x == -1 and not osf:
2643 # We need to use this regex since split() does not handle quote protection
2644 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2645 opts = romanopts[1].strip('"')
2651 i = find_token(document.header, "\\font_roman", 0)
2655 # We need to use this regex since split() does not handle quote protection
2656 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2657 roman = romanfont[1].strip('"')
2658 if roman != "NotoSerif-TLF":
2661 j = find_token(document.header, "\\font_sans", 0)
2665 # We need to use this regex since split() does not handle quote protection
2666 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2667 sf = sffont[1].strip('"')
2671 j = find_token(document.header, "\\font_typewriter", 0)
2675 # We need to use this regex since split() does not handle quote protection
2676 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2677 tt = ttfont[1].strip('"')
2681 # So we have noto as "complete font"
2682 romanfont[1] = '"default"'
2683 document.header[i] = " ".join(romanfont)
2685 preamble = "\\usepackage["
2687 preamble += "]{noto}"
2688 add_to_preamble(document, [preamble])
2690 document.header[y] = "\\font_osf false"
2692 del document.header[x]
# Revert extended Noto TeX fonts with extra options via the generic font
# mapping machinery, collecting packages/options into add_preamble_fonts().
# NOTE(review): numbering gaps (2699-2701) indicate the `return` and the
# `fontmap = dict()` initialization are missing from this excerpt --
# compare with upstream lyx_2_4.py.
2695 def revert_notoFonts_xopts(document):
2696 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2698 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2702 fm = createFontMapping(['Noto'])
2703 if revert_fonts(document, fm, fontmap, True):
2704 add_preamble_fonts(document, fontmap)
# Revert IBM Plex TeX fonts with extra options via the generic font
# mapping machinery, collecting packages/options into add_preamble_fonts().
# NOTE(review): numbering gaps (2711-2713) indicate the `return` and the
# `fontmap = dict()` initialization are missing from this excerpt --
# compare with upstream lyx_2_4.py.
2707 def revert_IBMFonts_xopts(document):
2708 " Revert native IBM font definition (with extra options) to LaTeX "
2710 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2714 fm = createFontMapping(['IBM'])
2716 if revert_fonts(document, fm, fontmap, True):
2717 add_preamble_fonts(document, fontmap)
# Revert Adobe Source TeX fonts with extra options via the generic font
# mapping machinery, collecting packages/options into add_preamble_fonts().
# NOTE(review): numbering gaps (2724-2726, 2728) indicate the `return` and
# the `fontmap = dict()` initialization are missing from this excerpt --
# compare with upstream lyx_2_4.py.
2720 def revert_AdobeFonts_xopts(document):
2721 " Revert native Adobe font definition (with extra options) to LaTeX "
2723 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2727 fm = createFontMapping(['Adobe'])
2729 if revert_fonts(document, fm, fontmap, True):
2730 add_preamble_fonts(document, fontmap)
# Split the single \font_osf header param into the new per-family params
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf; the sans/tt
# values are derived from whether the selected font is in the known
# osf-capable lists (osfsf/osftt).
# NOTE(review): numbering gaps (2735, 2737, 2739, 2741-2742, 2745,
# 2748-2749, 2752-2754, 2756, 2758, 2762, 2764, 2766, 2768, 2770, 2774,
# 2776, 2778-2779) show `return` guards, the NonTeXFonts/osfval branching
# and `else:` lines are missing from this excerpt -- compare with upstream
# lyx_2_4.py.
2733 def convert_osf(document):
2734 " Convert \\font_osf param to new format "
2736 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2738 i = find_token(document.header, '\\font_osf', 0)
2740 document.warning("Malformed LyX document: Missing \\font_osf.")
2743 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2744 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2746 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2747 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2750 document.header.insert(i, "\\font_sans_osf false")
2751 document.header.insert(i + 1, "\\font_typewriter_osf false")
2755 x = find_token(document.header, "\\font_sans", 0)
2757 document.warning("Malformed LyX document: Missing \\font_sans.")
2759 # We need to use this regex since split() does not handle quote protection
2760 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2761 sf = sffont[1].strip('"')
2763 document.header.insert(i, "\\font_sans_osf true")
2765 document.header.insert(i, "\\font_sans_osf false")
2767 x = find_token(document.header, "\\font_typewriter", 0)
2769 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2771 # We need to use this regex since split() does not handle quote protection
2772 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2773 tt = ttfont[1].strip('"')
2775 document.header.insert(i + 1, "\\font_typewriter_osf true")
2777 document.header.insert(i + 1, "\\font_typewriter_osf false")
2780 document.header.insert(i, "\\font_sans_osf false")
2781 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapse the per-family \font_*_osf params back into a single \font_osf:
# OR the three values together, rename \font_roman_osf, delete the other
# two lines, and (for non-TeX fonts) force \font_osf true when any was set.
# NOTE(review): numbering gaps (2786, 2788, 2790, 2792-2793, 2796, 2798,
# 2800-2801, 2804, 2806, 2808-2809, 2812-2813, 2815, 2817) show `return`
# guards and the NonTeXFonts/osfval conditionals are missing from this
# excerpt -- compare with upstream lyx_2_4.py.
2784 def revert_osf(document):
2785 " Revert \\font_*_osf params "
2787 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2789 i = find_token(document.header, '\\font_roman_osf', 0)
2791 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2794 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2795 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2797 i = find_token(document.header, '\\font_sans_osf', 0)
2799 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2802 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2803 del document.header[i]
2805 i = find_token(document.header, '\\font_typewriter_osf', 0)
2807 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2810 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2811 del document.header[i]
2814 i = find_token(document.header, '\\font_osf', 0)
2816 document.warning("Malformed LyX document: Missing \\font_osf.")
2818 document.header[i] = "\\font_osf true"
# Revert TeX-font selections that carry extra \font_*_opts: biolinum sans
# (with scale and osf) and a fixed list of roman fonts are turned back
# into \usepackage[<opts>,...]{<package>} preamble lines, with the header
# fonts reset to "default".
# NOTE(review): numbering gaps (2823, 2825-2826, 2828, 2832, 2837, 2839,
# 2844, 2847, 2849-2850, 2852, 2854, 2856-2857, 2859-2860, 2862, 2865,
# 2867, 2870, 2874, 2877-2879, 2883, 2885, 2887-2888, 2893, 2896, 2898,
# 2905, 2907, 2911, 2913, 2915, 2917, 2919, 2922, 2924, 2926, 2929-2930)
# show `return` guards, `if x != -1:` checks, the remaining roman-package
# assignments and osf-string branches are missing from this excerpt --
# compare with upstream lyx_2_4.py.
2821 def revert_texfontopts(document):
2822 " Revert native TeX font definitions (with extra options) to LaTeX "
2824 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2827 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2829 # First the sf (biolinum only)
2830 regexp = re.compile(r'(\\font_sans_opts)')
2831 x = find_re(document.header, regexp, 0)
2833 # We need to use this regex since split() does not handle quote protection
2834 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2835 opts = sfopts[1].strip('"')
2836 i = find_token(document.header, "\\font_sans", 0)
2838 document.warning("Malformed LyX document: Missing \\font_sans.")
2840 # We need to use this regex since split() does not handle quote protection
2841 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2842 sans = sffont[1].strip('"')
2843 if sans == "biolinum":
2845 sffont[1] = '"default"'
2846 document.header[i] = " ".join(sffont)
2848 j = find_token(document.header, "\\font_sans_osf true", 0)
2851 k = find_token(document.header, "\\font_sf_scale", 0)
2853 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2855 sfscale = document.header[k].split()
2858 document.header[k] = " ".join(sfscale)
2861 sf_scale = float(val)
2863 document.warning("Invalid font_sf_scale value: " + val)
2864 preamble = "\\usepackage["
2866 document.header[j] = "\\font_sans_osf false"
2868 if sf_scale != 100.0:
2869 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2871 preamble += "]{biolinum}"
2872 add_to_preamble(document, [preamble])
2873 del document.header[x]
# Now the roman fonts
2875 regexp = re.compile(r'(\\font_roman_opts)')
2876 x = find_re(document.header, regexp, 0)
2880 # We need to use this regex since split() does not handle quote protection
2881 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2882 opts = romanopts[1].strip('"')
2884 i = find_token(document.header, "\\font_roman", 0)
2886 document.warning("Malformed LyX document: Missing \\font_roman.")
2889 # We need to use this regex since split() does not handle quote protection
2890 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2891 roman = romanfont[1].strip('"')
2892 if not roman in rmfonts:
2894 romanfont[1] = '"default"'
2895 document.header[i] = " ".join(romanfont)
2897 if roman == "utopia":
2899 elif roman == "palatino":
2900 package = "mathpazo"
2901 elif roman == "times":
2902 package = "mathptmx"
2903 elif roman == "xcharter":
2904 package = "XCharter"
2906 j = find_token(document.header, "\\font_roman_osf true", 0)
2908 if roman == "cochineal":
2909 osf = "proportional,osf,"
2910 elif roman == "utopia":
2912 elif roman == "garamondx":
2914 elif roman == "libertine":
2916 elif roman == "palatino":
2918 elif roman == "xcharter":
2920 document.header[j] = "\\font_roman_osf false"
2921 k = find_token(document.header, "\\font_sc true", 0)
2923 if roman == "utopia":
2925 if roman == "palatino" and osf == "":
2927 document.header[k] = "\\font_sc false"
2928 preamble = "\\usepackage["
2931 preamble += "]{" + package + "}"
2932 add_to_preamble(document, [preamble])
2933 del document.header[x]
def convert_CantarellFont(document):
    """Take a native Cantarell (TeX fonts) definition over to LaTeX.

    Non-TeX (system) font setups are left untouched.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Cantarell'])
    convert_fonts(document, mapping, "oldstyle")
# Revert a native Cantarell TeX-font definition to preamble code via the
# generic font mapping machinery.
# NOTE(review): the numbering gap at 2947 indicates the `fontmap = dict()`
# initialization is missing from this excerpt -- compare with upstream
# lyx_2_4.py.
2943 def revert_CantarellFont(document):
2944 " Revert native Cantarell font definition to LaTeX "
2946 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2948 fm = createFontMapping(['Cantarell'])
2949 if revert_fonts(document, fm, fontmap, False, True):
2950 add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Take a native Chivo (TeX fonts) definition over to LaTeX.

    Non-TeX (system) font setups are left untouched.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Chivo'])
    convert_fonts(document, mapping, "oldstyle")
# Revert a native Chivo TeX-font definition to preamble code via the
# generic font mapping machinery.
# NOTE(review): the numbering gap at 2963 indicates the `fontmap = dict()`
# initialization is missing from this excerpt -- compare with upstream
# lyx_2_4.py.
2959 def revert_ChivoFont(document):
2960 " Revert native Chivo font definition to LaTeX "
2962 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2964 fm = createFontMapping(['Chivo'])
2965 if revert_fonts(document, fm, fontmap, False, True):
2966 add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Take a native Fira (TeX fonts) definition over to LaTeX.

    Non-TeX (system) font setups are left untouched; note the "lf"
    (lining figures) osf option used for Fira.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Fira'])
    convert_fonts(document, mapping, "lf")
# Revert a native Fira TeX-font definition to preamble code via the
# generic font mapping machinery.
# NOTE(review): the numbering gap at 2980 indicates the `fontmap = dict()`
# initialization is missing from this excerpt -- compare with upstream
# lyx_2_4.py.
2976 def revert_FiraFont(document):
2977 " Revert native Fira font definition to LaTeX "
2979 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2981 fm = createFontMapping(['Fira'])
2982 if revert_fonts(document, fm, fontmap, False, True):
2983 add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """Move semibold options to extraopts.

    The IBM Plex *Semibold pseudo-families are replaced by the plain
    family name plus a "semibold" entry in the corresponding
    \\font_*_opts header tag (TeX fonts only).
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_roman_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_roman_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_sans_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_sans_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_typewriter_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    # Bug fix: the original concatenated sfopts (the sans
                    # options) here instead of the typewriter options.
                    document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Only TeX fonts need conversion (see convert_ChivoFont).
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['CrimsonPro'])
        convert_fonts(document, fm, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Collects package -> options for the fonts actually found.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # Bug fix: the class check used textclass[:2] != "scr", which is
    # always true (a 2-char slice never equals "scr"), so KOMA classes
    # were never handled.  convert_pagesizes correctly uses [:3].
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry the page size is handled there; nothing to revert.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Pass the (class-specific) size as a class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry the page size is handled there; nothing to convert.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry.
        # Bug fix: the original wrote document.header[1] instead of the
        # index i just found, clobbering an unrelated header line.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    # Pass the KOMA-specific size as a class option instead.
    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
def revert_dupqualicites(document):
    " Revert qualified citation list commands with duplicate keys to ERT "

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Only biblatex(-natbib) provides the multicite commands used below.
    engine = ""
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # not a qualified list; nothing to do
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            key = "???"

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicate keys; LyX 2.3 can represent this natively
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # The lists store "<key> <text>" items separated by tabs; collect
        # the texts per key, chaining multiple texts of a repeated key.
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        postlist = posttexts.split("\t")
        postmap = dict()
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # a global prenote requires both paren arguments
            res += "()"
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # a prenote without postnote needs an empty placeholder
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
        i += 1
def convert_pagesizenames(document):
    " Convert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        # drop the "paper" suffix of the legacy names
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    # Docstring fixed: this is the revert direction (was "Convert").
    " Revert LyX page sizes names "

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive", \
                "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
                "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
                "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        # restore the "paper" suffix of the legacy names
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
        return

    # Track whether the package is actually used in the body.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    # If an endnotes module is also active, its \endnote command wins
    # over memoir's own \pagenote.
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
def revert_totalheight(document):
    """Revert graphics height parameter from totalheight to height.

    The height value moves into a totalheight= entry of the "special"
    parameter, and a height= entry found in "special" becomes the
    height parameter again.
    """

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract a height= entry (if any) from the special parameter.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:7] == "height=":
                    oldheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
            if k != -1:
                if special != "":
                    val = val + "," + special
                document.body[k] = "\tspecial " + "totalheight=" + val
            else:
                document.body.insert(kk, "\tspecial totalheight=" + val)
                # Bug fix: the insert shifted the height line down by
                # one; without this the code below would overwrite or
                # delete the line just inserted.
                kk += 1
            if oldheight != "":
                document.body[kk] = m.group(1) + oldheight
            else:
                del document.body[kk]
        elif oldheight != "":
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1
def convert_totalheight(document):
    """Convert graphics height parameter from totalheight to height.

    Counterpart of revert_totalheight: a totalheight= entry in the
    "special" parameter becomes the height parameter, while a plain
    height value moves into "special" as height=.
    """

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract a totalheight= entry (if any) from the special parameter.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
            if k != -1:
                if special != "":
                    val = val + "," + special
                document.body[k] = "\tspecial " + "height=" + val
            else:
                # insert after the height line so kk still points at it
                document.body.insert(kk + 1, "\tspecial height=" + val)
            if newheight != "":
                document.body[kk] = m.group(1) + newheight
            else:
                del document.body[kk]
        elif newheight != "":
            if special != "":
                # rewrite the special line without the consumed entry
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + newheight)
            else:
                document.body[k] = "\theight " + newheight
        i = j + 1
def convert_changebars(document):
    " Converts the changebars module to native solution "

    if not "changebars" in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        # still drop the module so the document stays loadable
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    " Converts native changebar param to module "

    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)

    if val == "true":
        document.add_module("changebars")

    # the native header tag does not exist in older formats
    del document.header[i]
def convert_postpone_fragile(document):
    " Adds false \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    " Remove \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    " Remove change tag from tabular columns/rows "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        # Strip the change="..." attribute from <column .../> and <row .../>.
        for k in range(i, j):
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
def convert_counter_maintenance(document):
    # Docstring typo fixed ("tro" -> "to").
    " Convert \\maintain_unincluded_children buffer param from boolean value to tristate "

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    if val == "true":
        document.header[i] = "\\maintain_unincluded_children strict"
    else:
        document.header[i] = "\\maintain_unincluded_children no"
def revert_counter_maintenance(document):
    " Revert \\maintain_unincluded_children buffer param to previous boolean value "

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    # "no" maps to false; both "strict" and "mostly" collapse to true.
    if val == "no":
        document.header[i] = "\\maintain_unincluded_children false"
    else:
        document.header[i] = "\\maintain_unincluded_children true"
def revert_counter_inset(document):
    """Revert counter inset to ERT, where possible.

    "lyxonly" insets are deleted outright; set/addto/reset/save/restore
    become \\setcounter / \\addtocounter ERT.  Save/restore go through
    auxiliary LyXSave* counters declared in the preamble.
    """

    needed_counters = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        # (stray debug warning that echoed every command was removed here)
        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
        else:
            # Bug fix: report the unknown command, not the counter name.
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))

        if ert:
            # Guard: an empty ert (missing value) must not be spliced
            # into the body list (a bare "" would expand character-wise).
            document.body[i : j + 1] = ert
        i += 1

    # Declare the auxiliary save counters once, in the preamble.
    pretext = []
    for cnt in needed_counters:
        pretext.append("\\newcounter{LyXSave%s}" % (cnt))
    if pretext:
        add_to_preamble(document, pretext)
def revert_ams_spaces(document):
    "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"

    Found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(inset)
            document.body[i : end + 1] = subst
            Found = True

    if Found == True:
        # load amsmath in the preamble if not already loaded
        i = find_token(document.header, "\\use_package amsmath 2", 0)
        if i == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
# LyX releases whose file format this module produces.
supported_versions = ["2.4.0", "2.4"]
3785 [545, [convert_lst_literalparam]],
3790 [550, [convert_fontenc]],
3797 [557, [convert_vcsinfo]],
3798 [558, [removeFrontMatterStyles]],
3801 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3805 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3806 [566, [convert_hebrew_parentheses]],
3812 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3813 [573, [convert_inputencoding_namechange]],
3814 [574, [convert_ruby_module, convert_utf8_japanese]],
3815 [575, [convert_lineno, convert_aaencoding]],
3817 [577, [convert_linggloss]],
3821 [581, [convert_osf]],
3822 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3823 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3825 [585, [convert_pagesizes]],
3827 [587, [convert_pagesizenames]],
3829 [589, [convert_totalheight]],
3830 [590, [convert_changebars]],
3831 [591, [convert_postpone_fragile]],
3833 [593, [convert_counter_maintenance]],
3838 revert = [[594, [revert_ams_spaces]],
3839 [593, [revert_counter_inset]],
3840 [592, [revert_counter_maintenance]],
3841 [591, [revert_colrow_tracking]],
3842 [590, [revert_postpone_fragile]],
3843 [589, [revert_changebars]],
3844 [588, [revert_totalheight]],
3845 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3846 [586, [revert_pagesizenames]],
3847 [585, [revert_dupqualicites]],
3848 [584, [revert_pagesizes,revert_komafontsizes]],
3849 [583, [revert_vcsinfo_rev_abbrev]],
3850 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3851 [581, [revert_CantarellFont,revert_FiraFont]],
3852 [580, [revert_texfontopts,revert_osf]],
3853 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3854 [578, [revert_babelfont]],
3855 [577, [revert_drs]],
3856 [576, [revert_linggloss, revert_subexarg]],
3857 [575, [revert_new_languages]],
3858 [574, [revert_lineno, revert_aaencoding]],
3859 [573, [revert_ruby_module, revert_utf8_japanese]],
3860 [572, [revert_inputencoding_namechange]],
3861 [571, [revert_notoFonts]],
3862 [570, [revert_cmidruletrimming]],
3863 [569, [revert_bibfileencodings]],
3864 [568, [revert_tablestyle]],
3865 [567, [revert_soul]],
3866 [566, [revert_malayalam]],
3867 [565, [revert_hebrew_parentheses]],
3868 [564, [revert_AdobeFonts]],
3869 [563, [revert_lformatinfo]],
3870 [562, [revert_listpargs]],
3871 [561, [revert_l7ninfo]],
3872 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3873 [559, [revert_timeinfo, revert_namenoextinfo]],
3874 [558, [revert_dateinfo]],
3875 [557, [addFrontMatterStyles]],
3876 [556, [revert_vcsinfo]],
3877 [555, [revert_bibencoding]],
3878 [554, [revert_vcolumns]],
3879 [553, [revert_stretchcolumn]],
3880 [552, [revert_tuftecite]],
3881 [551, [revert_floatpclass, revert_floatalignment]],
3882 [550, [revert_nospellcheck]],
3883 [549, [revert_fontenc]],
3884 [548, []], # dummy format change
3885 [547, [revert_lscape]],
3886 [546, [revert_xcharter]],
3887 [545, [revert_paratype]],
3888 [544, [revert_lst_literalparam]]
3892 if __name__ == "__main__":