1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font packages with their options to the user preamble.

    fontmap: dict mapping a LaTeX package name to a list of package options
             (filled by revert_fonts()).  Each package is emitted as one
             \\usepackage line via add_to_preamble().
    """
    # NOTE(review): the numbered dump elides the loop header and the empty-
    # options branch; restored here from the upstream lyx2lyx shape.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Return a string key unique to a package/options combination.

    The key has the form "<pkg>:<opt1>-<opt2>-..." and is used to index
    fontmapping.pkg2fontmap.
    """
    return pkg + ':' + "-".join(options)
# NOTE(review): elided numbered dump — the enclosing `class fontinfo:` header,
# its `__init__` signature and the `computepkgkey` method header fall in the
# gaps (original lines 65-66, 71-73, 77-78 are absent).  Code kept byte-identical.
# Per-font attribute defaults (one fontinfo instance describes one font entry):
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Recompute the lookup key from package name + options (see createkey()).
79 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): fragment of fontmapping.__init__ — the class and method
# headers are elided from this dump.  Three lookup tables:
#   font2pkgmap: font name -> fontinfo
#   pkg2fontmap: package key (createkey) -> font name
#   pkginmap:    package name -> 1 (membership set of handled packages)
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# NOTE(review): elided dump — original lines 89, 100-101, 104-105, 107,
# 109-111, 113-115, 123 are absent (the per-font loop header, fontinfo
# construction and option-splitting presumably live there — confirm against
# the full file).  Code kept byte-identical.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 """Expand fontinfo mapping"""
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
# Fill the fontinfo entry (fe) for this font:
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
# Package defaults to the font name when pkg is None.
112 fe.package = font_name
# Register the entry in all three lookup tables.
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# NOTE(review): elided dump — original lines 125, 128, 132, 134-135 are
# absent (docstring, the not-found early returns and the final return —
# presumably returning the font name or None; confirm against the full file).
# Code kept byte-identical.
124 def getfontname(self, pkg, options):
# Look the package+options combination up in pkg2fontmap.
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
# Cross-check the reverse mapping for consistency.
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# NOTE(review): elided dump — missing lines include the `fm = fontmapping()`
# construction, the `if font == 'DejaVu':` / `elif font == 'IBM':` /
# `elif font == 'Noto':` / Fira branch headers and the final `return fm`
# (original lines 141, 146, 148, 152, 169, 190, 197-198 absent; confirm
# against the full file).  Code kept byte-identical.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# NOTE(review): heavily elided dump — the scan loop header, several guards/
# continues and variable initialisations (e.g. `oscale`, `has_osf`, `vals`,
# `opt`) fall in the gaps; confirm any control flow against the full file.
# Code kept byte-identical.
199 def convert_fonts(document, fm, osfoption = "osf"):
200 """Handle font definition (LaTeX preamble -> native)"""
# Match \usepackage[opts]{pkg} lines in the preamble.
201 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
202 rscaleopt = re.compile(r'^scaled?=(.*)')
204 # Check whether we go beyond font option feature introduction
205 haveFontOpts = document.end_format > 580
209 i = find_re(document.preamble, rpkg, i+1)
212 mo = rpkg.search(document.preamble[i])
213 if mo == None or mo.group(2) == None:
# Normalise the option list (strip spaces, split on commas).
216 options = mo.group(2).replace(' ', '').split(",")
221 while o < len(options):
222 if options[o] == osfoption:
226 mo = rscaleopt.search(options[o])
# Skip packages this font mapping does not know about.
234 if not pkg in fm.pkginmap:
239 # Try with name-option combination first
240 # (only one default option supported currently)
242 while o < len(options):
244 fn = fm.getfontname(pkg, [opt])
251 fn = fm.getfontname(pkg, [])
253 fn = fm.getfontname(pkg, options)
# Known font found: drop the preamble line and set native header tags.
256 del document.preamble[i]
257 fontinfo = fm.font2pkgmap[fn]
258 if fontinfo.scaletype == None:
261 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
262 fontinfo.scaleval = oscale
# Toggle the osf flag when the requested state differs from the default.
263 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
264 if fontinfo.osfopt == None:
265 options.extend(osfoption)
267 osf = find_token(document.header, "\\font_osf false")
268 osftag = "\\font_osf"
269 if osf == -1 and fontinfo.fonttype != "math":
270 # Try with newer format
271 osftag = "\\font_" + fontinfo.fonttype + "_osf"
272 osf = find_token(document.header, osftag + " false")
274 document.header[osf] = osftag + " true"
# Remove the marker comment lyx2lyx itself added earlier, if present.
275 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
276 del document.preamble[i-1]
278 if fontscale != None:
279 j = find_token(document.header, fontscale, 0)
281 val = get_value(document.header, fontscale, j)
285 scale = "%03d" % int(float(oscale) * 100)
286 document.header[j] = fontscale + " " + scale + " " + vals[1]
# Replace the first word of the \font_<type> header value with the font name.
287 ft = "\\font_" + fontinfo.fonttype
288 j = find_token(document.header, ft, 0)
290 val = get_value(document.header, ft, j)
291 words = val.split() # ! splits also values like '"DejaVu Sans"'
292 words[0] = '"' + fn + '"'
293 document.header[j] = ft + ' ' + ' '.join(words)
# Newer formats (> 580) also carry remaining options in \font_<type>_opts.
294 if haveFontOpts and fontinfo.fonttype != "math":
295 fotag = "\\font_" + fontinfo.fonttype + "_opts"
296 fo = find_token(document.header, fotag)
298 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
300 # Sensible place to insert tag
301 fo = find_token(document.header, "\\font_sf_scale")
303 document.warning("Malformed LyX document! Missing \\font_sf_scale")
305 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# NOTE(review): heavily elided dump — the scan loop bounds, several guards,
# the regexp default for the roman branch and the function's return value
# fall in the gaps; confirm against the full file.  Code kept byte-identical.
308 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
309 """Revert native font definition to LaTeX"""
310 # fonlist := list of fonts created from the same package
311 # Empty package means that the font-name is the same as the package-name
312 # fontmap (key = package, val += found options) will be filled
313 # and used later in add_preamble_fonts() to be added to user-preamble
315 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
316 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
318 while i < len(document.header):
319 i = find_re(document.header, rfontscale, i+1)
322 mo = rfontscale.search(document.header[i])
325 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
326 val = get_value(document.header, ft, i)
327 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
328 font = words[0].strip('"') # TeX font name has no whitespace
# Only fonts known to the mapping are reverted.
329 if not font in fm.font2pkgmap:
331 fontinfo = fm.font2pkgmap[font]
332 val = fontinfo.package
333 if not val in fontmap:
# Optionally transfer \font_<type>_opts header values into package options.
336 if OnlyWithXOpts or WithXOpts:
337 if ft == "\\font_math":
339 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
340 if ft == "\\font_sans":
341 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
342 elif ft == "\\font_typewriter":
343 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
344 x = find_re(document.header, regexp, 0)
345 if x == -1 and OnlyWithXOpts:
349 # We need to use this regex since split() does not handle quote protection
350 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
351 opts = xopts[1].strip('"').split(",")
352 fontmap[val].extend(opts)
353 del document.header[x]
# Reset the header to the default font and record scale/osf options.
354 words[0] = '"default"'
355 document.header[i] = ft + ' ' + ' '.join(words)
356 if fontinfo.scaleopt != None:
357 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
358 mo = rscales.search(xval)
363 # set correct scale option
364 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
365 if fontinfo.osfopt != None:
367 if fontinfo.osfdef == "true":
369 osf = find_token(document.header, "\\font_osf " + oldval)
370 if osf == -1 and ft != "\\font_math":
371 # Try with newer format
372 osftag = "\\font_roman_osf " + oldval
373 if ft == "\\font_sans":
374 osftag = "\\font_sans_osf " + oldval
375 elif ft == "\\font_typewriter":
376 osftag = "\\font_typewriter_osf " + oldval
377 osf = find_token(document.header, osftag)
379 fontmap[val].extend([fontinfo.osfopt])
380 if len(fontinfo.options) > 0:
381 fontmap[val].extend(fontinfo.options)
384 ###############################################################################
386 ### Conversion and reversion routines
388 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings to the new 2.4 names.

    "auto" becomes "auto-legacy" and "default" becomes "auto-legacy-plain".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): this guard was elided in the numbered dump; without it
    # a missing \inputencoding header would index document.header[-1].
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back to the pre-2.4 names.

    Inverse of convert_inputencoding_namechange(): "auto-legacy-plain"
    becomes "default" and "auto-legacy" becomes "auto".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): guard elided in the numbered dump; restored.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    # Only relevant for TeX fonts; system (non-TeX) fonts need no mapping.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Noto'])
        convert_fonts(document, fm)
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): `fontmap = dict()` was elided in the numbered dump,
        # leaving the name undefined; restored (package -> options, filled
        # by revert_fonts and consumed by add_preamble_fonts).
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    # Only relevant for TeX fonts; system (non-TeX) fonts need no mapping.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['DejaVu', 'IBM'])
        convert_fonts(document, fm)
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): `fontmap = dict()` was elided in the numbered dump,
        # leaving the name undefined; restored.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    # Only relevant for TeX fonts; system (non-TeX) fonts need no mapping.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Adobe'])
        convert_fonts(document, fm)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): `fontmap = dict()` was elided in the numbered dump,
        # leaving the name undefined; restored.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# NOTE(review): elided dump — the scan loop header (`while True` /
# `i = find_token...` initialisation), the `-1` early exits and the blank-line
# skip body fall in the gaps; confirm against the full file.  Code kept
# byte-identical.
454 def removeFrontMatterStyles(document):
455 """Remove styles Begin/EndFrontmatter"""
457 layouts = ['BeginFrontmatter', 'EndFrontmatter']
458 tokenend = len('\\begin_layout ')
461 i = find_token_exact(document.body, '\\begin_layout ', i+1)
# Extract the layout name that follows '\begin_layout '.
464 layout = document.body[i][tokenend:].strip()
465 if layout not in layouts:
467 j = find_end_of_layout(document.body, i)
469 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Also swallow trailing empty lines after the layout.
471 while document.body[j+1].strip() == '':
473 document.body[i:j+1] = []
# NOTE(review): elided dump — the early return, parts of the inserted note
# text, the scan loop header and the `first` tracking logic fall in the gaps;
# confirm against the full file.  Code kept byte-identical.
475 def addFrontMatterStyles(document):
476 """Use styles Begin/EndFrontmatter for elsarticle"""
# Only the elsarticle text class uses these frontmatter styles.
478 if document.textclass != "elsarticle":
# Local helper: wrap the region around `line` in a Frontmatter layout.
481 def insertFrontmatter(prefix, line):
483 while above > 0 and document.body[above-1].strip() == '':
486 while document.body[below].strip() == '':
488 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
489 '\\begin_inset Note Note',
491 '\\begin_layout Plain Layout',
494 '\\end_inset', '', '',
# Layouts that belong to the frontmatter region.
497 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
498 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
499 tokenend = len('\\begin_layout ')
503 i = find_token_exact(document.body, '\\begin_layout ', i+1)
506 layout = document.body[i][tokenend:].strip()
507 if layout not in layouts:
509 k = find_end_of_layout(document.body, i)
511 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so earlier indices stay valid.
518 insertFrontmatter('End', k+1)
519 insertFrontmatter('Begin', first)
# NOTE(review): elided dump — the scan loop header, `-1` exits and the
# `i += 1` advance inside the while fall in the gaps; confirm against the
# full file.  Code kept byte-identical.
522 def convert_lst_literalparam(document):
523 """Add param literal to include inset"""
527 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
530 j = find_end_of_inset(document.body, i)
532 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip past the existing parameter lines, then append literal "true".
534 while i < j and document.body[i].strip() != '':
536 document.body.insert(i, 'literal "true"')
# NOTE(review): elided dump — the scan loop header and `-1` exits fall in
# the gaps; confirm against the full file.  Code kept byte-identical.
539 def revert_lst_literalparam(document):
540 """Remove param literal from include inset"""
544 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
547 j = find_end_of_inset(document.body, i)
549 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
# Drop the 'literal' parameter line from the inset.
551 del_token(document.body, 'literal', i, j)
# NOTE(review): elided dump — several guards, `return`s, the `val` extraction
# and the i1/i2/i3/j/k branch structure fall in the gaps; confirm against
# the full file.  Code kept byte-identical.
554 def revert_paratype(document):
555 """Revert ParaType font definitions to LaTeX"""
# Only relevant for TeX fonts.
557 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
559 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
560 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
561 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
562 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
# Read the sans scale factor to build a scaled= option if needed.
565 sfval = find_token(document.header, "\\font_sf_scale", 0)
567 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
569 sfscale = document.header[sfval].split()
572 document.header[sfval] = " ".join(sfscale)
575 sf_scale = float(val)
577 document.warning("Invalid font_sf_scale value: " + val)
580 if sf_scale != "100.0":
581 sfoption = "scaled=" + str(sf_scale / 100.0)
582 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
583 ttval = get_value(document.header, "\\font_tt_scale", 0)
588 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: load the combined paratype package.
589 if i1 != -1 and i2 != -1 and i3!= -1:
590 add_to_preamble(document, ["\\usepackage{paratype}"])
# Otherwise load the individual family packages, with scaling options.
593 add_to_preamble(document, ["\\usepackage{PTSerif}"])
594 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
597 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
599 add_to_preamble(document, ["\\usepackage{PTSans}"])
600 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
603 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
605 add_to_preamble(document, ["\\usepackage{PTMono}"])
606 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# NOTE(review): elided dump — the `-1` early returns and the `options`
# construction from the osf setting fall in the gaps; confirm against the
# full file.  Code kept byte-identical.
609 def revert_xcharter(document):
610 """Revert XCharter font definitions to LaTeX"""
612 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
616 # replace unsupported font setting
617 document.header[i] = document.header[i].replace("xcharter", "default")
618 # no need for preamble code with system fonts
619 if get_bool_value(document.header, "\\use_non_tex_fonts"):
622 # transfer old style figures setting to package options
623 j = find_token(document.header, "\\font_osf true")
626 document.header[j] = "\\font_osf false"
630 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# NOTE(review): elided dump — the scan loop header, `-1` exits and the
# `else:` before the plain-landscape branch fall in the gaps; confirm
# against the full file.  Code kept byte-identical.
633 def revert_lscape(document):
634 """Reverts the landscape environment (Landscape module) to TeX-code"""
# Nothing to do unless the landscape module is loaded.
636 if not "landscape" in document.get_module_list():
641 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
644 j = find_end_of_inset(document.body, i)
646 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant wraps the environment in \afterpage{...}.
649 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
650 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
651 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
652 add_to_preamble(document, ["\\usepackage{afterpage}"])
654 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
655 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
657 add_to_preamble(document, ["\\usepackage{pdflscape}"])
658 document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # NOTE(review): guard elided in the numbered dump; restored so a missing
    # header line is a no-op instead of touching document.header[-1].
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # NOTE(review): guard elided in the numbered dump; restored.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
# NOTE(review): elided dump — the scan loop, the `-1` exit and the deletion
# of the matched line fall in the gaps; confirm against the full file.
# Code kept byte-identical.
681 def revert_nospellcheck(document):
682 """Remove nospellcheck font info param"""
686 i = find_token(document.body, '\\nospellcheck', i)
# NOTE(review): elided dump — the scan loop header, `-1` exits and the
# rewriting of the matched placement lines fall in the gaps; confirm
# against the full file.  Code kept byte-identical.
692 def revert_floatpclass(document):
693 """Remove float placement params 'document' and 'class'"""
# Drop the header-level setting first.
695 del_token(document.header, "\\float_placement class")
699 i = find_token(document.body, '\\begin_inset Float', i + 1)
702 j = find_end_of_inset(document.body, i)
703 k = find_token(document.body, 'placement class', i, j)
705 k = find_token(document.body, 'placement document', i, j)
# NOTE(review): elided dump — the scan loop header, several `-1` exits,
# the deletion of the alignment line and the final `else` branch fall in
# the gaps; confirm against the full file.  Code kept byte-identical.
712 def revert_floatalignment(document):
713 """Remove float alignment params"""
# Global default alignment from the header (removed from the header here).
715 galignment = get_value(document.header, "\\float_alignment", delete=True)
719 i = find_token(document.body, '\\begin_inset Float', i + 1)
722 j = find_end_of_inset(document.body, i)
724 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
726 k = find_token(document.body, 'alignment', i, j)
730 alignment = get_value(document.body, "alignment", k)
# 'document' means: fall back to the global header value.
731 if alignment == "document":
732 alignment = galignment
734 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
736 document.warning("Can't find float layout!")
# Emit the equivalent LaTeX alignment command as ERT inside the float.
739 if alignment == "left":
740 alcmd = put_cmd_in_ert("\\raggedright{}")
741 elif alignment == "center":
742 alcmd = put_cmd_in_ert("\\centering{}")
743 elif alignment == "right":
744 alcmd = put_cmd_in_ert("\\raggedleft{}")
746 document.body[l+1:l+1] = alcmd
# NOTE(review): elided dump — the scan loop header, `-1`/`continue` paths,
# the cmd filter and the `res` initialisation fall in the gaps; confirm
# against the full file.  Code kept byte-identical.
749 def revert_tuftecite(document):
750 """Revert \cite commands in tufte classes"""
751 # (docstring uses \cite literally — refers to the LaTeX command)
752 tufte = ["tufte-book", "tufte-handout"]
753 if document.textclass not in tufte:
758 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
761 j = find_end_of_inset(document.body, i)
763 document.warning("Can't find end of citation inset at line %d!!" %(i))
765 k = find_token(document.body, "LatexCommand", i, j)
767 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
770 cmd = get_value(document.body, "LatexCommand", k)
# Gather the citation pieces: prenote, postnote and key.
774 pre = get_quoted_value(document.body, "before", i, j)
775 post = get_quoted_value(document.body, "after", i, j)
776 key = get_quoted_value(document.body, "key", i, j)
778 document.warning("Citation inset at line %d does not have a key!" %(i))
780 # Replace command with ERT
783 res += "[" + pre + "]"
785 res += "[" + post + "]"
788 res += "{" + key + "}"
789 document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess."""
    # NOTE(review): the loop skeleton (initialisation, while, -1 exits) was
    # elided in the numbered dump; restored from the upstream lyx2lyx shape.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        # Strip varwidth="true" from every column spec of this table.
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
# NOTE(review): heavily elided dump — the scan loop header, the needarray/
# needvarwidth flag initialisation, several index advances and guards fall
# in the gaps; confirm against the full file.  Code kept byte-identical.
811 def revert_vcolumns(document):
812 """Revert standard columns with line breaks etc."""
818 i = find_token(document.body, "\\begin_inset Tabular", i+1)
821 j = find_end_of_inset(document.body, i)
823 document.warning("Malformed LyX document: Could not find end of tabular.")
826 # Collect necessary column information
# Rows/cols are read from the quoted attrs of the <lyxtabular> line.
828 nrows = int(document.body[i+1].split('"')[3])
829 ncols = int(document.body[i+1].split('"')[5])
831 for k in range(ncols):
832 m = find_token(document.body, "<column", m)
833 width = get_option_value(document.body[m], 'width')
834 varwidth = get_option_value(document.body[m], 'varwidth')
835 alignment = get_option_value(document.body[m], 'alignment')
836 special = get_option_value(document.body[m], 'special')
837 col_info.append([width, varwidth, alignment, special, m])
# Walk every cell and decide whether it needs the varwidth treatment.
842 for row in range(nrows):
843 for col in range(ncols):
844 m = find_token(document.body, "<cell", m)
845 multicolumn = get_option_value(document.body[m], 'multicolumn')
846 multirow = get_option_value(document.body[m], 'multirow')
847 width = get_option_value(document.body[m], 'width')
848 rotate = get_option_value(document.body[m], 'rotate')
849 # Check for: linebreaks, multipars, non-standard environments
851 endcell = find_token(document.body, "</cell>", begcell)
853 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
855 elif count_pars_in_inset(document.body, begcell + 2) > 1:
857 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
# Candidate cell: rewrite its column spec to a V{...} special.
859 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
860 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
862 alignment = col_info[col][2]
863 col_line = col_info[col][4]
865 if alignment == "center":
866 vval = ">{\\centering}"
867 elif alignment == "left":
868 vval = ">{\\raggedright}"
869 elif alignment == "right":
870 vval = ">{\\raggedleft}"
873 vval += "V{\\linewidth}"
875 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
876 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
877 # with newlines, and we do not want that)
879 endcell = find_token(document.body, "</cell>", begcell)
881 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
883 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
887 nle = find_end_of_inset(document.body, nl)
888 del(document.body[nle:nle+1])
890 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
892 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
# Load the LaTeX packages the rewritten specs rely on.
898 if needarray == True:
899 add_to_preamble(document, ["\\usepackage{array}"])
900 if needvarwidth == True:
901 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): heavily elided dump — most entries of the encodings dict
# (cp* codepages, koi8, utf8 variants, ...), the scan loop header and
# several guards fall in the gaps; confirm against the full file.
# Code kept byte-identical.
904 def revert_bibencoding(document):
905 """Revert bibliography encoding"""
# Only biblatex engines support per-bibliography encodings via options.
909 i = find_token(document.header, "\\cite_engine", 0)
911 document.warning("Malformed document! Missing \\cite_engine")
913 engine = get_value(document.header, "\\cite_engine", i)
917 if engine in ["biblatex", "biblatex-natbib"]:
920 # Map lyx to latex encoding names
924 "armscii8" : "armscii8",
925 "iso8859-1" : "latin1",
926 "iso8859-2" : "latin2",
927 "iso8859-3" : "latin3",
928 "iso8859-4" : "latin4",
929 "iso8859-5" : "iso88595",
930 "iso8859-6" : "8859-6",
931 "iso8859-7" : "iso-8859-7",
932 "iso8859-8" : "8859-8",
933 "iso8859-9" : "latin5",
934 "iso8859-13" : "latin7",
935 "iso8859-15" : "latin9",
936 "iso8859-16" : "latin10",
937 "applemac" : "applemac",
939 "cp437de" : "cp437de",
956 "utf8-platex" : "utf8",
# Walk all bibtex insets and move their encoding elsewhere.
963 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
966 j = find_end_of_inset(document.body, i)
968 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
970 encoding = get_quoted_value(document.body, "encoding", i, j)
973 # remove encoding line
974 k = find_token(document.body, "encoding", i, j)
977 if encoding == "default":
979 # Re-find inset end line
980 j = find_end_of_inset(document.body, i)
# biblatex: transfer the encoding into \biblio_options bibencoding=...
983 h = find_token(document.header, "\\biblio_options", 0)
985 biblio_options = get_value(document.header, "\\biblio_options", h)
986 if not "bibencoding" in biblio_options:
987 document.header[h] += ",bibencoding=%s" % encodings[encoding]
989 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
991 # this should not happen
992 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
994 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# bibtex: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT.
996 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
997 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# NOTE(review): elided dump — the scan loop header, `-1` exits and the
# type filter (presumably tpv == "buffer") fall in the gaps; confirm
# against the full file.  Code kept byte-identical.
1003 def convert_vcsinfo(document):
1004 """Separate vcs Info inset from buffer Info inset."""
# Map old buffer-arg names to the new vcs-arg names.
1007 "vcs-revision" : "revision",
1008 "vcs-tree-revision" : "tree-revision",
1009 "vcs-author" : "author",
1010 "vcs-time" : "time",
1015 i = find_token(document.body, "\\begin_inset Info", i+1)
1018 j = find_end_of_inset(document.body, i+1)
1020 document.warning("Malformed LyX document: Could not find end of Info inset.")
1022 tp = find_token(document.body, 'type', i, j)
1023 tpv = get_quoted_value(document.body, "type", tp)
1026 arg = find_token(document.body, 'arg', i, j)
1027 argv = get_quoted_value(document.body, "arg", arg)
1028 if argv not in list(types.keys()):
# Rewrite the inset as type "vcs" with the stripped argument name.
1030 document.body[tp] = "type \"vcs\""
1031 document.body[arg] = "arg \"" + types[argv] + "\""
# NOTE(review): elided dump — the scan loop header, `-1` exits and the
# type filter (presumably tpv == "vcs") fall in the gaps; confirm against
# the full file.  Code kept byte-identical.
1034 def revert_vcsinfo(document):
1035 """Merge vcs Info inset to buffer Info inset."""
1037 args = ["revision", "tree-revision", "author", "time", "date" ]
1040 i = find_token(document.body, "\\begin_inset Info", i+1)
1043 j = find_end_of_inset(document.body, i+1)
1045 document.warning("Malformed LyX document: Could not find end of Info inset.")
1047 tp = find_token(document.body, 'type', i, j)
1048 tpv = get_quoted_value(document.body, "type", tp)
1051 arg = find_token(document.body, 'arg', i, j)
1052 argv = get_quoted_value(document.body, "arg", arg)
1053 if argv not in args:
1054 document.warning("Malformed Info inset. Invalid vcs arg.")
# Rewrite back to a buffer-type inset with the vcs- prefixed argument.
1056 document.body[tp] = "type \"buffer\""
1057 document.body[arg] = "arg \"vcs-" + argv + "\""
# NOTE(review): elided dump — the scan loop header, `-1` exits and the type
# filter fall in the gaps; confirm against the full file.  Code kept
# byte-identical.
1059 def revert_vcsinfo_rev_abbrev(document):
1060 " Convert abbreviated revisions to regular revisions. "
1064 i = find_token(document.body, "\\begin_inset Info", i+1)
1067 j = find_end_of_inset(document.body, i+1)
1069 document.warning("Malformed LyX document: Could not find end of Info inset.")
1071 tp = find_token(document.body, 'type', i, j)
1072 tpv = get_quoted_value(document.body, "type", tp)
1075 arg = find_token(document.body, 'arg', i, j)
1076 argv = get_quoted_value(document.body, "arg", arg)
# Downgrade the abbreviated-revision argument to the plain revision one.
1077 if( argv == "revision-abbrev" ):
1078 document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Info insets of type date/fixdate/moddate are replaced by a literal
    date string formatted according to the document's main language.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # NOTE(review): the "%de" sequences in the next two entries look like
        # typos for the literal word "de" — confirm against the intended output.
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],

    # Only these Info inset types carry a date.
    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
        document.warning("Malformed LyX document! No \\language header found!")

        # Walk every Info inset in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # "fixdate" carries "format@YYYY-MM-DD": the part after '@' is the
        # fixed ISO date, the part before it the output format.
        if tpv == "fixdate":
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                isodate = datecomps[1]
                m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        # dte = date.fromtimestamp(os.path.getmtime(document.dir))
            # NOTE(review): datetime.date has no isodate() method — this looks
            # like it should be dte.isoformat(); confirm.
            result = dte.isodate()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
            # Translate Qt-style format tokens to strftime equivalents.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            fmt = re.sub('[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        # Replace the whole inset with the rendered date text.
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Info insets of type time/fixtime/modtime are replaced by a literal
    time string formatted according to the document's main language.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will

    # The time formats for each language using strftime syntax:
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]

    # Only these Info inset types carry a time.
    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
    lang = get_value(document.header, "\\language", i)

        # Walk every Info inset in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Default to the current time; "fixtime" overrides it below.
        dtme = datetime.now()
        # "fixtime" carries "format@HH:MM[:SS]": the part after '@' is the
        # fixed ISO time, the part before it the output format.
        if tpv == "fixtime":
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                isotime = timecomps[1]
                m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                    m = re.search('(\d\d):(\d\d)', isotime)
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        # dte = date.fromtimestamp(os.path.getmtime(document.dir))
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
            # Translate Qt-style format tokens to strftime equivalents.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # NOTE(review): 'dte' is not defined in this function (the time
            # object is 'tme', the datetime 'dtme') — this looks like it
            # should be tme.strftime(fmt); confirm.
            result = dte.strftime(fmt)
        # NOTE(review): assigning a plain string to a list slice splices it
        # character by character; revert_dateinfo uses a one-element list
        # ([result]) here — likely a bug; confirm.
        document.body[i : j+1] = result
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""

        # Walk every Info inset in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
        # Fold the no-longer-supported "name-noext" argument into "name".
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info insets to plain text."""

        # Walk every Info inset in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # NOTE(review): assigning a plain string to a list slice splices it
        # character by character; a one-element list ([argv]) is likely
        # intended — confirm.
        document.body[i : j+1] = argv
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code"""
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
        # Extract the argument's contents (the Plain Layout inside it).
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # Remove the Argument inset and re-insert its contents as ERT
        # wrapped in braces at the start of the containing paragraph.
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        # NOTE(review): 'parbeg' is presumably the paragraph start position
        # taken from 'parent' (get_containing_layout result) — confirm.
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to text."""

        # Walk every Info inset in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
        # Replace the inset with the literal layout format number.
        # NOTE(review): assigning a string to a list slice splices it into
        # individual characters ("6", "9") — confirm this is intended.
        document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.

    The current language is tracked with a stack: each \\begin_layout
    inherits the language of its parent, \\lang lines switch the language
    of the current level, and \\end_layout pops back to the parent.
    """
    # print("convert hebrew parentheses")
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Take everything after the "\lang " prefix as the language name.
            # (The previous lstrip('\\lang ') stripped a *character set* and
            # mangled names such as "ngerman" -> "erman".)
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap ( and ) via a NUL placeholder so the two replacements
            # do not interfere with each other.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    Swapping parentheses is its own inverse, so reverting simply applies
    the forward conversion again.
    """
    # This only exists to keep the convert/revert naming convention
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output"""

    # Delegate to the generic language-reversion helper (lyx2lyx_tools).
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT"""

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

        # If any soul flex inset is used, load the soul package.
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
            add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlight additionally needs the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
        add_to_preamble(document, ["\\usepackage{color}"])

    # Translate each flex inset into the corresponding soul command.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove tablestyle params"""

    i = find_token(document.header, "\\tablestyle")
        # Drop the \tablestyle header line entirely.
        del document.header[i]
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings.

    Per-file bibliography encodings are removed from bibtex insets; with
    biblatex, equivalent \\addbibresource[bibencoding=...] lines are added
    to the preamble and the inset is wrapped in a Note.
    """

    # Determine the citation engine in use.
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
        engine = get_value(document.header, "\\cite_engine", i)

    # Check whether biblatex is in use.
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",

        # Walk all bibtex insets.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
            # file_encodings holds tab-separated "<bibfile> <encoding>" pairs.
            enclist = encodings.split("\t")
                ppp = pp.split(" ", 1)
                encmap[ppp[0]] = ppp[1]
            # Emit one \addbibresource per bibfile, with its encoding if known.
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming"""

    # FIXME: Revert to TeX code?
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        # Only cells with a trim="..." attribute need treatment.
        j = document.body[i].find('trim="')
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# Local layout definition for the Flex:Ruby inset; used by
# convert_ruby_module()/revert_ruby_module() below to swap between the
# real "ruby" module and this embedded fallback.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition"""
    # del_local_layout() reports whether the embedded Ruby layout was
    # present (and removed); only then does the document need the module.
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition"""
    # Only documents that actually loaded the module get the embedded
    # fallback layout appended.
    had_module = document.del_module("ruby")
    if had_module:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    # Only the two Japanese language variants are affected.
    if not lang.startswith("japanese"):
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    # Only generic utf8 needs to be mapped back to the Japanese variants.
    if inputenc != "utf8":
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """Replace lineno setting with user-preamble code."""

    # Read (and remove) the native header settings.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        options = "[" + options + "]"
    # Emulate the feature by loading the lineno package in the preamble.
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    """Replace user-preamble code with native lineno support."""
    i = find_token(document.preamble, "\\linenumbers", 1)
        # The line before \linenumbers should be the \usepackage call;
        # capture its options and remove both preamble lines.
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
            options = usepkg.group(1).strip("[]")
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Insert the native header settings before \index.
    k = find_token(document.header, "\\index ")
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    """Convert default document option due to encoding change in aa class."""

    # Only the astronomy & astrophysics (aa) class is affected.
    if document.textclass != "aa":

    i = find_token(document.header, "\\use_default_options true")
    val = get_value(document.header, "\\inputencoding")
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
    if val == "auto-legacy" or val == "latin9":
        # Keep the old latin9 behavior by passing it as explicit class option.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
            document.header.insert(i, "\\options latin9")
            document.header[k] += ",latin9"
def revert_aaencoding(document):
    """Revert default document option due to encoding change in aa class."""

    # Only the astronomy & astrophysics (aa) class is affected.
    if document.textclass != "aa":

    i = find_token(document.header, "\\use_default_options true")
    val = get_value(document.header, "\\inputencoding")
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
    # Pass utf8 as explicit class option instead of relying on the default.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
        document.header.insert(i, "\\options utf8")
        document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
    # Collect the new languages actually used: the main language plus any
    # \lang switches in the body.
    if document.language in new_languages:
        used_languages = set((document.language, ))
        used_languages = set()

        i = find_token(document.body, "\\lang", i+1)
        val = get_value(document.body, "\\lang", i)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
# Local layout definition for the deprecated Flex:Glosse inset (covington
# "linggloss" environment); used by convert_linggloss()/revert_linggloss().
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Local layout definition for the deprecated Flex:Tri-Glosse inset
# (covington "lingglosss" environment); companion to gloss_inset_def.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """ Move old ling glosses to local layout """
    # For each deprecated gloss flex inset present in the body, embed the
    # matching local layout definition so the document keeps compiling.
    for inset, layout in (('\\begin_inset Flex Glosse', gloss_inset_def),
                          ('\\begin_inset Flex Tri-Glosse', glosss_inset_def)):
        if find_token(document.body, inset, 0) != -1:
            document.append_local_layout(layout)
def revert_linggloss(document):
    " Revert to old ling gloss definitions "
    # Gloss insets belong to the linguistics module; nothing to do otherwise.
    if not "linguistics" in document.get_module_list():
    # Drop the compatibility local layouts (re-)added by convert_linggloss().
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)
    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
            i = find_token(document.body, glosse, i+1)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
            # Optional argument: collect its content for the reverted command.
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            # First mandatory gloss line (Argument post:1).
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            # Second gloss line (Argument post:2).
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            # Third gloss line (Argument post:3); only emitted for \trigloss below.
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
            # Rebuild the inset as the plain LaTeX command `cmd` wrapped in ERT.
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")
            postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")
            # Replace the tail of the inset first so the earlier indices
            # (beginPlain, i) remain valid for the following edits.
            document.body[endPlain:endInset + 1] = postcontent
            # Assigning to the empty slice inserts precontent after beginPlain.
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            document.append_local_layout("Requires covington")
def revert_subexarg(document):
    " Revert linguistic subexamples with argument to ERT "
    # Subexample layouts are provided by the linguistics module.
    if not "linguistics" in document.get_module_list():
        i = find_token(document.body, "\\begin_layout Subexample", i+1)
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
        # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
            j = find_end_of_layout(document.body, k)
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        # Flatten the argument inset content to plain LaTeX for the ERT below.
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            del document.body[arg : endarg + 1]
        # Opening of the subexamples environment, carrying the optional argument.
        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
        # re-find end of layout
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
        # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
            # Each follow-up Subexample becomes a Standard layout with \item in ERT.
            document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
            j = find_end_of_layout(document.body, k)
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
        endev = put_cmd_in_ert("\\end{subexamples}")
        # Close the environment after the last consecutive Subexample layout,
        # then rewrite the first one as \begin{subexamples}... + first \item.
        document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
            + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
        document.append_local_layout("Requires covington")
def revert_drs(document):
    " Revert DRS insets (linguistics) to ERT "
    # DRS insets are provided by the linguistics module.
    if not "linguistics" in document.get_module_list():
    # All DRS inset flavours; each is mapped to its LaTeX command further down.
    drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
             "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
             "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
             "\\begin_inset Flex SDRS"]
            i = find_token(document.body, drs, i+1)
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            # Check for arguments
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            # The deletions above shifted indices: re-find the inset end.
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
            endarg = find_end_of_inset(document.body, arg)
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            # Post-arguments 1-4 (conditions/consequences of the DRS variants).
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg1content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg2content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg3content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            j = find_end_of_inset(document.body, i)
                document.warning("Malformed LyX document: Can't find end of DRS inset")
            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg4content = []
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                    del document.body[arg : endarg + 1]
            # The respective LaTeX command
            if drs == "\\begin_inset Flex DRS*":
            elif drs == "\\begin_inset Flex IfThen-DRS":
            elif drs == "\\begin_inset Flex Cond-DRS":
            elif drs == "\\begin_inset Flex QDRS":
            elif drs == "\\begin_inset Flex NegDRS":
            elif drs == "\\begin_inset Flex SDRS":
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            # Build the ERT that precedes the inset content ...
            precontent = put_cmd_in_ert(cmd)
            precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
            if drs == "\\begin_inset Flex SDRS":
                precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
            precontent += put_cmd_in_ert("{")
            # ... and the ERT that follows it, closing all braces.
            if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
                postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
                if cmd == "\\condrs" or cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
                    postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
                postcontent = put_cmd_in_ert("}")
            # Replace the inset tail first so earlier indices stay valid;
            # assigning to the empty slice inserts precontent after beginPlain.
            document.body[endPlain:endInset + 1] = postcontent
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]
                document.append_local_layout("Provides covington 1")
                add_to_preamble(document, ["\\usepackage{drs,covington}"])
def revert_babelfont(document):
    " Reverts the use of \\babelfont to user preamble "
    # \babelfont is only used with non-TeX fonts ...
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
    # ... and with the babel language package.
    i = find_token(document.header, '\\language_package', 0)
        document.warning("Malformed LyX document: Missing \\language_package.")
    if get_value(document.header, "\\language_package", 0) != "babel":
    # check font settings
    roman = sans = typew = "default"
    sf_scale = tt_scale = 100.0
    j = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        roman = romanfont[2].strip('"')
        # Reset the header to the default font; the real font moves to the preamble.
        romanfont[2] = '"default"'
        document.header[j] = " ".join(romanfont)
    j = find_token(document.header, "\\font_sans", 0)
        document.warning("Malformed LyX document: Missing \\font_sans.")
        # We need to use this regex since split() does not handle quote protection
        sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        sans = sansfont[2].strip('"')
        sansfont[2] = '"default"'
        document.header[j] = " ".join(sansfont)
    j = find_token(document.header, "\\font_typewriter", 0)
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        typew = ttfont[2].strip('"')
        ttfont[2] = '"default"'
        document.header[j] = " ".join(ttfont)
    i = find_token(document.header, "\\font_osf", 0)
        document.warning("Malformed LyX document: Missing \\font_osf.")
        osf = str2bool(get_value(document.header, "\\font_osf", i))
    j = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        sfscale = document.header[j].split()
        document.header[j] = " ".join(sfscale)
            sf_scale = float(val)
            document.warning("Invalid font_sf_scale value: " + val)
    j = find_token(document.header, "\\font_tt_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
        ttscale = document.header[j].split()
        document.header[j] = " ".join(ttscale)
            tt_scale = float(val)
            document.warning("Invalid font_tt_scale value: " + val)
    # set preamble stuff
    pretext = ['%% This document must be processed with xelatex or lualatex!']
    pretext.append('\\AtBeginDocument{%')
    if roman != "default":
        pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
    if sans != "default":
        sf = '\\babelfont{sf}['
        if sf_scale != 100.0:
            # \babelfont scale options are given as a factor, not a percentage.
            sf += 'Scale=' + str(sf_scale / 100.0) + ','
        sf += 'Mapping=tex-text]{' + sans + '}'
    if typew != "default":
        tw = '\\babelfont{tt}'
        if tt_scale != 100.0:
            tw += '[Scale=' + str(tt_scale / 100.0) + ']'
        tw += '{' + typew + '}'
        pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
    insert_to_preamble(document, pretext)
def revert_minionpro(document):
    " Revert native MinionPro font definition (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)
    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')
    i = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        # This revert routine only handles the minionpro font.
        if roman != "minionpro":
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        j = find_token(document.header, "\\font_osf true", 0)
        preamble = "\\usepackage["
            # Old-style figures are passed as a package option instead.
            document.header[j] = "\\font_osf false"
        preamble += "]{MinionPro}"
        add_to_preamble(document, [preamble])
        # Remove the (now reverted) \font_roman_opts header line.
        del document.header[x]
def revert_font_opts(document):
    " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
    # \babelfont is emitted for babel documents, \set...font (fontspec) otherwise.
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    Babel = (get_value(document.header, "\\language_package") == "babel")
    # 1. Roman font options.
    regexp = re.compile(r'(\\font_roman_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = romanopts[1].strip('"')
        del document.header[i]
            regexp = re.compile(r'(\\font_roman)')
            i = find_re(document.header, regexp, 0)
                # We need to use this regex since split() does not handle quote protection
                romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = romanfont[2].strip('"')
                # Reset the header font; the actual font goes to the preamble.
                romanfont[2] = '"default"'
                document.header[i] = " ".join(romanfont)
                if font != "default":
                        preamble = "\\babelfont{rm}["
                        preamble = "\\setmainfont["
                    preamble += "Mapping=tex-text]{"
                    add_to_preamble(document, [preamble])
    # 2. Sans serif font options (with optional scaling).
    regexp = re.compile(r'(\\font_sans_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = sfopts[1].strip('"')
        del document.header[i]
            regexp = re.compile(r'(\\font_sf_scale)')
            i = find_re(document.header, regexp, 0)
                scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
            regexp = re.compile(r'(\\font_sans)')
            i = find_re(document.header, regexp, 0)
                # We need to use this regex since split() does not handle quote protection
                sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = sffont[2].strip('"')
                sffont[2] = '"default"'
                document.header[i] = " ".join(sffont)
                if font != "default":
                        preamble = "\\babelfont{sf}["
                        preamble = "\\setsansfont["
                        # Scale percentage NN becomes the fontspec factor 0.NN.
                        preamble += "Scale=0."
                        preamble += scaleval
                    preamble += "Mapping=tex-text]{"
                    add_to_preamble(document, [preamble])
    # 3. Typewriter font options (with optional scaling).
    regexp = re.compile(r'(\\font_typewriter_opts)')
    i = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = ttopts[1].strip('"')
        del document.header[i]
            regexp = re.compile(r'(\\font_tt_scale)')
            i = find_re(document.header, regexp, 0)
                scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
            regexp = re.compile(r'(\\font_typewriter)')
            i = find_re(document.header, regexp, 0)
                # We need to use this regex since split() does not handle quote protection
                ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = ttfont[2].strip('"')
                ttfont[2] = '"default"'
                document.header[i] = " ".join(ttfont)
                if font != "default":
                        preamble = "\\babelfont{tt}["
                        preamble = "\\setmonofont["
                        preamble += "Scale=0."
                        preamble += scaleval
                    preamble += "Mapping=tex-text]{"
                    add_to_preamble(document, [preamble])
def revert_plainNotoFonts_xopts(document):
    " Revert native (straight) Noto font definition (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    y = find_token(document.header, "\\font_osf true", 0)
    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)
    # Nothing to revert if there are neither extra options nor old-style figures.
    if x == -1 and not osf:
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = romanopts[1].strip('"')
    i = find_token(document.header, "\\font_roman", 0)
    # We need to use this regex since split() does not handle quote protection
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    roman = romanfont[1].strip('"')
    # Only the "plain" Noto setup (roman Noto, sans/tt untouched) is handled here.
    if roman != "NotoSerif-TLF":
    j = find_token(document.header, "\\font_sans", 0)
    # We need to use this regex since split() does not handle quote protection
    sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    sf = sffont[1].strip('"')
    j = find_token(document.header, "\\font_typewriter", 0)
    # We need to use this regex since split() does not handle quote protection
    ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
    tt = ttfont[1].strip('"')
    # So we have noto as "complete font"
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)
    preamble = "\\usepackage["
    preamble += "]{noto}"
    add_to_preamble(document, [preamble])
        # OSF is carried by the package options now; reset the header flag.
        document.header[y] = "\\font_osf false"
        del document.header[x]
def revert_notoFonts_xopts(document):
    " Revert native (extended) Noto font definition (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    fm = createFontMapping(['Noto'])
    # revert_fonts() collects the required packages/options in `fontmap`.
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_IBMFonts_xopts(document):
    " Revert native IBM font definition (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    fm = createFontMapping(['IBM'])
    # revert_fonts() collects the required packages/options in `fontmap`.
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_AdobeFonts_xopts(document):
    " Revert native Adobe font definition (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    fm = createFontMapping(['Adobe'])
    # revert_fonts() collects the required packages/options in `fontmap`.
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def convert_osf(document):
    " Convert \\font_osf param to new format "
    # The single \font_osf flag is split into per-family
    # \font_roman_osf / \font_sans_osf / \font_typewriter_osf flags.
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    i = find_token(document.header, '\\font_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_osf.")
    # Sans/typewriter fonts that actually support old-style figures.
    osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
    osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
    osfval = str2bool(get_value(document.header, "\\font_osf", i))
    document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
        x = find_token(document.header, "\\font_sans", 0)
            document.warning("Malformed LyX document: Missing \\font_sans.")
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            sf = sffont[1].strip('"')
                document.header.insert(i, "\\font_sans_osf true")
                document.header.insert(i, "\\font_sans_osf false")
        x = find_token(document.header, "\\font_typewriter", 0)
            document.warning("Malformed LyX document: Missing \\font_typewriter.")
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            tt = ttfont[1].strip('"')
                document.header.insert(i + 1, "\\font_typewriter_osf true")
                document.header.insert(i + 1, "\\font_typewriter_osf false")
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
def revert_osf(document):
    " Revert \\font_*_osf params "
    # Collapse the per-family OSF flags back into a single \font_osf flag.
    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    i = find_token(document.header, '\\font_roman_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_roman_osf.")
    osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
    document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
    i = find_token(document.header, '\\font_sans_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_sans_osf.")
    osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
    del document.header[i]
    i = find_token(document.header, '\\font_typewriter_osf', 0)
        document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
    # Any family with OSF enables the legacy flag.
    osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
    del document.header[i]
        i = find_token(document.header, '\\font_osf', 0)
            document.warning("Malformed LyX document: Missing \\font_osf.")
        document.header[i] = "\\font_osf true"
def revert_texfontopts(document):
    " Revert native TeX font definitions (with extra options) to LaTeX "
    # Only relevant for TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # Roman fonts whose extra options this routine knows how to revert.
    rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
    # First the sf (biolinum only)
    regexp = re.compile(r'(\\font_sans_opts)')
    x = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = sfopts[1].strip('"')
        i = find_token(document.header, "\\font_sans", 0)
            document.warning("Malformed LyX document: Missing \\font_sans.")
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            sans = sffont[1].strip('"')
            if sans == "biolinum":
                sffont[1] = '"default"'
                document.header[i] = " ".join(sffont)
                j = find_token(document.header, "\\font_sans_osf true", 0)
                k = find_token(document.header, "\\font_sf_scale", 0)
                    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                    sfscale = document.header[k].split()
                    document.header[k] = " ".join(sfscale)
                        sf_scale = float(val)
                        document.warning("Invalid font_sf_scale value: " + val)
                preamble = "\\usepackage["
                    # OSF moves into the package options.
                    document.header[j] = "\\font_sans_osf false"
                if sf_scale != 100.0:
                    # biolinum expects the scale as a factor ("scaled=0.NN").
                    preamble += 'scaled=' + str(sf_scale / 100.0) + ','
                preamble += "]{biolinum}"
                add_to_preamble(document, [preamble])
            del document.header[x]
    # Now the roman font options.
    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)
    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')
    i = find_token(document.header, "\\font_roman", 0)
        document.warning("Malformed LyX document: Missing \\font_roman.")
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if not roman in rmfonts:
        romanfont[1] = '"default"'
        document.header[i] = " ".join(romanfont)
        # Map the LyX font name to the LaTeX package implementing it.
        if roman == "utopia":
        elif roman == "palatino":
            package = "mathpazo"
        elif roman == "times":
            package = "mathptmx"
        elif roman == "xcharter":
            package = "XCharter"
        j = find_token(document.header, "\\font_roman_osf true", 0)
            # Per-package option spelling for old-style figures.
            if roman == "cochineal":
                osf = "proportional,osf,"
            elif roman == "utopia":
            elif roman == "garamondx":
            elif roman == "libertine":
            elif roman == "palatino":
            elif roman == "xcharter":
            document.header[j] = "\\font_roman_osf false"
        k = find_token(document.header, "\\font_sc true", 0)
            if roman == "utopia":
            if roman == "palatino" and osf == "":
            document.header[k] = "\\font_sc false"
        preamble = "\\usepackage["
        preamble += "]{" + package + "}"
        add_to_preamble(document, [preamble])
        del document.header[x]
def convert_CantarellFont(document):
    """Handle Cantarell font definition to LaTeX."""
    # Native font conversion only applies to documents using TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Cantarell'])
    convert_fonts(document, mapping, "oldstyle")
def revert_CantarellFont(document):
    " Revert native Cantarell font definition to LaTeX "
    # Only relevant for TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Cantarell'])
        # revert_fonts() collects the required packages/options in `fontmap`.
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Handle Chivo font definition to LaTeX."""
    # Native font conversion only applies to documents using TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Chivo'])
    convert_fonts(document, mapping, "oldstyle")
def revert_ChivoFont(document):
    " Revert native Chivo font definition to LaTeX "
    # Only relevant for TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Chivo'])
        # revert_fonts() collects the required packages/options in `fontmap`.
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Handle Fira font definition to LaTeX."""
    # Native font conversion only applies to documents using TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Fira'])
    convert_fonts(document, mapping, "lf")
def revert_FiraFont(document):
    " Revert native Fira font definition to LaTeX "
    # Only relevant for TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Fira'])
        # revert_fonts() collects the required packages/options in `fontmap`.
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """Move semibold options to extraopts.

    The IBM Plex semibold variants (roman, sans and typewriter) are
    replaced by their base font; with TeX fonts, a "semibold" entry is
    additionally recorded in the respective \\font_*_opts header tag.

    Fixes a copy/paste bug in the original: the typewriter branch built
    its new options line from the *sans* options (``sfopts``) instead of
    the typewriter options it had just parsed.
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    # (font tag, semibold variant, base font, opts tag, insertion anchor)
    for fonttag, sbfont, basefont, opttag, anchor in (
            ("\\font_roman", "IBMPlexSerifSemibold", "IBMPlexSerif",
             "\\font_roman_opts", "\\font_sf_scale"),
            ("\\font_sans", "IBMPlexSansSemibold", "IBMPlexSans",
             "\\font_sans_opts", "\\font_sf_scale"),
            ("\\font_typewriter", "IBMPlexMonoSemibold", "IBMPlexMono",
             "\\font_typewriter_opts", "\\font_tt_scale")):
        _convert_semibold_font(document, NonTeXFonts, fonttag, sbfont,
                               basefont, opttag, anchor)


def _convert_semibold_font(document, nontexfonts, fonttag, sbfont, basefont,
                           opttag, anchor):
    """Convert one semibold font.

    Swap *sbfont* for *basefont* in header tag *fonttag* and, for TeX
    fonts, record a "semibold" option in *opttag*, inserting that tag
    before *anchor* if it does not exist yet.
    """

    i = find_token(document.header, fonttag, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing " + fonttag + ".")
        return
    # We need to use this regex since split() does not handle quote protection
    font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if font[1].strip('"') != sbfont:
        return
    font[1] = '"' + basefont + '"'
    document.header[i] = " ".join(font)

    if nontexfonts:
        # Non-TeX fonts carry no package options.
        return

    x = find_re(document.header, re.compile(re.escape(opttag)), 0)
    if x == -1:
        # No opts tag yet: insert one at a sensible place.
        fo = find_token(document.header, anchor)
        if fo == -1:
            document.warning("Malformed LyX document! Missing " + anchor)
            return
        document.header.insert(fo, opttag + " \"semibold\"")
    else:
        # Prepend "semibold" to the already existing options.
        # We need to use this regex since split() does not handle quote protection
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = opttag + " \"semibold, " + opts[1].strip('"') + "\""
def convert_NotoRegulars(document):
    """Merge diverse Noto regular fonts into one name per family."""

    # (header tag, old font name, new font name)
    replacements = (
        ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
        ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
        ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular"),
    )
    for token, oldname, newname in replacements:
        i = find_token(document.header, token, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing " + token + ".")
            continue
        # We need to use this regex since split() does not handle quote protection
        parts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if parts[1].strip('"') == oldname:
            parts[1] = '"' + newname + '"'
            document.header[i] = " ".join(parts)
def convert_CrimsonProFont(document):
    """Convert a LaTeX-preamble CrimsonPro font setup to the native setting."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Non-TeX font documents are not affected.
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    """Revert the native CrimsonPro font definition to LaTeX preamble code.

    Only acts when the document uses TeX fonts; collected packages and
    options are appended to the user preamble.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = dict()
    mapping = createFontMapping(['CrimsonPro'])
    if revert_fonts(document, mapping, collected, False, True):
        add_preamble_fonts(document, collected)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options."""

    # Only memoir and KOMA-script ("scr*") classes know these sizes.
    # Fixed: the original sliced [:2], which can never equal the
    # three-character string "scr", so the guard was ineffective.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active, the paper size is handled by geometry.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes LyX 2.3 already understands: no reversion needed for these.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    # Reset the header value and pass the size as a class option instead.
    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No options line yet: create one just before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA to options."""

    # Only memoir and KOMA-script ("scr*") classes know these sizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # Geometry already in use: nothing to convert.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes the old format already supported natively.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry.
        # Fixed: the original assigned to document.header[1] (hard-coded
        # index) instead of the line actually found above.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA to the fontsize= class option."""

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes the old format supported natively.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No options line yet: create one just before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
# NOTE(review): the embedded original line numbers jump (e.g. 3216 -> 3220),
# so intermediate lines (guards, returns, loop heads, else branches) have
# been elided from this extract.  The remaining code is kept byte-identical
# and only annotated; do not assume the visible lines are contiguous.
3212 def revert_dupqualicites(document):
3213 " Revert qualified citation list commands with duplicate keys to ERT "
3215 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3216 # we need to revert those with multiple uses of the same key.
# Determine the cite engine; only biblatex variants support qualified lists.
3220 i = find_token(document.header, "\\cite_engine", 0)
3222 document.warning("Malformed document! Missing \\cite_engine")
3224 engine = get_value(document.header, "\\cite_engine", i)
3226 if not engine in ["biblatex", "biblatex-natbib"]:
3229 # Citation insets that support qualified lists, with their LaTeX code
# Mapping from LyX citation command to the biblatex multi-cite macro.
# NOTE(review): entries before "citet" (presumably "cite"/"Cite") appear
# to have been elided — confirm against the full file.
3233 "citet" : "textcites",
3234 "Citet" : "Textcites",
3235 "citep" : "parencites",
3236 "Citep" : "Parencites",
3237 "Footcite" : "Smartcites",
3238 "footcite" : "smartcites",
3239 "Autocite" : "Autocites",
3240 "autocite" : "autocites",
# Walk all citation insets in the body.
3245 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3248 j = find_end_of_inset(document.body, i)
3250 document.warning("Can't find end of citation inset at line %d!!" %(i))
3254 k = find_token(document.body, "LatexCommand", i, j)
3256 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3260 cmd = get_value(document.body, "LatexCommand", k)
3261 if not cmd in list(ql_citations.keys()):
# Only insets that actually carry pre-/post-text lists need reverting.
3265 pres = find_token(document.body, "pretextlist", i, j)
3266 posts = find_token(document.body, "posttextlist", i, j)
3267 if pres == -1 and posts == -1:
3272 key = get_quoted_value(document.body, "key", i, j)
3274 document.warning("Citation inset at line %d does not have a key!" %(i))
# Duplicate keys are the problematic case; unique key sets stay native.
3278 keys = key.split(",")
3279 ukeys = list(set(keys))
3280 if len(keys) == len(ukeys):
# Collect the per-key pre-/post-texts ("key text" pairs, tab-separated).
3285 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3286 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3288 pre = get_quoted_value(document.body, "before", i, j)
3289 post = get_quoted_value(document.body, "after", i, j)
3290 prelist = pretexts.split("\t")
3293 ppp = pp.split(" ", 1)
# Repeated keys accumulate their texts tab-separated in the map.
3299 if ppp[0] in premap:
3300 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3302 premap[ppp[0]] = val
3303 postlist = posttexts.split("\t")
3307 ppp = pp.split(" ", 1)
3313 if ppp[0] in postmap:
3314 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3316 postmap[ppp[0]] = val
3317 # Replace known new commands with ERT
# Global notes containing parentheses must be brace-protected, since the
# generated macro uses (...) delimiters for them.
3318 if "(" in pre or ")" in pre:
3319 pre = "{" + pre + "}"
3320 if "(" in post or ")" in post:
3321 post = "{" + post + "}"
3322 res = "\\" + ql_citations[cmd]
3324 res += "(" + pre + ")"
3326 res += "(" + post + ")"
# Emit one [prenote][postnote]{key} group per key occurrence, consuming
# the accumulated texts front-to-back.
3330 if premap.get(kk, "") != "":
3331 akeys = premap[kk].split("\t", 1)
3334 res += "[" + akey + "]"
3336 premap[kk] = "\t".join(akeys[1:])
3339 if postmap.get(kk, "") != "":
3340 akeys = postmap[kk].split("\t", 1)
3343 res += "[" + akey + "]"
3345 postmap[kk] = "\t".join(akeys[1:])
3348 elif premap.get(kk, "") != "":
3350 res += "{" + kk + "}"
# Finally replace the whole inset with the assembled ERT command.
3351 document.body[i:j+1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    """Convert LyX page size names ("a4paper" -> "a4" etc.)."""

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = [
        "letterpaper", "legalpaper", "executivepaper",
        "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper",
        "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper",
        "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        document.header[i] = "\\papersize " + val.replace("paper", "")
def revert_pagesizenames(document):
    """Revert LyX page size names ("a4" -> "a4paper" etc.)."""

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = [
        "letter", "legal", "executive",
        "a0", "a1", "a2", "a3", "a4", "a5", "a6",
        "b0", "b1", "b2", "b3", "b4", "b5", "b6",
        "c0", "c1", "c2", "c3", "c4", "c5", "c6",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        document.header[i] = "\\papersize " + val + "paper"
def revert_theendnotes(document):
    """Revert native support of \\theendnotes to TeX code."""

    modules = document.get_module_list()
    if "endnotes" not in modules and "foottoend" not in modules:
        return

    pos = -1
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        document.body[pos : end + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    """Revert native support of the enotez package to TeX code."""

    modules = document.get_module_list()
    if "enotez" not in modules and "foottoenotez" not in modules:
        return

    # Only load the package if endnotes are actually used.
    used = find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    pos = -1
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        used = True
        document.body[pos : end + 1] = put_cmd_in_ert("\\printendnotes")

    if used:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    """Revert native support of memoir endnotes to TeX code."""

    if document.textclass != "memoir":
        return

    # memoir uses \pagenote natively; with an endnote module loaded the
    # flex insets are endnotes instead.
    modules = document.get_module_list()
    if ("enotez" in modules or "foottoenotez" in modules
            or "endnotes" in modules or "foottoend" in modules):
        encommand = "\\endnote"
    else:
        encommand = "\\pagenote"

    revert_flex_inset(document.body, "Endnote", encommand)

    pos = -1
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList pagenote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue
        if document.body[pos] == "\\begin_inset FloatList pagenote*":
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
# NOTE(review): the embedded original line numbers jump, so guard lines,
# else branches and loop control have been elided from this extract.  The
# remaining code is kept byte-identical and only annotated.
3467 def revert_totalheight(document):
3468 " Reverts graphics height parameter from totalheight to height "
# Walk all graphics insets in the body.
3472 i = find_token(document.body, "\\begin_inset Graphics", i)
3475 j = find_end_of_inset(document.body, i)
3477 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Look for an existing "special" parameter line within the inset.
3481 rx = re.compile(r'\s*special\s*(\S+)$')
3482 k = find_re(document.body, rx, i, j)
3486 m = rx.match(document.body[k])
3488 special = m.group(1)
3489 mspecial = special.split(',')
# Pull a height= entry out of the comma-separated special options.
# NOTE(review): remove() while iterating can skip the following element —
# presumably at most one height= entry occurs; confirm.
3490 for spc in mspecial:
3491 if spc[:7] == "height=":
3492 oldheight = spc.split('=')[1]
3493 mspecial.remove(spc)
3495 if len(mspecial) > 0:
3496 special = ",".join(mspecial)
# Look for a plain "height" parameter line of the inset.
3500 rx = re.compile(r'(\s*height\s*)(\S+)$')
3501 kk = find_re(document.body, rx, i, j)
3503 m = rx.match(document.body[kk])
# Move the height value into special as totalheight=..., keeping any
# remaining special options, and restore a plain height from the old
# height= special entry where one existed.
3509 val = val + "," + special
3510 document.body[k] = "\tspecial " + "totalheight=" + val
3512 document.body.insert(kk, "\tspecial totalheight=" + val)
3514 document.body[kk] = m.group(1) + oldheight
3516 del document.body[kk]
3517 elif oldheight != "":
3519 document.body[k] = "\tspecial " + special
3520 document.body.insert(k, "\theight " + oldheight)
3522 document.body[kk] = "\theight " + oldheight
# NOTE(review): the embedded original line numbers jump, so guard lines,
# else branches and loop control have been elided from this extract.  The
# remaining code is kept byte-identical and only annotated.  This is the
# inverse of revert_totalheight.
3526 def convert_totalheight(document):
3527 " Converts graphics height parameter from totalheight to height "
# Walk all graphics insets in the body.
3531 i = find_token(document.body, "\\begin_inset Graphics", i)
3534 j = find_end_of_inset(document.body, i)
3536 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Look for an existing "special" parameter line within the inset.
3540 rx = re.compile(r'\s*special\s*(\S+)$')
3541 k = find_re(document.body, rx, i, j)
3545 m = rx.match(document.body[k])
3547 special = m.group(1)
3548 mspecial = special.split(',')
# Pull a totalheight= entry out of the comma-separated special options.
# NOTE(review): remove() while iterating can skip the following element —
# presumably at most one totalheight= entry occurs; confirm.
3549 for spc in mspecial:
3550 if spc[:12] == "totalheight=":
3551 newheight = spc.split('=')[1]
3552 mspecial.remove(spc)
3554 if len(mspecial) > 0:
3555 special = ",".join(mspecial)
# Look for a plain "height" parameter line of the inset.
3559 rx = re.compile(r'(\s*height\s*)(\S+)$')
3560 kk = find_re(document.body, rx, i, j)
3562 m = rx.match(document.body[kk])
# Move the plain height value into special as height=..., and turn a
# totalheight= special entry into the plain height parameter.
3568 val = val + "," + special
3569 document.body[k] = "\tspecial " + "height=" + val
3571 document.body.insert(kk + 1, "\tspecial height=" + val)
3573 document.body[kk] = m.group(1) + newheight
3575 del document.body[kk]
3576 elif newheight != "":
3577 document.body.insert(k, "\theight " + newheight)
def convert_changebars(document):
    """Convert the changebars module to the native \\change_bars param."""

    if "changebars" not in document.get_module_list():
        return

    anchor = find_token(document.header, "\\output_changes", 0)
    if anchor == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        # Drop the module even if we cannot place the new header tag.
        document.del_module("changebars")
        return

    document.header.insert(anchor, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    """Convert the native \\change_bars param back to the changebars module."""

    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    if get_value(document.header, "\\change_bars", i) == "true":
        document.add_module("changebars")

    del document.header[i]
def convert_postpone_fragile(document):
    """Add a false \\postpone_fragile_content buffer param."""

    anchor = find_token(document.header, "\\output_changes", 0)
    if anchor == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(anchor, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    """Remove the \\postpone_fragile_content buffer param."""

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    """Strip the change tag from tabular column/row definitions."""

    pos = -1
    while True:
        pos = find_token(document.body, "\\begin_inset Tabular", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(pos, end):
            line = document.body[k]
            for tag in ("column", "row"):
                m = re.search('^<' + tag + '.*change="([^"]+)".*>$', line)
                if m:
                    line = line.replace(' change="' + m.group(1) + '"', '')
            document.body[k] = line
def convert_counter_maintenance(document):
    """Convert \\maintain_unincluded_children from boolean to tristate."""

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    # true -> strict, anything else -> no.
    val = get_value(document.header, "\\maintain_unincluded_children", i)
    newval = "strict" if val == "true" else "no"
    document.header[i] = "\\maintain_unincluded_children " + newval
def revert_counter_maintenance(document):
    """Revert \\maintain_unincluded_children to the old boolean value."""

    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    # no -> false, any other tristate value -> true.
    val = get_value(document.header, "\\maintain_unincluded_children", i)
    newval = "false" if val == "no" else "true"
    document.header[i] = "\\maintain_unincluded_children " + newval
# NOTE(review): the embedded original line numbers jump, so loop heads,
# guards and index advancement have been elided from this extract.  The
# remaining code is kept byte-identical and only annotated.
3687 def revert_counter_inset(document):
3688 " Revert counter inset to ERT, where possible"
# Counters that need an auxiliary LyXSave<name> counter in the preamble.
3690 needed_counters = {}
# Walk all counter insets in the body.
3692 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3695 j = find_end_of_inset(document.body, i)
3697 document.warning("Can't find end of counter inset at line %d!" % i)
# lyxonly counters have no LaTeX effect: drop the inset entirely.
3700 lyx = get_quoted_value(document.body, "lyxonly", i, j)
3702 # there is nothing we can do to affect the LyX counters
3703 document.body[i : j + 1] = []
3706 cnt = get_quoted_value(document.body, "counter", i, j)
3708 document.warning("No counter given for inset at line %d!" % i)
# Dispatch on the inset's LatexCommand.
# NOTE(review): document.warning(cmd) looks like leftover debug output.
3712 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
3713 document.warning(cmd)
# set: \setcounter{cnt}{value}
3716 val = get_quoted_value(document.body, "value", i, j)
3718 document.warning("Can't convert counter inset at line %d!" % i)
3720 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3721 elif cmd == "addto":
# addto: \addtocounter{cnt}{value}
3722 val = get_quoted_value(document.body, "value", i, j)
3724 document.warning("Can't convert counter inset at line %d!" % i)
3726 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3727 elif cmd == "reset":
3728 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
# save/restore use an auxiliary LyXSave<cnt> counter declared in the
# preamble (see the loop at the end).
3730 needed_counters[cnt] = 1
3731 savecnt = "LyXSave" + cnt
3732 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3733 elif cmd == "restore":
3734 needed_counters[cnt] = 1
3735 savecnt = "LyXSave" + cnt
3736 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
3738 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
# Replace the inset with the assembled ERT.
3741 document.body[i : j + 1] = ert
# Declare the auxiliary save counters in the preamble.
3746 for cnt in needed_counters:
3747 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3749 add_to_preamble(document, pretext)
# Conversion chain metadata for lyx2lyx: each entry is
# [target format number, [conversion functions to run]].
3756 supported_versions = ["2.4.0", "2.4"]
# NOTE(review): the "convert = [" opener and many rows are missing from
# this extract (the embedded original line numbers jump); the visible
# rows are kept byte-identical.
3758 [545, [convert_lst_literalparam]],
3763 [550, [convert_fontenc]],
3770 [557, [convert_vcsinfo]],
3771 [558, [removeFrontMatterStyles]],
3774 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3778 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3779 [566, [convert_hebrew_parentheses]],
3785 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3786 [573, [convert_inputencoding_namechange]],
3787 [574, [convert_ruby_module, convert_utf8_japanese]],
3788 [575, [convert_lineno, convert_aaencoding]],
3790 [577, [convert_linggloss]],
3794 [581, [convert_osf]],
3795 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converted due to extra options
3796 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3798 [585, [convert_pagesizes]],
3800 [587, [convert_pagesizenames]],
3802 [589, [convert_totalheight]],
3803 [590, [convert_changebars]],
3804 [591, [convert_postpone_fragile]],
3806 [593, [convert_counter_maintenance]],
# Revert chain: applied in descending format order when saving back to
# the 2.3 file format.  Each entry is [source format, [revert functions]].
3810 revert = [[593, [revert_counter_inset]],
3811 [592, [revert_counter_maintenance]],
3812 [591, [revert_colrow_tracking]],
3813 [590, [revert_postpone_fragile]],
3814 [589, [revert_changebars]],
3815 [588, [revert_totalheight]],
3816 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3817 [586, [revert_pagesizenames]],
3818 [585, [revert_dupqualicites]],
3819 [584, [revert_pagesizes,revert_komafontsizes]],
3820 [583, [revert_vcsinfo_rev_abbrev]],
3821 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3822 [581, [revert_CantarellFont,revert_FiraFont]],
3823 [580, [revert_texfontopts,revert_osf]],
3824 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3825 [578, [revert_babelfont]],
3826 [577, [revert_drs]],
3827 [576, [revert_linggloss, revert_subexarg]],
3828 [575, [revert_new_languages]],
3829 [574, [revert_lineno, revert_aaencoding]],
3830 [573, [revert_ruby_module, revert_utf8_japanese]],
3831 [572, [revert_inputencoding_namechange]],
3832 [571, [revert_notoFonts]],
3833 [570, [revert_cmidruletrimming]],
3834 [569, [revert_bibfileencodings]],
3835 [568, [revert_tablestyle]],
3836 [567, [revert_soul]],
3837 [566, [revert_malayalam]],
3838 [565, [revert_hebrew_parentheses]],
3839 [564, [revert_AdobeFonts]],
3840 [563, [revert_lformatinfo]],
3841 [562, [revert_listpargs]],
3842 [561, [revert_l7ninfo]],
3843 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3844 [559, [revert_timeinfo, revert_namenoextinfo]],
3845 [558, [revert_dateinfo]],
3846 [557, [addFrontMatterStyles]],
3847 [556, [revert_vcsinfo]],
3848 [555, [revert_bibencoding]],
3849 [554, [revert_vcolumns]],
3850 [553, [revert_stretchcolumn]],
3851 [552, [revert_tuftecite]],
3852 [551, [revert_floatpclass, revert_floatalignment]],
3853 [550, [revert_nospellcheck]],
3854 [549, [revert_fontenc]],
3855 [548, []], # dummy format change
3856 [547, [revert_lscape]],
3857 [546, [revert_xcharter]],
3858 [545, [revert_paratype]],
3859 [544, [revert_lst_literalparam]]
3863 if __name__ == "__main__":