1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font packages with their options to the user preamble.

    fontmap maps a LaTeX package name to the list of options collected for
    it by revert_fonts(); an empty option list yields a plain
    \\usepackage{pkg}, otherwise the options are emitted comma-joined in
    one bracket group.
    """
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            # Collapse all collected options into a single option list.
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Return the canonical map key for a package/options combination.

    The key is the package name, a ':' separator, and the '-'-joined
    option list, e.g. createkey("plex-sans", ["thin"]) -> "plex-sans:thin".
    An empty option list yields "pkg:".
    """
    return pkg + ':' + "-".join(options)
# NOTE(review): this excerpt is a numbered listing with lines elided; the
# enclosing class header and method 'def' lines (orig. 65-66, 78) are not
# visible here.  These lines initialize the attributes of a font-info
# record and (last line) compute its package key via createkey().
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# pkgkey combines package name and options (see createkey()).
79 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): constructor body of the font-mapping container (class and
# 'def __init__' headers are elided from this listing).  Holds the three
# lookup tables used by expandFontMapping()/getfontname().
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# NOTE(review): numbered listing with lines elided (e.g. the creation of
# the 'fe' font-info record around orig. 100-101 and parts of the
# name/option splitting, orig. 104-115).  Kept byte-identical; comments
# only.  Populates font2pkgmap/pkg2fontmap/pkginmap for one font family.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 """Expand fontinfo mapping"""
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
# When no package is given, the font name doubles as the package name.
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# NOTE(review): listing with elided lines — the early 'return None' /
# 'return fontname' statements (orig. 128, 132, 134-135) are not visible.
# Looks up the LyX font name for a package + option combination; presumably
# returns None when the key is unknown — TODO confirm against full source.
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Sanity check: the reverse mapping must agree with the forward one.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# NOTE(review): numbered listing with lines elided — the fontmapping
# construction, several 'if font == ...' dispatch lines and the final
# 'return fm' (orig. 146, 148, 152, 169, 190, 197) are not visible.
# Builds the font-info tables used by convert_fonts()/revert_fonts()
# for each requested font family name in fontlist.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# NOTE(review): numbered listing with many elided lines (loop scaffolding,
# option bookkeeping around orig. 206-255, branch bodies around 259-306).
# Kept byte-identical; comments only.  Translates \usepackage-based font
# loading found in the LaTeX preamble into native LyX header settings
# (\font_roman, \font_*_scale, \font_*_osf, \font_*_opts).
199 def convert_fonts(document, fm, osfoption = "osf"):
200 """Handle font definition (LaTeX preamble -> native)"""
201 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
202 rscaleopt = re.compile(r'^scaled?=(.*)')
204 # Check whether we go beyond font option feature introduction
205 haveFontOpts = document.end_format > 580
209 i = find_re(document.preamble, rpkg, i+1)
212 mo = rpkg.search(document.preamble[i])
213 if mo == None or mo.group(2) == None:
216 options = mo.group(2).replace(' ', '').split(",")
221 while o < len(options):
222 if options[o] == osfoption:
226 mo = rscaleopt.search(options[o])
234 if not pkg in fm.pkginmap:
239 # Try with name-option combination first
240 # (only one default option supported currently)
242 while o < len(options):
244 fn = fm.getfontname(pkg, [opt])
251 fn = fm.getfontname(pkg, [])
253 fn = fm.getfontname(pkg, options)
# A matching font was found: drop the \usepackage line from the preamble.
256 del document.preamble[i]
257 fontinfo = fm.font2pkgmap[fn]
258 if fontinfo.scaletype == None:
261 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
262 fontinfo.scaleval = oscale
263 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
264 if fontinfo.osfopt == None:
265 options.extend(osfoption)
267 osf = find_token(document.header, "\\font_osf false")
268 osftag = "\\font_osf"
269 if osf == -1 and fontinfo.fonttype != "math":
270 # Try with newer format
271 osftag = "\\font_" + fontinfo.fonttype + "_osf"
272 osf = find_token(document.header, osftag + " false")
274 document.header[osf] = osftag + " true"
275 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
276 del document.preamble[i-1]
278 if fontscale != None:
279 j = find_token(document.header, fontscale, 0)
281 val = get_value(document.header, fontscale, j)
# Header stores scale as an integer percentage, e.g. 0.75 -> "075".
285 scale = "%03d" % int(float(oscale) * 100)
286 document.header[j] = fontscale + " " + scale + " " + vals[1]
287 ft = "\\font_" + fontinfo.fonttype
288 j = find_token(document.header, ft, 0)
290 val = get_value(document.header, ft, j)
291 words = val.split() # ! splits also values like '"DejaVu Sans"'
292 words[0] = '"' + fn + '"'
293 document.header[j] = ft + ' ' + ' '.join(words)
294 if haveFontOpts and fontinfo.fonttype != "math":
295 fotag = "\\font_" + fontinfo.fonttype + "_opts"
296 fo = find_token(document.header, fotag)
298 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
300 # Sensible place to insert tag
301 fo = find_token(document.header, "\\font_sf_scale")
303 document.warning("Malformed LyX document! Missing \\font_sf_scale")
305 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# NOTE(review): numbered listing with elided lines (loop break/continue
# scaffolding, scale-value extraction around orig. 359-362, osf handling
# around 366-378).  Kept byte-identical; comments only.  Reverse of
# convert_fonts(): resets native \font_* header entries to "default" and
# records the packages/options to re-emit in the preamble via fontmap.
308 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
309 """Revert native font definition to LaTeX"""
310 # fonlist := list of fonts created from the same package
311 # Empty package means that the font-name is the same as the package-name
312 # fontmap (key = package, val += found options) will be filled
313 # and used later in add_preamble_fonts() to be added to user-preamble
315 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
316 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
318 while i < len(document.header):
319 i = find_re(document.header, rfontscale, i+1)
322 mo = rfontscale.search(document.header[i])
325 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
326 val = get_value(document.header, ft, i)
327 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
328 font = words[0].strip('"') # TeX font name has no whitespace
329 if not font in fm.font2pkgmap:
331 fontinfo = fm.font2pkgmap[font]
332 val = fontinfo.package
333 if not val in fontmap:
336 if OnlyWithXOpts or WithXOpts:
337 if ft == "\\font_math":
339 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
340 if ft == "\\font_sans":
341 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
342 elif ft == "\\font_typewriter":
343 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
344 x = find_re(document.header, regexp, 0)
345 if x == -1 and OnlyWithXOpts:
349 # We need to use this regex since split() does not handle quote protection
350 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
351 opts = xopts[1].strip('"').split(",")
352 fontmap[val].extend(opts)
353 del document.header[x]
# Reset the header entry to the default font.
354 words[0] = '"default"'
355 document.header[i] = ft + ' ' + ' '.join(words)
356 if fontinfo.scaleopt != None:
357 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
358 mo = rscales.search(xval)
363 # set correct scale option
364 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
365 if fontinfo.osfopt != None:
367 if fontinfo.osfdef == "true":
369 osf = find_token(document.header, "\\font_osf " + oldval)
370 if osf == -1 and ft != "\\font_math":
371 # Try with newer format
372 osftag = "\\font_roman_osf " + oldval
373 if ft == "\\font_sans":
374 osftag = "\\font_sans_osf " + oldval
375 elif ft == "\\font_typewriter":
376 osftag = "\\font_typewriter_osf " + oldval
377 osf = find_token(document.header, osftag)
379 fontmap[val].extend([fontinfo.osfopt])
380 if len(fontinfo.options) > 0:
381 fontmap[val].extend(fontinfo.options)
384 ###############################################################################
386 ### Conversion and reversion routines
388 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings (old -> 2.4 names)."""
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing header line; find_token returns -1 and
    # document.header[-1] would otherwise clobber the last header line.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings (2.4 names -> old names)."""
    i = find_token(document.header, "\\inputencoding", 0)
    # find_token returns -1 when the token is absent; bail out instead of
    # indexing the last header line by accident.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX."""
    # Native font settings only apply with TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Noto'])
        convert_fonts(document, fm)
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> options, consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX."""
    # Native font settings only apply with TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['DejaVu', 'IBM'])
        convert_fonts(document, fm)
def revert_latexFonts(document):
    """Revert native DejaVu/IBMPlex font definition to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> options, consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX."""
    # Native font settings only apply with TeX fonts.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(['Adobe'])
        convert_fonts(document, fm)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> options, consumed by add_preamble_fonts().
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    """Remove styles Begin/EndFrontmatter.

    Deletes every Begin/EndFrontmatter layout from the body, including any
    blank lines that immediately follow the layout.
    """
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    i = 0
    while True:
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        if i == -1:
            return
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        # Swallow trailing empty lines so no stray blanks remain.
        while document.body[j+1].strip() == '':
            j += 1
        document.body[i:j+1] = []
# NOTE(review): numbered listing with elided lines — parts of the inserted
# note text (orig. 490, 492-493, 495-496), the 'first' bookkeeping and loop
# scaffolding (orig. 500-505, 508, 512-517) are not visible.  Kept
# byte-identical; comments only.  Wraps the contiguous run of elsarticle
# frontmatter layouts in Begin/EndFrontmatter styles.
475 def addFrontMatterStyles(document):
476 """Use styles Begin/EndFrontmatter for elsarticle"""
478 if document.textclass != "elsarticle":
481 def insertFrontmatter(prefix, line):
# Trim surrounding blank lines before splicing in the new layout.
483 while above > 0 and document.body[above-1].strip() == '':
486 while document.body[below].strip() == '':
488 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
489 '\\begin_inset Note Note',
491 '\\begin_layout Plain Layout',
494 '\\end_inset', '', '',
497 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
498 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
499 tokenend = len('\\begin_layout ')
503 i = find_token_exact(document.body, '\\begin_layout ', i+1)
506 layout = document.body[i][tokenend:].strip()
507 if layout not in layouts:
509 k = find_end_of_layout(document.body, i)
511 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End before Begin so earlier line numbers stay valid.
518 insertFrontmatter('End', k+1)
519 insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    """Add param literal to include inset.

    Inserts a 'literal "true"' parameter line into every
    CommandInset include, right after the last existing parameter.
    """
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
            continue
        # Skip to the blank line terminating the parameter block.
        while i < j and document.body[i].strip() != '':
            i += 1
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    """Remove param literal from include inset."""
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
            continue
        # Drop the 'literal' parameter wherever it occurs within the inset.
        del_token(document.body, 'literal', i, j)
# NOTE(review): numbered listing with elided lines (guards, try/except
# around the float conversion, and option-emptiness branches, orig.
# 563-564, 566, 568, 570-571, 573-579, 584-587, 591-592, 595-598,
# 601-604).  Kept byte-identical; comments only.
# NOTE(review): at orig. 580, sf_scale is a float (set at orig. 575) but is
# compared against the *string* "100.0" — that comparison is always True,
# so the scaled= option is presumably always emitted; verify against the
# full source.
554 def revert_paratype(document):
555 """Revert ParaType font definitions to LaTeX"""
557 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
559 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
560 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
561 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
562 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
565 sfval = find_token(document.header, "\\font_sf_scale", 0)
567 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
569 sfscale = document.header[sfval].split()
572 document.header[sfval] = " ".join(sfscale)
575 sf_scale = float(val)
577 document.warning("Invalid font_sf_scale value: " + val)
580 if sf_scale != "100.0":
581 sfoption = "scaled=" + str(sf_scale / 100.0)
582 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
583 ttval = get_value(document.header, "\\font_tt_scale", 0)
588 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
589 if i1 != -1 and i2 != -1 and i3!= -1:
590 add_to_preamble(document, ["\\usepackage{paratype}"])
593 add_to_preamble(document, ["\\usepackage{PTSerif}"])
594 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
597 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
599 add_to_preamble(document, ["\\usepackage{PTSans}"])
600 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
603 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
605 add_to_preamble(document, ["\\usepackage{PTMono}"])
606 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    """Revert XCharter font definitions to LaTeX."""
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    if i == -1:
        return
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    if j != -1:
        options = "[osf]"
        document.header[j] = "\\font_osf false"
    else:
        options = ""
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    """Revert the landscape environment (Landscape module) to TeX code."""
    if not "landscape" in document.get_module_list():
        return
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
            continue
        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # Floating variant: wrap in afterpage so the rotation starts
            # on the next page break.
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
        else:
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ('global' -> 'auto')."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # Nothing to do when the old default is not present.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ('auto' -> 'global')."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Nothing to do when the new default is not present.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Remove nospellcheck font info param."""
    i = 0
    while True:
        i = find_token(document.body, '\\nospellcheck', i)
        if i == -1:
            return
        # Delete in place; i stays on the following line for the next search.
        del document.body[i]
# NOTE(review): numbered listing with elided lines — the loop scaffolding
# and the branches deleting the found 'placement' lines (orig. 694,
# 696-698, 700-701, 704, 706-711) are not visible.  Kept byte-identical;
# comments only.  Strips the 2.4-only float placement values 'class' and
# 'document' from header and Float insets.
692 def revert_floatpclass(document):
693 """Remove float placement params 'document' and 'class'"""
695 del_token(document.header, "\\float_placement class")
699 i = find_token(document.body, '\\begin_inset Float', i + 1)
702 j = find_end_of_inset(document.body, i)
703 k = find_token(document.body, 'placement class', i, j)
# Fallback: presumably deletes 'placement document' when 'placement class'
# is absent — confirm against full source.
705 k = find_token(document.body, 'placement document', i, j)
# NOTE(review): numbered listing with elided lines (loop scaffolding,
# guards and the deletion of the alignment line, orig. 714, 716-718,
# 720-721, 723, 725, 727-729, 733, 735, 737-738, 745, 749-750).  Kept
# byte-identical; comments only.  Replaces the native float 'alignment'
# parameter with an equivalent ERT alignment command inside the float.
712 def revert_floatalignment(document):
713 """Remove float alignment params"""
# Global default alignment; 'delete=True' also removes the header line.
715 galignment = get_value(document.header, "\\float_alignment", delete=True)
719 i = find_token(document.body, '\\begin_inset Float', i + 1)
722 j = find_end_of_inset(document.body, i)
724 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
726 k = find_token(document.body, 'alignment', i, j)
730 alignment = get_value(document.body, "alignment", k)
731 if alignment == "document":
732 alignment = galignment
734 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
736 document.warning("Can't find float layout!")
739 if alignment == "left":
740 alcmd = put_cmd_in_ert("\\raggedright{}")
741 elif alignment == "center":
742 alcmd = put_cmd_in_ert("\\centering{}")
743 elif alignment == "right":
744 alcmd = put_cmd_in_ert("\\raggedleft{}")
746 document.body[l+1:l+1] = alcmd
747 # There might be subfloats, so we do not want to move past
748 # the end of the inset.
# NOTE(review): numbered listing with elided lines (loop scaffolding, the
# command filter and the ERT command assembly, orig. 756-759, 761-762,
# 764, 766, 768, 770-771, 773-775, 779, 781, 783-784, 786, 788-789).
# Kept byte-identical; comments only.  Rewrites citation insets in the
# tufte classes as raw \cite-style ERT.
751 def revert_tuftecite(document):
752 """Revert \cite commands in tufte classes"""
754 tufte = ["tufte-book", "tufte-handout"]
755 if document.textclass not in tufte:
760 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
763 j = find_end_of_inset(document.body, i)
765 document.warning("Can't find end of citation inset at line %d!!" %(i))
767 k = find_token(document.body, "LatexCommand", i, j)
769 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
772 cmd = get_value(document.body, "LatexCommand", k)
776 pre = get_quoted_value(document.body, "before", i, j)
777 post = get_quoted_value(document.body, "after", i, j)
778 key = get_quoted_value(document.body, "key", i, j)
780 document.warning("Citation inset at line %d does not have a key!" %(i))
782 # Replace command with ERT
785 res += "[" + pre + "]"
787 res += "[" + post + "]"
790 res += "{" + key + "}"
791 document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        # Strip the varwidth flag from every <column ...> tag of this table.
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
# NOTE(review): numbered listing with many elided lines (loop scaffolding,
# the needarray/needvarwidth flags, the vcand bookkeeping and several
# branch bodies, e.g. orig. 815-819, 821-822, 824, 826-827, 829, 832,
# 840-843, 852, 854, 856, 858, 860, 863, 866, 873-874, 876, 880, 882, 884,
# 886-888, 891, 893, 895-899).  Kept byte-identical; comments only.
# Reverts standard table columns containing line breaks / multiple
# paragraphs to varwidth ("V") special columns and ERT line breaks.
813 def revert_vcolumns(document):
814 """Revert standard columns with line breaks etc."""
820 i = find_token(document.body, "\\begin_inset Tabular", i+1)
823 j = find_end_of_inset(document.body, i)
825 document.warning("Malformed LyX document: Could not find end of tabular.")
828 # Collect necessary column information
# Row/column counts come from the quoted attributes of the <features> line.
830 nrows = int(document.body[i+1].split('"')[3])
831 ncols = int(document.body[i+1].split('"')[5])
833 for k in range(ncols):
834 m = find_token(document.body, "<column", m)
835 width = get_option_value(document.body[m], 'width')
836 varwidth = get_option_value(document.body[m], 'varwidth')
837 alignment = get_option_value(document.body[m], 'alignment')
838 special = get_option_value(document.body[m], 'special')
839 col_info.append([width, varwidth, alignment, special, m])
844 for row in range(nrows):
845 for col in range(ncols):
846 m = find_token(document.body, "<cell", m)
847 multicolumn = get_option_value(document.body[m], 'multicolumn')
848 multirow = get_option_value(document.body[m], 'multirow')
849 width = get_option_value(document.body[m], 'width')
850 rotate = get_option_value(document.body[m], 'rotate')
851 # Check for: linebreaks, multipars, non-standard environments
853 endcell = find_token(document.body, "</cell>", begcell)
855 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
857 elif count_pars_in_inset(document.body, begcell + 2) > 1:
859 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
861 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
862 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
864 alignment = col_info[col][2]
865 col_line = col_info[col][4]
867 if alignment == "center":
868 vval = ">{\\centering}"
869 elif alignment == "left":
870 vval = ">{\\raggedright}"
871 elif alignment == "right":
872 vval = ">{\\raggedleft}"
875 vval += "V{\\linewidth}"
877 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
878 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
879 # with newlines, and we do not want that)
881 endcell = find_token(document.body, "</cell>", begcell)
883 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
885 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
889 nle = find_end_of_inset(document.body, nl)
890 del(document.body[nle:nle+1])
892 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
894 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
900 if needarray == True:
901 add_to_preamble(document, ["\\usepackage{array}"])
902 if needvarwidth == True:
903 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): numbered listing with many elided lines — most of the
# lyx->latex encoding table (orig. 940, 942-957, 959-964) and the loop /
# branch scaffolding (orig. 908-910, 912, 914, 916-918, 920-921, 923-925,
# 966-967, 969, 971, 973-974, 977-978, 980, 983-984, 986, 990, 992, 995,
# 997) are not visible.  Kept byte-identical; comments only.
# Reverts per-bibliography encodings: for biblatex via the bibencoding
# package option, otherwise by bracketing the inset with
# \bgroup\inputencoding{...} ... \egroup in ERT.
906 def revert_bibencoding(document):
907 """Revert bibliography encoding"""
911 i = find_token(document.header, "\\cite_engine", 0)
913 document.warning("Malformed document! Missing \\cite_engine")
915 engine = get_value(document.header, "\\cite_engine", i)
919 if engine in ["biblatex", "biblatex-natbib"]:
922 # Map lyx to latex encoding names
926 "armscii8" : "armscii8",
927 "iso8859-1" : "latin1",
928 "iso8859-2" : "latin2",
929 "iso8859-3" : "latin3",
930 "iso8859-4" : "latin4",
931 "iso8859-5" : "iso88595",
932 "iso8859-6" : "8859-6",
933 "iso8859-7" : "iso-8859-7",
934 "iso8859-8" : "8859-8",
935 "iso8859-9" : "latin5",
936 "iso8859-13" : "latin7",
937 "iso8859-15" : "latin9",
938 "iso8859-16" : "latin10",
939 "applemac" : "applemac",
941 "cp437de" : "cp437de",
958 "utf8-platex" : "utf8",
965 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
968 j = find_end_of_inset(document.body, i)
970 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
972 encoding = get_quoted_value(document.body, "encoding", i, j)
975 # remove encoding line
976 k = find_token(document.body, "encoding", i, j)
979 if encoding == "default":
981 # Re-find inset end line
982 j = find_end_of_inset(document.body, i)
985 h = find_token(document.header, "\\biblio_options", 0)
987 biblio_options = get_value(document.header, "\\biblio_options", h)
988 if not "bibencoding" in biblio_options:
989 document.header[h] += ",bibencoding=%s" % encodings[encoding]
991 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
993 # this should not happen
994 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# Insert a fresh \biblio_options line just before \biblatex_bibstyle.
996 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex: switch encoding locally around the inset via ERT.
998 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
999 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    """Separate vcs Info inset from buffer Info inset."""
    # Old buffer-info argument name -> new vcs-info argument name.
    types = {
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        "vcs-time" : "time",
        "vcs-date" : "date",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in list(types.keys()):
            continue
        # Rewrite the inset as the dedicated vcs type.
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    """Merge vcs Info inset to buffer Info inset."""
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
            continue
        # Fold the vcs inset back into a buffer inset with a vcs- prefix.
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    """Convert abbreviated revisions to regular revisions."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Older formats know only the full revision argument.
        if( argv == "revision-abbrev" ):
            document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    The inset is replaced by the current date (or, for "fixdate", the
    stored ISO date) rendered according to the main language's format.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.

    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    dateformats = {
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        # was "%d de %B %de %Y": stray "%" before the literal "de"
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        # was "%A, %d de %B %de %Y": stray "%" before the literal "de"
        "spanish-mexico" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"]
    }

    types = ["date", "fixdate", "moddate"]
    lang = get_value(document.header, "\\language")
    if lang == "":
        document.warning("Malformed LyX document! No \\language header found!")
        return
    if lang not in dateformats:
        # robustness: avoid a KeyError below for unlisted languages
        document.warning("Unknown language `%s' in date info reversion!" % lang)
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isodate = ""
        dte = date.today()
        if tpv == "fixdate":
            # "fixdate" carries the stored date after an "@" separator
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                argv = datecomps[0]
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                if m:
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            # was dte.isodate(): date objects have no such method (AttributeError)
            result = dte.isoformat()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
        else:
            # Translate a custom Qt date format string to strftime syntax.
            # MMMM is the *full* month name (%B, was wrongly %b).
            fmt = argv.replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # keep the character preceding a lone "d" (the old pattern
            # '[^\'%]d' swallowed it)
            fmt = re.sub(r"([^'%])d", r"\1%d", fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    The inset is replaced by the current (or, for "fixtime", the stored)
    time rendered according to the main language's format.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime"]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # robustness: avoid a KeyError below for unlisted languages
        document.warning("Unknown language `%s' in time info reversion!" % lang)
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # "fixtime" carries the stored time after an "@" separator
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #     tme = time.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate a custom Qt time format string to strftime syntax
            # in a single pass. The old chained str.replace calls corrupted
            # their own output (e.g. "HH" -> "%H" -> "%%H").
            qtmap = {
                "HH": "%H", "H": "%H", "hh": "%I", "h": "%I",
                "mm": "%M", "m": "%M", "ss": "%S", "s": "%S",
                "zzz": "%f", "z": "%f", "t": "%Z",
                "AP": "%p", "ap": "%p", "A": "%p", "a": "%p",
            }
            fmt = re.sub(r"HH|hh|mm|ss|zzz|AP|ap|[HhmsztAa]",
                         lambda mo: qtmap[mo.group(0)], argv)
            fmt = fmt.replace("'", "")
            # was dte.strftime(fmt): "dte" is undefined here (NameError);
            # the formatted value is the time object
            result = tme.strftime(fmt)
        # was "= result": assigning a plain string to a list slice splices
        # it character by character
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name.

    Older formats know only the "name" argument; "name-noext" is mapped
    back onto it.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n Info inset to text.

    The localized GUI string is inserted verbatim, with accelerator
    markup stripped.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # was "= argv": assigning a plain string to a list slice splices
        # it character by character; the replacement must be one line
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code.

    The argument inset content is moved to an ERT inset at the start of
    the containing paragraph.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]  # start of the paragraph's content
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to text."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded for now (current layout format)
        # was "= \"69\"": assigning a plain string to a list slice splices
        # it character by character ("6" and "9" as separate lines)
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # was line.lstrip('\\lang '): lstrip strips a *character set*,
            # which also eats leading l/a/n/g characters of the language
            # name itself (e.g. "galician" -> "ician")
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # swap via a NUL sentinel so "(" and ")" do not collide
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed."""
    # Swapping parentheses is an involution, so the converter does the
    # reverting as well; this alias only keeps the convert/revert
    # naming convention.
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # Delegate to the generic language reverter: no babel name, the
    # polyglossia name is "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT."""

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # Load the soul package if any of the insets is actually used
    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlighting additionally needs the color package
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove tablestyle params."""
    i = find_token(document.header, "\\tablestyle")
    if i != -1:
        del document.header[i]
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings.

    For biblatex(-natbib) documents, per-file encodings stored in the
    bibtex inset's "file_encodings" field are reverted: each bibfile is
    emitted as an \\addbibresource preamble line (with a bibencoding
    option where known), the inset is wrapped in a note, and an ERT
    \\printbibliography is inserted.
    """
    # NOTE(review): several physical lines are missing from this chunk
    # (the "encodings = {" opener and some cp85x/cp86x entries, the
    # loop scaffolding and the -1 guards); the code below is kept
    # verbatim — confirm against the full file before editing.
    i = find_token(document.header, "\\cite_engine", 0)
    # presumably inside an "if i == -1:" guard in the full file
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # Only biblatex documents carry per-file encodings
    if engine in ["biblatex", "biblatex-natbib"]:
        # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
        document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        # NOTE(review): this rebinding shadows the encoding-map dict
        # above if both share the name "encodings" — verify in full file
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # "file_encodings" is a tab-separated list of "bibfile encoding" pairs
        enclist = encodings.split("\t")
        ppp = pp.split(" ", 1)
        encmap[ppp[0]] = ppp[1]
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
        pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming."""

    # FIXME: Revert to TeX code?
    # compiled once: the pattern is loop-invariant
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
1712 r'### Inserted by lyx2lyx (ruby inset) ###',
1713 r'InsetLayout Flex:Ruby',
1714 r' LyxType charstyle',
1715 r' LatexType command',
1719 r' HTMLInnerTag rb',
1720 r' HTMLInnerAttr ""',
1722 r' LabelString "Ruby"',
1723 r' Decoration Conglomerate',
1725 r' \ifdefined\kanjiskip',
1726 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1727 r' \else \ifdefined\luatexversion',
1728 r' \usepackage{luatexja-ruby}',
1729 r' \else \ifdefined\XeTeXversion',
1730 r' \usepackage{ruby}%',
1732 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1734 r' Argument post:1',
1735 r' LabelString "ruby text"',
1736 r' MenuString "Ruby Text|R"',
1737 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1738 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition."""
    # Only add the module if the document actually carried the local
    # layout copy (del_local_layout reports whether it removed it).
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition."""
    # del_module reports whether the module was present; only then does
    # the document need the local layout copy.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    elif lang == "japanese-cjk":
        # mutually exclusive with the branch above
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """Replace lineno setting with user-preamble code."""

    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    """Replace user-preamble code with native lineno support."""
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        # the \usepackage line is expected directly above \linenumbers
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del document.preamble[i-1:i+1]
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    """Convert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if val == "":
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if val == "auto-legacy" or val == "latin9":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
        if k == -1:
            document.header.insert(i, "\\options latin9")
        else:
            document.header[k] += ",latin9"
def revert_aaencoding(document):
    """Revert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if val == "":
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    # NOTE(review): condition reconstructed — presumably utf8 documents
    # need the explicit class option on reversion; confirm in full file
    if val == "utf8":
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            document.header.insert(i, "\\options utf8")
        else:
            document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    if document.language in new_languages:
        used_languages = set((document.language, ))
    else:
        used_languages = set()
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        val = get_value(document.body, "\\lang", i)
        if val in new_languages:
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
1895 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1896 r'InsetLayout Flex:Glosse',
1898 r' LabelString "Gloss (old version)"',
1899 r' MenuString "Gloss (old version)"',
1900 r' LatexType environment',
1901 r' LatexName linggloss',
1902 r' Decoration minimalistic',
1907 r' CustomPars false',
1908 r' ForcePlain true',
1909 r' ParbreakIsNewline true',
1910 r' FreeSpacing true',
1911 r' Requires covington',
1914 r' \@ifundefined{linggloss}{%',
1915 r' \newenvironment{linggloss}[2][]{',
1916 r' \def\glosstr{\glt #1}%',
1918 r' {\glosstr\glend}}{}',
1921 r' ResetsFont true',
1923 r' Decoration conglomerate',
1924 r' LabelString "Translation"',
1925 r' MenuString "Glosse Translation|s"',
1926 r' Tooltip "Add a translation for the glosse"',
1931 glosss_inset_def = [
1932 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1933 r'InsetLayout Flex:Tri-Glosse',
1935 r' LabelString "Tri-Gloss (old version)"',
1936 r' MenuString "Tri-Gloss (old version)"',
1937 r' LatexType environment',
1938 r' LatexName lingglosss',
1939 r' Decoration minimalistic',
1944 r' CustomPars false',
1945 r' ForcePlain true',
1946 r' ParbreakIsNewline true',
1947 r' FreeSpacing true',
1949 r' Requires covington',
1952 r' \@ifundefined{lingglosss}{%',
1953 r' \newenvironment{lingglosss}[2][]{',
1954 r' \def\glosstr{\glt #1}%',
1956 r' {\glosstr\glend}}{}',
1958 r' ResetsFont true',
1960 r' Decoration conglomerate',
1961 r' LabelString "Translation"',
1962 r' MenuString "Glosse Translation|s"',
1963 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # Each deprecated inset gets its local layout copy only if it is
    # actually used in the document body.
    inset_layouts = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for token, layout in inset_layouts:
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout)
# Backward conversion: replace the new "Interlinear Gloss (2/3 Lines)" flex
# insets with raw ERT calling the covington \gloss / \trigloss commands, and
# drop the compatibility local layouts added by convert_linggloss().
# NOTE(review): several guard/assignment lines (e.g. "if i == -1", the
# \gloss vs. \trigloss cmd assignment) are not visible in this view.
1975 def revert_linggloss(document):
1976 " Revert to old ling gloss definitions "
1977 if not "linguistics" in document.get_module_list():
1979 document.del_local_layout(gloss_inset_def)
1980 document.del_local_layout(glosss_inset_def)
1983 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1984 for glosse in glosses:
1987 i = find_token(document.body, glosse, i+1)
1990 j = find_end_of_inset(document.body, i)
1992 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): extract its plain-layout content.
1995 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1996 endarg = find_end_of_inset(document.body, arg)
1999 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2000 if argbeginPlain == -1:
2001 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2003 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2004 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2006 # remove Arg insets and paragraph, if it only contains this inset
2007 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2008 del document.body[arg - 1 : endarg + 4]
2010 del document.body[arg : endarg + 1]
# Mandatory arguments post:1 .. post:3 (gloss lines / translation).
2012 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2013 endarg = find_end_of_inset(document.body, arg)
2016 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2017 if argbeginPlain == -1:
2018 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2020 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2021 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2023 # remove Arg insets and paragraph, if it only contains this inset
2024 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2025 del document.body[arg - 1 : endarg + 4]
2027 del document.body[arg : endarg + 1]
2029 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2030 endarg = find_end_of_inset(document.body, arg)
2033 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2034 if argbeginPlain == -1:
2035 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2037 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2038 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2040 # remove Arg insets and paragraph, if it only contains this inset
2041 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2042 del document.body[arg - 1 : endarg + 4]
2044 del document.body[arg : endarg + 1]
2046 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2047 endarg = find_end_of_inset(document.body, arg)
2050 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2051 if argbeginPlain == -1:
2052 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2054 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2055 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2057 # remove Arg insets and paragraph, if it only contains this inset
2058 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2059 del document.body[arg - 1 : endarg + 4]
2061 del document.body[arg : endarg + 1]
# Rebuild the inset as ERT: \cmd[opt]{line1}{line2}[{line3}] — the third
# brace group only for the 3-line (\trigloss) variant.
2064 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2067 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2068 endInset = find_end_of_inset(document.body, i)
2069 endPlain = find_end_of_layout(document.body, beginPlain)
2070 precontent = put_cmd_in_ert(cmd)
2071 if len(optargcontent) > 0:
2072 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2073 precontent += put_cmd_in_ert("{")
2075 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2076 if cmd == "\\trigloss":
2077 postcontent += put_cmd_in_ert("}{") + marg3content
2078 postcontent += put_cmd_in_ert("}")
# Splice post/pre content in back-to-front order so earlier indices stay valid.
2080 document.body[endPlain:endInset + 1] = postcontent
2081 document.body[beginPlain + 1:beginPlain] = precontent
2082 del document.body[i : beginPlain + 1]
2084 document.append_local_layout("Requires covington")
# Backward conversion: rewrite Subexample layouts that carry an optional
# argument into explicit ERT \begin{subexamples}[opt] ... \end{subexamples}
# with \item markers, since older formats cannot represent the argument.
2089 def revert_subexarg(document):
2090 " Revert linguistic subexamples with argument to ERT "
2092 if not "linguistics" in document.get_module_list():
2098 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2101 j = find_end_of_layout(document.body, i)
2103 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2106 # check for consecutive layouts
2107 k = find_token(document.body, "\\begin_layout", j)
2108 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2110 j = find_end_of_layout(document.body, k)
2112 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2115 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2119 endarg = find_end_of_inset(document.body, arg)
2121 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2122 if argbeginPlain == -1:
2123 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2125 argendPlain = find_end_of_inset(document.body, argbeginPlain)
# Argument content may itself contain insets, hence the lyx2latex pass.
2126 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2128 # remove Arg insets and paragraph, if it only contains this inset
2129 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2130 del document.body[arg - 1 : endarg + 4]
2132 del document.body[arg : endarg + 1]
2134 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2136 # re-find end of layout
2137 j = find_end_of_layout(document.body, i)
2139 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2142 # check for consecutive layouts
2143 k = find_token(document.body, "\\begin_layout", j)
2144 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample paragraph becomes a Standard paragraph
# starting with an ERT \item.
2146 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2147 j = find_end_of_layout(document.body, k)
2149 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2152 endev = put_cmd_in_ert("\\end{subexamples}")
2154 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2155 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2156 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2158 document.append_local_layout("Requires covington")
# Backward conversion: replace the DRS (Discourse Representation Structure)
# flex insets of the linguistics module with ERT calling the corresponding
# commands from the LaTeX "drs" package.  For each inset, up to two
# pre-arguments (Argument 1/2) and four post-arguments (post:1..post:4) are
# extracted, the Argument insets are removed, and the inset content is
# wrapped in \cmd{...}{...} ERT.
# NOTE(review): the per-inset cmd assignments in the if/elif chain near the
# end are not visible in this view — confirm against the full file.
2162 def revert_drs(document):
2163 " Revert DRS insets (linguistics) to ERT "
2165 if not "linguistics" in document.get_module_list():
2169 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2170 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2171 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2172 "\\begin_inset Flex SDRS"]
2176 i = find_token(document.body, drs, i+1)
2179 j = find_end_of_inset(document.body, i)
2181 document.warning("Malformed LyX document: Can't find end of DRS inset")
2184 # Check for arguments
2185 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2186 endarg = find_end_of_inset(document.body, arg)
2189 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2190 if argbeginPlain == -1:
2191 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2193 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2194 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2196 # remove Arg insets and paragraph, if it only contains this inset
2197 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2198 del document.body[arg - 1 : endarg + 4]
2200 del document.body[arg : endarg + 1]
# Deleting shifts indices, so the inset end is re-found before each argument.
2203 j = find_end_of_inset(document.body, i)
2205 document.warning("Malformed LyX document: Can't find end of DRS inset")
2208 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2209 endarg = find_end_of_inset(document.body, arg)
2212 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2213 if argbeginPlain == -1:
2214 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2216 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2217 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2219 # remove Arg insets and paragraph, if it only contains this inset
2220 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2221 del document.body[arg - 1 : endarg + 4]
2223 del document.body[arg : endarg + 1]
2226 j = find_end_of_inset(document.body, i)
2228 document.warning("Malformed LyX document: Can't find end of DRS inset")
2231 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2232 endarg = find_end_of_inset(document.body, arg)
2233 postarg1content = []
2235 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2236 if argbeginPlain == -1:
2237 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2239 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2240 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2242 # remove Arg insets and paragraph, if it only contains this inset
2243 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2244 del document.body[arg - 1 : endarg + 4]
2246 del document.body[arg : endarg + 1]
2249 j = find_end_of_inset(document.body, i)
2251 document.warning("Malformed LyX document: Can't find end of DRS inset")
2254 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2255 endarg = find_end_of_inset(document.body, arg)
2256 postarg2content = []
2258 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2259 if argbeginPlain == -1:
2260 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2262 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2263 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2265 # remove Arg insets and paragraph, if it only contains this inset
2266 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2267 del document.body[arg - 1 : endarg + 4]
2269 del document.body[arg : endarg + 1]
2272 j = find_end_of_inset(document.body, i)
2274 document.warning("Malformed LyX document: Can't find end of DRS inset")
2277 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2278 endarg = find_end_of_inset(document.body, arg)
2279 postarg3content = []
2281 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2282 if argbeginPlain == -1:
2283 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2285 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2286 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2288 # remove Arg insets and paragraph, if it only contains this inset
2289 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2290 del document.body[arg - 1 : endarg + 4]
2292 del document.body[arg : endarg + 1]
2295 j = find_end_of_inset(document.body, i)
2297 document.warning("Malformed LyX document: Can't find end of DRS inset")
2300 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2301 endarg = find_end_of_inset(document.body, arg)
2302 postarg4content = []
2304 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2305 if argbeginPlain == -1:
2306 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2308 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2309 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2311 # remove Arg insets and paragraph, if it only contains this inset
2312 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2313 del document.body[arg - 1 : endarg + 4]
2315 del document.body[arg : endarg + 1]
2317 # The respective LaTeX command
2319 if drs == "\\begin_inset Flex DRS*":
2321 elif drs == "\\begin_inset Flex IfThen-DRS":
2323 elif drs == "\\begin_inset Flex Cond-DRS":
2325 elif drs == "\\begin_inset Flex QDRS":
2327 elif drs == "\\begin_inset Flex NegDRS":
2329 elif drs == "\\begin_inset Flex SDRS":
2332 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2333 endInset = find_end_of_inset(document.body, i)
2334 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2335 precontent = put_cmd_in_ert(cmd)
2336 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# \sdrs takes a second pre-argument; all others only one.
2337 if drs == "\\begin_inset Flex SDRS":
2338 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2339 precontent += put_cmd_in_ert("{")
2342 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2343 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2344 if cmd == "\\condrs" or cmd == "\\qdrs":
2345 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2347 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2349 postcontent = put_cmd_in_ert("}")
# Splice post/pre content in back-to-front order so earlier indices stay valid.
2351 document.body[endPlain:endInset + 1] = postcontent
2352 document.body[beginPlain + 1:beginPlain] = precontent
2353 del document.body[i : beginPlain + 1]
2355 document.append_local_layout("Provides covington 1")
2356 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Backward conversion: with non-TeX fonts plus the babel language package,
# move the roman/sans/typewriter font selections (and sf/tt scaling, OSF)
# from the LyX header into \babelfont preamble commands, resetting the
# header fonts to "default".  Only applies when \use_non_tex_fonts is true
# and \language_package is babel.
2362 def revert_babelfont(document):
2363 " Reverts the use of \\babelfont to user preamble "
2365 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2368 i = find_token(document.header, '\\language_package', 0)
2370 document.warning("Malformed LyX document: Missing \\language_package.")
2372 if get_value(document.header, "\\language_package", 0) != "babel":
2375 # check font settings
2377 roman = sans = typew = "default"
2379 sf_scale = tt_scale = 100.0
2381 j = find_token(document.header, "\\font_roman", 0)
2383 document.warning("Malformed LyX document: Missing \\font_roman.")
2385 # We need to use this regex since split() does not handle quote protection
2386 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2387 roman = romanfont[2].strip('"')
2388 romanfont[2] = '"default"'
2389 document.header[j] = " ".join(romanfont)
2391 j = find_token(document.header, "\\font_sans", 0)
2393 document.warning("Malformed LyX document: Missing \\font_sans.")
2395 # We need to use this regex since split() does not handle quote protection
2396 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2397 sans = sansfont[2].strip('"')
2398 sansfont[2] = '"default"'
2399 document.header[j] = " ".join(sansfont)
2401 j = find_token(document.header, "\\font_typewriter", 0)
2403 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2405 # We need to use this regex since split() does not handle quote protection
2406 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2407 typew = ttfont[2].strip('"')
2408 ttfont[2] = '"default"'
2409 document.header[j] = " ".join(ttfont)
2411 i = find_token(document.header, "\\font_osf", 0)
2413 document.warning("Malformed LyX document: Missing \\font_osf.")
2415 osf = str2bool(get_value(document.header, "\\font_osf", i))
2417 j = find_token(document.header, "\\font_sf_scale", 0)
2419 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2421 sfscale = document.header[j].split()
2424 document.header[j] = " ".join(sfscale)
2427 sf_scale = float(val)
2429 document.warning("Invalid font_sf_scale value: " + val)
2431 j = find_token(document.header, "\\font_tt_scale", 0)
2433 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2435 ttscale = document.header[j].split()
2438 document.header[j] = " ".join(ttscale)
2441 tt_scale = float(val)
2443 document.warning("Invalid font_tt_scale value: " + val)
2445 # set preamble stuff
2446 pretext = ['%% This document must be processed with xelatex or lualatex!']
2447 pretext.append('\\AtBeginDocument{%')
2448 if roman != "default":
2449 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2450 if sans != "default":
2451 sf = '\\babelfont{sf}['
2452 if sf_scale != 100.0:
2453 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2454 sf += 'Mapping=tex-text]{' + sans + '}'
2456 if typew != "default":
2457 tw = '\\babelfont{tt}'
2458 if tt_scale != 100.0:
2459 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2460 tw += '{' + typew + '}'
2463 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2465 insert_to_preamble(document, pretext)
# Backward conversion: when the roman font is "minionpro" with extra
# \font_roman_opts, revert it to a \usepackage[opts]{MinionPro} preamble
# line (adding "lf" when OSF is off — per the visible osf handling) and
# reset the header's roman font to "default".  TeX-fonts documents only.
2468 def revert_minionpro(document):
2469 " Revert native MinionPro font definition (with extra options) to LaTeX "
2471 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2474 regexp = re.compile(r'(\\font_roman_opts)')
2475 x = find_re(document.header, regexp, 0)
2479 # We need to use this regex since split() does not handle quote protection
2480 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2481 opts = romanopts[1].strip('"')
2483 i = find_token(document.header, "\\font_roman", 0)
2485 document.warning("Malformed LyX document: Missing \\font_roman.")
2488 # We need to use this regex since split() does not handle quote protection
2489 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2490 roman = romanfont[1].strip('"')
2491 if roman != "minionpro":
2493 romanfont[1] = '"default"'
2494 document.header[i] = " ".join(romanfont)
2496 j = find_token(document.header, "\\font_osf true", 0)
2499 preamble = "\\usepackage["
2501 document.header[j] = "\\font_osf false"
2505 preamble += "]{MinionPro}"
2506 add_to_preamble(document, [preamble])
2507 del document.header[x]
# Backward conversion: remove the \font_*_opts header params and express the
# font choices in the preamble instead — \babelfont{rm|sf|tt}[opts]{font}
# when using non-TeX fonts with babel, \setmainfont/\setsansfont/\setmonofont
# otherwise.  Sans/typewriter scaling is folded into a Scale=0.xx option.
2510 def revert_font_opts(document):
2511 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2513 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2514 Babel = (get_value(document.header, "\\language_package") == "babel")
# Roman font and its options.
2517 regexp = re.compile(r'(\\font_roman_opts)')
2518 i = find_re(document.header, regexp, 0)
2520 # We need to use this regex since split() does not handle quote protection
2521 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2522 opts = romanopts[1].strip('"')
2523 del document.header[i]
2525 regexp = re.compile(r'(\\font_roman)')
2526 i = find_re(document.header, regexp, 0)
2528 # We need to use this regex since split() does not handle quote protection
2529 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2530 font = romanfont[2].strip('"')
2531 romanfont[2] = '"default"'
2532 document.header[i] = " ".join(romanfont)
2533 if font != "default":
2535 preamble = "\\babelfont{rm}["
2537 preamble = "\\setmainfont["
2540 preamble += "Mapping=tex-text]{"
2543 add_to_preamble(document, [preamble])
# Sans font, its options and scale.
2546 regexp = re.compile(r'(\\font_sans_opts)')
2547 i = find_re(document.header, regexp, 0)
2550 # We need to use this regex since split() does not handle quote protection
2551 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2552 opts = sfopts[1].strip('"')
2553 del document.header[i]
2555 regexp = re.compile(r'(\\font_sf_scale)')
2556 i = find_re(document.header, regexp, 0)
2558 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2559 regexp = re.compile(r'(\\font_sans)')
2560 i = find_re(document.header, regexp, 0)
2562 # We need to use this regex since split() does not handle quote protection
2563 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2564 font = sffont[2].strip('"')
2565 sffont[2] = '"default"'
2566 document.header[i] = " ".join(sffont)
2567 if font != "default":
2569 preamble = "\\babelfont{sf}["
2571 preamble = "\\setsansfont["
2575 preamble += "Scale=0."
2576 preamble += scaleval
2578 preamble += "Mapping=tex-text]{"
2581 add_to_preamble(document, [preamble])
# Typewriter font, its options and scale.
2584 regexp = re.compile(r'(\\font_typewriter_opts)')
2585 i = find_re(document.header, regexp, 0)
2588 # We need to use this regex since split() does not handle quote protection
2589 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2590 opts = ttopts[1].strip('"')
2591 del document.header[i]
2593 regexp = re.compile(r'(\\font_tt_scale)')
2594 i = find_re(document.header, regexp, 0)
2596 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2597 regexp = re.compile(r'(\\font_typewriter)')
2598 i = find_re(document.header, regexp, 0)
2600 # We need to use this regex since split() does not handle quote protection
2601 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2602 font = ttfont[2].strip('"')
2603 ttfont[2] = '"default"'
2604 document.header[i] = " ".join(ttfont)
2605 if font != "default":
2607 preamble = "\\babelfont{tt}["
2609 preamble = "\\setmonofont["
2613 preamble += "Scale=0."
2614 preamble += scaleval
2616 preamble += "Mapping=tex-text]{"
2619 add_to_preamble(document, [preamble])
# Backward conversion: when roman=NotoSerif-TLF and sans/typewriter both use
# the "default" Noto setup, revert the whole family — including extra
# \font_roman_opts and OSF — to a single \usepackage[opts]{noto} preamble
# line and reset the header fonts.  TeX-fonts documents only.
2622 def revert_plainNotoFonts_xopts(document):
2623 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2625 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2629 y = find_token(document.header, "\\font_osf true", 0)
2633 regexp = re.compile(r'(\\font_roman_opts)')
2634 x = find_re(document.header, regexp, 0)
# Nothing to do when neither extra options nor OSF are set.
2635 if x == -1 and not osf:
2640 # We need to use this regex since split() does not handle quote protection
2641 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2642 opts = romanopts[1].strip('"')
2648 i = find_token(document.header, "\\font_roman", 0)
2652 # We need to use this regex since split() does not handle quote protection
2653 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2654 roman = romanfont[1].strip('"')
2655 if roman != "NotoSerif-TLF":
2658 j = find_token(document.header, "\\font_sans", 0)
2662 # We need to use this regex since split() does not handle quote protection
2663 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2664 sf = sffont[1].strip('"')
2668 j = find_token(document.header, "\\font_typewriter", 0)
2672 # We need to use this regex since split() does not handle quote protection
2673 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2674 tt = ttfont[1].strip('"')
2678 # So we have noto as "complete font"
2679 romanfont[1] = '"default"'
2680 document.header[i] = " ".join(romanfont)
2682 preamble = "\\usepackage["
2684 preamble += "]{noto}"
2685 add_to_preamble(document, [preamble])
2687 document.header[y] = "\\font_osf false"
2689 del document.header[x]
# Backward conversion: revert the extended Noto font variants (with extra
# options) via the shared font-mapping machinery, then emit the collected
# packages/options to the preamble.  TeX-fonts documents only.
2692 def revert_notoFonts_xopts(document):
2693 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2695 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2699 fm = createFontMapping(['Noto'])
2700 if revert_fonts(document, fm, fontmap, True):
2701 add_preamble_fonts(document, fontmap)
# Backward conversion: revert native IBM Plex font variants (with extra
# options) via the shared font-mapping machinery.  TeX-fonts documents only.
2704 def revert_IBMFonts_xopts(document):
2705 " Revert native IBM font definition (with extra options) to LaTeX "
2707 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2711 fm = createFontMapping(['IBM'])
2713 if revert_fonts(document, fm, fontmap, True):
2714 add_preamble_fonts(document, fontmap)
# Backward conversion: revert native Adobe Source font variants (with extra
# options) via the shared font-mapping machinery.  TeX-fonts documents only.
2717 def revert_AdobeFonts_xopts(document):
2718 " Revert native Adobe font definition (with extra options) to LaTeX "
2720 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2724 fm = createFontMapping(['Adobe'])
2726 if revert_fonts(document, fm, fontmap, True):
2727 add_preamble_fonts(document, fontmap)
# Forward conversion: split the single \font_osf header param into the new
# per-family params \font_roman_osf / \font_sans_osf / \font_typewriter_osf.
# For TeX fonts the sans/typewriter flags depend on whether the selected
# font is in the known OSF-capable lists (osfsf / osftt).
2730 def convert_osf(document):
2731 " Convert \\font_osf param to new format "
2733 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2735 i = find_token(document.header, '\\font_osf', 0)
2737 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font families that support old-style figures for sans / typewriter.
2740 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2741 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2743 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2744 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2747 document.header.insert(i, "\\font_sans_osf false")
2748 document.header.insert(i + 1, "\\font_typewriter_osf false")
2752 x = find_token(document.header, "\\font_sans", 0)
2754 document.warning("Malformed LyX document: Missing \\font_sans.")
2756 # We need to use this regex since split() does not handle quote protection
2757 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2758 sf = sffont[1].strip('"')
2760 document.header.insert(i, "\\font_sans_osf true")
2762 document.header.insert(i, "\\font_sans_osf false")
2764 x = find_token(document.header, "\\font_typewriter", 0)
2766 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2768 # We need to use this regex since split() does not handle quote protection
2769 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2770 tt = ttfont[1].strip('"')
2772 document.header.insert(i + 1, "\\font_typewriter_osf true")
2774 document.header.insert(i + 1, "\\font_typewriter_osf false")
2777 document.header.insert(i, "\\font_sans_osf false")
2778 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Backward conversion: collapse the per-family \font_roman_osf /
# \font_sans_osf / \font_typewriter_osf params back into a single
# \font_osf (true if any family had OSF enabled, per the |= accumulation).
2781 def revert_osf(document):
2782 " Revert \\font_*_osf params "
2784 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2786 i = find_token(document.header, '\\font_roman_osf', 0)
2788 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2791 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2792 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2794 i = find_token(document.header, '\\font_sans_osf', 0)
2796 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
# NOTE(review): sans value is read with plain '=' here, while typewriter
# below uses '|=' — confirm against the full file whether the sans flag is
# meant to be folded in as well.
2799 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2800 del document.header[i]
2802 i = find_token(document.header, '\\font_typewriter_osf', 0)
2804 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2807 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2808 del document.header[i]
2811 i = find_token(document.header, '\\font_osf', 0)
2813 document.warning("Malformed LyX document: Missing \\font_osf.")
2815 document.header[i] = "\\font_osf true"
# Backward conversion: revert TeX fonts that carry extra \font_*_opts to
# explicit \usepackage lines.  Handles biolinum (sans, with OSF and scaling)
# and a fixed list of roman families (rmfonts), mapping some to their LaTeX
# package names (utopia/palatino/times/xcharter) and folding OSF and
# small-caps options into the package options.  TeX-fonts documents only.
2818 def revert_texfontopts(document):
2819 " Revert native TeX font definitions (with extra options) to LaTeX "
2821 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2824 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2826 # First the sf (biolinum only)
2827 regexp = re.compile(r'(\\font_sans_opts)')
2828 x = find_re(document.header, regexp, 0)
2830 # We need to use this regex since split() does not handle quote protection
2831 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2832 opts = sfopts[1].strip('"')
2833 i = find_token(document.header, "\\font_sans", 0)
2835 document.warning("Malformed LyX document: Missing \\font_sans.")
2837 # We need to use this regex since split() does not handle quote protection
2838 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2839 sans = sffont[1].strip('"')
2840 if sans == "biolinum":
2842 sffont[1] = '"default"'
2843 document.header[i] = " ".join(sffont)
2845 j = find_token(document.header, "\\font_sans_osf true", 0)
2848 k = find_token(document.header, "\\font_sf_scale", 0)
2850 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2852 sfscale = document.header[k].split()
2855 document.header[k] = " ".join(sfscale)
2858 sf_scale = float(val)
2860 document.warning("Invalid font_sf_scale value: " + val)
2861 preamble = "\\usepackage["
2863 document.header[j] = "\\font_sans_osf false"
2865 if sf_scale != 100.0:
2866 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2868 preamble += "]{biolinum}"
2869 add_to_preamble(document, [preamble])
2870 del document.header[x]
# Then the roman fonts with extra options.
2872 regexp = re.compile(r'(\\font_roman_opts)')
2873 x = find_re(document.header, regexp, 0)
2877 # We need to use this regex since split() does not handle quote protection
2878 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2879 opts = romanopts[1].strip('"')
2881 i = find_token(document.header, "\\font_roman", 0)
2883 document.warning("Malformed LyX document: Missing \\font_roman.")
2886 # We need to use this regex since split() does not handle quote protection
2887 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2888 roman = romanfont[1].strip('"')
2889 if not roman in rmfonts:
2891 romanfont[1] = '"default"'
2892 document.header[i] = " ".join(romanfont)
# Some LyX font names differ from their LaTeX package names.
2894 if roman == "utopia":
2896 elif roman == "palatino":
2897 package = "mathpazo"
2898 elif roman == "times":
2899 package = "mathptmx"
2900 elif roman == "xcharter":
2901 package = "XCharter"
2903 j = find_token(document.header, "\\font_roman_osf true", 0)
2905 if roman == "cochineal":
2906 osf = "proportional,osf,"
2907 elif roman == "utopia":
2909 elif roman == "garamondx":
2911 elif roman == "libertine":
2913 elif roman == "palatino":
2915 elif roman == "xcharter":
2917 document.header[j] = "\\font_roman_osf false"
2918 k = find_token(document.header, "\\font_sc true", 0)
2920 if roman == "utopia":
2922 if roman == "palatino" and osf == "":
2924 document.header[k] = "\\font_sc false"
2925 preamble = "\\usepackage["
2928 preamble += "]{" + package + "}"
2929 add_to_preamble(document, [preamble])
2930 del document.header[x]
# Forward conversion: detect a preamble-based Cantarell setup (TeX fonts
# only) via the shared font mapping and convert it to native header params;
# "oldstyle" marks the OSF-related option naming used by this family.
2933 def convert_CantarellFont(document):
2934 " Handle Cantarell font definition to LaTeX "
2936 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2937 fm = createFontMapping(['Cantarell'])
2938 convert_fonts(document, fm, "oldstyle")
# Backward conversion: revert a native Cantarell font selection back to
# preamble packages via the shared font-mapping machinery (TeX fonts only).
2940 def revert_CantarellFont(document):
2941 " Revert native Cantarell font definition to LaTeX "
2943 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2945 fm = createFontMapping(['Cantarell'])
2946 if revert_fonts(document, fm, fontmap, False, True):
2947 add_preamble_fonts(document, fontmap)
# Forward conversion: detect a preamble-based Chivo setup (TeX fonts only)
# via the shared font mapping and convert it to native header params.
2949 def convert_ChivoFont(document):
2950 " Handle Chivo font definition to LaTeX "
2952 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2953 fm = createFontMapping(['Chivo'])
2954 convert_fonts(document, fm, "oldstyle")
# Backward conversion: revert a native Chivo font selection back to
# preamble packages via the shared font-mapping machinery (TeX fonts only).
2956 def revert_ChivoFont(document):
2957 " Revert native Chivo font definition to LaTeX "
2959 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2961 fm = createFontMapping(['Chivo'])
2962 if revert_fonts(document, fm, fontmap, False, True):
2963 add_preamble_fonts(document, fontmap)
# Forward conversion: detect a preamble-based Fira setup (TeX fonts only)
# via the shared font mapping; "lf" marks the lining-figures option naming
# used by this family (unlike Cantarell/Chivo's "oldstyle").
2966 def convert_FiraFont(document):
2967 " Handle Fira font definition to LaTeX "
2969 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2970 fm = createFontMapping(['Fira'])
2971 convert_fonts(document, fm, "lf")
# Backward conversion: revert a native Fira font selection back to preamble
# packages via the shared font-mapping machinery (TeX fonts only).
2973 def revert_FiraFont(document):
2974 " Revert native Fira font definition to LaTeX "
2976 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2978 fm = createFontMapping(['Fira'])
2979 if revert_fonts(document, fm, fontmap, False, True):
2980 add_preamble_fonts(document, fontmap)
def convert_Semibolds(document):
    """Move semibold options to extraopts.

    Older documents encode the IBM Plex semibold weight in the font family
    name itself (e.g. "IBMPlexSerifSemibold").  For each of the roman, sans
    and typewriter fonts, rename the family to its base name and, for TeX
    fonts, record the weight as a "semibold" entry in the matching
    \\font_*_opts header (creating that header right before
    \\font_sf_scale / \\font_tt_scale when it does not exist yet).

    BUGFIX: the typewriter branch previously appended ``sfopts[1]`` (the
    *sans* options) to \\font_typewriter_opts; it now uses ``ttopts[1]``.
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "IBMPlexSerifSemibold":
            romanfont[1] = '"IBMPlexSerif"'
            document.header[i] = " ".join(romanfont)

            # Keep the explicit == False: get_bool_value may return None
            # when the header is absent, and None must not take this branch.
            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_roman_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_roman_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "IBMPlexSansSemibold":
            sffont[1] = '"IBMPlexSans"'
            document.header[i] = " ".join(sffont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_sans_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_sf_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_sf_scale")
                    else:
                        document.header.insert(fo, "\\font_sans_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "IBMPlexMonoSemibold":
            ttfont[1] = '"IBMPlexMono"'
            document.header[i] = " ".join(ttfont)

            if NonTeXFonts == False:
                regexp = re.compile(r'(\\font_typewriter_opts)')
                x = find_re(document.header, regexp, 0)
                if x == -1:
                    # Sensible place to insert tag
                    fo = find_token(document.header, "\\font_tt_scale")
                    if fo == -1:
                        document.warning("Malformed LyX document! Missing \\font_tt_scale")
                    else:
                        document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
                else:
                    # We need to use this regex since split() does not handle quote protection
                    ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                    # Was sfopts[1] (sans options) — a copy/paste thinko.
                    document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
3067 def convert_NotoRegulars(document):
3068 " Merge the diverse Noto regular font variants into single names "
# Rename the old per-shape Noto family names (…-TLF) to the unified
# *Regular names for each of roman, sans and typewriter.
# NOTE(review): numbered paste — the `if i == -1:` / `else:` guard lines
# between each find_token and its warning are missing from this view.
3070 i = find_token(document.header, "\\font_roman", 0)
3072 document.warning("Malformed LyX document: Missing \\font_roman.")
3074 # We need to use this regex since split() does not handle quote protection
3075 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3076 roman = romanfont[1].strip('"')
3077 if roman == "NotoSerif-TLF":
3078 romanfont[1] = '"NotoSerifRegular"'
3079 document.header[i] = " ".join(romanfont)
3081 i = find_token(document.header, "\\font_sans", 0)
3083 document.warning("Malformed LyX document: Missing \\font_sans.")
3085 # We need to use this regex since split() does not handle quote protection
3086 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3087 sf = sffont[1].strip('"')
3088 if sf == "NotoSans-TLF":
3089 sffont[1] = '"NotoSansRegular"'
3090 document.header[i] = " ".join(sffont)
3092 i = find_token(document.header, "\\font_typewriter", 0)
3094 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3096 # We need to use this regex since split() does not handle quote protection
3097 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3098 tt = ttfont[1].strip('"')
3099 if tt == "NotoMono-TLF":
3100 ttfont[1] = '"NotoMonoRegular"'
3101 document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    """Convert a LaTeX CrimsonPro font setup to LyX's native support.

    Skipped entirely for documents that use non-TeX (system) fonts.
    """
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
3111 def revert_CrimsonProFont(document):
3112 " Revert native CrimsonPro font definition to LaTeX "
# Only relevant for TeX fonts; non-TeX-font documents keep the native form.
3114 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
# NOTE(review): numbered paste — original line 3115 (presumably
# `fontmap = dict()`) is missing from this view; confirm against full file.
3116 fm = createFontMapping(['CrimsonPro'])
3117 if revert_fonts(document, fm, fontmap, False, True):
# Emit the collected font packages with their options into the user preamble.
3118 add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA classes to class options.

    Only applies to memoir and scr* (KOMA) classes when geometry is not in
    use.  A non-default \\papersize is reset to "default" and the size is
    appended to (or inserted as) the \\options header instead.

    BUGFIX: the class guard compared ``document.textclass[:2]`` (two chars)
    against "scr" (three chars), which can never match, so KOMA documents
    were never reverted.  It now slices three characters, matching the
    sibling convert_pagesizes.
    """

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry active, page size is handled by geometry, not options.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes LyX 2.3 already knows; these need no reverting.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # Nothing to do for sizes the old format supports natively.
        return

    document.header[i] = "\\papersize default"

    # Move the size into the document class options.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No options header yet: create one right before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert class-option page sizes in memoir and KOMA to native sizes.

    Only applies to memoir and scr* (KOMA) classes.  If the document uses a
    non-default \\papersize without geometry, re-enable geometry so the old
    rendering is maintained.

    BUGFIX: the final assignment wrote to the hard-coded index
    ``document.header[1]`` instead of ``document.header[i]`` — the line
    actually located by find_token — clobbering an unrelated header line.
    """

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # Already using geometry: nothing to maintain.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes that were already supported before this format change.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # Nothing to do for previously supported sizes.
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        document.header[i] = "\\use_geometry true"
3181 def revert_komafontsizes(document):
3182 " Revert new font sizes in KOMA to options "
# Only KOMA (scr*) classes are affected.
# NOTE(review): numbered paste — `return` / `if` guard lines between the
# visible statements are missing from this view.
3184 if document.textclass[:3] != "scr":
3187 i = find_token(document.header, "\\paperfontsize", 0)
3189 document.warning("Malformed LyX document! Missing \\paperfontsize header.")
# Sizes LyX 2.3 supports natively; anything else becomes a fontsize= option.
3192 defsizes = ["default", "10", "11", "12"]
3194 val = get_value(document.header, "\\paperfontsize", i)
3199 document.header[i] = "\\paperfontsize default"
# Move the size into the class options as fontsize=<val>.
3201 fsize = "fontsize=" + val
3203 i = find_token(document.header, "\\options", 0)
3205 i = find_token(document.header, "\\textclass", 0)
3207 document.warning("Malformed LyX document! Missing \\textclass header.")
3209 document.header.insert(i, "\\options " + fsize)
3211 document.header[i] = document.header[i] + "," + fsize
3214 def revert_dupqualicites(document):
3215 " Revert qualified citation list commands with duplicate keys to ERT "
3217 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3218 # we need to revert those with multiple uses of the same key.
# Only biblatex engines support qualified citation lists at all.
# NOTE(review): numbered paste — loop headers, guards and `return`s between
# the visible statements are missing from this view; read the embedded line
# numbers' gaps accordingly.
3222 i = find_token(document.header, "\\cite_engine", 0)
3224 document.warning("Malformed document! Missing \\cite_engine")
3226 engine = get_value(document.header, "\\cite_engine", i)
3228 if not engine in ["biblatex", "biblatex-natbib"]:
3231 # Citation insets that support qualified lists, with their LaTeX code
3235 "citet" : "textcites",
3236 "Citet" : "Textcites",
3237 "citep" : "parencites",
3238 "Citep" : "Parencites",
3239 "Footcite" : "Smartcites",
3240 "footcite" : "smartcites",
3241 "Autocite" : "Autocites",
3242 "autocite" : "autocites",
# Scan the body for citation insets and revert the affected ones.
3247 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3250 j = find_end_of_inset(document.body, i)
3252 document.warning("Can't find end of citation inset at line %d!!" %(i))
3256 k = find_token(document.body, "LatexCommand", i, j)
3258 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3262 cmd = get_value(document.body, "LatexCommand", k)
3263 if not cmd in list(ql_citations.keys()):
# Only insets that actually carry pre-/post-text lists are qualified lists.
3267 pres = find_token(document.body, "pretextlist", i, j)
3268 posts = find_token(document.body, "posttextlist", i, j)
3269 if pres == -1 and posts == -1:
3274 key = get_quoted_value(document.body, "key", i, j)
3276 document.warning("Citation inset at line %d does not have a key!" %(i))
# Only revert when the key list actually contains duplicates.
3280 keys = key.split(",")
3281 ukeys = list(set(keys))
3282 if len(keys) == len(ukeys):
3287 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3288 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3290 pre = get_quoted_value(document.body, "before", i, j)
3291 post = get_quoted_value(document.body, "after", i, j)
# pre-/post-text lists are tab-separated "key value" entries; build maps
# key -> tab-joined values so repeated keys keep all their texts in order.
3292 prelist = pretexts.split("\t")
3295 ppp = pp.split(" ", 1)
3301 if ppp[0] in premap:
3302 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3304 premap[ppp[0]] = val
3305 postlist = posttexts.split("\t")
3309 ppp = pp.split(" ", 1)
3315 if ppp[0] in postmap:
3316 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3318 postmap[ppp[0]] = val
3319 # Replace known new commands with ERT
# Parentheses inside the global pre/post note must be brace-protected,
# since ( ) delimit the notes in the multicite syntax.
3320 if "(" in pre or ")" in pre:
3321 pre = "{" + pre + "}"
3322 if "(" in post or ")" in post:
3323 post = "{" + post + "}"
3324 res = "\\" + ql_citations[cmd]
3326 res += "(" + pre + ")"
3328 res += "(" + post + ")"
# Emit one [prenote][postnote]{key} group per key occurrence, consuming
# the per-key texts in order.
3332 if premap.get(kk, "") != "":
3333 akeys = premap[kk].split("\t", 1)
3336 res += "[" + akey + "]"
3338 premap[kk] = "\t".join(akeys[1:])
3341 if postmap.get(kk, "") != "":
3342 akeys = postmap[kk].split("\t", 1)
3345 res += "[" + akey + "]"
3347 postmap[kk] = "\t".join(akeys[1:])
3350 elif premap.get(kk, "") != "":
3352 res += "{" + kk + "}"
3353 document.body[i:j+1] = put_cmd_in_ert([res])
3356 def convert_pagesizenames(document):
3357 " Convert LyX page size names by dropping the 'paper' suffix "
# NOTE(review): numbered paste — the guard line (presumably
# `if val in oldnames:`) between lines 3367 and 3369 is missing here.
3359 i = find_token(document.header, "\\papersize", 0)
3361 document.warning("Malformed LyX document! Missing \\papersize header.")
# Old-format names that carry the redundant "paper" suffix.
3363 oldnames = ["letterpaper", "legalpaper", "executivepaper", \
3364 "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
3365 "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
3366 "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
3367 val = get_value(document.header, "\\papersize", i)
3369 newval = val.replace("paper", "")
3370 document.header[i] = "\\papersize " + newval
3372 def revert_pagesizenames(document):
3373 " Revert LyX page size names by restoring the 'paper' suffix "
# Inverse of convert_pagesizenames.
# NOTE(review): numbered paste — the guard line (presumably
# `if val in newnames:`) between lines 3383 and 3385 is missing here.
3375 i = find_token(document.header, "\\papersize", 0)
3377 document.warning("Malformed LyX document! Missing \\papersize header.")
3379 newnames = ["letter", "legal", "executive", \
3380 "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
3381 "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
3382 "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
3383 val = get_value(document.header, "\\papersize", i)
3385 newval = val + "paper"
3386 document.header[i] = "\\papersize " + newval
3389 def revert_theendnotes(document):
3390 " Reverts native support of \\theendnotes to TeX-code "
# Only applies when one of the endnotes modules is loaded.
3392 if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
# Replace every endnote FloatList inset with raw \theendnotes ERT.
3397 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3400 j = find_end_of_inset(document.body, i)
3402 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3405 document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
3408 def revert_enotez(document):
3409 " Reverts native support of enotez package to TeX-code "
# Only applies when one of the enotez modules is loaded.
3411 if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
# Turn Endnote flex insets back into raw \endnote ERT.
3415 if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
3418 revert_flex_inset(document.body, "Endnote", "\\endnote")
# Replace every endnote FloatList inset with raw \printendnotes ERT.
3422 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3425 j = find_end_of_inset(document.body, i)
3427 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3431 document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
# Load the package manually and drop the now-redundant modules.
3434 add_to_preamble(document, ["\\usepackage{enotez}"])
3435 document.del_module("enotez")
3436 document.del_module("foottoenotez")
3439 def revert_memoir_endnotes(document):
3440 " Reverts native support of memoir endnotes to TeX-code "
3442 if document.textclass != "memoir":
# memoir's own command is \pagenote; if an endnotes module is also loaded,
# the flex insets belong to that module and revert to \endnote instead.
3445 encommand = "\\pagenote"
3446 modules = document.get_module_list()
3447 if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
3448 encommand = "\\endnote"
3450 revert_flex_inset(document.body, "Endnote", encommand)
# Replace pagenote FloatList insets with \printpagenotes[*] ERT.
3454 i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
3457 j = find_end_of_inset(document.body, i)
3459 document.warning("Malformed LyX document: Can't find end of FloatList inset")
# The starred variant prints the notes without clearing them.
3462 if document.body[i] == "\\begin_inset FloatList pagenote*":
3463 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
3465 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
3466 add_to_preamble(document, ["\\makepagenote"])
3469 def revert_totalheight(document):
3470 " Reverts graphics height parameter from totalheight to height "
# For every Graphics inset: the old format's `height` meant LaTeX's
# totalheight, so an existing `height` moves into `special totalheight=`
# and any `height=` already in `special` becomes the plain height line.
# NOTE(review): numbered paste — loop headers, guards and `continue`s
# between the visible statements are missing from this view.
3474 i = find_token(document.body, "\\begin_inset Graphics", i)
3477 j = find_end_of_inset(document.body, i)
3479 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Extract any height= entry from the inset's `special` parameter.
3483 rx = re.compile(r'\s*special\s*(\S+)$')
3484 k = find_re(document.body, rx, i, j)
3488 m = rx.match(document.body[k])
3490 special = m.group(1)
3491 mspecial = special.split(',')
3492 for spc in mspecial:
3493 if spc[:7] == "height=":
3494 oldheight = spc.split('=')[1]
# NOTE(review): removing from mspecial while iterating over it — works only
# because at most one height= entry is expected; verify that assumption.
3495 mspecial.remove(spc)
3497 if len(mspecial) > 0:
3498 special = ",".join(mspecial)
# Move a plain `height` line into `special totalheight=`.
3502 rx = re.compile(r'(\s*height\s*)(\S+)$')
3503 kk = find_re(document.body, rx, i, j)
3505 m = rx.match(document.body[kk])
3511 val = val + "," + special
3512 document.body[k] = "\tspecial " + "totalheight=" + val
3514 document.body.insert(kk, "\tspecial totalheight=" + val)
3516 document.body[kk] = m.group(1) + oldheight
3518 del document.body[kk]
3519 elif oldheight != "":
3521 document.body[k] = "\tspecial " + special
3522 document.body.insert(k, "\theight " + oldheight)
3524 document.body[k] = "\theight " + oldheight
3528 def convert_totalheight(document):
3529 " Converts graphics height parameter from totalheight to height "
# Mirror image of revert_totalheight: `special totalheight=` becomes the
# plain `height` line, and a pre-existing `height` moves into
# `special height=`.
# NOTE(review): numbered paste — loop headers, guards and `continue`s
# between the visible statements are missing from this view.
3533 i = find_token(document.body, "\\begin_inset Graphics", i)
3536 j = find_end_of_inset(document.body, i)
3538 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# Extract any totalheight= entry from the inset's `special` parameter.
3542 rx = re.compile(r'\s*special\s*(\S+)$')
3543 k = find_re(document.body, rx, i, j)
3547 m = rx.match(document.body[k])
3549 special = m.group(1)
3550 mspecial = special.split(',')
3551 for spc in mspecial:
3552 if spc[:12] == "totalheight=":
3553 newheight = spc.split('=')[1]
# NOTE(review): removing from mspecial while iterating — assumes at most
# one totalheight= entry; verify.
3554 mspecial.remove(spc)
3556 if len(mspecial) > 0:
3557 special = ",".join(mspecial)
# Move a plain `height` line into `special height=`.
3561 rx = re.compile(r'(\s*height\s*)(\S+)$')
3562 kk = find_re(document.body, rx, i, j)
3564 m = rx.match(document.body[kk])
3570 val = val + "," + special
3571 document.body[k] = "\tspecial " + "height=" + val
3573 document.body.insert(kk + 1, "\tspecial height=" + val)
3575 document.body[kk] = m.group(1) + newheight
3577 del document.body[kk]
3578 elif newheight != "":
3579 document.body.insert(k, "\theight " + newheight)
3583 def convert_changebars(document):
3584 " Converts the changebars module to native solution "
3586 if not "changebars" in document.get_module_list():
# Anchor the new header right before \output_changes.
3589 i = find_token(document.header, "\\output_changes", 0)
3591 document.warning("Malformed LyX document! Missing \\output_changes header.")
# Even on a malformed header the module must still be dropped.
3592 document.del_module("changebars")
3595 document.header.insert(i, "\\change_bars true")
3596 document.del_module("changebars")
3599 def revert_changebars(document):
3600 " Converts native changebar param to module "
3602 i = find_token(document.header, "\\change_bars", 0)
3604 document.warning("Malformed LyX document! Missing \\change_bars header.")
3607 val = get_value(document.header, "\\change_bars", i)
# Re-add the module only when change bars were actually enabled.
3610 document.add_module("changebars")
# The native header line is removed unconditionally.
3612 del document.header[i]
3615 def convert_postpone_fragile(document):
3616 " Adds false \\postpone_fragile_content buffer param "
# Anchor the new header right before \output_changes.
3618 i = find_token(document.header, "\\output_changes", 0)
3620 document.warning("Malformed LyX document! Missing \\output_changes header.")
3622 # Set this to false for old documents (see #2154)
3623 document.header.insert(i, "\\postpone_fragile_content false")
3626 def revert_postpone_fragile(document):
3627 " Remove \\postpone_fragile_content buffer param "
3629 i = find_token(document.header, "\\postpone_fragile_content", 0)
3631 document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
# The old format has no equivalent; the header line is simply dropped.
3634 del document.header[i]
3637 def revert_colrow_tracking(document):
3638 " Remove change tag from tabular columns/rows "
# Iterate over all tabular insets in the body.
3641 i = find_token(document.body, "\\begin_inset Tabular", i+1)
3644 j = find_end_of_inset(document.body, i+1)
3646 document.warning("Malformed LyX document: Could not find end of tabular.")
# Strip the change="…" attribute from every <column …> and <row …> tag,
# since the old format cannot track column/row changes.
3648 for k in range(i, j):
3649 m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
3651 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3652 m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
3654 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3657 def convert_counter_maintenance(document):
3658 " Convert \\maintain_unincluded_children buffer param from boolean value to tristate "
3660 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3662 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
# true -> "strict", false -> "no" (the third state is new in this format).
3665 val = get_value(document.header, "\\maintain_unincluded_children", i)
3668 document.header[i] = "\\maintain_unincluded_children strict"
3670 document.header[i] = "\\maintain_unincluded_children no"
3673 def revert_counter_maintenance(document):
3674 " Revert \\maintain_unincluded_children buffer param to previous boolean value "
3676 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3678 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
# "no" collapses to false; any other state maps to true.
3681 val = get_value(document.header, "\\maintain_unincluded_children", i)
3684 document.header[i] = "\\maintain_unincluded_children false"
3686 document.header[i] = "\\maintain_unincluded_children true"
3689 def revert_counter_inset(document):
3690 " Revert counter inset to ERT, where possible"
# Maps counter name -> 1 for counters needing a LyXSave<name> companion
# counter declared in the preamble (for save/restore).
3692 needed_counters = {}
# NOTE(review): numbered paste — loop headers, guards and `continue`s
# between the visible statements are missing from this view.
3694 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3697 j = find_end_of_inset(document.body, i)
3699 document.warning("Can't find end of counter inset at line %d!" % i)
# lyxonly insets only affect LyX's internal counters, not LaTeX output.
3702 lyx = get_quoted_value(document.body, "lyxonly", i, j)
3704 # there is nothing we can do to affect the LyX counters
3705 document.body[i : j + 1] = []
3708 cnt = get_quoted_value(document.body, "counter", i, j)
3710 document.warning("No counter given for inset at line %d!" % i)
3714 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
3715 document.warning(cmd)
# Dispatch on the counter command: set / addto / reset / save / restore.
3718 val = get_quoted_value(document.body, "value", i, j)
3720 document.warning("Can't convert counter inset at line %d!" % i)
3722 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3723 elif cmd == "addto":
3724 val = get_quoted_value(document.body, "value", i, j)
3726 document.warning("Can't convert counter inset at line %d!" % i)
3728 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3729 elif cmd == "reset":
3730 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
# save/restore shuttle the value through an auxiliary LyXSave<cnt> counter.
3732 needed_counters[cnt] = 1
3733 savecnt = "LyXSave" + cnt
3734 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3735 elif cmd == "restore":
3736 needed_counters[cnt] = 1
3737 savecnt = "LyXSave" + cnt
3738 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
# NOTE(review): this warning formats `cnt` (the counter name) into a message
# about an unknown *command* — `cmd` looks intended; verify upstream.
3740 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
3743 document.body[i : j + 1] = ert
# Declare the auxiliary save counters once, in the preamble.
3748 for cnt in needed_counters:
3749 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3751 add_to_preamble(document, pretext)
# File-format conversion tables consumed by the lyx2lyx driver.
3758 supported_versions = ["2.4.0", "2.4"]
# Forward chain: each entry is [target_format, [conversion functions]].
# NOTE(review): numbered paste — the `convert = [` opening line and several
# entries are missing from this view.
3760 [545, [convert_lst_literalparam]],
3765 [550, [convert_fontenc]],
3772 [557, [convert_vcsinfo]],
3773 [558, [removeFrontMatterStyles]],
3776 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3780 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3781 [566, [convert_hebrew_parentheses]],
3787 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3788 [573, [convert_inputencoding_namechange]],
3789 [574, [convert_ruby_module, convert_utf8_japanese]],
3790 [575, [convert_lineno, convert_aaencoding]],
3792 [577, [convert_linggloss]],
3796 [581, [convert_osf]],
3797 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3798 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3800 [585, [convert_pagesizes]],
3802 [587, [convert_pagesizenames]],
3804 [589, [convert_totalheight]],
3805 [590, [convert_changebars]],
3806 [591, [convert_postpone_fragile]],
3808 [593, [convert_counter_maintenance]],
# Backward chain: runs newest-to-oldest; entry [N, fns] reverts format N+1
# back to N.
3812 revert = [[593, [revert_counter_inset]],
3813 [592, [revert_counter_maintenance]],
3814 [591, [revert_colrow_tracking]],
3815 [590, [revert_postpone_fragile]],
3816 [589, [revert_changebars]],
3817 [588, [revert_totalheight]],
3818 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
3819 [586, [revert_pagesizenames]],
3820 [585, [revert_dupqualicites]],
3821 [584, [revert_pagesizes,revert_komafontsizes]],
3822 [583, [revert_vcsinfo_rev_abbrev]],
3823 [582, [revert_ChivoFont,revert_CrimsonProFont]],
3824 [581, [revert_CantarellFont,revert_FiraFont]],
3825 [580, [revert_texfontopts,revert_osf]],
3826 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3827 [578, [revert_babelfont]],
3828 [577, [revert_drs]],
3829 [576, [revert_linggloss, revert_subexarg]],
3830 [575, [revert_new_languages]],
3831 [574, [revert_lineno, revert_aaencoding]],
3832 [573, [revert_ruby_module, revert_utf8_japanese]],
3833 [572, [revert_inputencoding_namechange]],
3834 [571, [revert_notoFonts]],
3835 [570, [revert_cmidruletrimming]],
3836 [569, [revert_bibfileencodings]],
3837 [568, [revert_tablestyle]],
3838 [567, [revert_soul]],
3839 [566, [revert_malayalam]],
3840 [565, [revert_hebrew_parentheses]],
3841 [564, [revert_AdobeFonts]],
3842 [563, [revert_lformatinfo]],
3843 [562, [revert_listpargs]],
3844 [561, [revert_l7ninfo]],
3845 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3846 [559, [revert_timeinfo, revert_namenoextinfo]],
3847 [558, [revert_dateinfo]],
3848 [557, [addFrontMatterStyles]],
3849 [556, [revert_vcsinfo]],
3850 [555, [revert_bibencoding]],
3851 [554, [revert_vcolumns]],
3852 [553, [revert_stretchcolumn]],
3853 [552, [revert_tuftecite]],
3854 [551, [revert_floatpclass, revert_floatalignment]],
3855 [550, [revert_nospellcheck]],
3856 [549, [revert_fontenc]],
3857 [548, []], # dummy format change
3858 [547, [revert_lscape]],
3859 [546, [revert_xcharter]],
3860 [545, [revert_paratype]],
3861 [544, [revert_lst_literalparam]]
3865 if __name__ == "__main__":