1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
# Emit one \usepackage line per collected font package into the user
# preamble, wrapping any gathered options in a [..] option list.
# NOTE(review): this numbered listing has gaps (50->53, 54->57); the loop
# header iterating over fontmap and the empty-options branch (xoption = "")
# appear to have been dropped — restore from the original file.
49 def add_preamble_fonts(document, fontmap):
50 " Add collected font-packages with their option to user-preamble"
53 if len(fontmap[pkg]) > 0:
# Join the package's collected options into a single [opt1,opt2] group.
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
# Build the canonical lookup key "pkg:opt1-opt2-..." from a package name
# and its option list; used consistently for font2pkgmap/pkg2fontmap.
61 def createkey(pkg, options):
63 return pkg + ':' + "-".join(options)
# Attribute initialisation of a per-font info record.
# NOTE(review): the enclosing class and __init__ header lines are missing
# from this listing (numbering jumps 63->67); presumably this is the body
# of a fontinfo-style class constructor — confirm against the full file.
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Derive the package key from package + options (set on missing lines 71-73).
79 self.pkgkey = createkey(self.package, self.options)
# Bidirectional font <-> package lookup tables of the font-mapping object.
# NOTE(review): the class and __init__ header lines are missing from this
# listing (numbering jumps 79->83) — restore from the original file.
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# Register a group of fonts (optionally with per-font options) that are all
# provided by the same LaTeX package, filling font2pkgmap/pkg2fontmap.
# NOTE(review): the listing has gaps (e.g. 99->102, 103->106, 108->112,
# 112->116): the loop over font_list, the option splitting, and the
# fallback when pkg is given are missing — restore from the original file.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 " Expand fontinfo mapping"
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
# When no explicit package is given, the font name doubles as the package.
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# Resolve a (package, options) pair back to the LyX font name it maps to.
# NOTE(review): `document.error` below references a name that is not a
# parameter or attribute of this method — looks like a latent NameError
# on the error path; confirm how `document` reaches this scope.
# NOTE(review): listing gaps (127->129, 131->133) hide the not-found
# return and the final return value.
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
# Sanity check: the reverse map must agree with the forward map.
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# Build the font-mapping table for the requested font families.  Each
# expandFontMapping call registers one family: fonts, LyX font slot,
# scale slot, LaTeX package, scale option name, osf option, osf default.
# NOTE(review): listing gaps (145->147, 147->149, 151->153, 181->183, …)
# hide the mapping-object construction, the branch headers for the
# DejaVu/IBM/Noto/Fira cases, and the final return — restore from the
# original file.
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
183 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
184 'FiraSansThin,thin', 'FiraSansLight,light',
185 'FiraSansExtralight,extralight',
186 'FiraSansUltralight,ultralight'],
# Fira uses lining figures by default, hence osfdef = "true".
187 "sans", "sf", "FiraSans", "scaled", "lf", "true")
188 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# Scan the user preamble for \usepackage lines of known font packages and
# translate them into native LyX header settings (\font_roman, \font_*_scale,
# \font_*_osf, \font_*_opts), deleting the preamble line on success.
# NOTE(review): listing has many gaps (198->201, 209->214, 219->227, …):
# counters, scale extraction, and several branch bodies are missing.
191 def convert_fonts(document, fm, osfoption = "osf"):
192 " Handle font definition (LaTeX preamble -> native) "
194 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
195 rscaleopt = re.compile(r'^scaled?=(.*)')
197 # Check whether we go beyond font option feature introduction
198 haveFontOpts = document.end_format > 580
201 while i < len(document.preamble):
202 i = find_re(document.preamble, rpkg, i+1)
205 mo = rpkg.search(document.preamble[i])
206 if mo == None or mo.group(2) == None:
# group(2) is the raw [..] option string; normalize and split on commas.
209 options = mo.group(2).replace(' ', '').split(",")
214 while o < len(options):
215 if options[o] == osfoption:
219 mo = rscaleopt.search(options[o])
227 if not pkg in fm.pkginmap:
232 # Try with name-option combination first
233 # (only one default option supported currently)
235 while o < len(options):
237 fn = fm.getfontname(pkg, [opt])
244 fn = fm.getfontname(pkg, [])
246 fn = fm.getfontname(pkg, options)
249 del document.preamble[i]
250 fontinfo = fm.font2pkgmap[fn]
251 if fontinfo.scaletype == None:
254 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
255 fontinfo.scaleval = oscale
256 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
257 if fontinfo.osfopt == None:
# NOTE(review): list.extend on a string appends its individual
# characters ('o','s','f'), not the whole option — looks like this
# should be options.append(osfoption); verify intended behavior.
258 options.extend(osfoption)
260 osf = find_token(document.header, "\\font_osf false")
261 osftag = "\\font_osf"
262 if osf == -1 and fontinfo.fonttype != "math":
263 # Try with newer format
264 osftag = "\\font_" + fontinfo.fonttype + "_osf"
265 osf = find_token(document.header, osftag + " false")
267 document.header[osf] = osftag + " true"
# Drop the marker comment lyx2lyx itself placed before the package line.
268 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
269 del document.preamble[i-1]
271 if fontscale != None:
272 j = find_token(document.header, fontscale, 0)
274 val = get_value(document.header, fontscale, j)
278 scale = "%03d" % int(float(oscale) * 100)
279 document.header[j] = fontscale + " " + scale + " " + vals[1]
280 ft = "\\font_" + fontinfo.fonttype
281 j = find_token(document.header, ft, 0)
283 val = get_value(document.header, ft, j)
284 words = val.split() # ! splits also values like '"DejaVu Sans"'
285 words[0] = '"' + fn + '"'
286 document.header[j] = ft + ' ' + ' '.join(words)
287 if haveFontOpts and fontinfo.fonttype != "math":
288 fotag = "\\font_" + fontinfo.fonttype + "_opts"
289 fo = find_token(document.header, fotag)
291 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
293 # Sensible place to insert tag
294 fo = find_token(document.header, "\\font_sf_scale")
296 document.warning("Malformed LyX document! Missing \\font_sf_scale")
298 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# Reverse of convert_fonts: scan the native \font_* header lines for fonts
# known to the mapping, reset them to "default", and collect the package
# plus its options into `fontmap` for add_preamble_fonts() to emit later.
# NOTE(review): listing gaps (310->312, 313->316, 327->330, 352->357, …)
# hide loop setup, oldval assignment, and the function's return value.
302 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
303 " Revert native font definition to LaTeX "
304 # fontlist := list of fonts created from the same package
305 # Empty package means that the font-name is the same as the package-name
306 # fontmap (key = package, val += found options) will be filled
307 # and used later in add_preamble_fonts() to be added to user-preamble
309 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
310 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
312 while i < len(document.header):
313 i = find_re(document.header, rfontscale, i+1)
316 mo = rfontscale.search(document.header[i])
319 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
320 val = get_value(document.header, ft, i)
321 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
322 font = words[0].strip('"') # TeX font name has no whitespace
323 if not font in fm.font2pkgmap:
325 fontinfo = fm.font2pkgmap[font]
326 val = fontinfo.package
327 if not val in fontmap:
# Optionally transfer any \font_*_opts value into the package options.
330 if OnlyWithXOpts or WithXOpts:
331 if ft == "\\font_math":
333 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
334 if ft == "\\font_sans":
335 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
336 elif ft == "\\font_typewriter":
337 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
338 x = find_re(document.header, regexp, 0)
339 if x == -1 and OnlyWithXOpts:
343 # We need to use this regex since split() does not handle quote protection
344 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
345 opts = xopts[1].strip('"').split(",")
346 fontmap[val].extend(opts)
347 del document.header[x]
348 words[0] = '"default"'
349 document.header[i] = ft + ' ' + ' '.join(words)
350 if fontinfo.scaleopt != None:
351 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
352 mo = rscales.search(xval)
357 # set correct scale option
358 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
359 if fontinfo.osfopt != None:
361 if fontinfo.osfdef == "true":
363 osf = find_token(document.header, "\\font_osf " + oldval)
364 if osf == -1 and ft != "\\font_math":
365 # Try with newer format
366 osftag = "\\font_roman_osf " + oldval
367 if ft == "\\font_sans":
368 osftag = "\\font_sans_osf " + oldval
369 elif ft == "\\font_typewriter":
370 osftag = "\\font_typewriter_osf " + oldval
371 osf = find_token(document.header, osftag)
373 fontmap[val].extend([fontinfo.osfopt])
374 if len(fontinfo.options) > 0:
375 fontmap[val].extend(fontinfo.options)
378 ###############################################################################
380 ### Conversion and reversion routines
382 ###############################################################################
# Rename header \inputencoding values to the 2.4 names:
# "auto" -> "auto-legacy", "default" -> "auto-legacy-plain".
384 def convert_inputencoding_namechange(document):
385 " Rename inputencoding settings. "
386 i = find_token(document.header, "\\inputencoding", 0)
389 s = document.header[i].replace("auto", "auto-legacy")
390 document.header[i] = s.replace("default", "auto-legacy-plain")
# Inverse of convert_inputencoding_namechange: map the 2.4 names back.
# The longer "auto-legacy-plain" is replaced first so the plain "auto-legacy"
# replacement afterwards cannot corrupt it.
392 def revert_inputencoding_namechange(document):
393 " Rename inputencoding settings. "
394 i = find_token(document.header, "\\inputencoding", 0)
397 s = document.header[i].replace("auto-legacy-plain", "default")
398 document.header[i] = s.replace("auto-legacy", "auto")
# Convert Noto preamble font loading to native settings; only applies
# when TeX fonts are in use (\use_non_tex_fonts false).
400 def convert_notoFonts(document):
401 " Handle Noto fonts definition to LaTeX "
403 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
404 fm = createFontMapping(['Noto'])
405 convert_fonts(document, fm)
# Revert native Noto settings to preamble code.
# NOTE(review): line 411 (fontmap initialisation) is missing from this
# listing — fontmap must be created before revert_fonts fills it.
407 def revert_notoFonts(document):
408 " Revert native Noto font definition to LaTeX "
410 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
412 fm = createFontMapping(['Noto'])
413 if revert_fonts(document, fm, fontmap):
414 add_preamble_fonts(document, fontmap)
# Convert DejaVu and IBM Plex preamble font loading to native settings;
# only applies when TeX fonts are in use.
416 def convert_latexFonts(document):
417 " Handle DejaVu and IBMPlex fonts definition to LaTeX "
419 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
420 fm = createFontMapping(['DejaVu', 'IBM'])
421 convert_fonts(document, fm)
# Revert native DejaVu/IBM Plex settings to preamble code.
# NOTE(review): the fontmap initialisation line is missing from this
# listing (numbering jumps 426->428).
423 def revert_latexFonts(document):
424 " Revert native DejaVu font definition to LaTeX "
426 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
428 fm = createFontMapping(['DejaVu', 'IBM'])
429 if revert_fonts(document, fm, fontmap):
430 add_preamble_fonts(document, fontmap)
# Convert Adobe Source Pro preamble font loading to native settings;
# only applies when TeX fonts are in use.
432 def convert_AdobeFonts(document):
433 " Handle Adobe Source fonts definition to LaTeX "
435 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
436 fm = createFontMapping(['Adobe'])
437 convert_fonts(document, fm)
# Revert native Adobe Source Pro settings to preamble code.
# NOTE(review): the fontmap initialisation line is missing from this
# listing (numbering jumps 442->444).
439 def revert_AdobeFonts(document):
440 " Revert Adobe Source font definition to LaTeX "
442 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
444 fm = createFontMapping(['Adobe'])
445 if revert_fonts(document, fm, fontmap):
446 add_preamble_fonts(document, fontmap)
# Delete every Begin/EndFrontmatter layout paragraph from the body,
# including the blank lines directly following each removed layout.
# NOTE(review): listing gaps (452->455, 455->458, 463->465, 465->467)
# hide loop setup, the not-found exits, and the j increment.
448 def removeFrontMatterStyles(document):
449 " Remove styles Begin/EndFrontmatter"
451 layouts = ['BeginFrontmatter', 'EndFrontmatter']
# Offset past the '\begin_layout ' prefix to read the layout name itself.
452 tokenend = len('\\begin_layout ')
455 i = find_token_exact(document.body, '\\begin_layout ', i+1)
458 layout = document.body[i][tokenend:].strip()
459 if layout not in layouts:
461 j = find_end_of_layout(document.body, i)
463 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Swallow trailing empty lines so no blank gap is left behind.
465 while document.body[j+1].strip() == '':
467 document.body[i:j+1] = []
# For elsarticle documents, wrap the run of front-matter layouts (Title,
# Author, Abstract, ...) in Begin/EndFrontmatter layout markers.
# NOTE(review): heavy listing gaps (475->477, 483->485, 485->488,
# 505->512, …): the inserted note text, loop bookkeeping of `first`,
# and several boundary computations are missing.
469 def addFrontMatterStyles(document):
470 " Use styles Begin/EndFrontmatter for elsarticle"
472 if document.textclass != "elsarticle":
# Local helper: insert a (Begin|End)Frontmatter layout at `line`,
# trimming surrounding blank lines so spacing stays tidy.
475 def insertFrontmatter(prefix, line):
477 while above > 0 and document.body[above-1].strip() == '':
480 while document.body[below].strip() == '':
482 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
483 '\\begin_inset Note Note',
485 '\\begin_layout Plain Layout',
488 '\\end_inset', '', '',
491 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
492 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
493 tokenend = len('\\begin_layout ')
497 i = find_token_exact(document.body, '\\begin_layout ', i+1)
500 layout = document.body[i][tokenend:].strip()
501 if layout not in layouts:
503 k = find_end_of_layout(document.body, i)
505 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# End marker goes after the last front-matter layout, Begin before the first.
512 insertFrontmatter('End', k+1)
513 insertFrontmatter('Begin', first)
# Add the new `literal "true"` parameter to every include-command inset,
# inserting it after the inset's existing parameter lines.
# NOTE(review): listing gaps (517->521, 526->528, 528->530) hide loop
# setup, the not-found exits, and the i increment within the scan.
516 def convert_lst_literalparam(document):
517 " Add param literal to include inset "
521 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
524 j = find_end_of_inset(document.body, i)
526 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip past the parameter lines to the first blank line inside the inset.
528 while i < j and document.body[i].strip() != '':
530 document.body.insert(i, 'literal "true"')
# Remove the `literal` parameter from every include-command inset
# (inverse of convert_lst_literalparam).
533 def revert_lst_literalparam(document):
534 " Remove param literal from include inset "
538 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
541 j = find_end_of_inset(document.body, i)
543 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
545 del_token(document.body, 'literal', i, j)
# Revert native ParaType font settings (PTSerif/PTSans/PTMono) to
# \usepackage preamble code, carrying over any sf/tt scaling as
# scaled=<factor> package options, then reset header fonts to "default".
# NOTE(review): many listing gaps (556->559, 566->569, 571->574,
# 577->582, 584->587, …) hide the val extraction, try/except around
# float(), option defaults, and the elif structure of the emit branches.
548 def revert_paratype(document):
549 " Revert ParaType font definitions to LaTeX "
551 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
553 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
554 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
555 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
556 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
559 sfval = find_token(document.header, "\\font_sf_scale", 0)
561 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
563 sfscale = document.header[sfval].split()
566 document.header[sfval] = " ".join(sfscale)
569 sf_scale = float(val)
571 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float (line 569) but is compared against
# the string "100.0" here, which is always unequal in Python — verify
# against the missing lines whether this is the intended test.
574 if sf_scale != "100.0":
575 sfoption = "scaled=" + str(sf_scale / 100.0)
576 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
577 ttval = get_value(document.header, "\\font_tt_scale", 0)
582 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: one combined paratype package suffices.
583 if i1 != -1 and i2 != -1 and i3!= -1:
584 add_to_preamble(document, ["\\usepackage{paratype}"])
587 add_to_preamble(document, ["\\usepackage{PTSerif}"])
588 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
591 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
593 add_to_preamble(document, ["\\usepackage{PTSans}"])
594 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
597 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
599 add_to_preamble(document, ["\\usepackage{PTMono}"])
600 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# Revert the native XCharter roman font setting to \usepackage{XCharter},
# converting a \font_osf true header into the package's osf option.
# NOTE(review): listing gaps (606->610, 617->620, 620->624) hide the
# not-found exit, osf option assembly, and the `options` default.
603 def revert_xcharter(document):
604 " Revert XCharter font definitions to LaTeX "
606 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
610 # replace unsupported font setting
611 document.header[i] = document.header[i].replace("xcharter", "default")
612 # no need for preamble code with system fonts
613 if get_bool_value(document.header, "\\use_non_tex_fonts"):
616 # transfer old style figures setting to package options
617 j = find_token(document.header, "\\font_osf true")
620 document.header[j] = "\\font_osf false"
624 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# Replace Flex Landscape insets (Landscape module) with raw
# \begin{landscape}...\end{landscape} ERT; the floating variant is
# additionally wrapped in \afterpage{...} and needs the afterpage package.
# NOTE(review): listing gaps (630->635, 640->643, 649->651) hide loop
# setup and the end of the else branch.
627 def revert_lscape(document):
628 " Reverts the landscape environment (Landscape module) to TeX-code "
630 if not "landscape" in document.get_module_list():
635 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
638 j = find_end_of_inset(document.body, i)
640 document.warning("Malformed LyX document: Can't find end of Landscape inset")
643 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
# Closing brace pairs with the \afterpage{ opened two lines below.
644 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
645 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
646 add_to_preamble(document, ["\\usepackage{afterpage}"])
648 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
649 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
651 add_to_preamble(document, ["\\usepackage{pdflscape}"])
# Rename the default \fontencoding value "global" to its 2.4 name "auto".
654 def convert_fontenc(document):
655 " Convert default fontenc setting "
657 i = find_token(document.header, "\\fontencoding global", 0)
661 document.header[i] = document.header[i].replace("global", "auto")
# Inverse of convert_fontenc: rename \fontencoding "auto" back to "global".
664 def revert_fontenc(document):
665 " Revert default fontenc setting "
667 i = find_token(document.header, "\\fontencoding auto", 0)
671 document.header[i] = document.header[i].replace("auto", "global")
# Strip every \nospellcheck font-info parameter from the body.
# NOTE(review): listing gaps (675->679, 679->685) hide the loop and the
# deletion of the found token.
674 def revert_nospellcheck(document):
675 " Remove nospellcheck font info param "
679 i = find_token(document.body, '\\nospellcheck', i)
# Remove the 2.4 float placement values "class" and "document": drop the
# header token and the corresponding per-float `placement` lines.
# NOTE(review): listing gaps (688->692, 696->698, 698->705) hide the loop
# setup and what is done once the placement lines are located.
685 def revert_floatpclass(document):
686 " Remove float placement params 'document' and 'class' "
688 del_token(document.header, "\\float_placement class")
692 i = find_token(document.body, '\\begin_inset Float', i+1)
695 j = find_end_of_inset(document.body, i)
# Placement must appear within the first two lines after the inset start.
696 k = find_token(document.body, 'placement class', i, i + 2)
698 k = find_token(document.body, 'placement document', i, i + 2)
# Remove per-float alignment parameters, translating them into raw LaTeX
# alignment commands (\raggedright / \centering / \raggedleft) inserted at
# the start of the float content.  "document" falls back to the global
# \float_alignment value, which is deleted from the header.
# NOTE(review): listing gaps (708->712, 719->723, 725->727, 737->739)
# hide loop setup, deletion of the alignment line, and the else default.
705 def revert_floatalignment(document):
706 " Remove float alignment params "
# delete=True removes \float_alignment from the header as a side effect.
708 galignment = get_value(document.header, "\\float_alignment", delete=True)
712 i = find_token(document.body, '\\begin_inset Float', i+1)
715 j = find_end_of_inset(document.body, i)
717 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
719 k = find_token(document.body, 'alignment', i, i+4)
723 alignment = get_value(document.body, "alignment", k)
724 if alignment == "document":
725 alignment = galignment
727 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
729 document.warning("Can't find float layout!")
732 if alignment == "left":
733 alcmd = put_cmd_in_ert("\\raggedright{}")
734 elif alignment == "center":
735 alcmd = put_cmd_in_ert("\\centering{}")
736 elif alignment == "right":
737 alcmd = put_cmd_in_ert("\\raggedleft{}")
739 document.body[l+1:l+1] = alcmd
# In tufte-book/tufte-handout documents, replace citation insets with raw
# ERT \cite-style commands, preserving the before/after texts as optional
# arguments and the key as the mandatory argument.
# NOTE(review): listing gaps (746->751, 763->767, 773->776, 778->781)
# hide loop setup, the cmd filter, and the initial `res` assembly.
742 def revert_tuftecite(document):
743 " Revert \\cite commands in tufte classes "
745 tufte = ["tufte-book", "tufte-handout"]
746 if document.textclass not in tufte:
751 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
754 j = find_end_of_inset(document.body, i)
756 document.warning("Can't find end of citation inset at line %d!!" %(i))
758 k = find_token(document.body, "LatexCommand", i, j)
760 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
763 cmd = get_value(document.body, "LatexCommand", k)
767 pre = get_quoted_value(document.body, "before", i, j)
768 post = get_quoted_value(document.body, "after", i, j)
769 key = get_quoted_value(document.body, "key", i, j)
771 document.warning("Citation inset at line %d does not have a key!" %(i))
773 # Replace command with ERT
776 res += "[" + pre + "]"
778 res += "[" + post + "]"
781 res += "{" + key + "}"
782 document.body[i:j+1] = put_cmd_in_ert([res])
# Strip the varwidth="true" attribute from every tabular column, turning
# tabularx/xltabular tables back into plain tables for older formats.
# NOTE(review): listing gaps (787->790, 793->795, 795->797) hide loop
# setup and the not-found exits.
786 def revert_stretchcolumn(document):
787 " We remove the column varwidth flags or everything else will become a mess. "
790 i = find_token(document.body, "\\begin_inset Tabular", i+1)
793 j = find_end_of_inset(document.body, i+1)
795 document.warning("Malformed LyX document: Could not find end of tabular.")
797 for k in range(i, j):
798 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
799 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
800 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# Revert standard table columns containing line breaks / multiple
# paragraphs to varwidth-based special columns (>{..}V{\linewidth}),
# converting Newline insets inside cells to ERT \\ or \linebreak{} so
# LyX < 2.4 does not auto-wrap them in parboxes.  Adds the array and
# varwidth packages to the preamble when used.
# NOTE(review): heavy listing gaps (804->810, 829->834, 841->843,
# 845->847, 884->890, …): loop counters, begcell computation, the
# needarray/needvarwidth flag setting, and vcand handling are missing.
803 def revert_vcolumns(document):
804 " Revert standard columns with line breaks etc. "
810 i = find_token(document.body, "\\begin_inset Tabular", i+1)
813 j = find_end_of_inset(document.body, i)
815 document.warning("Malformed LyX document: Could not find end of tabular.")
818 # Collect necessary column information
# Row/column counts live in the quoted attributes of the <features> line.
820 nrows = int(document.body[i+1].split('"')[3])
821 ncols = int(document.body[i+1].split('"')[5])
823 for k in range(ncols):
824 m = find_token(document.body, "<column", m)
825 width = get_option_value(document.body[m], 'width')
826 varwidth = get_option_value(document.body[m], 'varwidth')
827 alignment = get_option_value(document.body[m], 'alignment')
828 special = get_option_value(document.body[m], 'special')
829 col_info.append([width, varwidth, alignment, special, m])
834 for row in range(nrows):
835 for col in range(ncols):
836 m = find_token(document.body, "<cell", m)
837 multicolumn = get_option_value(document.body[m], 'multicolumn')
838 multirow = get_option_value(document.body[m], 'multirow')
839 width = get_option_value(document.body[m], 'width')
840 rotate = get_option_value(document.body[m], 'rotate')
841 # Check for: linebreaks, multipars, non-standard environments
843 endcell = find_token(document.body, "</cell>", begcell)
845 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
847 elif count_pars_in_inset(document.body, begcell + 2) > 1:
849 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
851 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
# Only plain columns (no width, varwidth or special set) are converted.
852 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
854 alignment = col_info[col][2]
855 col_line = col_info[col][4]
857 if alignment == "center":
858 vval = ">{\\centering}"
859 elif alignment == "left":
860 vval = ">{\\raggedright}"
861 elif alignment == "right":
862 vval = ">{\\raggedleft}"
865 vval += "V{\\linewidth}"
# Rewrite the <column ...> tag in place, appending the special attr.
867 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
868 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
869 # with newlines, and we do not want that)
871 endcell = find_token(document.body, "</cell>", begcell)
873 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
875 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
879 nle = find_end_of_inset(document.body, nl)
880 del(document.body[nle:nle+1])
882 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
884 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
890 if needarray == True:
891 add_to_preamble(document, ["\\usepackage{array}"])
892 if needvarwidth == True:
893 add_to_preamble(document, ["\\usepackage{varwidth}"])
# Revert per-bibliography encoding settings.  For biblatex engines the
# encoding becomes a bibencoding=... biblio option in the header; for
# bibtex the inset is wrapped in ERT
# \bgroup\inputencoding{...} ... \egroup.
# NOTE(review): large listing gaps (905->909, 912->916, 929->931,
# 931->948, 948->955, 969->971, …): most of the encodings dict entries,
# loop setup, and several branch lines are missing — the dict below is
# only a fragment of the full LyX->LaTeX encoding-name table.
896 def revert_bibencoding(document):
897 " Revert bibliography encoding "
901 i = find_token(document.header, "\\cite_engine", 0)
903 document.warning("Malformed document! Missing \\cite_engine")
905 engine = get_value(document.header, "\\cite_engine", i)
909 if engine in ["biblatex", "biblatex-natbib"]:
912 # Map lyx to latex encoding names
916 "armscii8" : "armscii8",
917 "iso8859-1" : "latin1",
918 "iso8859-2" : "latin2",
919 "iso8859-3" : "latin3",
920 "iso8859-4" : "latin4",
921 "iso8859-5" : "iso88595",
922 "iso8859-6" : "8859-6",
923 "iso8859-7" : "iso-8859-7",
924 "iso8859-8" : "8859-8",
925 "iso8859-9" : "latin5",
926 "iso8859-13" : "latin7",
927 "iso8859-15" : "latin9",
928 "iso8859-16" : "latin10",
929 "applemac" : "applemac",
931 "cp437de" : "cp437de",
948 "utf8-platex" : "utf8",
955 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
958 j = find_end_of_inset(document.body, i)
960 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
962 encoding = get_quoted_value(document.body, "encoding", i, j)
965 # remove encoding line
966 k = find_token(document.body, "encoding", i, j)
969 if encoding == "default":
971 # Re-find inset end line
972 j = find_end_of_inset(document.body, i)
975 h = find_token(document.header, "\\biblio_options", 0)
977 biblio_options = get_value(document.header, "\\biblio_options", h)
978 if not "bibencoding" in biblio_options:
979 document.header[h] += ",bibencoding=%s" % encodings[encoding]
981 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
983 # this should not happen
984 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# Insert a fresh \biblio_options line just before \biblatex_bibstyle.
986 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex path: bracket the inset with inputencoding group in ERT.
988 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
989 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# Split VCS-related Info insets out of the generic "buffer" Info type:
# rewrite type "buffer" + arg "vcs-*" into type "vcs" + the bare arg.
# NOTE(review): listing gaps (996->999, 1002->1007, 1015->1018) hide the
# dict opening (with the vcs-date entry), loop setup, and the type check.
995 def convert_vcsinfo(document):
996 " Separate vcs Info inset from buffer Info inset. "
999 "vcs-revision" : "revision",
1000 "vcs-tree-revision" : "tree-revision",
1001 "vcs-author" : "author",
1002 "vcs-time" : "time",
1007 i = find_token(document.body, "\\begin_inset Info", i+1)
1010 j = find_end_of_inset(document.body, i+1)
1012 document.warning("Malformed LyX document: Could not find end of Info inset.")
1014 tp = find_token(document.body, 'type', i, j)
1015 tpv = get_quoted_value(document.body, "type", tp)
1018 arg = find_token(document.body, 'arg', i, j)
1019 argv = get_quoted_value(document.body, "arg", arg)
# Only args listed in the vcs mapping are converted; others untouched.
1020 if argv not in list(types.keys()):
1022 document.body[tp] = "type \"vcs\""
1023 document.body[arg] = "arg \"" + types[argv] + "\""
# Inverse of convert_vcsinfo: fold type "vcs" Info insets back into the
# generic "buffer" type with a "vcs-" prefixed arg.
# NOTE(review): listing gaps (1029->1032, 1040->1043, 1046->1048) hide
# loop setup, the type filter, and the continue after the warning.
1026 def revert_vcsinfo(document):
1027 " Merge vcs Info inset to buffer Info inset. "
1029 args = ["revision", "tree-revision", "author", "time", "date" ]
1032 i = find_token(document.body, "\\begin_inset Info", i+1)
1035 j = find_end_of_inset(document.body, i+1)
1037 document.warning("Malformed LyX document: Could not find end of Info inset.")
1039 tp = find_token(document.body, 'type', i, j)
1040 tpv = get_quoted_value(document.body, "type", tp)
1043 arg = find_token(document.body, 'arg', i, j)
1044 argv = get_quoted_value(document.body, "arg", arg)
1045 if argv not in args:
1046 document.warning("Malformed Info inset. Invalid vcs arg.")
1048 document.body[tp] = "type \"buffer\""
1049 document.body[arg] = "arg \"vcs-" + argv + "\""
1052 def revert_dateinfo(document):
1053 " Revert date info insets to static text. "
1055 # FIXME This currently only considers the main language and uses the system locale
1056 # Ideally, it should honor context languages and switch the locale accordingly.
1058 # The date formats for each language using strftime syntax:
1059 # long, short, loclong, locmedium, locshort
1061 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1062 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1063 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1064 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1065 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1066 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1067 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1068 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1069 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1070 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1071 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1072 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1073 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1074 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1075 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1076 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1077 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1078 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1079 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1080 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1081 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1082 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1083 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1084 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1085 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1086 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1087 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1088 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1089 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1090 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1091 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1092 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1093 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1094 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1095 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1096 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1097 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1098 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1099 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1100 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1101 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1102 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1103 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1104 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1105 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1106 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1107 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1108 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1109 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1110 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1111 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1112 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1113 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1114 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1115 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1116 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1117 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1118 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1119 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1120 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1121 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1122 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1123 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1124 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1125 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1126 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1127 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1128 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1129 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1130 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1131 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1132 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1133 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1134 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1135 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1136 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1137 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1138 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1139 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1140 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1141 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1142 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1143 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1144 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1145 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1146 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1147 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1148 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1149 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1150 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1151 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1152 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1153 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1154 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1155 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1156 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1157 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1158 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1159 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1160 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1161 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1162 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1165 types = ["date", "fixdate", "moddate" ]
1166 lang = get_value(document.header, "\\language")
1168 document.warning("Malformed LyX document! No \\language header found!")
1173 i = find_token(document.body, "\\begin_inset Info", i+1)
1176 j = find_end_of_inset(document.body, i+1)
1178 document.warning("Malformed LyX document: Could not find end of Info inset.")
1180 tp = find_token(document.body, 'type', i, j)
1181 tpv = get_quoted_value(document.body, "type", tp)
1182 if tpv not in types:
1184 arg = find_token(document.body, 'arg', i, j)
1185 argv = get_quoted_value(document.body, "arg", arg)
1188 if tpv == "fixdate":
1189 datecomps = argv.split('@')
1190 if len(datecomps) > 1:
1192 isodate = datecomps[1]
1193 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1195 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1196 # FIXME if we had the path to the original document (not the one in the tmp dir),
1197 # we could use the mtime.
1198 # elif tpv == "moddate":
1199 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1202 result = dte.isodate()
1203 elif argv == "long":
1204 result = dte.strftime(dateformats[lang][0])
1205 elif argv == "short":
1206 result = dte.strftime(dateformats[lang][1])
1207 elif argv == "loclong":
1208 result = dte.strftime(dateformats[lang][2])
1209 elif argv == "locmedium":
1210 result = dte.strftime(dateformats[lang][3])
1211 elif argv == "locshort":
1212 result = dte.strftime(dateformats[lang][4])
1214 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1215 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1216 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1217 fmt = re.sub('[^\'%]d', '%d', fmt)
1218 fmt = fmt.replace("'", "")
1219 result = dte.strftime(fmt)
1220 if sys.version_info < (3,0):
1221 # In Python 2, datetime module works with binary strings,
1222 # our dateformat strings are utf8-encoded:
1223 result = result.decode('utf-8')
1224 document.body[i : j+1] = [result]
# Revert LyX "time" Info insets (time/fixtime/modtime) to static text,
# formatted per the main document language.  NOTE(review): this excerpt is
# missing several short lines (loop headers, `if i == -1:` guards, the
# `timeformats = {` opener, and likely a `tme = dtme.time()` binding) --
# the surviving lines are kept byte-identical below.
1227 def revert_timeinfo(document):
1228 " Revert time info insets to static text. "
1230 # FIXME This currently only considers the main language and uses the system locale
1231 # Ideally, it should honor context languages and switch the locale accordingly.
1232 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1235 # The time formats for each language using strftime syntax:
# Per-language [long, short] strftime time formats.
1238 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
1239 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
1240 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1241 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1242 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
1243 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1244 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1245 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
1246 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
1247 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1248 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
1249 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
1250 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1251 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
1252 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
1253 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
1254 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
1255 "breton" : ["%H:%M:%S %Z", "%H:%M"],
1256 "british" : ["%H:%M:%S %Z", "%H:%M"],
1257 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
1258 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1259 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
1260 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
1261 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
1262 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1263 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
1264 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
1265 "czech" : ["%H:%M:%S %Z", "%H:%M"],
1266 "danish" : ["%H.%M.%S %Z", "%H.%M"],
1267 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
1268 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
1269 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1270 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
1271 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
1272 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
1273 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
1274 "french" : ["%H:%M:%S %Z", "%H:%M"],
1275 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
1276 "galician" : ["%H:%M:%S %Z", "%H:%M"],
1277 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
1278 "german" : ["%H:%M:%S %Z", "%H:%M"],
1279 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
1280 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
1281 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1282 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
1283 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1284 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
1285 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
1286 "irish" : ["%H:%M:%S %Z", "%H:%M"],
1287 "italian" : ["%H:%M:%S %Z", "%H:%M"],
1288 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
1289 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
1290 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1291 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
1292 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1293 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1294 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
1295 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1296 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
1297 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1298 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1299 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1300 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1301 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1302 "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1303 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1304 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1305 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1306 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1307 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1308 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1309 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1310 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1311 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1312 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1313 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1314 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1315 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1316 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1317 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1318 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1319 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1320 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1321 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1322 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1323 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1324 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1325 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1326 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1327 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1328 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1329 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1330 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1331 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1332 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1333 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1334 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1335 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1336 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1337 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1338 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1339 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
# Only these Info inset types carry time information.
1342 types = ["time", "fixtime", "modtime" ]
1344 i = find_token(document.header, "\\language", 0)
1346 # this should not happen
1347 document.warning("Malformed LyX document! No \\language header found!")
1349 lang = get_value(document.header, "\\language", i)
# Scan the body for Info insets of a time type and replace each whole
# inset (lines i..j) with formatted static text.
1353 i = find_token(document.body, "\\begin_inset Info", i+1)
1356 j = find_end_of_inset(document.body, i+1)
1358 document.warning("Malformed LyX document: Could not find end of Info inset.")
1360 tp = find_token(document.body, 'type', i, j)
1361 tpv = get_quoted_value(document.body, "type", tp)
1362 if tpv not in types:
1364 arg = find_token(document.body, 'arg', i, j)
1365 argv = get_quoted_value(document.body, "arg", arg)
# NOTE(review): a binding of `tme` from `dtme` (e.g. `tme = dtme.time()`)
# is not visible in this excerpt -- confirm against the full file.
1367 dtme = datetime.now()
# "fixtime" carries the fixed time after an '@' separator, in ISO form.
1369 if tpv == "fixtime":
1370 timecomps = argv.split('@')
1371 if len(timecomps) > 1:
1373 isotime = timecomps[1]
1374 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1376 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1378 m = re.search('(\d\d):(\d\d)', isotime)
1380 tme = time(int(m.group(1)), int(m.group(2)))
1381 # FIXME if we had the path to the original document (not the one in the tmp dir),
1382 # we could use the mtime.
1383 # elif tpv == "moddate":
1384 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1387 result = tme.isoformat()
1388 elif argv == "long":
1389 result = tme.strftime(timeformats[lang][0])
1390 elif argv == "short":
1391 result = tme.strftime(timeformats[lang][1])
# Fallback: translate a Qt-style custom format string to strftime tokens.
# NOTE(review): these replacements overlap -- after "HH" -> "%H", the
# subsequent .replace("H", "%H") rewrites the just-inserted "%H" to
# "%%H" (similarly for h/m/s/a below); verify intended behavior.
1393 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1394 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1395 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1396 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1397 fmt = fmt.replace("'", "")
# NOTE(review): `dte` is not defined in this function -- the time value is
# held in `tme` everywhere else in this branch; probable typo for
# `tme.strftime(fmt)`.
1398 result = dte.strftime(fmt)
# NOTE(review): assigning a plain string to a list slice splices it in
# character by character; sibling functions assign `[result]` -- confirm.
1399 document.body[i : j+1] = result
# Downgrade Info insets of argument "name-noext" to the older "name"
# argument by rewriting the inset's `arg` line in place.  NOTE(review):
# loop header and the `if tpv != "buffer": continue` style guards are
# missing from this excerpt; surviving lines kept byte-identical.
1402 def revert_namenoextinfo(document):
1403 " Merge buffer Info inset type name-noext to name. "
1407 i = find_token(document.body, "\\begin_inset Info", i+1)
1410 j = find_end_of_inset(document.body, i+1)
1412 document.warning("Malformed LyX document: Could not find end of Info inset.")
1414 tp = find_token(document.body, 'type', i, j)
1415 tpv = get_quoted_value(document.body, "type", tp)
1418 arg = find_token(document.body, 'arg', i, j)
1419 argv = get_quoted_value(document.body, "arg", arg)
1420 if argv != "name-noext":
# Rewrite the argument line to the pre-2.4 spelling.
1422 document.body[arg] = "arg \"name\""
# Replace localization ("l7n") Info insets with their plain-text value,
# stripping GUI decorations (trailing colons, menu and Qt accelerators).
1425 def revert_l7ninfo(document):
1426 " Revert l7n Info inset to text. "
1430 i = find_token(document.body, "\\begin_inset Info", i+1)
1433 j = find_end_of_inset(document.body, i+1)
1435 document.warning("Malformed LyX document: Could not find end of Info inset.")
1437 tp = find_token(document.body, 'type', i, j)
1438 tpv = get_quoted_value(document.body, "type", tp)
1441 arg = find_token(document.body, 'arg', i, j)
1442 argv = get_quoted_value(document.body, "arg", arg)
1443 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# The sentinel "</amp;>" temporarily protects literal " & " while lone
# accelerator ampersands are removed.
1444 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning a plain string to a list slice splices it in
# character by character; other revert functions assign a one-element
# list ([argv]) -- confirm against upstream.
1445 document.body[i : j+1] = argv
# Turn "listpreamble:" Argument insets back into a brace-wrapped ERT inset
# placed at the start of the containing paragraph.
1448 def revert_listpargs(document):
1449 " Reverts listpreamble arguments to TeX-code "
1452 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1455 j = find_end_of_inset(document.body, i)
1456 # Find containing paragraph layout
1457 parent = get_containing_layout(document.body, i)
1459 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# NOTE(review): `parbeg` (used below) is extracted from `parent` on a
# line not visible in this excerpt (parent[3] = start of paragraph body).
1462 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1463 endPlain = find_end_of_layout(document.body, beginPlain)
1464 content = document.body[beginPlain + 1 : endPlain]
# Remove the Argument inset, then splice the ERT at the paragraph start.
1465 del document.body[i:j+1]
1466 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1467 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1468 document.body[parbeg : parbeg] = subst
# Replace "lyxinfo"/"layoutformat" Info insets with the literal layout
# format number of the 2.3 series ("69").
1471 def revert_lformatinfo(document):
1472 " Revert layout format Info inset to text. "
1476 i = find_token(document.body, "\\begin_inset Info", i+1)
1479 j = find_end_of_inset(document.body, i+1)
1481 document.warning("Malformed LyX document: Could not find end of Info inset.")
1483 tp = find_token(document.body, 'type', i, j)
1484 tpv = get_quoted_value(document.body, "type", tp)
1485 if tpv != "lyxinfo":
1487 arg = find_token(document.body, 'arg', i, j)
1488 argv = get_quoted_value(document.body, "arg", arg)
1489 if argv != "layoutformat":
# NOTE(review): assigning the string "69" to a list slice splices it as
# the two lines "6" and "9"; a one-element list (["69"]) would keep it
# on a single body line -- confirm against upstream.
1492 document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Stack of languages for nested layouts; the document language is the
    # outermost default.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Bug fix: str.lstrip('\\lang ') strips a *character set*
            # ({'\\','l','a','n','g',' '}), not the prefix, and mangles
            # language names starting with one of those characters
            # (e.g. "ngerman" -> "erman").  Slice the fixed prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # A nested layout inherits the current language.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' via a NUL placeholder (cannot occur in text).
            document.body[i] = line.replace('(', '\x00').replace(')', '(').replace('\x00', ')')
def revert_hebrew_parentheses(document):
    """Swap Hebrew parentheses back to the pre-2.4 convention.

    Swapping '(' and ')' is an involution, so reverting is literally the
    same operation as converting; this wrapper exists only to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "
    # No babel name (empty string); polyglossia name "malayalam".
    language_args = ("malayalam", "", "malayalam")
    revert_language(document, *language_args)
# Replace the soul module's Flex insets with raw ERT commands, loading the
# required LaTeX packages in the user preamble.  NOTE(review): the loop
# header over `flexes` and the `if i != -1:` guards are missing from this
# excerpt; surviving lines kept byte-identical.
1527 def revert_soul(document):
1528 " Revert soul module flex insets to ERT "
1530 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# If any soul flex inset is present, the soul package is needed.
1533 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1535 add_to_preamble(document, ["\\usepackage{soul}"])
# Highlight additionally requires the color package.
1537 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1539 add_to_preamble(document, ["\\usepackage{color}"])
# Replace each flex inset with its soul command.
1541 revert_flex_inset(document.body, "Spaceletters", "\\so")
1542 revert_flex_inset(document.body, "Strikethrough", "\\st")
1543 revert_flex_inset(document.body, "Underline", "\\ul")
1544 revert_flex_inset(document.body, "Highlight", "\\hl")
1545 revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    " Remove tablestyle params "
    # The \tablestyle header line is unknown to older formats; drop it.
    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        del document.header[pos]
# Revert per-bibfile Biblatex encodings: strip the `file_encodings` line
# from each bibtex inset, emit \addbibresource[bibencoding=...] lines to
# the preamble, and wrap the inset in a Note with an ERT
# \printbibliography in front.  NOTE(review): numerous guard/loop lines
# (`if i == -1:`, `while True:`, `encmap = dict()`, `for pp in enclist:`,
# dict opener/closer) are missing from this excerpt; surviving lines are
# kept byte-identical.
1557 def revert_bibfileencodings(document):
1558 " Revert individual Biblatex bibliography encodings "
1562 i = find_token(document.header, "\\cite_engine", 0)
1564 document.warning("Malformed document! Missing \\cite_engine")
1566 engine = get_value(document.header, "\\cite_engine", i)
# Only biblatex engines support per-file encodings.
1570 if engine in ["biblatex", "biblatex-natbib"]:
1573 # Map lyx to latex encoding names
1577 "armscii8" : "armscii8",
1578 "iso8859-1" : "latin1",
1579 "iso8859-2" : "latin2",
1580 "iso8859-3" : "latin3",
1581 "iso8859-4" : "latin4",
1582 "iso8859-5" : "iso88595",
1583 "iso8859-6" : "8859-6",
1584 "iso8859-7" : "iso-8859-7",
1585 "iso8859-8" : "8859-8",
1586 "iso8859-9" : "latin5",
1587 "iso8859-13" : "latin7",
1588 "iso8859-15" : "latin9",
1589 "iso8859-16" : "latin10",
1590 "applemac" : "applemac",
1592 "cp437de" : "cp437de",
1600 "cp1250" : "cp1250",
1601 "cp1251" : "cp1251",
1602 "cp1252" : "cp1252",
1603 "cp1255" : "cp1255",
1604 "cp1256" : "cp1256",
1605 "cp1257" : "cp1257",
1606 "koi8-r" : "koi8-r",
1607 "koi8-u" : "koi8-u",
1609 "utf8-platex" : "utf8",
# Process each bibtex inset in the body.
1616 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1619 j = find_end_of_inset(document.body, i)
1621 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1623 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1627 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1628 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1629 if len(bibfiles) == 0:
1630 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1631 # remove encoding line
1632 k = find_token(document.body, "file_encodings", i, j)
1634 del document.body[k]
1635 # Re-find inset end line
1636 j = find_end_of_inset(document.body, i)
# `encodings` is a tab-separated list of "<bibfile> <encoding>" pairs.
1638 enclist = encodings.split("\t")
1641 ppp = pp.split(" ", 1)
1642 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per bibfile, with its encoding if mapped.
1643 for bib in bibfiles:
1644 pr = "\\addbibresource"
1645 if bib in encmap.keys():
1646 pr += "[bibencoding=" + encmap[bib] + "]"
1647 pr += "{" + bib + "}"
1648 add_to_preamble(document, [pr])
1649 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1650 pcmd = "printbibliography"
1652 pcmd += "[" + opts + "]"
1653 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1654 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1655 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1656 "status open", "", "\\begin_layout Plain Layout" ]
1657 repl += document.body[i:j+1]
1658 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1659 document.body[i:j+1] = repl
# Strip the (bottom|top)line[lr]trim="true" attributes from table cells;
# older formats do not know \cmidrule trimming.
1665 def revert_cmidruletrimming(document):
1666 " Remove \\cmidrule trimming "
1668 # FIXME: Revert to TeX code?
1671 # first, let's find out if we need to do anything
1672 i = find_token(document.body, '<cell ', i+1)
1675 j = document.body[i].find('trim="')
# Only cells whose tag contains a trim attribute are touched.
1678 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1679 # remove trim option
1680 document.body[i] = rgx.sub('', document.body[i])
# Tail of the module-level `ruby_inset_def` local-layout definition (the
# list opener `ruby_inset_def = [` is not visible in this excerpt).  Each
# raw string is one line of a LyX InsetLayout for the Flex:Ruby inset,
# used by convert/revert_ruby_module below.
r'### Inserted by lyx2lyx (ruby inset) ###',
r'InsetLayout Flex:Ruby',
r' LyxType charstyle',
r' LatexType command',
r' HTMLInnerTag rb',
r' HTMLInnerAttr ""',
r' LabelString "Ruby"',
r' Decoration Conglomerate',
r' \ifdefined\kanjiskip',
r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
r' \else \ifdefined\luatexversion',
r' \usepackage{luatexja-ruby}',
r' \else \ifdefined\XeTeXversion',
r' \usepackage{ruby}%',
r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
r' Argument post:1',
r' LabelString "ruby text"',
r' MenuString "Ruby Text|R"',
r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module in place of a local inset definition.

    If the document carries the legacy local Flex:Ruby layout, remove it
    and enable the equivalent ruby module instead.
    """
    removed = document.del_local_layout(ruby_inset_def)
    if removed:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with the legacy local inset definition.

    If the ruby module is active, drop it and append the equivalent
    Flex:Ruby local layout so older versions can render the insets.
    """
    had_module = document.del_module("ruby")
    if had_module:
        document.append_local_layout(ruby_inset_def)
# Map the Japanese-specific input encodings (utf8-platex / utf8-cjk) to
# plain utf8 for the matching Japanese language variant.  NOTE(review):
# the early `return` after the startswith guard is missing from this
# excerpt; surviving lines kept byte-identical.
1733 def convert_utf8_japanese(document):
1734 " Use generic utf8 with Japanese documents."
1735 lang = get_value(document.header, "\\language")
1736 if not lang.startswith("japanese"):
1738 inputenc = get_value(document.header, "\\inputencoding")
# Only the exact language/encoding pairings are converted.
1739 if ((lang == "japanese" and inputenc == "utf8-platex")
1740 or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
1741 document.set_parameter("inputencoding", "utf8")
# Inverse of convert_utf8_japanese: restore the Japanese-specific utf8
# encoding variant matching the document language.  NOTE(review): the
# early `return` after the inputenc guard is missing from this excerpt.
1743 def revert_utf8_japanese(document):
1744 " Use Japanese utf8 variants with Japanese documents."
1745 inputenc = get_value(document.header, "\\inputencoding")
1746 if inputenc != "utf8":
1748 lang = get_value(document.header, "\\language")
1749 if lang == "japanese":
1750 document.set_parameter("inputencoding", "utf8-platex")
1751 if lang == "japanese-cjk":
1752 document.set_parameter("inputencoding", "utf8-cjk")
# Replace the native \use_lineno / \lineno_options header settings with an
# explicit \usepackage{lineno} + \linenumbers in the user preamble.
# NOTE(review): the continuation of the get_quoted_value call (its
# `delete=True)` argument line) and the early return are missing from
# this excerpt; surviving lines kept byte-identical.
1755 def revert_lineno(document):
1756 " Replace lineno setting with user-preamble code."
1758 options = get_quoted_value(document.header, "\\lineno_options",
1760 if not get_bool_value(document.header, "\\use_lineno", delete=True):
# Non-empty options are passed to the package as [options].
1763 options = "[" + options + "]"
1764 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# Inverse of revert_lineno: detect a lyx2lyx-inserted \usepackage{lineno}
# + \linenumbers pair in the preamble, remove it, and write the native
# \use_lineno (and \lineno_options) header settings instead.
# NOTE(review): the `use_lineno` initialization and the if/else around
# the two header insertions are missing from this excerpt.
1767 def convert_lineno(document):
1768 " Replace user-preamble code with native lineno support."
1771 i = find_token(document.preamble, "\\linenumbers", 1)
1773 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1776 options = usepkg.group(1).strip("[]")
# Remove the \usepackage line and \linenumbers, plus the lyx2lyx marker.
1777 del(document.preamble[i-1:i+1])
1778 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the native header settings just before the \index section.
1780 k = find_token(document.header, "\\index ")
1782 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1784 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1785 "\\lineno_options %s" % options]
# Emulate languages that 2.4 added natively by reverting them through the
# generic babel/polyglossia fallback.  NOTE(review): the scan loop header
# and `if i == -1: break` are missing from this excerpt.
1788 def revert_new_languages(document):
1789 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1790 and Russian (Petrine orthography)."""
1792 # lyxname: (babelname, polyglossianame)
1793 new_languages = {"azerbaijani": ("azerbaijani", ""),
1794 "bengali": ("", "bengali"),
1795 "churchslavonic": ("", "churchslavonic"),
1796 "oldrussian": ("", "russian"),
1797 "korean": ("", "korean"),
1799 used_languages = set()
1800 if document.language in new_languages:
1801 used_languages.add(document.language)
# Scan the body for \lang switches to any of the new languages.
1804 i = find_token(document.body, "\\lang", i+1)
1807 if document.body[i][6:].strip() in new_languages:
# NOTE(review): this adds the *document* language, not the language just
# matched on the body line (document.body[i][6:].strip()) -- looks like a
# bug; confirm against upstream.
1808 used_languages.add(document.language)
1810 # Korean is already supported via CJK, so leave as-is for Babel
1811 if ("korean" in used_languages
1812 and get_bool_value(document.header, "\\use_non_tex_fonts")
1813 and get_value(document.header, "\\language_package") in ("default", "auto")):
1814 revert_language(document, "korean", "", "korean")
1815 used_languages.discard("korean")
1817 for lang in used_languages:
# NOTE(review): `revert` is not defined in this excerpt; presumably this
# should call revert_language(document, lang, *new_languages[lang]) --
# confirm against upstream.
1818 revert(lang, *new_languages[lang])
# Tail of the module-level `gloss_inset_def` local-layout definition (the
# list opener `gloss_inset_def = [` is not visible in this excerpt).  One
# raw string per line of the legacy Flex:Glosse InsetLayout, used by
# convert/revert_linggloss below.
r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
r'InsetLayout Flex:Glosse',
r' LabelString "Gloss (old version)"',
r' MenuString "Gloss (old version)"',
r' LatexType environment',
r' LatexName linggloss',
r' Decoration minimalistic',
r' CustomPars false',
r' ForcePlain true',
r' ParbreakIsNewline true',
r' FreeSpacing true',
r' Requires covington',
r' \@ifundefined{linggloss}{%',
r' \newenvironment{linggloss}[2][]{',
r' \def\glosstr{\glt #1}%',
r' {\glosstr\glend}}{}',
r' ResetsFont true',
r' Decoration conglomerate',
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
# Module-level local-layout definition for the legacy three-line gloss
# inset (Flex:Tri-Glosse), the triple-gloss sibling of gloss_inset_def.
glosss_inset_def = [
r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
r'InsetLayout Flex:Tri-Glosse',
r' LabelString "Tri-Gloss (old version)"',
r' MenuString "Tri-Gloss (old version)"',
r' LatexType environment',
r' LatexName lingglosss',
r' Decoration minimalistic',
r' CustomPars false',
r' ForcePlain true',
r' ParbreakIsNewline true',
r' FreeSpacing true',
r' Requires covington',
r' \@ifundefined{lingglosss}{%',
r' \newenvironment{lingglosss}[2][]{',
r' \def\glosstr{\glt #1}%',
r' {\glosstr\glend}}{}',
r' ResetsFont true',
r' Decoration conglomerate',
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Attach the deprecated ling gloss inset definitions as local layout.

    Documents still using the old Flex Glosse / Tri-Glosse insets get the
    matching legacy InsetLayout appended so they keep rendering.
    """
    legacy_insets = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for inset_token, layout in legacy_insets:
        if find_token(document.body, inset_token, 0) != -1:
            document.append_local_layout(layout)
# Convert new-style Interlinear Gloss insets back to raw \gloss/\trigloss
# ERT with their optional and post arguments inlined as braces.
# NOTE(review): many short lines (loop headers, `if arg != -1:` guards,
# the `cmd = ...` assignments, `else:` lines) are missing from this
# excerpt; surviving lines are kept byte-identical.
1902 def revert_linggloss(document):
1903 " Revert to old ling gloss definitions "
1904 if not "linguistics" in document.get_module_list():
# Drop the legacy local layouts; they will be re-added via "Requires
# covington" at the end if any gloss was actually reverted.
1906 document.del_local_layout(gloss_inset_def)
1907 document.del_local_layout(glosss_inset_def)
1910 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1911 for glosse in glosses:
1914 i = find_token(document.body, glosse, i+1)
1917 j = find_end_of_inset(document.body, i)
1919 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Extract the optional argument (Argument 1), then remove its inset.
1922 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1923 endarg = find_end_of_inset(document.body, arg)
1926 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1927 if argbeginPlain == -1:
1928 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1930 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1931 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1933 # remove Arg insets and paragraph, if it only contains this inset
1934 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1935 del document.body[arg - 1 : endarg + 4]
1937 del document.body[arg : endarg + 1]
# Same pattern for the first mandatory post argument.
1939 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1940 endarg = find_end_of_inset(document.body, arg)
1943 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1944 if argbeginPlain == -1:
1945 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1947 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1948 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1950 # remove Arg insets and paragraph, if it only contains this inset
1951 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1952 del document.body[arg - 1 : endarg + 4]
1954 del document.body[arg : endarg + 1]
# Second post argument.
1956 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1957 endarg = find_end_of_inset(document.body, arg)
1960 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1961 if argbeginPlain == -1:
1962 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1964 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1965 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1967 # remove Arg insets and paragraph, if it only contains this inset
1968 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1969 del document.body[arg - 1 : endarg + 4]
1971 del document.body[arg : endarg + 1]
# Third post argument (only present on the 3-line gloss).
1973 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
1974 endarg = find_end_of_inset(document.body, arg)
1977 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1978 if argbeginPlain == -1:
1979 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
1981 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1982 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
1984 # remove Arg insets and paragraph, if it only contains this inset
1985 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1986 del document.body[arg - 1 : endarg + 4]
1988 del document.body[arg : endarg + 1]
# NOTE(review): the assignment of `cmd` (\gloss vs \trigloss) is on lines
# missing from this excerpt; the 3-line variant selects \trigloss.
1991 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset as ERT: \cmd[opt]{...}{m1}{m2}[{m3}].
1994 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1995 endInset = find_end_of_inset(document.body, i)
1996 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
1997 precontent = put_cmd_in_ert(cmd)
1998 if len(optargcontent) > 0:
1999 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2000 precontent += put_cmd_in_ert("{")
2002 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2003 if cmd == "\\trigloss":
2004 postcontent += put_cmd_in_ert("}{") + marg3content
2005 postcontent += put_cmd_in_ert("}")
2007 document.body[endPlain:endInset + 1] = postcontent
2008 document.body[beginPlain + 1:beginPlain] = precontent
2009 del document.body[i : beginPlain + 1]
# The reverted commands come from the covington package.
2011 document.append_local_layout("Requires covington")
# Revert "Subexample" layouts (linguistics module) to raw LaTeX:
# a \begin{subexamples}[opt]...\end{subexamples} environment in ERT,
# with one \item per consecutive Subexample paragraph.
# NOTE(review): several guard/continue lines appear elided in this
# extraction; verify control flow against the repository copy.
2016 def revert_subexarg(document):
2017 " Revert linguistic subexamples with argument to ERT "
# Subexample only exists when the linguistics module is loaded.
2019 if not "linguistics" in document.get_module_list():
# Walk every Subexample layout in the body.
2025 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2028 j = find_end_of_layout(document.body, i)
2030 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2033 # check for consecutive layouts
2034 k = find_token(document.body, "\\begin_layout", j)
2035 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2037 j = find_end_of_layout(document.body, k)
2039 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument (Argument 1), converted to LaTeX.
2042 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2046 endarg = find_end_of_inset(document.body, arg)
2048 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2049 if argbeginPlain == -1:
2050 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2052 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2053 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2055 # remove Arg insets and paragraph, if it only contains this inset
2056 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2057 del document.body[arg - 1 : endarg + 4]
2059 del document.body[arg : endarg + 1]
# Open the environment with the collected optional argument in ERT.
2061 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2063 # re-find end of layout
2064 j = find_end_of_layout(document.body, i)
2066 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2069 # check for consecutive layouts
2070 k = find_token(document.body, "\\begin_layout", j)
2071 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each following Subexample becomes a Standard paragraph headed by \item.
2073 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2074 j = find_end_of_layout(document.body, k)
2076 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last consecutive Subexample,
# then rewrite the first layout itself (open command + first \item).
2079 endev = put_cmd_in_ert("\\end{subexamples}")
2081 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2082 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2083 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# The subexamples environment comes from covington.
2085 document.append_local_layout("Requires covington")
# Revert the linguistics module's DRS (Discourse Representation Structure)
# Flex insets to the corresponding drs-package LaTeX commands in ERT.
# NOTE(review): guard/continue lines and the per-flavour `cmd = ...`
# assignments appear elided in this extraction; verify against the
# repository copy.
2089 def revert_drs(document):
2090 " Revert DRS insets (linguistics) to ERT "
2092 if not "linguistics" in document.get_module_list():
# All DRS inset flavours handled by this function.
2096 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2097 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2098 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2099 "\\begin_inset Flex SDRS"]
2103 i = find_token(document.body, drs, i+1)
2106 j = find_end_of_inset(document.body, i)
2108 document.warning("Malformed LyX document: Can't find end of DRS inset")
2111 # Check for arguments
# Argument 1 / 2: material emitted before the main content.
2112 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2113 endarg = find_end_of_inset(document.body, arg)
2116 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2117 if argbeginPlain == -1:
2118 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2120 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2121 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2123 # remove Arg insets and paragraph, if it only contains this inset
2124 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2125 del document.body[arg - 1 : endarg + 4]
2127 del document.body[arg : endarg + 1]
# Deletions shift offsets, so the inset end is re-found after each argument.
2130 j = find_end_of_inset(document.body, i)
2132 document.warning("Malformed LyX document: Can't find end of DRS inset")
2135 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2136 endarg = find_end_of_inset(document.body, arg)
2139 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2140 if argbeginPlain == -1:
2141 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2143 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2144 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2146 # remove Arg insets and paragraph, if it only contains this inset
2147 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2148 del document.body[arg - 1 : endarg + 4]
2150 del document.body[arg : endarg + 1]
2153 j = find_end_of_inset(document.body, i)
2155 document.warning("Malformed LyX document: Can't find end of DRS inset")
# post:1 .. post:4: trailing arguments used by the composite DRS commands;
# each defaults to an empty list when the argument inset is absent.
2158 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2159 endarg = find_end_of_inset(document.body, arg)
2160 postarg1content = []
2162 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2163 if argbeginPlain == -1:
2164 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2166 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2167 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2169 # remove Arg insets and paragraph, if it only contains this inset
2170 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2171 del document.body[arg - 1 : endarg + 4]
2173 del document.body[arg : endarg + 1]
2176 j = find_end_of_inset(document.body, i)
2178 document.warning("Malformed LyX document: Can't find end of DRS inset")
2181 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2182 endarg = find_end_of_inset(document.body, arg)
2183 postarg2content = []
2185 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2186 if argbeginPlain == -1:
2187 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2189 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2190 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2192 # remove Arg insets and paragraph, if it only contains this inset
2193 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2194 del document.body[arg - 1 : endarg + 4]
2196 del document.body[arg : endarg + 1]
2199 j = find_end_of_inset(document.body, i)
2201 document.warning("Malformed LyX document: Can't find end of DRS inset")
2204 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2205 endarg = find_end_of_inset(document.body, arg)
2206 postarg3content = []
2208 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2209 if argbeginPlain == -1:
2210 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2212 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2213 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2215 # remove Arg insets and paragraph, if it only contains this inset
2216 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2217 del document.body[arg - 1 : endarg + 4]
2219 del document.body[arg : endarg + 1]
2222 j = find_end_of_inset(document.body, i)
2224 document.warning("Malformed LyX document: Can't find end of DRS inset")
2227 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2228 endarg = find_end_of_inset(document.body, arg)
2229 postarg4content = []
2231 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2232 if argbeginPlain == -1:
2233 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2235 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2236 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2238 # remove Arg insets and paragraph, if it only contains this inset
2239 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2240 del document.body[arg - 1 : endarg + 4]
2242 del document.body[arg : endarg + 1]
2244 # The respective LaTeX command
# Map the Flex inset flavour onto its drs-package command
# (branch bodies elided in this extraction).
2246 if drs == "\\begin_inset Flex DRS*":
2248 elif drs == "\\begin_inset Flex IfThen-DRS":
2250 elif drs == "\\begin_inset Flex Cond-DRS":
2252 elif drs == "\\begin_inset Flex QDRS":
2254 elif drs == "\\begin_inset Flex NegDRS":
2256 elif drs == "\\begin_inset Flex SDRS":
# Rebuild the inset as ERT: command + braced pre-arguments, main content,
# then the trailing arguments.
2259 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2260 endInset = find_end_of_inset(document.body, i)
2261 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2262 precontent = put_cmd_in_ert(cmd)
2263 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# \sdrs takes a second leading argument.
2264 if drs == "\\begin_inset Flex SDRS":
2265 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2266 precontent += put_cmd_in_ert("{")
# Composite commands append their post-arguments after the main content.
2269 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2270 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2271 if cmd == "\\condrs" or cmd == "\\qdrs":
2272 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2274 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2276 postcontent = put_cmd_in_ert("}")
2278 document.body[endPlain:endInset + 1] = postcontent
2279 document.body[beginPlain + 1:beginPlain] = precontent
2280 del document.body[i : beginPlain + 1]
# drs.sty is used together with covington; record both requirements.
2282 document.append_local_layout("Provides covington 1")
2283 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Move \babelfont-based font selection out of the LyX header and into the
# user preamble. Only acts on documents using non-TeX fonts with the babel
# language package; the header font entries are reset to "default".
# NOTE(review): early-return guard lines appear elided in this extraction;
# verify against the repository copy.
2289 def revert_babelfont(document):
2290 " Reverts the use of \\babelfont to user preamble "
2292 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2294 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2296 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2298 i = find_token(document.header, '\\language_package', 0)
2300 document.warning("Malformed LyX document: Missing \\language_package.")
2302 if get_value(document.header, "\\language_package", 0) != "babel":
2305 # check font settings
2307 roman = sans = typew = "default"
2309 sf_scale = tt_scale = 100.0
2311 j = find_token(document.header, "\\font_roman", 0)
2313 document.warning("Malformed LyX document: Missing \\font_roman.")
2315 # We need to use this regex since split() does not handle quote protection
2316 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2317 roman = romanfont[2].strip('"')
2318 romanfont[2] = '"default"'
2319 document.header[j] = " ".join(romanfont)
2321 j = find_token(document.header, "\\font_sans", 0)
2323 document.warning("Malformed LyX document: Missing \\font_sans.")
2325 # We need to use this regex since split() does not handle quote protection
2326 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2327 sans = sansfont[2].strip('"')
2328 sansfont[2] = '"default"'
2329 document.header[j] = " ".join(sansfont)
2331 j = find_token(document.header, "\\font_typewriter", 0)
2333 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2335 # We need to use this regex since split() does not handle quote protection
2336 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2337 typew = ttfont[2].strip('"')
2338 ttfont[2] = '"default"'
2339 document.header[j] = " ".join(ttfont)
2341 i = find_token(document.header, "\\font_osf", 0)
2343 document.warning("Malformed LyX document: Missing \\font_osf.")
2345 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Scale values: remember the original percentage, reset the header to 100.
2347 j = find_token(document.header, "\\font_sf_scale", 0)
2349 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2351 sfscale = document.header[j].split()
2354 document.header[j] = " ".join(sfscale)
2357 sf_scale = float(val)
2359 document.warning("Invalid font_sf_scale value: " + val)
2361 j = find_token(document.header, "\\font_tt_scale", 0)
2363 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2365 ttscale = document.header[j].split()
2368 document.header[j] = " ".join(ttscale)
2371 tt_scale = float(val)
2373 document.warning("Invalid font_tt_scale value: " + val)
2375 # set preamble stuff
# \babelfont is only available with xelatex/lualatex, hence the banner.
2376 pretext = ['%% This document must be processed with xelatex or lualatex!']
2377 pretext.append('\\AtBeginDocument{%')
2378 if roman != "default":
2379 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2380 if sans != "default":
2381 sf = '\\babelfont{sf}['
2382 if sf_scale != 100.0:
2383 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2384 sf += 'Mapping=tex-text]{' + sans + '}'
2386 if typew != "default":
2387 tw = '\\babelfont{tt}'
2388 if tt_scale != 100.0:
2389 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2390 tw += '{' + typew + '}'
2393 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2395 insert_to_preamble(document, pretext)
# Revert the native MinionPro roman-font selection (with extra package
# options) to a \usepackage{MinionPro} line in the user preamble.
# Only applies to TeX-font documents whose \font_roman is "minionpro" and
# which carry a \font_roman_opts header line.
# NOTE(review): some guard/option-assembly lines appear elided here.
2398 def revert_minionpro(document):
2399 " Revert native MinionPro font definition (with extra options) to LaTeX "
2401 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2403 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Nothing to do for non-TeX (system) fonts.
2405 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2408 regexp = re.compile(r'(\\font_roman_opts)')
2409 x = find_re(document.header, regexp, 0)
2413 # We need to use this regex since split() does not handle quote protection
2414 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2415 opts = romanopts[1].strip('"')
2417 i = find_token(document.header, "\\font_roman", 0)
2419 document.warning("Malformed LyX document: Missing \\font_roman.")
2422 # We need to use this regex since split() does not handle quote protection
2423 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2424 roman = romanfont[1].strip('"')
2425 if roman != "minionpro":
# Reset the header entry; the font is now set via the preamble.
2427 romanfont[1] = '"default"'
2428 document.header[i] = " ".join(romanfont)
# Old-style figures are folded into the package options.
2430 j = find_token(document.header, "\\font_osf true", 0)
2433 preamble = "\\usepackage["
2435 document.header[j] = "\\font_osf false"
2439 preamble += "]{MinionPro}"
2440 add_to_preamble(document, [preamble])
# Drop the now-consumed \font_roman_opts header line.
2441 del document.header[x]
# Revert per-face font options (\font_roman_opts etc.) by emitting
# \setxxxfont (fontspec) or \babelfont commands to the preamble, for
# roman, sans and typewriter faces in turn. Header entries are reset to
# "default" once consumed.
# NOTE(review): guard lines and some preamble-assembly lines appear
# elided in this extraction; verify against the repository copy.
2444 def revert_font_opts(document):
2445 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2447 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2449 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2451 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2452 i = find_token(document.header, '\\language_package', 0)
2454 document.warning("Malformed LyX document: Missing \\language_package.")
# With babel, \babelfont is used instead of fontspec's \set...font.
2456 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
2459 regexp = re.compile(r'(\\font_roman_opts)')
2460 i = find_re(document.header, regexp, 0)
2462 # We need to use this regex since split() does not handle quote protection
2463 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2464 opts = romanopts[1].strip('"')
2465 del document.header[i]
2467 regexp = re.compile(r'(\\font_roman)')
2468 i = find_re(document.header, regexp, 0)
2470 # We need to use this regex since split() does not handle quote protection
2471 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2472 font = romanfont[2].strip('"')
2473 romanfont[2] = '"default"'
2474 document.header[i] = " ".join(romanfont)
2475 if font != "default":
2477 preamble = "\\babelfont{rm}["
2479 preamble = "\\setmainfont["
2482 preamble += "Mapping=tex-text]{"
2485 add_to_preamble(document, [preamble])
# Sans face: same pattern, but the scale header value is also folded in.
2488 regexp = re.compile(r'(\\font_sans_opts)')
2489 i = find_re(document.header, regexp, 0)
2492 # We need to use this regex since split() does not handle quote protection
2493 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2494 opts = sfopts[1].strip('"')
2495 del document.header[i]
2497 regexp = re.compile(r'(\\font_sf_scale)')
2498 i = find_re(document.header, regexp, 0)
2500 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2501 regexp = re.compile(r'(\\font_sans)')
2502 i = find_re(document.header, regexp, 0)
2504 # We need to use this regex since split() does not handle quote protection
2505 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2506 font = sffont[2].strip('"')
2507 sffont[2] = '"default"'
2508 document.header[i] = " ".join(sffont)
2509 if font != "default":
2511 preamble = "\\babelfont{sf}["
2513 preamble = "\\setsansfont["
# Scale is written as "0.<percent>" — assumes scaleval < 100; TODO confirm.
2517 preamble += "Scale=0."
2518 preamble += scaleval
2520 preamble += "Mapping=tex-text]{"
2523 add_to_preamble(document, [preamble])
# Typewriter face: identical handling to the sans face.
2526 regexp = re.compile(r'(\\font_typewriter_opts)')
2527 i = find_re(document.header, regexp, 0)
2530 # We need to use this regex since split() does not handle quote protection
2531 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2532 opts = ttopts[1].strip('"')
2533 del document.header[i]
2535 regexp = re.compile(r'(\\font_tt_scale)')
2536 i = find_re(document.header, regexp, 0)
2538 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2539 regexp = re.compile(r'(\\font_typewriter)')
2540 i = find_re(document.header, regexp, 0)
2542 # We need to use this regex since split() does not handle quote protection
2543 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2544 font = ttfont[2].strip('"')
2545 ttfont[2] = '"default"'
2546 document.header[i] = " ".join(ttfont)
2547 if font != "default":
2549 preamble = "\\babelfont{tt}["
2551 preamble = "\\setmonofont["
2555 preamble += "Scale=0."
2556 preamble += scaleval
2558 preamble += "Mapping=tex-text]{"
2561 add_to_preamble(document, [preamble])
# Revert the plain ("complete") Noto font setup with extra options to a
# \usepackage[...]{noto} preamble line. Only fires when roman is
# NotoSerif-TLF and sans/typewriter are left at their defaults, i.e. noto
# is used as a single complete font family.
# NOTE(review): guard lines and the option-string assembly appear elided
# in this extraction; verify against the repository copy.
2564 def revert_plainNotoFonts_xopts(document):
2565 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2567 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2569 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2571 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2575 y = find_token(document.header, "\\font_osf true", 0)
2579 regexp = re.compile(r'(\\font_roman_opts)')
2580 x = find_re(document.header, regexp, 0)
# Nothing to revert without extra options or old-style figures.
2581 if x == -1 and not osf:
2586 # We need to use this regex since split() does not handle quote protection
2587 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2588 opts = romanopts[1].strip('"')
2594 i = find_token(document.header, "\\font_roman", 0)
2598 # We need to use this regex since split() does not handle quote protection
2599 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2600 roman = romanfont[1].strip('"')
2601 if roman != "NotoSerif-TLF":
2604 j = find_token(document.header, "\\font_sans", 0)
2608 # We need to use this regex since split() does not handle quote protection
2609 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2610 sf = sffont[1].strip('"')
2614 j = find_token(document.header, "\\font_typewriter", 0)
2618 # We need to use this regex since split() does not handle quote protection
2619 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2620 tt = ttfont[1].strip('"')
2624 # So we have noto as "complete font"
2625 romanfont[1] = '"default"'
2626 document.header[i] = " ".join(romanfont)
2628 preamble = "\\usepackage["
2630 preamble += "]{noto}"
2631 add_to_preamble(document, [preamble])
# Clear consumed header entries (osf flag and the opts line).
2633 document.header[y] = "\\font_osf false"
2635 del document.header[x]
# Revert the extended Noto font variants (with extra options) to LaTeX
# via the generic font-mapping machinery; collected packages land in the
# preamble through add_preamble_fonts().
2638 def revert_notoFonts_xopts(document):
2639 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2641 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2643 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX-font documents are affected.
2645 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2649 fm = createFontMapping(['Noto'])
2650 if revert_fonts(document, fm, fontmap, True):
2651 add_preamble_fonts(document, fontmap)
# Revert native IBM (Plex) font definitions with extra options to LaTeX,
# mirroring revert_notoFonts_xopts for the 'IBM' mapping.
2654 def revert_IBMFonts_xopts(document):
2655 " Revert native IBM font definition (with extra options) to LaTeX "
2657 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2659 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX-font documents are affected.
2661 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2665 fm = createFontMapping(['IBM'])
2667 if revert_fonts(document, fm, fontmap, True):
2668 add_preamble_fonts(document, fontmap)
# Revert native Adobe (Source) font definitions with extra options to
# LaTeX, mirroring revert_notoFonts_xopts for the 'Adobe' mapping.
2671 def revert_AdobeFonts_xopts(document):
2672 " Revert native Adobe font definition (with extra options) to LaTeX "
2674 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2676 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX-font documents are affected.
2678 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2682 fm = createFontMapping(['Adobe'])
2684 if revert_fonts(document, fm, fontmap, True):
2685 add_preamble_fonts(document, fontmap)
# Convert the single \font_osf header flag to the new per-face format:
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf. For TeX fonts,
# the sans/typewriter flags are derived from whether the selected font is
# in the known osf-capable lists below.
# NOTE(review): some branch lines appear elided in this extraction.
2688 def convert_osf(document):
2689 " Convert \\font_osf param to new format "
2692 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2694 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2696 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2698 i = find_token(document.header, '\\font_osf', 0)
2700 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans / typewriter variants support old-style figures.
2703 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2704 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2706 osfval = str2bool(get_value(document.header, "\\font_osf", i))
# The old flag becomes the roman-face flag in place.
2707 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2710 document.header.insert(i, "\\font_sans_osf false")
2711 document.header.insert(i + 1, "\\font_typewriter_osf false")
2715 x = find_token(document.header, "\\font_sans", 0)
2717 document.warning("Malformed LyX document: Missing \\font_sans.")
2719 # We need to use this regex since split() does not handle quote protection
2720 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2721 sf = sffont[1].strip('"')
2723 document.header.insert(i, "\\font_sans_osf true")
2725 document.header.insert(i, "\\font_sans_osf false")
2727 x = find_token(document.header, "\\font_typewriter", 0)
2729 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2731 # We need to use this regex since split() does not handle quote protection
2732 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2733 tt = ttfont[1].strip('"')
2735 document.header.insert(i + 1, "\\font_typewriter_osf true")
2737 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Fallback: both new flags default to false.
2740 document.header.insert(i, "\\font_sans_osf false")
2741 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Revert the per-face \font_*_osf flags back to the single legacy
# \font_osf flag: the roman flag is renamed in place, and sans/typewriter
# flags are OR-ed into the result before being removed.
# NOTE(review): some branch lines appear elided in this extraction.
2744 def revert_osf(document):
2745 " Revert \\font_*_osf params "
2748 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2750 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2752 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2754 i = find_token(document.header, '\\font_roman_osf', 0)
2756 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2759 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2760 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2762 i = find_token(document.header, '\\font_sans_osf', 0)
2764 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2767 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2768 del document.header[i]
2770 i = find_token(document.header, '\\font_typewriter_osf', 0)
2772 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Any face requesting osf turns the legacy flag on.
2775 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2776 del document.header[i]
2779 i = find_token(document.header, '\\font_osf', 0)
2781 document.warning("Malformed LyX document: Missing \\font_osf.")
2783 document.header[i] = "\\font_osf true"
# Revert native TeX font selections that carry extra package options
# (\font_sans_opts / \font_roman_opts) to explicit \usepackage lines:
# first biolinum (the only supported sans), then the known roman fonts.
# Header entries are reset to "default" once consumed.
# NOTE(review): guard lines and several option/package assignments appear
# elided in this extraction; verify against the repository copy.
2786 def revert_texfontopts(document):
2787 " Revert native TeX font definitions (with extra options) to LaTeX "
2789 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2791 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX-font documents are affected.
2793 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman TeX fonts this reversion knows how to handle.
2796 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2798 # First the sf (biolinum only)
2799 regexp = re.compile(r'(\\font_sans_opts)')
2800 x = find_re(document.header, regexp, 0)
2802 # We need to use this regex since split() does not handle quote protection
2803 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2804 opts = sfopts[1].strip('"')
2805 i = find_token(document.header, "\\font_sans", 0)
2807 document.warning("Malformed LyX document: Missing \\font_sans.")
2809 # We need to use this regex since split() does not handle quote protection
2810 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2811 sans = sffont[1].strip('"')
2812 if sans == "biolinum":
2814 sffont[1] = '"default"'
2815 document.header[i] = " ".join(sffont)
2817 j = find_token(document.header, "\\font_sans_osf true", 0)
2820 k = find_token(document.header, "\\font_sf_scale", 0)
2822 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2824 sfscale = document.header[k].split()
2827 document.header[k] = " ".join(sfscale)
2830 sf_scale = float(val)
2832 document.warning("Invalid font_sf_scale value: " + val)
2833 preamble = "\\usepackage["
2835 document.header[j] = "\\font_sans_osf false"
# Scaling is passed as biolinum's "scaled" package option.
2837 if sf_scale != 100.0:
2838 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2840 preamble += "]{biolinum}"
2841 add_to_preamble(document, [preamble])
2842 del document.header[x]
# Now the roman fonts with extra options.
2844 regexp = re.compile(r'(\\font_roman_opts)')
2845 x = find_re(document.header, regexp, 0)
2849 # We need to use this regex since split() does not handle quote protection
2850 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2851 opts = romanopts[1].strip('"')
2853 i = find_token(document.header, "\\font_roman", 0)
2855 document.warning("Malformed LyX document: Missing \\font_roman.")
2858 # We need to use this regex since split() does not handle quote protection
2859 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2860 roman = romanfont[1].strip('"')
2861 if not roman in rmfonts:
2863 romanfont[1] = '"default"'
2864 document.header[i] = " ".join(romanfont)
# Some fonts are provided by a differently-named LaTeX package.
2866 if roman == "utopia":
2868 elif roman == "palatino":
2869 package = "mathpazo"
2870 elif roman == "times":
2871 package = "mathptmx"
2872 elif roman == "xcharter":
2873 package = "XCharter"
# Old-style-figure options are font-specific (branch bodies elided).
2875 j = find_token(document.header, "\\font_roman_osf true", 0)
2877 if roman == "cochineal":
2878 osf = "proportional,osf,"
2879 elif roman == "utopia":
2881 elif roman == "garamondx":
2883 elif roman == "libertine":
2885 elif roman == "palatino":
2887 elif roman == "xcharter":
2889 document.header[j] = "\\font_roman_osf false"
# True small-caps support likewise varies per font.
2890 k = find_token(document.header, "\\font_sc true", 0)
2892 if roman == "utopia":
2894 if roman == "palatino" and osf == "":
2896 document.header[k] = "\\font_sc false"
2897 preamble = "\\usepackage["
2900 preamble += "]{" + package + "}"
2901 add_to_preamble(document, [preamble])
2902 del document.header[x]
# Convert Cantarell TeX-font package loading to the native GUI font
# setting via the generic font-mapping machinery.
2905 def convert_CantarellFont(document):
2906 " Handle Cantarell font definition to LaTeX "
# Only TeX-font documents (\use_non_tex_fonts false) are converted.
2908 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2909 fm = createFontMapping(['Cantarell'])
2910 convert_fonts(document, fm, "oldstyle")
# Revert the native Cantarell font setting back to package loading in
# the user preamble.
2912 def revert_CantarellFont(document):
2913 " Revert native Cantarell font definition to LaTeX "
# Only TeX-font documents (\use_non_tex_fonts false) are affected.
2915 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2917 fm = createFontMapping(['Cantarell'])
2918 if revert_fonts(document, fm, fontmap, False, True):
2919 add_preamble_fonts(document, fontmap)
# Convert Fira TeX-font package loading to the native GUI font setting,
# analogous to convert_CantarellFont.
2922 def convert_FiraFont(document):
2923 " Handle Fira font definition to LaTeX "
# Only TeX-font documents (\use_non_tex_fonts false) are converted.
2925 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2926 fm = createFontMapping(['Fira'])
2927 convert_fonts(document, fm, "lf")
# Revert the native Fira font setting back to package loading in the
# user preamble, analogous to revert_CantarellFont.
2929 def revert_FiraFont(document):
2930 " Revert native Fira font definition to LaTeX "
# Only TeX-font documents (\use_non_tex_fonts false) are affected.
2932 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2934 fm = createFontMapping(['Fira'])
2935 if revert_fonts(document, fm, fontmap, False, True):
2936 add_preamble_fonts(document, fontmap)
# Conversion hub: the lyx2lyx driver walks these tables to move documents
# between file-format numbers. Each entry pairs a target format with the
# functions that perform the step; the `revert` chain runs newest-first.
# NOTE(review): many table rows appear elided in this extraction.
2943 supported_versions = ["2.4.0", "2.4"]
2945 [545, [convert_lst_literalparam]],
2950 [550, [convert_fontenc]],
2957 [557, [convert_vcsinfo]],
2958 [558, [removeFrontMatterStyles]],
2961 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2965 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2966 [566, [convert_hebrew_parentheses]],
2972 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2973 [573, [convert_inputencoding_namechange]],
2974 [574, [convert_ruby_module, convert_utf8_japanese]],
2975 [575, [convert_lineno]],
2977 [577, [convert_linggloss]],
2981 [581, [convert_osf]],
2982 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
2985 revert = [[581, [revert_CantarellFont,revert_FiraFont]],
2986 [580, [revert_texfontopts,revert_osf]],
2987 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2988 [578, [revert_babelfont]],
2989 [577, [revert_drs]],
2990 [576, [revert_linggloss, revert_subexarg]],
2991 [575, [revert_new_languages]],
2992 [574, [revert_lineno]],
2993 [573, [revert_ruby_module, revert_utf8_japanese]],
2994 [572, [revert_inputencoding_namechange]],
2995 [571, [revert_notoFonts]],
2996 [570, [revert_cmidruletrimming]],
2997 [569, [revert_bibfileencodings]],
2998 [568, [revert_tablestyle]],
2999 [567, [revert_soul]],
3000 [566, [revert_malayalam]],
3001 [565, [revert_hebrew_parentheses]],
3002 [564, [revert_AdobeFonts]],
3003 [563, [revert_lformatinfo]],
3004 [562, [revert_listpargs]],
3005 [561, [revert_l7ninfo]],
3006 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3007 [559, [revert_timeinfo, revert_namenoextinfo]],
3008 [558, [revert_dateinfo]],
3009 [557, [addFrontMatterStyles]],
3010 [556, [revert_vcsinfo]],
3011 [555, [revert_bibencoding]],
3012 [554, [revert_vcolumns]],
3013 [553, [revert_stretchcolumn]],
3014 [552, [revert_tuftecite]],
3015 [551, [revert_floatpclass, revert_floatalignment]],
3016 [550, [revert_nospellcheck]],
3017 [549, [revert_fontenc]],
3018 [548, []],# dummy format change
3019 [547, [revert_lscape]],
3020 [546, [revert_xcharter]],
3021 [545, [revert_paratype]],
3022 [544, [revert_lst_literalparam]]
3026 if __name__ == "__main__":