1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # NOTE(review): the iteration over fontmap's packages (and the branch
    # for an empty option list) is elided in this excerpt; `pkg` presumably
    # iterates fontmap's keys — confirm against the upstream file.
    if len(fontmap[pkg]) > 0:
        # collected options become a single LaTeX optional argument
        xoption = "[" + ",".join(fontmap[pkg]) + "]"
    preamble = "\\usepackage%s{%s}" % (xoption, pkg)
    add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Build the unique lookup key for a package together with its options "
    # The options are dash-joined, then attached to the package name
    # after a colon, e.g. createkey('plex-sans', ['scaled']) -> 'plex-sans:scaled'.
    return ':'.join([pkg, "-".join(options)])
# NOTE(review): fragment of the fontinfo class — the enclosing `class`
# statement and the method headers (__init__ and the key-building
# method) are elided in this excerpt.
self.fontname = None    # key into font2pkgmap
self.fonttype = None    # one of: roman, sans, typewriter, math
self.scaletype = None   # None, 'sf' or 'tt' (selects which *_scale header applies)
self.scaleopt = None    # None, 'scaled', 'scale' (package option used for scaling)
self.pkgkey = None      # key into pkg2fontmap
self.osfopt = None      # None, or the package option enabling old-style figures
# (separate method) recompute the lookup key from package + options
self.pkgkey = createkey(self.package, self.options)
# NOTE(review): fragment of the font-mapping container class; the
# enclosing class header and __init__ definition are elided here.
self.font2pkgmap = dict()   # font name -> fontinfo instance
self.pkg2fontmap = dict()   # package key (see createkey) -> font name
self.pkginmap = dict()  # defines, if a map for package exists
def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
    " Expand fontinfo mapping"
    #
    # fontlist: list of fontnames, each element
    #           may contain a ','-separated list of needed options
    #           like e.g. 'IBMPlexSansCondensed,condensed'
    # font_type: one of 'roman', 'sans', 'typewriter', 'math'
    # scale_type: one of None, 'sf', 'tt'
    # pkg: package defining the font. Defaults to fontname if None
    # scaleopt: one of None, 'scale', 'scaled', or some other string
    #           to be used in scale option (e.g. scaled=0.7)
    # osfopt: None or some other string to be used in osf option
    # NOTE(review): the per-font loop, the fontinfo construction and the
    # splitting of each entry into `font_name` + options are elided in this
    # excerpt — confirm against the upstream file.
    fe.fonttype = font_type
    fe.scaletype = scale_type
    fe.fontname = font_name
    fe.scaleopt = scaleopt
    # the package defaults to the font name itself when pkg is None
    fe.package = font_name
    self.font2pkgmap[font_name] = fe
    if fe.pkgkey in self.pkg2fontmap:
        # Repeated the same entry? Check content
        if self.pkg2fontmap[fe.pkgkey] != font_name:
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
    self.pkg2fontmap[fe.pkgkey] = font_name
    self.pkginmap[fe.package] = 1
def getfontname(self, pkg, options):
    " Map a package name plus option list back to the registered font name "
    pkgkey = createkey(pkg, options)
    if not pkgkey in self.pkg2fontmap:
        # NOTE(review): early `return None` elided in this excerpt
    fontname = self.pkg2fontmap[pkgkey]
    if not fontname in self.font2pkgmap:
        # forward and backward maps disagree: internal consistency error
        document.error("Something is wrong in pkgname+options <-> fontname mapping")
    # accept only an exact key match (same options, same order)
    if pkgkey == self.font2pkgmap[fontname].pkgkey:
        # NOTE(review): `return fontname` and the fallback return are elided here
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    # NOTE(review): the construction of the mapping object (`fm`), the
    # 'DejaVu'/'IBM'/'Noto' branch headers and the final `return fm` are
    # elided in this excerpt — confirm against the upstream file.
    for font in fontlist:
        # DejaVu: the package name equals the font name (pkg=None)
        fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
        fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
        fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
        # IBM Plex: weight variants carry their package option after a comma
        fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                              'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                              'IBMPlexSerifSemibold,semibold'],
                             "roman", None, "plex-serif")
        fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                              'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                              'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                             "sans", "sf", "plex-sans", "scale")
        fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                              'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                              'IBMPlexMonoSemibold,semibold'],
                             "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            # Adobe Source fonts support an old-style-figures option
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
        # Noto families (branch header elided above)
        fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                              'NotoSerifThin,thin', 'NotoSerifLight,light',
                              'NotoSerifExtralight,extralight'],
                             "roman", None, "noto-serif", None, "osf")
        fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                              'NotoSansThin,thin', 'NotoSansLight,light',
                              'NotoSansExtralight,extralight'],
                             "sans", "sf", "noto-sans", "scaled")
        fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
def convert_fonts(document, fm):
    " Handle font definition (LaTeX preamble -> native) "
    # NOTE(review): several lines are elided in this excerpt: loop
    # initialisation, not-found breaks/continues, and the bookkeeping for
    # `o`, `pkg`, `oscale`, `osfoption` and `vals` — confirm upstream.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    rscaleopt = re.compile(r'^scaled?=(.*)')
    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        # only \usepackage lines carrying an [options] group are of interest
        if mo == None or mo.group(2) == None:
        options = mo.group(2).replace(' ', '').split(",")
        while o < len(options):
            if options[o] == osfoption:
            # scale options look like 'scale=…' or 'scaled=…'
            mo = rscaleopt.search(options[o])
        # ignore packages we have no font mapping for
        if not pkg in fm.pkginmap:
        fn = fm.getfontname(pkg, options)
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
        fontscale = "\\font_" + fontinfo.scaletype + "_scale"
        fontinfo.scaleval = oscale
        if fontinfo.osfopt == None:
        # NOTE(review): extend() with a string appends the individual
        # characters 'o', 's', 'f' — append("osf") was probably intended.
        options.extend("osf")
        osf = find_token(document.header, "\\font_osf false")
        document.header[osf] = "\\font_osf true"
        # drop the marker comment lyx2lyx left in front of the package line
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # header scale values are stored as zero-padded integer percent
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        # finally switch the \font_<type> header line to the native font name
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
    " Revert native font definition to LaTeX "
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    # NOTE(review): loop initialisation, not-found guards, `xval1`
    # extraction and the function's return value are elided in this
    # excerpt — confirm against the upstream file.
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        if ft == "\\font_math":
        # pick the *_opts header matching this font slot
        regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
        if ft == "\\font_sans":
            regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
        elif ft == "\\font_typewriter":
            regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
        x = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = xopts[1].strip('"').split(",")
        fontmap[val].extend(opts)
        del document.header[x]
        # reset the header font to 'default'
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        if fontinfo.osfopt != None:
            osf = find_token(document.header, "\\font_osf true")
            fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
316 ###############################################################################
318 ### Conversion and reversion routines
320 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing header line: find_token returns -1 and
    # indexing with it would silently corrupt the last header line.
    if i == -1:
        return
    # "auto" becomes "auto-legacy", "default" becomes "auto-legacy-plain"
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing header line: find_token returns -1 and
    # indexing with it would silently corrupt the last header line.
    if i == -1:
        return
    # inverse of convert_inputencoding_namechange()
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # collects package -> options; filled by revert_fonts(). The visible
        # code used `fontmap` without ever initialising it (NameError).
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # collects package -> options; filled by revert_fonts(). The visible
        # code used `fontmap` without ever initialising it (NameError).
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "
    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # collects package -> options; filled by revert_fonts(). The visible
        # code used `fontmap` without ever initialising it (NameError).
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"
    # NOTE(review): the scan loop header, not-found guards and index
    # bookkeeping are elided in this excerpt — confirm upstream.
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    # extract the layout name that follows '\begin_layout '
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # also swallow the blank lines trailing the removed layout
    while document.body[j+1].strip() == '':
    document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"
    # NOTE(review): the textclass guard's `return`, the helper's index
    # bookkeeping (`above`, `below`), the note text inside the inserted
    # inset, the scan loop header and the `first` initialisation are
    # elided in this excerpt — confirm upstream.
    if document.textclass != "elsarticle":
    def insertFrontmatter(prefix, line):
        # helper: wrap the region around `line` in a <prefix>Frontmatter layout
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
                                      '\\begin_inset Note Note',
                                      '\\begin_layout Plain Layout',
                                      '\\end_inset', '', '',
    # the elsarticle styles that belong to the frontmatter region
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # insert End after the last match, Begin before the first one
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "
    # NOTE(review): the scan loop header and the not-found guards are
    # elided in this excerpt — confirm upstream.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
    # advance to the blank line ending the parameter list, then insert there
    while i < j and document.body[i].strip() != '':
    document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "
    # NOTE(review): the scan loop header and the not-found guards are
    # elided in this excerpt — confirm upstream.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
    # drop the 'literal' parameter line inside this inset, if present
    del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "
    # NOTE(review): several guards, the try/except around float(), the
    # `val` extraction from sfscale, the sfoption/ttoption defaults and
    # some else-branch headers are elided in this excerpt.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        sfscale = document.header[sfval].split()
        document.header[sfval] = " ".join(sfscale)
        sf_scale = float(val)
        document.warning("Invalid font_sf_scale value: " + val)
        # NOTE(review): a float is compared against the *string* "100.0",
        # which is always unequal — the float 100.0 was probably intended.
        if sf_scale != "100.0":
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # all three families set -> load the complete paratype bundle
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
        add_to_preamble(document, ["\\usepackage{PTSerif}"])
        document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
        add_to_preamble(document, ["\\usepackage{PTSans}"])
        document.header[j] = document.header[j].replace("PTSans-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
        add_to_preamble(document, ["\\usepackage{PTMono}"])
        document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "
    # NOTE(review): the not-found guards, early returns and the
    # construction of `options` are elided in this excerpt.
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "
    # NOTE(review): the module guard's `return`, the scan loop header and
    # the non-floating `else:` branch header are elided in this excerpt.
    if not "landscape" in document.get_module_list():
    i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
        # floating variant: wrap the environment in \afterpage{...}
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
        document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{afterpage}"])
    # plain variant (branch header elided above)
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
    document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    " Convert default fontenc setting "
    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard against a missing header line: find_token returns -1 and
    # indexing with it would silently rewrite the last header line.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard against a missing header line: find_token returns -1 and
    # indexing with it would silently rewrite the last header line.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "
    # NOTE(review): the surrounding scan loop and the removal of each
    # matched line are elided in this excerpt — confirm upstream.
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "
    # NOTE(review): the scan loop header, not-found guards and the
    # deletion of the matched placement lines are elided in this excerpt.
    del_token(document.header, "\\float_placement class")
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    # placement parameters sit within the first two lines of the inset
    k = find_token(document.body, 'placement class', i, i + 2)
    k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "
    # NOTE(review): the scan loop header, several not-found guards and
    # the deletion of the alignment line are elided in this excerpt.
    # document-wide default alignment (removed from the header)
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    k = find_token(document.body, 'alignment', i, i+4)
    alignment = get_value(document.body, "alignment", k)
    # 'document' means: fall back to the document-wide default
    if alignment == "document":
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    # translate the alignment into the corresponding LaTeX command as ERT
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "
    # NOTE(review): the scan loop header, guards/continues, the command
    # filtering and the `res` initialisation are elided in this excerpt.
    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" %(i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
    cmd = get_value(document.body, "LatexCommand", k)
    # optional prenote/postnote and the mandatory citation key
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" %(i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): the scan loop header and not-found guards are elided
    # in this excerpt — confirm upstream.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # strip the varwidth attribute from every <column …> tag in the inset
    for k in range(i, j):
        if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
            document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
            document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): loop headers, index bookkeeping (i, m, begcell),
    # the initialisation of col_info and of the needarray/needvarwidth/
    # vcand flags, plus several branch bodies are elided in this excerpt.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    # row/column counts come from the <features …> attribute list
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
        m = find_token(document.body, "<column", m)
        width = get_option_value(document.body[m], 'width')
        varwidth = get_option_value(document.body[m], 'varwidth')
        alignment = get_option_value(document.body[m], 'alignment')
        special = get_option_value(document.body[m], 'special')
        col_info.append([width, varwidth, alignment, special, m])
    for row in range(nrows):
        for col in range(ncols):
            m = find_token(document.body, "<cell", m)
            multicolumn = get_option_value(document.body[m], 'multicolumn')
            multirow = get_option_value(document.body[m], 'multirow')
            width = get_option_value(document.body[m], 'width')
            rotate = get_option_value(document.body[m], 'rotate')
            # Check for: linebreaks, multipars, non-standard environments
            endcell = find_token(document.body, "</cell>", begcell)
            if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
            elif count_pars_in_inset(document.body, begcell + 2) > 1:
            elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
            # a candidate cell must not be rotated and must fit a V-column
            if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                    alignment = col_info[col][2]
                    col_line = col_info[col][4]
                    if alignment == "center":
                        vval = ">{\\centering}"
                    elif alignment == "left":
                        vval = ">{\\raggedright}"
                    elif alignment == "right":
                        vval = ">{\\raggedleft}"
                    # varwidth-based V column spanning the line width
                    vval += "V{\\linewidth}"
                    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
            # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
            # with newlines, and we do not want that)
            endcell = find_token(document.body, "</cell>", begcell)
            nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
            nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
            nle = find_end_of_inset(document.body, nl)
            del(document.body[nle:nle+1])
            document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
            document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "
    # NOTE(review): large parts are elided in this excerpt: most entries
    # of the lyx->latex encodings table (including its opening
    # `encodings = {`), the scan loop header and several guards.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # biblatex variants get their encoding via biblio_options instead of ERT
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "utf8-platex" : "utf8",
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
        document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # non-biblatex: wrap the inset in \bgroup\inputencoding{…} … \egroup
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "
    # NOTE(review): the opening of the `types` mapping, the scan loop
    # header and several guards are elided in this excerpt.
    "vcs-revision" : "revision",
    "vcs-tree-revision" : "tree-revision",
    "vcs-author" : "author",
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # only buffer-info args listed in `types` become vcs insets
    if argv not in list(types.keys()):
    document.body[tp] = "type \"vcs\""
    document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "
    # NOTE(review): the scan loop header and several guard lines
    # (not-found checks, type/arg filtering) are elided in this excerpt.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    document.warning("Malformed Info inset. Invalid vcs arg.")
    # fold the vcs inset back into a buffer inset with a 'vcs-' arg prefix
    document.body[tp] = "type \"buffer\""
    document.body[arg] = "arg \"vcs-" + argv + "\""
990 def revert_dateinfo(document):
991 " Revert date info insets to static text. "
993 # FIXME This currently only considers the main language and uses the system locale
994 # Ideally, it should honor context languages and switch the locale accordingly.
996 # The date formats for each language using strftime syntax:
997 # long, short, loclong, locmedium, locshort
999 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1000 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1001 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1002 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1003 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1004 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1005 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1006 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1007 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1008 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1009 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1010 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1011 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1012 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1013 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1014 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1015 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1016 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1017 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1018 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1019 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1020 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1021 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1022 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1023 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1024 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1025 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1026 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1027 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1028 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1029 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1030 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1031 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1032 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1033 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1034 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1035 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1036 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1037 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1038 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1039 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1040 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1041 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1042 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1043 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1044 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1045 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1046 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1047 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1048 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1049 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1050 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1051 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1052 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1053 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1054 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1055 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1056 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1057 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1058 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1059 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1060 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1061 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1062 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1063 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1064 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1065 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1066 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1067 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1068 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1069 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1070 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1071 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1072 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1073 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1074 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1075 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1076 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1077 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1078 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1079 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1080 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1081 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1082 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1083 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1084 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1085 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1086 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1087 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1088 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1089 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1090 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1091 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1092 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1093 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1094 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1095 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1096 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1097 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1098 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1099 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1100 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1103 types = ["date", "fixdate", "moddate" ]
1104 lang = get_value(document.header, "\\language")
1106 document.warning("Malformed LyX document! No \\language header found!")
1111 i = find_token(document.body, "\\begin_inset Info", i+1)
1114 j = find_end_of_inset(document.body, i+1)
1116 document.warning("Malformed LyX document: Could not find end of Info inset.")
1118 tp = find_token(document.body, 'type', i, j)
1119 tpv = get_quoted_value(document.body, "type", tp)
1120 if tpv not in types:
1122 arg = find_token(document.body, 'arg', i, j)
1123 argv = get_quoted_value(document.body, "arg", arg)
1126 if tpv == "fixdate":
1127 datecomps = argv.split('@')
1128 if len(datecomps) > 1:
1130 isodate = datecomps[1]
1131 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1133 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1134 # FIXME if we had the path to the original document (not the one in the tmp dir),
1135 # we could use the mtime.
1136 # elif tpv == "moddate":
1137 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1140 result = dte.isodate()
1141 elif argv == "long":
1142 result = dte.strftime(dateformats[lang][0])
1143 elif argv == "short":
1144 result = dte.strftime(dateformats[lang][1])
1145 elif argv == "loclong":
1146 result = dte.strftime(dateformats[lang][2])
1147 elif argv == "locmedium":
1148 result = dte.strftime(dateformats[lang][3])
1149 elif argv == "locshort":
1150 result = dte.strftime(dateformats[lang][4])
1152 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1153 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1154 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1155 fmt = re.sub('[^\'%]d', '%d', fmt)
1156 fmt = fmt.replace("'", "")
1157 result = dte.strftime(fmt)
1158 if sys.version_info < (3,0):
1159 # In Python 2, datetime module works with binary strings,
1160 # our dateformat strings are utf8-encoded:
1161 result = result.decode('utf-8')
1162 document.body[i : j+1] = [result]
1165 def revert_timeinfo(document):
1166 " Revert time info insets to static text. "
# Replaces Info insets of type time/fixtime/modtime in document.body with the
# formatted time as plain text, using the per-language strftime patterns below.
1168 # FIXME This currently only considers the main language and uses the system locale
1169 # Ideally, it should honor context languages and switch the locale accordingly.
1170 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1173 # The time formats for each language using strftime syntax:
# Each entry maps a LyX language name to [long format, short format].
1176 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
1177 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
1178 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1179 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1180 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
1181 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1182 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1183 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
1184 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
1185 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1186 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
1187 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
1188 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1189 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
1190 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
1191 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
1192 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
1193 "breton" : ["%H:%M:%S %Z", "%H:%M"],
1194 "british" : ["%H:%M:%S %Z", "%H:%M"],
1195 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
1196 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1197 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
1198 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
1199 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
1200 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1201 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
1202 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
1203 "czech" : ["%H:%M:%S %Z", "%H:%M"],
1204 "danish" : ["%H.%M.%S %Z", "%H.%M"],
1205 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
1206 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
1207 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1208 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
1209 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
1210 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
1211 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
1212 "french" : ["%H:%M:%S %Z", "%H:%M"],
1213 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
1214 "galician" : ["%H:%M:%S %Z", "%H:%M"],
1215 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
1216 "german" : ["%H:%M:%S %Z", "%H:%M"],
1217 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
1218 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
1219 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1220 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
1221 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1222 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
1223 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
1224 "irish" : ["%H:%M:%S %Z", "%H:%M"],
1225 "italian" : ["%H:%M:%S %Z", "%H:%M"],
1226 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
1227 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
1228 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1229 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
1230 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1231 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1232 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
1233 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1234 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
1235 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1236 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1237 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1238 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1239 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1240 "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1241 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1242 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1243 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1244 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1245 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1246 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1247 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1248 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1249 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1250 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1251 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1252 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1253 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1254 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1255 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1256 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1257 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1258 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1259 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1260 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1261 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1262 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1263 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1264 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1265 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1266 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1267 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1268 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1269 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1270 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1271 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1272 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1273 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1274 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1275 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1276 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1277 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
1280 types = ["time", "fixtime", "modtime" ]
1282 i = find_token(document.header, "\\language", 0)
1284 # this should not happen
1285 document.warning("Malformed LyX document! No \\language header found!")
1287 lang = get_value(document.header, "\\language", i)
# Scan the body for Info insets; only time/fixtime/modtime types are handled.
1291 i = find_token(document.body, "\\begin_inset Info", i+1)
1294 j = find_end_of_inset(document.body, i+1)
1296 document.warning("Malformed LyX document: Could not find end of Info inset.")
1298 tp = find_token(document.body, 'type', i, j)
1299 tpv = get_quoted_value(document.body, "type", tp)
1300 if tpv not in types:
1302 arg = find_token(document.body, 'arg', i, j)
1303 argv = get_quoted_value(document.body, "arg", arg)
# Default to "now"; a fixtime inset carries its fixed time after '@' in the arg.
1305 dtme = datetime.now()
1307 if tpv == "fixtime":
1308 timecomps = argv.split('@')
1309 if len(timecomps) > 1:
1311 isotime = timecomps[1]
1312 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1314 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
# Fall back to HH:MM if no seconds are present in the stored time.
1316 m = re.search('(\d\d):(\d\d)', isotime)
1318 tme = time(int(m.group(1)), int(m.group(2)))
1319 # FIXME if we had the path to the original document (not the one in the tmp dir),
1320 # we could use the mtime.
1321 # elif tpv == "moddate":
1322 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1325 result = tme.isoformat()
1326 elif argv == "long":
1327 result = tme.strftime(timeformats[lang][0])
1328 elif argv == "short":
1329 result = tme.strftime(timeformats[lang][1])
# Custom format: translate Qt-style time tokens (HH/hh/mm/ss/zzz/t/AP/ap)
# into strftime directives before formatting.
1331 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1332 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1333 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1334 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1335 fmt = fmt.replace("'", "")
# NOTE(review): this branch formats `dte`, but this function builds `tme`/`dtme`
# (cf. revert_dateinfo above, which uses `dte`). Looks like a copy-paste slip
# that would raise NameError here — confirm and rename to `tme`.
1336 result = dte.strftime(fmt)
# NOTE(review): assigning a plain string to a list slice splices it character by
# character; revert_dateinfo wraps it as `[result]`. Confirm `[result]` intended.
1337 document.body[i : j+1] = result
1340 def revert_namenoextinfo(document):
1341 " Merge buffer Info inset type name-noext to name. "
# Older formats only know the "name" buffer-info argument, so rewrite every
# Info inset whose arg is "name-noext" to use arg "name" instead.
1345 i = find_token(document.body, "\\begin_inset Info", i+1)
1348 j = find_end_of_inset(document.body, i+1)
1350 document.warning("Malformed LyX document: Could not find end of Info inset.")
1352 tp = find_token(document.body, 'type', i, j)
1353 tpv = get_quoted_value(document.body, "type", tp)
1356 arg = find_token(document.body, 'arg', i, j)
1357 argv = get_quoted_value(document.body, "arg", arg)
1358 if argv != "name-noext":
# Replace the whole arg line; only the argument value changes.
1360 document.body[arg] = "arg \"name\""
1363 def revert_l7ninfo(document):
1364 " Revert l7n Info inset to text. "
# Replaces localization (l7n) Info insets with their argument as plain text,
# stripped of GUI decoration (trailing colons and accelerator markers).
1368 i = find_token(document.body, "\\begin_inset Info", i+1)
1371 j = find_end_of_inset(document.body, i+1)
1373 document.warning("Malformed LyX document: Could not find end of Info inset.")
1375 tp = find_token(document.body, 'type', i, j)
1376 tpv = get_quoted_value(document.body, "type", tp)
1379 arg = find_token(document.body, 'arg', i, j)
1380 argv = get_quoted_value(document.body, "arg", arg)
1381 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# "</amp;>" is a temporary placeholder protecting literal " & " while lone
# accelerator ampersands are deleted.
1382 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning the string `argv` to a list slice splices it character
# by character into document.body; confirm whether `[argv]` was intended.
1383 document.body[i : j+1] = argv
1386 def revert_listpargs(document):
1387 " Reverts listpreamble arguments to TeX-code "
# Moves the contents of each "Argument listpreamble:" inset into an ERT inset
# wrapped in braces at the start of the containing paragraph.
1390 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1393 j = find_end_of_inset(document.body, i)
1394 # Find containing paragraph layout
1395 parent = get_containing_layout(document.body, i)
1397 document.warning("Malformed LyX document: Can't find parent paragraph layout")
1400 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1401 endPlain = find_end_of_layout(document.body, beginPlain)
1402 content = document.body[beginPlain + 1 : endPlain]
# Delete the Argument inset, then re-insert its content as ERT at the
# paragraph start.
1403 del document.body[i:j+1]
1404 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1405 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
# NOTE(review): `parbeg` is presumably derived from `parent` (paragraph start
# position); its assignment is not visible in this listing — confirm.
1406 document.body[parbeg : parbeg] = subst
1409 def revert_lformatinfo(document):
1410 " Revert layout format Info inset to text. "
# Replaces lyxinfo/layoutformat Info insets with the literal layout format
# number of the target (2.3) format.
1414 i = find_token(document.body, "\\begin_inset Info", i+1)
1417 j = find_end_of_inset(document.body, i+1)
1419 document.warning("Malformed LyX document: Could not find end of Info inset.")
1421 tp = find_token(document.body, 'type', i, j)
1422 tpv = get_quoted_value(document.body, "type", tp)
1423 if tpv != "lyxinfo":
1425 arg = find_token(document.body, 'arg', i, j)
1426 argv = get_quoted_value(document.body, "arg", arg)
1427 if argv != "layoutformat":
# NOTE(review): slice-assigning the string "69" inserts two one-character
# lines ("6", "9"); confirm whether `["69"]` was intended.
1430 document.body[i : j+1] = "69"
1433 def convert_hebrew_parentheses(document):
1434 """ Swap opening/closing parentheses in Hebrew text.
1436 Up to LyX 2.4, "(" was used as closing parenthesis and
1437 ")" as opening parenthesis for Hebrew in the LyX source.
1439 # print("convert hebrew parentheses")
# Track the current language per layout nesting level; the top of the stack
# is the language in effect on the current line.
1440 current_languages = [document.language]
1441 for i, line in enumerate(document.body):
1442 if line.startswith('\\lang '):
# NOTE(review): lstrip takes a *set* of characters, not a prefix — any leading
# character from {\, l, a, n, g, space} is stripped, mangling names such as
# "ngerman" or "latin". Harmless here only because the result is compared
# against the exact value 'hebrew' (which survives intact); consider
# line[6:].strip() instead.
1443 current_languages[-1] = line.lstrip('\\lang ')
1444 elif line.startswith('\\begin_layout'):
1445 current_languages.append(current_languages[-1])
1446 # print (line, current_languages[-1])
1447 elif line.startswith('\\end_layout'):
1448 current_languages.pop()
1449 elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
# Three-step swap via a NUL placeholder so '(' and ')' trade places safely.
1450 document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
1453 def revert_hebrew_parentheses(document):
1454 " Store parentheses in Hebrew text reversed"
1455 # This only exists to keep the convert/revert naming convention
# The swap is an involution, so converting again restores the 2.3 encoding.
1456 convert_hebrew_parentheses(document)
1459 def revert_malayalam(document):
1460 " Set the document language to English but assure Malayalam output "
# Delegates to the generic language-revert helper with no babel name.
1462 revert_language(document, "malayalam", "", "malayalam")
1465 def revert_soul(document):
1466 " Revert soul module flex insets to ERT "
# If any soul flex inset is present, load the soul package (and color, which
# \hl additionally needs), then turn each flex inset into its LaTeX command.
1468 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
1471 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1473 add_to_preamble(document, ["\\usepackage{soul}"])
1475 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1477 add_to_preamble(document, ["\\usepackage{color}"])
# Map each flex inset name to the corresponding soul command.
1479 revert_flex_inset(document.body, "Spaceletters", "\\so")
1480 revert_flex_inset(document.body, "Strikethrough", "\\st")
1481 revert_flex_inset(document.body, "Underline", "\\ul")
1482 revert_flex_inset(document.body, "Highlight", "\\hl")
1483 revert_flex_inset(document.body, "Capitalize", "\\caps")
1486 def revert_tablestyle(document):
1487 " Remove tablestyle params "
# The \tablestyle header setting does not exist in earlier formats; drop it.
1490 i = find_token(document.header, "\\tablestyle")
1492 del document.header[i]
1495 def revert_bibfileencodings(document):
1496 " Revert individual Biblatex bibliography encodings "
# Per-file bibliography encodings are only meaningful with biblatex engines.
# For each bibtex inset: move the bibliography to \addbibresource preamble
# calls (with bibencoding options), emit an ERT \printbibliography, and wrap
# the original inset into a Note so nothing is lost.
1500 i = find_token(document.header, "\\cite_engine", 0)
1502 document.warning("Malformed document! Missing \\cite_engine")
1504 engine = get_value(document.header, "\\cite_engine", i)
1508 if engine in ["biblatex", "biblatex-natbib"]:
1511 # Map lyx to latex encoding names
1515 "armscii8" : "armscii8",
1516 "iso8859-1" : "latin1",
1517 "iso8859-2" : "latin2",
1518 "iso8859-3" : "latin3",
1519 "iso8859-4" : "latin4",
1520 "iso8859-5" : "iso88595",
1521 "iso8859-6" : "8859-6",
1522 "iso8859-7" : "iso-8859-7",
1523 "iso8859-8" : "8859-8",
1524 "iso8859-9" : "latin5",
1525 "iso8859-13" : "latin7",
1526 "iso8859-15" : "latin9",
1527 "iso8859-16" : "latin10",
1528 "applemac" : "applemac",
1530 "cp437de" : "cp437de",
1538 "cp1250" : "cp1250",
1539 "cp1251" : "cp1251",
1540 "cp1252" : "cp1252",
1541 "cp1255" : "cp1255",
1542 "cp1256" : "cp1256",
1543 "cp1257" : "cp1257",
1544 "koi8-r" : "koi8-r",
1545 "koi8-u" : "koi8-u",
1547 "utf8-platex" : "utf8",
1554 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1557 j = find_end_of_inset(document.body, i)
1559 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
# file_encodings holds tab-separated "file encoding" pairs (see split below).
1561 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1565 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1566 opts = get_quoted_value(document.body, "biblatexopts", i, j)
# NOTE(review): split(",") on an empty string yields [''], so this emptiness
# check can never fire — confirm whether `bibfiles == ['']` was meant.
1567 if len(bibfiles) == 0:
1568 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1569 # remove encoding line
1570 k = find_token(document.body, "file_encodings", i, j)
1572 del document.body[k]
1573 # Re-find inset end line
1574 j = find_end_of_inset(document.body, i)
1576 enclist = encodings.split("\t")
1579 ppp = pp.split(" ", 1)
1580 encmap[ppp[0]] = ppp[1]
# One \addbibresource per file, with a bibencoding option where one is known.
1581 for bib in bibfiles:
1582 pr = "\\addbibresource"
1583 if bib in encmap.keys():
1584 pr += "[bibencoding=" + encmap[bib] + "]"
1585 pr += "{" + bib + "}"
1586 add_to_preamble(document, [pr])
1587 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1588 pcmd = "printbibliography"
1590 pcmd += "[" + opts + "]"
1591 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1592 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1593 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1594 "status open", "", "\\begin_layout Plain Layout" ]
1595 repl += document.body[i:j+1]
1596 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1597 document.body[i:j+1] = repl
1603 def revert_cmidruletrimming(document):
1604 " Remove \\cmidrule trimming "
1606 # FIXME: Revert to TeX code?
1609 # first, let's find out if we need to do anything
1610 i = find_token(document.body, '<cell ', i+1)
1613 j = document.body[i].find('trim="')
# Strip the (bottom|top)line(l|r)trim attributes from the table cell tag.
1616 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1617 # remove trim option
1618 document.body[i] = rgx.sub('', document.body[i])
# Local layout definition of the Flex:Ruby inset, used by
# convert_ruby_module/revert_ruby_module below.
# NOTE(review): the `ruby_inset_def = [` assignment header (line 1621) is not
# shown in this listing.
1622 r'### Inserted by lyx2lyx (ruby inset) ###',
1623 r'InsetLayout Flex:Ruby',
1624 r' LyxType charstyle',
1625 r' LatexType command',
1629 r' HTMLInnerTag rb',
1630 r' HTMLInnerAttr ""',
1632 r' LabelString "Ruby"',
1633 r' Decoration Conglomerate',
1635 r' \ifdefined\kanjiskip',
1636 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1637 r' \else \ifdefined\luatexversion',
1638 r' \usepackage{luatexja-ruby}',
1639 r' \else \ifdefined\XeTeXversion',
1640 r' \usepackage{ruby}%',
1642 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1644 r' Argument post:1',
1645 r' LabelString "ruby text"',
1646 r' MenuString "Ruby Text|R"',
1647 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1648 r' Decoration Conglomerate',
1660 def convert_ruby_module(document):
1661 " Use ruby module instead of local module definition "
# Only add the module if the local layout was actually present and removed.
1662 if document.del_local_layout(ruby_inset_def):
1663 document.add_module("ruby")
1665 def revert_ruby_module(document):
1666 " Replace ruby module with local module definition "
# Only append the local layout if the module was actually in use and removed.
1667 if document.del_module("ruby"):
1668 document.append_local_layout(ruby_inset_def)
1671 def convert_utf8_japanese(document):
1672 " Use generic utf8 with Japanese documents."
1673 lang = get_value(document.header, "\\language")
1674 if not lang.startswith("japanese"):
1676 inputenc = get_value(document.header, "\\inputencoding")
# The Japanese-specific utf8 variants map to plain utf8 in the new format.
1677 if ((lang == "japanese" and inputenc == "utf8-platex")
1678 or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
1679 document.set_parameter("inputencoding", "utf8")
1681 def revert_utf8_japanese(document):
1682 " Use Japanese utf8 variants with Japanese documents."
1683 inputenc = get_value(document.header, "\\inputencoding")
1684 if inputenc != "utf8":
1686 lang = get_value(document.header, "\\language")
# Restore the engine-specific utf8 variant expected by older formats.
1687 if lang == "japanese":
1688 document.set_parameter("inputencoding", "utf8-platex")
1689 if lang == "japanese-cjk":
1690 document.set_parameter("inputencoding", "utf8-cjk")
1693 def revert_lineno(document):
1694 " Replace lineno setting with user-preamble code."
# Reads (and deletes) the \lineno_options and \use_lineno header settings and
# re-creates the equivalent \usepackage[...]{lineno} preamble code.
1696 options = get_quoted_value(document.header, "\\lineno_options",
1698 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1701 options = "[" + options + "]"
1702 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
1705 def convert_lineno(document):
1706 " Replace user-preamble code with native lineno support."
# Detect \linenumbers plus the preceding \usepackage{lineno} in the user
# preamble, remove them, and write the native header settings instead.
1709 i = find_token(document.preamble, "\\linenumbers", 1)
1711 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1714 options = usepkg.group(1).strip("[]")
1715 del(document.preamble[i-1:i+1])
1716 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the new header settings just before the \index header entry.
# NOTE(review): `use_lineno` is assigned in lines not visible here — confirm.
1718 k = find_token(document.header, "\\index ")
1720 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1722 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1723 "\\lineno_options %s" % options]
1726 def revert_new_languages(document):
1727 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
1728 and Russian (Petrine orthography)."""
1730 # lyxname: (babelname, polyglossianame)
1731 new_languages = {"azerbaijani": ("azerbaijani", ""),
1732 "bengali": ("", "bengali"),
1733 "churchslavonic": ("", "churchslavonic"),
1734 "oldrussian": ("", "russian"),
1735 "korean": ("", "korean"),
# Collect every new language in use: the document language plus any \lang
# switches found in the body.
1737 used_languages = set()
1738 if document.language in new_languages:
1739 used_languages.add(document.language)
1742 i = find_token(document.body, "\\lang", i+1)
1745 if document.body[i][6:].strip() in new_languages:
# NOTE(review): this adds `document.language`, not the language just matched
# on this body line (`document.body[i][6:].strip()`) — looks like a bug;
# confirm against upstream.
1746 used_languages.add(document.language)
1748 # Korean is already supported via CJK, so leave as-is for Babel
1749 if ("korean" in used_languages
1750 and get_bool_value(document.header, "\\use_non_tex_fonts")
1751 and get_value(document.header, "\\language_package") in ("default", "auto")):
1752 revert_language(document, "korean", "", "korean")
1753 used_languages.discard("korean")
1755 for lang in used_languages:
# NOTE(review): `revert` is not defined in this module's visible scope;
# presumably revert_language(document, lang, *new_languages[lang]) — confirm.
1756 revert(lang, *new_languages[lang])
# Local layout definition for the deprecated Flex:Glosse inset, used by
# convert_linggloss/revert_linggloss below.
# NOTE(review): the `gloss_inset_def = [` assignment header (line 1759) is not
# shown in this listing.
1760 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1761 r'InsetLayout Flex:Glosse',
1763 r' LabelString "Gloss (old version)"',
1764 r' MenuString "Gloss (old version)"',
1765 r' LatexType environment',
1766 r' LatexName linggloss',
1767 r' Decoration minimalistic',
1772 r' CustomPars false',
1773 r' ForcePlain true',
1774 r' ParbreakIsNewline true',
1775 r' FreeSpacing true',
1776 r' Requires covington',
1779 r' \@ifundefined{linggloss}{%',
1780 r' \newenvironment{linggloss}[2][]{',
1781 r' \def\glosstr{\glt #1}%',
1783 r' {\glosstr\glend}}{}',
1786 r' ResetsFont true',
1788 r' Decoration conglomerate',
1789 r' LabelString "Translation"',
1790 r' MenuString "Glosse Translation|s"',
1791 r' Tooltip "Add a translation for the glosse"',
# Local layout definition for the deprecated Flex:Tri-Glosse inset (three-line
# gloss), used by convert_linggloss/revert_linggloss below.
1796 glosss_inset_def = [
1797 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1798 r'InsetLayout Flex:Tri-Glosse',
1800 r' LabelString "Tri-Gloss (old version)"',
1801 r' MenuString "Tri-Gloss (old version)"',
1802 r' LatexType environment',
1803 r' LatexName lingglosss',
1804 r' Decoration minimalistic',
1809 r' CustomPars false',
1810 r' ForcePlain true',
1811 r' ParbreakIsNewline true',
1812 r' FreeSpacing true',
1814 r' Requires covington',
1817 r' \@ifundefined{lingglosss}{%',
1818 r' \newenvironment{lingglosss}[2][]{',
1819 r' \def\glosstr{\glt #1}%',
1821 r' {\glosstr\glend}}{}',
1823 r' ResetsFont true',
1825 r' Decoration conglomerate',
1826 r' LabelString "Translation"',
1827 r' MenuString "Glosse Translation|s"',
1828 r' Tooltip "Add a translation for the glosse"',
1833 def convert_linggloss(document):
1834 " Move old ling glosses to local layout "
# If the document uses the deprecated gloss insets, carry their definitions
# along as local layout so the document still compiles with the new format.
1835 if find_token(document.body, '\\begin_inset Flex Glosse', 0) != -1:
1836 document.append_local_layout(gloss_inset_def)
1837 if find_token(document.body, '\\begin_inset Flex Tri-Glosse', 0) != -1:
1838 document.append_local_layout(glosss_inset_def)
1840 def revert_linggloss(document):
1841 " Revert to old ling gloss definitions "
# Drops the deprecated local gloss layouts, then converts the new Interlinear
# Gloss insets to raw \gloss/\trigloss ERT: the optional Argument 1 and the
# post:1..3 mandatory arguments become the bracketed/braced command arguments.
1842 if not "linguistics" in document.get_module_list():
1844 document.del_local_layout(gloss_inset_def)
1845 document.del_local_layout(glosss_inset_def)
1848 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1849 for glosse in glosses:
1852 i = find_token(document.body, glosse, i+1)
1855 j = find_end_of_inset(document.body, i)
1857 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1).
1860 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1861 endarg = find_end_of_inset(document.body, arg)
1864 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1865 if argbeginPlain == -1:
1866 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1868 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1869 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1871 # remove Arg insets and paragraph, if it only contains this inset
1872 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1873 del document.body[arg - 1 : endarg + 4]
1875 del document.body[arg : endarg + 1]
# First mandatory argument (post:1).
1877 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1878 endarg = find_end_of_inset(document.body, arg)
1881 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1882 if argbeginPlain == -1:
1883 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1885 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1886 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1888 # remove Arg insets and paragraph, if it only contains this inset
1889 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1890 del document.body[arg - 1 : endarg + 4]
1892 del document.body[arg : endarg + 1]
# Second mandatory argument (post:2).
1894 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1895 endarg = find_end_of_inset(document.body, arg)
1898 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1899 if argbeginPlain == -1:
1900 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1902 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1903 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1905 # remove Arg insets and paragraph, if it only contains this inset
1906 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1907 del document.body[arg - 1 : endarg + 4]
1909 del document.body[arg : endarg + 1]
# Third mandatory argument (post:3, only used by the 3-line variant).
1911 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
1912 endarg = find_end_of_inset(document.body, arg)
1915 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1916 if argbeginPlain == -1:
1917 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
1919 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1920 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
1922 # remove Arg insets and paragraph, if it only contains this inset
1923 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1924 del document.body[arg - 1 : endarg + 4]
1926 del document.body[arg : endarg + 1]
# NOTE(review): the assignment of `cmd` (\gloss vs \trigloss) is in lines not
# visible in this listing — confirm.
1929 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
1932 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1933 endInset = find_end_of_inset(document.body, i)
1934 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
# Wrap the inset body: \cmd[opt]{ ... }{arg1}{arg2}[{arg3}] as ERT.
1935 precontent = put_cmd_in_ert(cmd)
1936 if len(optargcontent) > 0:
1937 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
1938 precontent += put_cmd_in_ert("{")
1940 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
1941 if cmd == "\\trigloss":
1942 postcontent += put_cmd_in_ert("}{") + marg3content
1943 postcontent += put_cmd_in_ert("}")
1945 document.body[endPlain:endInset + 1] = postcontent
1946 document.body[beginPlain + 1:beginPlain] = precontent
1947 del document.body[i : beginPlain + 1]
1949 document.append_local_layout("Requires covington")
1954 def revert_subexarg(document):
1955 " Revert linguistic subexamples with argument to ERT "
# Converts runs of consecutive Subexample layouts into an ERT-wrapped
# \begin{subexamples}[opt] ... \end{subexamples} environment, with an
# ERT \item in front of each former Subexample paragraph.
1957 if not "linguistics" in document.get_module_list():
1963 i = find_token(document.body, "\\begin_layout Subexample", i+1)
1966 j = find_end_of_layout(document.body, i)
1968 document.warning("Malformed LyX document: Can't find end of Subexample layout")
1971 # check for consecutive layouts
1972 k = find_token(document.body, "\\begin_layout", j)
1973 if k == -1 or document.body[k] != "\\begin_layout Subexample":
1975 j = find_end_of_layout(document.body, k)
1977 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Optional argument of the first Subexample becomes the environment option.
1980 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1984 endarg = find_end_of_inset(document.body, arg)
1986 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1987 if argbeginPlain == -1:
1988 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1990 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1991 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
1993 # remove Arg insets and paragraph, if it only contains this inset
1994 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1995 del document.body[arg - 1 : endarg + 4]
1997 del document.body[arg : endarg + 1]
1999 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2001 # re-find end of layout
2002 j = find_end_of_layout(document.body, i)
2004 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2007 # check for consecutive layouts
2008 k = find_token(document.body, "\\begin_layout", j)
2009 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each following Subexample becomes a Standard paragraph starting with \item.
2011 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2012 j = find_end_of_layout(document.body, k)
2014 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last Subexample and open it before the first.
2017 endev = put_cmd_in_ert("\\end{subexamples}")
2019 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2020 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2021 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2023 document.append_local_layout("Requires covington")
# NOTE(review): extraction-damaged numbered listing — the embedded line
# numbers jump repeatedly (2028->2030, 2182->2184, ...), so lines are
# missing: the outer `for drs in drses:` / `while True:` headers, the guard
# `return`/`continue` statements, and crucially the `cmd = "..."`
# assignments that should follow each `elif drs == ...:` branch at
# 2184-2195. TODO recover from the original file; tokens kept byte-identical.
#
# Purpose (per the visible docstring): revert the linguistics DRS flex
# insets (DRS, DRS*, IfThen-DRS, Cond-DRS, QDRS, NegDRS, SDRS) to the
# corresponding drs.sty LaTeX commands in ERT.
2027 def revert_drs(document):
2028 " Revert DRS insets (linguistics) to ERT "
2030 if not "linguistics" in document.get_module_list():
# All DRS inset variants this reverter handles.
2034 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2035 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2036 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2037 "\\begin_inset Flex SDRS"]
2041 i = find_token(document.body, drs, i+1)
2044 j = find_end_of_inset(document.body, i)
2046 document.warning("Malformed LyX document: Can't find end of DRS inset")
2049 # Check for arguments
# Argument 1: first pre-argument ({referents}); extracted then deleted.
2050 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2051 endarg = find_end_of_inset(document.body, arg)
2054 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2055 if argbeginPlain == -1:
2056 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2058 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2059 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2061 # remove Arg insets and paragraph, if it only contains this inset
2062 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2063 del document.body[arg - 1 : endarg + 4]
2065 del document.body[arg : endarg + 1]
# After each deletion the inset end moved, so re-find it.
2068 j = find_end_of_inset(document.body, i)
2070 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2: second pre-argument (only emitted for SDRS below).
2073 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2074 endarg = find_end_of_inset(document.body, arg)
2077 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2078 if argbeginPlain == -1:
2079 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2081 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2082 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2084 # remove Arg insets and paragraph, if it only contains this inset
2085 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2086 del document.body[arg - 1 : endarg + 4]
2088 del document.body[arg : endarg + 1]
2091 j = find_end_of_inset(document.body, i)
2093 document.warning("Malformed LyX document: Can't find end of DRS inset")
# post:1 .. post:4 arguments: trailing braces of the multi-argument
# commands (\ifdrs/\condrs/\qdrs). Same extract-then-delete pattern.
2096 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2097 endarg = find_end_of_inset(document.body, arg)
2098 postarg1content = []
2100 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2101 if argbeginPlain == -1:
2102 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2104 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2105 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2107 # remove Arg insets and paragraph, if it only contains this inset
2108 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2109 del document.body[arg - 1 : endarg + 4]
2111 del document.body[arg : endarg + 1]
2114 j = find_end_of_inset(document.body, i)
2116 document.warning("Malformed LyX document: Can't find end of DRS inset")
2119 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2120 endarg = find_end_of_inset(document.body, arg)
2121 postarg2content = []
2123 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2124 if argbeginPlain == -1:
2125 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2127 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2128 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2130 # remove Arg insets and paragraph, if it only contains this inset
2131 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2132 del document.body[arg - 1 : endarg + 4]
2134 del document.body[arg : endarg + 1]
2137 j = find_end_of_inset(document.body, i)
2139 document.warning("Malformed LyX document: Can't find end of DRS inset")
2142 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2143 endarg = find_end_of_inset(document.body, arg)
2144 postarg3content = []
2146 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2147 if argbeginPlain == -1:
2148 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2150 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2151 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2153 # remove Arg insets and paragraph, if it only contains this inset
2154 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2155 del document.body[arg - 1 : endarg + 4]
2157 del document.body[arg : endarg + 1]
2160 j = find_end_of_inset(document.body, i)
2162 document.warning("Malformed LyX document: Can't find end of DRS inset")
2165 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2166 endarg = find_end_of_inset(document.body, arg)
2167 postarg4content = []
2169 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2170 if argbeginPlain == -1:
2171 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2173 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2174 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2176 # remove Arg insets and paragraph, if it only contains this inset
2177 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2178 del document.body[arg - 1 : endarg + 4]
2180 del document.body[arg : endarg + 1]
2182 # The respective LaTeX command
# NOTE(review): the `cmd = "\\..."` assignments that should follow the
# default case and each branch below are missing from this listing.
2184 if drs == "\\begin_inset Flex DRS*":
2186 elif drs == "\\begin_inset Flex IfThen-DRS":
2188 elif drs == "\\begin_inset Flex Cond-DRS":
2190 elif drs == "\\begin_inset Flex QDRS":
2192 elif drs == "\\begin_inset Flex NegDRS":
2194 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT replacement: command + braced pre-arguments ...
2197 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2198 endInset = find_end_of_inset(document.body, i)
2199 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2200 precontent = put_cmd_in_ert(cmd)
2201 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2202 if drs == "\\begin_inset Flex SDRS":
2203 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2204 precontent += put_cmd_in_ert("{")
# ... followed by the post-arguments for the multi-argument commands.
2207 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2208 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2209 if cmd == "\\condrs" or cmd == "\\qdrs":
2210 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2212 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2214 postcontent = put_cmd_in_ert("}")
# Splice ERT around the inset body, then drop the inset shell itself.
2216 document.body[endPlain:endInset + 1] = postcontent
2217 document.body[beginPlain + 1:beginPlain] = precontent
2218 del document.body[i : beginPlain + 1]
2220 document.append_local_layout("Provides covington 1")
2221 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# NOTE(review): extraction-damaged numbered listing — numbering gaps
# (2228->2230, 2232->2234, ...) show that guard `return` lines after each
# warning, the `if j == -1:` checks, and several small statements
# (e.g. appending `sf`/`tw` to `pretext`, the `osf` branch, closing
# '}' of \AtBeginDocument) are missing. Tokens kept byte-identical.
#
# Purpose (per the visible docstring): move \babelfont-based font setup
# (non-TeX fonts + babel language package) out of the header and into the
# user preamble, resetting the header fonts to "default".
2227 def revert_babelfont(document):
2228 " Reverts the use of \\babelfont to user preamble "
# Only relevant for non-TeX (OpenType) fonts with babel as language package.
2230 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2232 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2234 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2236 i = find_token(document.header, '\\language_package', 0)
2238 document.warning("Malformed LyX document: Missing \\language_package.")
2240 if get_value(document.header, "\\language_package", 0) != "babel":
2243 # check font settings
2245 roman = sans = typew = "default"
2247 sf_scale = tt_scale = 100.0
# Pull the roman/sans/typewriter family names out of the header lines and
# reset each to "default"; the regex keeps quoted names with spaces intact.
2249 j = find_token(document.header, "\\font_roman", 0)
2251 document.warning("Malformed LyX document: Missing \\font_roman.")
2253 # We need to use this regex since split() does not handle quote protection
2254 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2255 roman = romanfont[2].strip('"')
2256 romanfont[2] = '"default"'
2257 document.header[j] = " ".join(romanfont)
2259 j = find_token(document.header, "\\font_sans", 0)
2261 document.warning("Malformed LyX document: Missing \\font_sans.")
2263 # We need to use this regex since split() does not handle quote protection
2264 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2265 sans = sansfont[2].strip('"')
2266 sansfont[2] = '"default"'
2267 document.header[j] = " ".join(sansfont)
2269 j = find_token(document.header, "\\font_typewriter", 0)
2271 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2273 # We need to use this regex since split() does not handle quote protection
2274 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2275 typew = ttfont[2].strip('"')
2276 ttfont[2] = '"default"'
2277 document.header[j] = " ".join(ttfont)
# Old-style figures flag plus sans/typewriter scaling factors.
2279 i = find_token(document.header, "\\font_osf", 0)
2281 document.warning("Malformed LyX document: Missing \\font_osf.")
2283 osf = str2bool(get_value(document.header, "\\font_osf", i))
2285 j = find_token(document.header, "\\font_sf_scale", 0)
2287 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2289 sfscale = document.header[j].split()
2292 document.header[j] = " ".join(sfscale)
2295 sf_scale = float(val)
2297 document.warning("Invalid font_sf_scale value: " + val)
2299 j = find_token(document.header, "\\font_tt_scale", 0)
2301 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2303 ttscale = document.header[j].split()
2306 document.header[j] = " ".join(ttscale)
2309 tt_scale = float(val)
2311 document.warning("Invalid font_tt_scale value: " + val)
2313 # set preamble stuff
# \babelfont must run at \AtBeginDocument so babel is fully set up.
2314 pretext = ['%% This document must be processed with xelatex or lualatex!']
2315 pretext.append('\\AtBeginDocument{%')
2316 if roman != "default":
2317 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2318 if sans != "default":
2319 sf = '\\babelfont{sf}['
2320 if sf_scale != 100.0:
2321 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2322 sf += 'Mapping=tex-text]{' + sans + '}'
2324 if typew != "default":
2325 tw = '\\babelfont{tt}'
2326 if tt_scale != 100.0:
2327 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2328 tw += '{' + typew + '}'
2331 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2333 insert_to_preamble(document, pretext)
# NOTE(review): extraction-damaged numbered listing — missing lines include
# the guard `return`s, the `if x == -1: return` after the opts search, and
# the statements between 2371 and 2377 that add `opts`/`osf` to `preamble`.
# Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): revert a native MinionPro roman font
# that carries extra \font_roman_opts to a \usepackage[...]{MinionPro}
# preamble line, resetting the header font to "default".
2336 def revert_minionpro(document):
2337 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies to TeX fonts (non-TeX fonts bail out).
2339 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2341 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2343 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Nothing to do unless extra roman options are present.
2346 regexp = re.compile(r'(\\font_roman_opts)')
2347 x = find_re(document.header, regexp, 0)
2351 # We need to use this regex since split() does not handle quote protection
2352 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2353 opts = romanopts[1].strip('"')
2355 i = find_token(document.header, "\\font_roman", 0)
2357 document.warning("Malformed LyX document: Missing \\font_roman.")
2360 # We need to use this regex since split() does not handle quote protection
2361 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2362 roman = romanfont[1].strip('"')
2363 if roman != "minionpro":
2365 romanfont[1] = '"default"'
2366 document.header[i] = " ".join(romanfont)
# Old-style figures are folded into the package options and switched off
# in the header.
2368 j = find_token(document.header, "\\font_osf true", 0)
2371 preamble = "\\usepackage["
2373 document.header[j] = "\\font_osf false"
2377 preamble += "]{MinionPro}"
2378 add_to_preamble(document, [preamble])
# Finally drop the now-reverted \font_roman_opts header line.
2379 del document.header[x]
# NOTE(review): extraction-damaged numbered listing — guard `return`s,
# `if i == -1:` checks, the `if NonTeXFonts:`/`else:` branch keywords around
# 2415/2417, the `preamble += opts` / `preamble += font + "}"` statements,
# and the scale handling between 2447-2461 are partially missing.
# Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): revert per-family font options
# (\font_roman_opts / \font_sans_opts / \font_typewriter_opts) by emitting
# \set(main|sans|mono)font (fontspec) or \babelfont (babel) preamble lines,
# and reset the header families to "default".
2382 def revert_font_opts(document):
2383 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# Two header switches decide which preamble command family is used below.
2385 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2387 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2389 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2390 i = find_token(document.header, '\\language_package', 0)
2392 document.warning("Malformed LyX document: Missing \\language_package.")
2394 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman family ---
2397 regexp = re.compile(r'(\\font_roman_opts)')
2398 i = find_re(document.header, regexp, 0)
2400 # We need to use this regex since split() does not handle quote protection
2401 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2402 opts = romanopts[1].strip('"')
2403 del document.header[i]
2405 regexp = re.compile(r'(\\font_roman)')
2406 i = find_re(document.header, regexp, 0)
2408 # We need to use this regex since split() does not handle quote protection
2409 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2410 font = romanfont[2].strip('"')
2411 romanfont[2] = '"default"'
2412 document.header[i] = " ".join(romanfont)
2413 if font != "default":
# babel documents get \babelfont, plain fontspec gets \setmainfont.
2415 preamble = "\\babelfont{rm}["
2417 preamble = "\\setmainfont["
2420 preamble += "Mapping=tex-text]{"
2423 add_to_preamble(document, [preamble])
# --- sans family (with \font_sf_scale support) ---
2426 regexp = re.compile(r'(\\font_sans_opts)')
2427 i = find_re(document.header, regexp, 0)
2430 # We need to use this regex since split() does not handle quote protection
2431 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2432 opts = sfopts[1].strip('"')
2433 del document.header[i]
2435 regexp = re.compile(r'(\\font_sf_scale)')
2436 i = find_re(document.header, regexp, 0)
2438 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2439 regexp = re.compile(r'(\\font_sans)')
2440 i = find_re(document.header, regexp, 0)
2442 # We need to use this regex since split() does not handle quote protection
2443 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2444 font = sffont[2].strip('"')
2445 sffont[2] = '"default"'
2446 document.header[i] = " ".join(sffont)
2447 if font != "default":
2449 preamble = "\\babelfont{sf}["
2451 preamble = "\\setsansfont["
# A scale of e.g. "85" becomes the fontspec option Scale=0.85.
2455 preamble += "Scale=0."
2456 preamble += scaleval
2458 preamble += "Mapping=tex-text]{"
2461 add_to_preamble(document, [preamble])
# --- typewriter family (with \font_tt_scale support) ---
2464 regexp = re.compile(r'(\\font_typewriter_opts)')
2465 i = find_re(document.header, regexp, 0)
2468 # We need to use this regex since split() does not handle quote protection
2469 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2470 opts = ttopts[1].strip('"')
2471 del document.header[i]
2473 regexp = re.compile(r'(\\font_tt_scale)')
2474 i = find_re(document.header, regexp, 0)
2476 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2477 regexp = re.compile(r'(\\font_typewriter)')
2478 i = find_re(document.header, regexp, 0)
2480 # We need to use this regex since split() does not handle quote protection
2481 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2482 font = ttfont[2].strip('"')
2483 ttfont[2] = '"default"'
2484 document.header[i] = " ".join(ttfont)
2485 if font != "default":
2487 preamble = "\\babelfont{tt}["
2489 preamble = "\\setmonofont["
2493 preamble += "Scale=0."
2494 preamble += scaleval
2496 preamble += "Mapping=tex-text]{"
2499 add_to_preamble(document, [preamble])
# NOTE(review): extraction-damaged numbered listing — missing lines include
# the guard `return`s, the `osf = ...` assignment near 2513-2517, the
# `if i == -1: return` checks, and the preamble-option assembly between
# 2566-2568. Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): when roman/sans/typewriter are the
# plain Noto TeX families (NotoSerif-TLF + matching sans/tt), revert the
# extra \font_roman_opts to a single \usepackage[...]{noto} preamble line.
2502 def revert_plainNotoFonts_xopts(document):
2503 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2505 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2507 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2509 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2513 y = find_token(document.header, "\\font_osf true", 0)
# Bail out early when there are neither extra options nor OSF to revert.
2517 regexp = re.compile(r'(\\font_roman_opts)')
2518 x = find_re(document.header, regexp, 0)
2519 if x == -1 and not osf:
2524 # We need to use this regex since split() does not handle quote protection
2525 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2526 opts = romanopts[1].strip('"')
# This reverter only fires for the complete plain-Noto combination:
# roman NotoSerif-TLF plus the matching sans and typewriter families.
2532 i = find_token(document.header, "\\font_roman", 0)
2536 # We need to use this regex since split() does not handle quote protection
2537 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2538 roman = romanfont[1].strip('"')
2539 if roman != "NotoSerif-TLF":
2542 j = find_token(document.header, "\\font_sans", 0)
2546 # We need to use this regex since split() does not handle quote protection
2547 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2548 sf = sffont[1].strip('"')
2552 j = find_token(document.header, "\\font_typewriter", 0)
2556 # We need to use this regex since split() does not handle quote protection
2557 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2558 tt = ttfont[1].strip('"')
2562 # So we have noto as "complete font"
2563 romanfont[1] = '"default"'
2564 document.header[i] = " ".join(romanfont)
2566 preamble = "\\usepackage["
2568 preamble += "]{noto}"
2569 add_to_preamble(document, [preamble])
# Reset OSF in the header and drop the reverted opts line.
2571 document.header[y] = "\\font_osf false"
2573 del document.header[x]
# NOTE(review): extraction-damaged numbered listing — the guard `return`
# after the warning and the `fontmap = dict()` initialization (gap
# 2583->2587) are missing. Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): revert extended Noto TeX-font
# definitions with extra options via the shared font-mapping machinery,
# then emit the collected packages to the preamble.
2576 def revert_notoFonts_xopts(document):
2577 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# Only applies to TeX fonts.
2579 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2581 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2583 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2587 fm = createFontMapping(['Noto'])
2588 if revert_fonts(document, fm, fontmap, True):
2589 add_preamble_fonts(document, fontmap)
# NOTE(review): extraction-damaged numbered listing — the guard `return`
# and the `fontmap = dict()` initialization (gap 2599->2603) are missing.
# Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): revert IBM (Plex) TeX-font
# definitions with extra options via the shared font-mapping machinery.
2592 def revert_IBMFonts_xopts(document):
2593 " Revert native IBM font definition (with extra options) to LaTeX "
# Only applies to TeX fonts.
2595 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2597 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2599 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2603 fm = createFontMapping(['IBM'])
2605 if revert_fonts(document, fm, fontmap, True):
2606 add_preamble_fonts(document, fontmap)
# NOTE(review): extraction-damaged numbered listing — the guard `return`
# and the `fontmap = dict()` initialization (gap 2616->2620) are missing.
# Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): revert Adobe (Source) TeX-font
# definitions with extra options via the shared font-mapping machinery.
2609 def revert_AdobeFonts_xopts(document):
2610 " Revert native Adobe font definition (with extra options) to LaTeX "
# Only applies to TeX fonts.
2612 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2614 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2616 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2620 fm = createFontMapping(['Adobe'])
2622 if revert_fonts(document, fm, fontmap, True):
2623 add_preamble_fonts(document, fontmap)
# NOTE(review): extraction-damaged numbered listing — missing lines include
# the guard `return`s, the `if NonTeXFonts:`/`if osfval:` branch keywords
# around 2646-2661, and the `else:` branches choosing between the
# sans/typewriter insertions. Tokens kept byte-identical; only comments added.
#
# Purpose (per the visible docstring): split the single \font_osf header
# param into the new per-family \font_roman_osf / \font_sans_osf /
# \font_typewriter_osf parameters (format 581).
2626 def convert_osf(document):
2627 " Convert \\font_osf param to new format "
2630 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2632 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2634 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2636 i = find_token(document.header, '\\font_osf', 0)
2638 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font families whose sans/typewriter variants are known to ship OSF.
2641 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2642 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2644 osfval = str2bool(get_value(document.header, "\\font_osf", i))
# The old param becomes the roman-specific one in place.
2645 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2648 document.header.insert(i, "\\font_sans_osf false")
2649 document.header.insert(i + 1, "\\font_typewriter_osf false")
# For TeX fonts, decide the sans/typewriter OSF flags from the family name.
2653 x = find_token(document.header, "\\font_sans", 0)
2655 document.warning("Malformed LyX document: Missing \\font_sans.")
2657 # We need to use this regex since split() does not handle quote protection
2658 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2659 sf = sffont[1].strip('"')
2661 document.header.insert(i, "\\font_sans_osf true")
2663 document.header.insert(i, "\\font_sans_osf false")
2665 x = find_token(document.header, "\\font_typewriter", 0)
2667 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2669 # We need to use this regex since split() does not handle quote protection
2670 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2671 tt = ttfont[1].strip('"')
# NOTE(review): lines 2673/2675 insert "\font_sans_osf" in what appears to
# be the typewriter branch — presumably should be "\font_typewriter_osf";
# cannot confirm from this damaged listing, verify against upstream.
2673 document.header.insert(i + 1, "\\font_sans_osf true")
2675 document.header.insert(i + 1, "\\font_sans_osf false")
2678 document.header.insert(i, "\\font_sans_osf false")
2679 document.header.insert(i + 1, "\\font_typewriter_osf false")
# NOTE(review): extraction-damaged numbered listing — the guard `return`s
# after each warning and the `if NonTeXFonts:` branch keywords (gaps
# 2698->2700, 2714->2717) are missing. Tokens kept byte-identical.
#
# Purpose (per the visible docstring): collapse the per-family
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf params back into
# the single legacy \font_osf param (reverse of convert_osf).
2682 def revert_osf(document):
2683 " Revert \\font_*_osf params "
2686 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2688 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2690 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
# The roman flag is renamed back in place ...
2692 i = find_token(document.header, '\\font_roman_osf', 0)
2694 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2697 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2698 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# ... while the sans and typewriter flags are OR-ed into osfval and dropped.
2700 i = find_token(document.header, '\\font_sans_osf', 0)
2702 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2705 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2706 del document.header[i]
2708 i = find_token(document.header, '\\font_typewriter_osf', 0)
2710 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2713 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2714 del document.header[i]
# If any family had OSF, force the legacy flag to true.
2717 i = find_token(document.header, '\\font_osf', 0)
2719 document.warning("Malformed LyX document: Missing \\font_osf.")
2721 document.header[i] = "\\font_osf true"
# NOTE(review): extraction-damaged numbered listing — missing lines include
# guard `return`s, the `package = roman` default and `package = "fourier"`
# style assignments after 2804, the `osf = ...` values after each roman
# branch (2817-2827), the `sc`/option concatenation around 2830-2838, and
# the scale/osf additions near 2755-2778. Tokens kept byte-identical.
#
# Purpose (per the visible docstring): revert native TeX font families that
# carry extra \font_*_opts (biolinum sans; a fixed list of roman families)
# to explicit \usepackage[...]{<package>} preamble lines.
2724 def revert_texfontopts(document):
2725 " Revert native TeX font definitions (with extra options) to LaTeX "
# Only applies to TeX fonts.
2727 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2729 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2731 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman families handled by this reverter.
2734 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2736 # First the sf (biolinum only)
2737 regexp = re.compile(r'(\\font_sans_opts)')
2738 x = find_re(document.header, regexp, 0)
2740 # We need to use this regex since split() does not handle quote protection
2741 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2742 opts = sfopts[1].strip('"')
2743 i = find_token(document.header, "\\font_sans", 0)
2745 document.warning("Malformed LyX document: Missing \\font_sans.")
2747 # We need to use this regex since split() does not handle quote protection
2748 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2749 sans = sffont[1].strip('"')
2750 if sans == "biolinum":
2752 sffont[1] = '"default"'
2753 document.header[i] = " ".join(sffont)
2755 j = find_token(document.header, "\\font_sans_osf true", 0)
# Scale (\font_sf_scale) feeds the biolinum `scaled=` package option.
2758 k = find_token(document.header, "\\font_sf_scale", 0)
2760 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2762 sfscale = document.header[k].split()
2765 document.header[k] = " ".join(sfscale)
2768 sf_scale = float(val)
2770 document.warning("Invalid font_sf_scale value: " + val)
2771 preamble = "\\usepackage["
2773 document.header[j] = "\\font_sans_osf false"
2775 if sf_scale != 100.0:
2776 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2778 preamble += "]{biolinum}"
2779 add_to_preamble(document, [preamble])
2780 del document.header[x]
# Now the roman families.
2782 regexp = re.compile(r'(\\font_roman_opts)')
2783 x = find_re(document.header, regexp, 0)
2787 # We need to use this regex since split() does not handle quote protection
2788 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2789 opts = romanopts[1].strip('"')
2791 i = find_token(document.header, "\\font_roman", 0)
2793 document.warning("Malformed LyX document: Missing \\font_roman.")
2796 # We need to use this regex since split() does not handle quote protection
2797 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2798 roman = romanfont[1].strip('"')
2799 if not roman in rmfonts:
2801 romanfont[1] = '"default"'
2802 document.header[i] = " ".join(romanfont)
# Some family names map to a differently-named LaTeX package.
2804 if roman == "utopia":
2806 elif roman == "palatino":
2807 package = "mathpazo"
2808 elif roman == "times":
2809 package = "mathptmx"
2810 elif roman == "xcharter":
2811 package = "XCharter"
# Old-style-figure handling is per-family (different package options).
2813 j = find_token(document.header, "\\font_roman_osf true", 0)
2815 if roman == "cochineal":
2816 osf = "proportional,osf,"
2817 elif roman == "utopia":
2819 elif roman == "garamondx":
2821 elif roman == "libertine":
2823 elif roman == "palatino":
2825 elif roman == "xcharter":
2827 document.header[j] = "\\font_roman_osf false"
# True small-caps flag (\font_sc) also folds into the options for some
# families.
2828 k = find_token(document.header, "\\font_sc true", 0)
2830 if roman == "utopia":
2832 if roman == "palatino" and osf == "":
2834 document.header[k] = "\\font_sc false"
2835 preamble = "\\usepackage["
2838 preamble += "]{" + package + "}"
2839 add_to_preamble(document, [preamble])
2840 del document.header[x]
# Module-level lyx2lyx dispatch tables mapping file-format numbers to the
# conversion/reversion routines defined above.
# NOTE(review): extraction-damaged numbered listing — the `convert = [`
# opener (in the 2847->2849 gap), most intermediate convert entries, and
# the closing brackets of both lists (2925-2927 gap) are missing from this
# view. Tokens kept byte-identical; only comments added.
2847 supported_versions = ["2.4.0", "2.4"]
# Forward conversions, oldest format first (545 -> 581).
2849 [545, [convert_lst_literalparam]],
2854 [550, [convert_fontenc]],
2861 [557, [convert_vcsinfo]],
2862 [558, [removeFrontMatterStyles]],
2865 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2869 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2870 [566, [convert_hebrew_parentheses]],
2876 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2877 [573, [convert_inputencoding_namechange]],
2878 [574, [convert_ruby_module, convert_utf8_japanese]],
2879 [575, [convert_lineno]],
2881 [577, [convert_linggloss]],
2885 [581, [convert_osf]]
# Backward conversions, newest target format first (580 -> 544).
2888 revert = [[580, [revert_texfontopts,revert_osf]],
2889 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2890 [578, [revert_babelfont]],
2891 [577, [revert_drs]],
2892 [576, [revert_linggloss, revert_subexarg]],
2893 [575, [revert_new_languages]],
2894 [574, [revert_lineno]],
2895 [573, [revert_ruby_module, revert_utf8_japanese]],
2896 [572, [revert_inputencoding_namechange]],
2897 [571, [revert_notoFonts]],
2898 [570, [revert_cmidruletrimming]],
2899 [569, [revert_bibfileencodings]],
2900 [568, [revert_tablestyle]],
2901 [567, [revert_soul]],
2902 [566, [revert_malayalam]],
2903 [565, [revert_hebrew_parentheses]],
2904 [564, [revert_AdobeFonts]],
2905 [563, [revert_lformatinfo]],
2906 [562, [revert_listpargs]],
2907 [561, [revert_l7ninfo]],
2908 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2909 [559, [revert_timeinfo, revert_namenoextinfo]],
2910 [558, [revert_dateinfo]],
2911 [557, [addFrontMatterStyles]],
2912 [556, [revert_vcsinfo]],
2913 [555, [revert_bibencoding]],
2914 [554, [revert_vcolumns]],
2915 [553, [revert_stretchcolumn]],
2916 [552, [revert_tuftecite]],
2917 [551, [revert_floatpclass, revert_floatalignment]],
2918 [550, [revert_nospellcheck]],
2919 [549, [revert_fontenc]],
2920 [548, []],# dummy format change
2921 [547, [revert_lscape]],
2922 [546, [revert_xcharter]],
2923 [545, [revert_paratype]],
2924 [544, [revert_lst_literalparam]]
2928 if __name__ == "__main__":