1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # fontmap: package name -> list of collected package options
    # (filled by revert_fonts()); each entry becomes one \usepackage line.
    # NOTE(review): the loop over the fontmap keys (binding 'pkg') and the
    # else-branch giving 'xoption' a default are not visible in this chunk.
        if len(fontmap[pkg]) > 0:
            # Join the collected options into one bracketed option list.
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Return the unique lookup key for a package and its option list"
    # The key joins the package name and the '-'-separated options with ':'.
    return "%s:%s" % (pkg, "-".join(options))
        # Per-font metadata record (fields of the font-info class; the
        # enclosing class and method headers are outside this view).
        self.fontname = None # key into font2pkgmap
        self.fonttype = None # roman,sans,typewriter,math
        self.scaletype = None # None,sf,tt
        self.scaleopt = None # None, 'scaled', 'scale'
        self.pkgkey = None # key into pkg2fontmap
        self.osfopt = None # None, string
        # NOTE(review): the line below belongs to a separate key-building
        # method: it derives pkgkey from the package name and its options.
        self.pkgkey = createkey(self.package, self.options)
        # Lookup tables filled by expandFontMapping():
        self.font2pkgmap = dict() # font name -> font-info record
        self.pkg2fontmap = dict() # package key (see createkey) -> font name
        self.pkginmap = dict() # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
        " Expand fontinfo mapping"
        # font_list: list of fontnames, each element
        #            may contain a ','-separated list of needed options
        #            like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # NOTE(review): the loop over font_list and the construction of the
        # per-font record 'fe' (and binding of 'font_name') are not visible
        # in this chunk.
            fe.fonttype = font_type
            fe.scaletype = scale_type
            fe.fontname = font_name
            fe.scaleopt = scaleopt
            # Empty/None pkg: the package name equals the font name.
            fe.package = font_name
            self.font2pkgmap[font_name] = fe
            if fe.pkgkey in self.pkg2fontmap:
                # Repeated the same entry? Check content
                if self.pkg2fontmap[fe.pkgkey] != font_name:
                    # NOTE(review): 'document' is not a parameter of this
                    # method — presumably resolved elsewhere; verify.
                    document.error("Something is wrong in pkgname+options <-> fontname mapping")
            self.pkg2fontmap[fe.pkgkey] = font_name
            self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        " Map package name + options back to the registered font name"
        pkgkey = createkey(pkg, options)
        # Unknown package/options combination: not one of ours.
        if not pkgkey in self.pkg2fontmap:
        fontname = self.pkg2fontmap[pkgkey]
        # Consistency check: both maps are filled together in expandFontMapping().
        if not fontname in self.font2pkgmap:
            # NOTE(review): 'document' is not a parameter here — verify scope.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        # Accept only an exact package+options match (the return statements
        # are not visible in this chunk).
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    # fontlist: family identifiers such as 'DejaVu', 'IBM', 'Adobe', 'Noto'.
    # NOTE(review): the construction of the returned mapping object 'fm',
    # parts of the per-family dispatch, and the final return are not
    # visible in this chunk.
    for font in fontlist:
        # DejaVu: package name equals font name (pkg argument is None).
        fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
        fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
        fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
        # IBM Plex: weight variants carry their package option after the comma.
        fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                              'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                              'IBMPlexSerifSemibold,semibold'],
                             "roman", None, "plex-serif")
        fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                              'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                              'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                             "sans", "sf", "plex-sans", "scale")
        fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                              'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                              'IBMPlexMonoSemibold,semibold'],
                             "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            # Adobe Source Pro family supports scaling and old-style figures.
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
        # Noto: weight variants, roman additionally supports osf.
        fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                              'NotoSerifThin,thin', 'NotoSerifLight,light',
                              'NotoSerifExtralight,extralight'],
                             "roman", None, "noto-serif", None, "osf")
        fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                              'NotoSansThin,thin', 'NotoSansLight,light',
                              'NotoSansExtralight,extralight'],
                             "sans", "sf", "noto-sans", "scaled")
        fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
def convert_fonts(document, fm):
    " Handle font definition (LaTeX preamble -> native) "
    # Scan the user preamble for \usepackage lines of known font packages
    # and translate them into native \font_* document header settings.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    rscaleopt = re.compile(r'^scaled?=(.*)')
    # NOTE(review): several lines of this loop (initialisation of i, o,
    # osfoption, oscale, pkg and the continue/break exits) are not visible
    # in this chunk.
    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        # Only \usepackage lines with an option list are of interest.
        if mo == None or mo.group(2) == None:
        options = mo.group(2).replace(' ', '').split(",")
        # Pull scale/osf options out of the option list.
        while o < len(options):
            if options[o] == osfoption:
            mo = rscaleopt.search(options[o])
        # Skip packages without a registered font mapping.
        if not pkg in fm.pkginmap:
        fn = fm.getfontname(pkg, options)
        # The preamble line is replaced by native header settings below.
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
            # (else-branch; the if-body is not visible in this chunk)
            fontscale = "\\font_" + fontinfo.scaletype + "_scale"
            fontinfo.scaleval = oscale
        if fontinfo.osfopt == None:
            # NOTE(review): list.extend() with the string "osf" appends the
            # single characters 'o','s','f' — append("osf") was presumably
            # intended; confirm against upstream.
            options.extend("osf")
            osf = find_token(document.header, "\\font_osf false")
            document.header[osf] = "\\font_osf true"
        # Drop the marker comment lyx2lyx left in front of the package line.
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # The header stores the scale as a zero-padded integer percentage.
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        # Write the converted font name into the matching \font_<type> line.
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
    " Revert native font definition to LaTeX "
    # fontlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    # NOTE(review): initialisation of i and several branch/loop exits are
    # not visible in this chunk.
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # the whole '\font_roman|sans|typewriter|math' token
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        # Skip fonts we have no mapping for.
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        # Pick the \font_*_opts header line matching this font type.
        if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
        if ft == "\\font_sans":
            regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
        elif ft == "\\font_typewriter":
            regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
        x = find_re(document.header, regexp, 0)
        # We need to use this regex since split() does not handle quote protection
        xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = xopts[1].strip('"').split(",")
        fontmap[val].extend(opts)
        del document.header[x]
        # Reset the header line to the default font.
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option (header percent -> package fraction)
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        # Transfer an active old-style-figures setting to the package options.
        if fontinfo.osfopt != None:
            osf = find_token(document.header, "\\font_osf true")
            fontmap[val].extend([fontinfo.osfopt])
        # Forward the per-variant options recorded in the mapping.
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
316 ###############################################################################
318 ### Conversion and reversion routines
320 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard the miss case: without it, i == -1 would silently rewrite the
    # LAST header line (Python negative indexing).
    if i == -1:
        return
    # "auto" -> "auto-legacy", "default" -> "auto-legacy-plain"
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard the miss case: without it, i == -1 would silently rewrite the
    # LAST header line (Python negative indexing).
    if i == -1:
        return
    # Reverse of convert_inputencoding_namechange(); replace the longer
    # "auto-legacy-plain" first so "auto-legacy" does not clobber it.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts are native anyway.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts need no package.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package name -> collected options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). The visible code referenced
        # 'fontmap' without initializing it (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts are native anyway.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts need no package.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package name -> collected options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). The visible code referenced
        # 'fontmap' without initializing it (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts are native anyway.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "

    # Only relevant with TeX fonts; non-TeX (system) fonts need no package.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package name -> collected options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). The visible code referenced
        # 'fontmap' without initializing it (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"

    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan-loop header and its exit conditions are not
    # visible in this chunk.
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        # Everything after the token is the layout name.
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Also swallow blank lines trailing the removed layout.
        while document.body[j+1].strip() == '':
        document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"

    # Only the elsarticle class defines these styles.
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Expand over surrounding blank lines so the inserted layout is
        # separated cleanly from its neighbours.
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        # Insert the (Begin|End)Frontmatter layout wrapped in a note.
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
            '\\begin_inset Note Note',
            '\\begin_layout Plain Layout',
            '\\end_inset', '', '',
    # Layouts that belong to the frontmatter part of an elsarticle document.
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan loop and the tracking of 'first'/'k' are not
    # visible in this chunk.
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        k = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Insert End first so the Begin insertion does not shift k.
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "

    # NOTE(review): the loop header and branch exits are not visible here.
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
        # Advance to the end of the inset's parameter list before inserting.
        while i < j and document.body[i].strip() != '':
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "

    # NOTE(review): the loop header and branch exits are not visible here.
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
        # Drop the 'literal' parameter line inside the inset bounds.
        del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "

    # Only relevant with TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Header positions of the ParaType family settings.
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = get_value(document.header, "\\font_sf_scale", 0)
        # Header stores the scale as integer percent; package wants a fraction.
        sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # NOTE(review): the else-branches choosing between the bundle and
        # the individual packages are not visible in this chunk.
        if i1 != -1 and i2 != -1 and i3!= -1:
            # All three families are ParaType: load the complete bundle.
            add_to_preamble(document, ["\\usepackage{paratype}"])
            add_to_preamble(document, ["\\usepackage{PTSerif}"])
            document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
            add_to_preamble(document, ["\\usepackage{PTSans}"])
            document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
            add_to_preamble(document, ["\\usepackage{PTMono}"])
            document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "

    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
        document.header[j] = "\\font_osf false"
    # NOTE(review): the construction of 'options' (e.g. '[osf]') is not
    # visible in this chunk.
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "

    # Nothing to do unless the Landscape module is loaded.
    if not "landscape" in document.get_module_list():

    # NOTE(review): the loop header and exits are not visible here.
        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # Floating variant: wrap the environment in \afterpage{...}.
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
            # (else-branch: plain landscape environment)
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    " Convert default fontenc setting "

    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard the miss case: without it, i == -1 would silently rewrite the
    # LAST header line (Python negative indexing).
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "

    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard the miss case: without it, i == -1 would silently rewrite the
    # LAST header line (Python negative indexing).
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "

    # NOTE(review): only the token search is visible; the removal loop
    # continues beyond the lines visible in this chunk.
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "

    # Drop the global header setting first.
    del_token(document.header, "\\float_placement class")
    # NOTE(review): the loop header and the removal of the per-float lines
    # are not visible in this chunk.
        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
        # placement appears within the first two lines of the inset.
        k = find_token(document.body, 'placement class', i, i + 2)
            k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "

    # Global alignment; delete=True also removes the header line itself.
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    # NOTE(review): the loop header and several branch exits are not
    # visible in this chunk.
        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
        # alignment appears within the first few lines of the inset.
        k = find_token(document.body, 'alignment', i, i+4)
        alignment = get_value(document.body, "alignment", k)
        # 'document' means: inherit the global header setting.
        if alignment == "document":
            alignment = galignment
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
            document.warning("Can't find float layout!")
        # Emit the equivalent LaTeX alignment command as ERT.
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:

    # NOTE(review): the loop header, the 'res' initialisation and several
    # branch exits are not visible in this chunk.
        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of citation inset at line %d!!" %(i))
        k = find_token(document.body, "LatexCommand", i, j)
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
        cmd = get_value(document.body, "LatexCommand", k)
        # Optional pre-/post-notes and the mandatory key.
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
            document.warning("Citation inset at line %d does not have a key!" %(i))
        # Replace command with ERT
            res += "[" + pre + "]"
            res += "[" + post + "]"
        res += "{" + key + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): the loop header and exits are not visible here.
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of tabular.")
        # Strip the varwidth attribute from every column definition.
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): initialisations (i, m, needvarwidth, needarray, begcell,
    # vcand, col_info, ...) and several branch exits are not visible here.
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Could not find end of tabular.")

        # Collect necessary column information
        # Row/column counts are attributes of the <features ...> line.
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])
        for k in range(ncols):
            m = find_token(document.body, "<column", m)
            width = get_option_value(document.body[m], 'width')
            varwidth = get_option_value(document.body[m], 'varwidth')
            alignment = get_option_value(document.body[m], 'alignment')
            special = get_option_value(document.body[m], 'special')
            col_info.append([width, varwidth, alignment, special, m])

        # Walk the cells row by row.
        for row in range(nrows):
            for col in range(ncols):
                m = find_token(document.body, "<cell", m)
                multicolumn = get_option_value(document.body[m], 'multicolumn')
                multirow = get_option_value(document.body[m], 'multirow')
                width = get_option_value(document.body[m], 'width')
                rotate = get_option_value(document.body[m], 'rotate')
                # Check for: linebreaks, multipars, non-standard environments
                endcell = find_token(document.body, "</cell>", begcell)
                if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
                elif count_pars_in_inset(document.body, begcell + 2) > 1:
                elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                # Only qualifying cells get the varwidth 'V' column type.
                if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                    if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                        alignment = col_info[col][2]
                        col_line = col_info[col][4]
                        if alignment == "center":
                            vval = ">{\\centering}"
                        elif alignment == "left":
                            vval = ">{\\raggedright}"
                        elif alignment == "right":
                            vval = ">{\\raggedleft}"
                        vval += "V{\\linewidth}"
                        # Rewrite the <column ...> tag with the special value.
                        document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                # with newlines, and we do not want that)
                endcell = find_token(document.body, "</cell>", begcell)
                nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                    nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                nle = find_end_of_inset(document.body, nl)
                del(document.body[nle:nle+1])
                document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    # Load support packages only when actually needed.
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "

    # Get the cite engine: it decides how the encoding is reverted.
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # NOTE(review): the branch bodies marking biblatex vs. bibtex handling
    # are partly missing from this chunk.
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "utf8-platex" : "utf8",

    # Process the bibtex insets in the body.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encoding = get_quoted_value(document.body, "encoding", i, j)
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        # 'default' needs no reversion.
        if encoding == "default":
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
            # biblatex: pass the encoding as a bibencoding package option.
            h = find_token(document.header, "\\biblio_options", 0)
            biblio_options = get_value(document.header, "\\biblio_options", h)
            if not "bibencoding" in biblio_options:
                document.header[h] += ",bibencoding=%s" % encodings[encoding]
            bs = find_token(document.header, "\\biblatex_bibstyle", 0)
                # this should not happen
                document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
                document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
            # bibtex: wrap the inset in an \inputencoding group via ERT.
            document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
            document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "

    # Buffer-info arguments that move to the new "vcs" inset type.
    # NOTE(review): the dict header/remaining entries and the loop header
    # are not visible in this chunk.
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Leave non-vcs buffer info insets untouched.
        if argv not in list(types.keys()):
        # Rewrite type and strip the "vcs-" prefix from the argument.
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "

    # Valid arguments of a "vcs" Info inset.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    # NOTE(review): the loop header and branch exits are not visible here.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
            document.warning("Malformed Info inset. Invalid vcs arg.")
        # Fold back into a buffer Info inset with a "vcs-" prefixed arg.
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
977 def revert_dateinfo(document):
978 " Revert date info insets to static text. "
980 # FIXME This currently only considers the main language and uses the system locale
981 # Ideally, it should honor context languages and switch the locale accordingly.
983 # The date formats for each language using strftime syntax:
984 # long, short, loclong, locmedium, locshort
986 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
987 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
988 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
989 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
990 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
991 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
992 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
993 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
994 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
995 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
996 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
997 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
998 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
999 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1000 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1001 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1002 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1003 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1004 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1005 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1006 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1007 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1008 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1009 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1010 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1011 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1012 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1013 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1014 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1015 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1016 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1017 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1018 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1019 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1020 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1021 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1022 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1023 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1024 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1025 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1026 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1027 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1028 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1029 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1030 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1031 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1032 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1033 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1034 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1035 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1036 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1037 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1038 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1039 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1040 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1041 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1042 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1043 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1044 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1045 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1046 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1047 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1048 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1049 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1050 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1051 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1052 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1053 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1054 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1055 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1056 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1057 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1058 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1059 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1060 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1061 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1062 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1063 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1064 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1065 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1066 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1067 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1068 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1069 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1070 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1071 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1072 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1073 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1074 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1075 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1076 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1077 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1078 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1079 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1080 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1081 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1082 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1083 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1084 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1085 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1086 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1087 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1090 types = ["date", "fixdate", "moddate" ]
1091 lang = get_value(document.header, "\\language")
1093 document.warning("Malformed LyX document! No \\language header found!")
1098 i = find_token(document.body, "\\begin_inset Info", i+1)
1101 j = find_end_of_inset(document.body, i+1)
1103 document.warning("Malformed LyX document: Could not find end of Info inset.")
1105 tp = find_token(document.body, 'type', i, j)
1106 tpv = get_quoted_value(document.body, "type", tp)
1107 if tpv not in types:
1109 arg = find_token(document.body, 'arg', i, j)
1110 argv = get_quoted_value(document.body, "arg", arg)
1113 if tpv == "fixdate":
1114 datecomps = argv.split('@')
1115 if len(datecomps) > 1:
1117 isodate = datecomps[1]
1118 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1120 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1121 # FIXME if we had the path to the original document (not the one in the tmp dir),
1122 # we could use the mtime.
1123 # elif tpv == "moddate":
1124 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1127 result = dte.isodate()
1128 elif argv == "long":
1129 result = dte.strftime(dateformats[lang][0])
1130 elif argv == "short":
1131 result = dte.strftime(dateformats[lang][1])
1132 elif argv == "loclong":
1133 result = dte.strftime(dateformats[lang][2])
1134 elif argv == "locmedium":
1135 result = dte.strftime(dateformats[lang][3])
1136 elif argv == "locshort":
1137 result = dte.strftime(dateformats[lang][4])
1139 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1140 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1141 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1142 fmt = re.sub('[^\'%]d', '%d', fmt)
1143 fmt = fmt.replace("'", "")
1144 result = dte.strftime(fmt)
1145 if sys.version_info < (3,0):
1146 # In Python 2, datetime module works with binary strings,
1147 # our dateformat strings are utf8-encoded:
1148 result = result.decode('utf-8')
1149 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    " Revert time info insets to static text. "

# FIXME This currently only considers the main language and uses the system locale
# Ideally, it should honor context languages and switch the locale accordingly.
# Also, the time object is "naive", i.e., it does not know of timezones (%Z will
# be empty).

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
# FIXME if we had the path to the original document (not the one in the tmp dir),
# we could use the mtime.
#        elif tpv == "moddate":
#            dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Custom format: translate Qt time-format tokens to strftime tokens.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # BUGFIX: was dte.strftime(fmt); `dte` is undefined in this function
            # (NameError) — the time object in scope is `tme`.
            result = tme.strftime(fmt)
        # BUGFIX: wrap in a list. Assigning a bare string to a list slice
        # splices one body line per *character* (cf. revert_dateinfo, which
        # correctly uses [result]).
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    " Merge buffer Info inset type name-noext to name. "

    k = 0
    while True:
        k = find_token(document.body, "\\begin_inset Info", k+1)
        if k == -1:
            return
        inset_end = find_end_of_inset(document.body, k+1)
        if inset_end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        # Only buffer-type Info insets carry a name-noext argument.
        type_line = find_token(document.body, 'type', k, inset_end)
        if get_quoted_value(document.body, "type", type_line) != "buffer":
            continue
        arg_line = find_token(document.body, 'arg', k, inset_end)
        if get_quoted_value(document.body, "arg", arg_line) != "name-noext":
            continue
        # Downgrade the argument to the plain "name" variant.
        document.body[arg_line] = "arg \"name\""
def revert_l7ninfo(document):
    " Revert l7n Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # BUGFIX: wrap in a list. Assigning a bare string to a list slice
        # splices one body line per character of argv instead of inserting
        # the text as a single line.
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    " Reverts listpreamble arguments to TeX-code "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Argument listpreamble:", pos+1)
        if pos == -1:
            return
        inset_end = find_end_of_inset(document.body, pos)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, pos)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        parbeg = parent[3]
        plain_start = find_token(document.body, "\\begin_layout Plain Layout", pos)
        plain_end = find_end_of_layout(document.body, plain_start)
        content = document.body[plain_start + 1 : plain_end]
        # Drop the Argument inset and re-insert its content as an ERT
        # preamble ("{...}") at the start of the containing paragraph.
        del document.body[pos:inset_end+1]
        ert = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout", "{"]
        ert.extend(content)
        ert.extend(["}", "\\end_layout", "", "\\end_inset", ""])
        document.body[parbeg : parbeg] = ert
def revert_lformatinfo(document):
    " Revert layout format Info inset to text. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded for now
        # BUGFIX: wrap in a list. A bare string is spliced character-wise
        # into the body, yielding two lines "6" and "9" instead of "69".
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Track the active language per paragraph nesting level; the document
    # language is the initial default.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: was line.lstrip('\\lang '), which strips by character
            # *set* and mangles language names made of those characters
            # (e.g. "ngerman" -> "erman"). Slice off the exact prefix instead
            # (consistent with document.body[i][6:].strip() elsewhere).
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # New paragraph inherits the enclosing language.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap "(" and ")" via a NUL placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    " Store parentheses in Hebrew text reversed"
    # This only exists to keep the convert/revert naming convention:
    # the parenthesis swap is its own inverse (applying it twice restores
    # the original text), so reverting simply re-runs the conversion.
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "

    # Delegate to the generic language revert helper; arguments follow the
    # (lyxname, babelname, polyglossianame) convention used elsewhere in
    # this file (cf. the new_languages table in revert_new_languages):
    # no babel name, polyglossia name "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    " Revert soul module flex insets to ERT "

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # The soul package is required as soon as any of the insets occurs.
    if any(find_token(document.body, "\\begin_inset Flex %s" % flex, 0) != -1
           for flex in flexes):
        add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlighting additionally needs the color package.
    if find_token(document.body, "\\begin_inset Flex Highlight", 0) != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    # Rewrite each flex inset to the corresponding soul macro.
    for name, macro in (("Spaceletters", "\\so"),
                        ("Strikethrough", "\\st"),
                        ("Underline", "\\ul"),
                        ("Highlight", "\\hl"),
                        ("Capitalize", "\\caps")):
        revert_flex_inset(document.body, name, macro)
def revert_tablestyle(document):
    " Remove tablestyle params "

    # The \tablestyle header line has no pre-2.4 equivalent; just drop it.
    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        del document.header[pos]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = engine in ["biblatex", "biblatex-natbib"]

    # Map lyx to latex encoding names
    # NOTE(review): this map appears unused below (the original shadowed it
    # with the file_encodings string); kept for fidelity — verify whether it
    # was meant to translate the per-file encodings.
    encodings = {
        "utf8" : "utf8",
        "utf8x" : "utf8x",
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437" : "cp437",
        "cp437de" : "cp437de",
        "cp850" : "cp850",
        "cp852" : "cp852",
        "cp855" : "cp855",
        "cp858" : "cp858",
        "cp862" : "cp862",
        "cp865" : "cp865",
        "cp866" : "cp866",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "pt154" : "pt154",
        "utf8-platex" : "utf8",
        "ascii" : "ascii"
    }

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            continue
        file_encs = get_quoted_value(document.body, "file_encodings", i, j)
        if not file_encs:
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # file_encodings is a tab-separated list of "<bibfile> <encoding>"
            enclist = file_encs.split("\t")
            encmap = dict()
            for pair in enclist:
                parts = pair.split(" ", 1)
                encmap[parts[0]] = parts[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",
                    "status open", "", "\\begin_layout Plain Layout"]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    " Remove \\cmidrule trimming "

    # FIXME: Revert to TeX code?
    # The pattern is loop-invariant — compile it once instead of on every
    # matching cell (was re-compiled inside the loop).
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
1609 r'### Inserted by lyx2lyx (ruby inset) ###',
1610 r'InsetLayout Flex:Ruby',
1611 r' LyxType charstyle',
1612 r' LatexType command',
1616 r' HTMLInnerTag rb',
1617 r' HTMLInnerAttr ""',
1619 r' LabelString "Ruby"',
1620 r' Decoration Conglomerate',
1622 r' \ifdefined\kanjiskip',
1623 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1624 r' \else \ifdefined\luatexversion',
1625 r' \usepackage{luatexja-ruby}',
1626 r' \else \ifdefined\XeTeXversion',
1627 r' \usepackage{ruby}%',
1629 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1631 r' Argument post:1',
1632 r' LabelString "ruby text"',
1633 r' MenuString "Ruby Text|R"',
1634 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1635 r' Decoration Conglomerate',
def convert_ruby_module(document):
    " Use ruby module instead of local module definition "
    # Drop the verbatim local layout that older lyx2lyx inserted for the
    # Ruby inset; if it was present (del_local_layout presumably returns a
    # truthy value on successful removal — verify in LyX.py), enable the
    # native "ruby" module in its place.
    if document.del_local_layout(ruby_inset_def):
        document.add_module("ruby")
def revert_ruby_module(document):
    " Replace ruby module with local module definition "
    # Inverse of convert_ruby_module: if the "ruby" module was enabled,
    # remove it and re-insert the equivalent local layout definition so
    # older LyX versions can still render the inset.
    if document.del_module("ruby"):
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    " Use generic utf8 with Japanese documents."
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    # Each Japanese variant has its own dedicated utf8 input encoding;
    # both collapse to plain utf8 in the new format.
    variant_encoding = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    inputenc = get_value(document.header, "\\inputencoding")
    if variant_encoding.get(lang) == inputenc:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    " Use Japanese utf8 variants with Japanese documents."
    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    # Map each Japanese language variant back to its dedicated encoding.
    variant_encoding = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    lang = get_value(document.header, "\\language")
    if lang in variant_encoding:
        document.set_parameter("inputencoding", variant_encoding[lang])
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."

    # Pull both header lines out unconditionally (delete=True), so the
    # header is clean even when line numbering is disabled.
    options = get_quoted_value(document.header, "\\lineno_options", delete=True)
    enabled = get_bool_value(document.header, "\\use_lineno", delete=True)
    if not enabled:
        return
    bracketed = "[" + options + "]" if options else ""
    add_to_preamble(document, ["\\usepackage%s{lineno}" % bracketed,
                               "\\linenumbers"])
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."
    use_lineno = 0
    options = ""
    # Look for a \linenumbers line that follows a \usepackage{lineno}
    # previously emitted into the user preamble.
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Emit the native header settings just before \index.
    header_lines = ["\\use_lineno %d" % use_lineno]
    if options:
        header_lines.append("\\lineno_options %s" % options)
    k = find_token(document.header, "\\index ")
    document.header[k:k] = header_lines
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    #                lyxname:          (babelname, polyglossianame)
    new_languages = {"azerbaijani":    ("azerbaijani", ""),
                     "bengali":        ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian":     ("", "russian"),
                     "korean":         ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        val = document.body[i][6:].strip()
        if val in new_languages:
            # BUGFIX: record the language actually found in the body (was
            # document.language, which could add a language that is not a
            # key of new_languages and crash the final loop below).
            used_languages.add(val)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
    used_languages.discard("korean")

    for lang in used_languages:
        # BUGFIX: was revert(lang, ...) — `revert` is undefined (NameError);
        # the imported helper is revert_language and takes the document as
        # its first argument.
        revert_language(document, lang, *new_languages[lang])
1747 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1748 r'InsetLayout Flex:Glosse',
1750 r' LabelString "Gloss (old version)"',
1751 r' MenuString "Gloss (old version)"',
1752 r' LatexType environment',
1753 r' LatexName linggloss',
1754 r' Decoration minimalistic',
1759 r' CustomPars false',
1760 r' ForcePlain true',
1761 r' ParbreakIsNewline true',
1762 r' FreeSpacing true',
1763 r' Requires covington',
1766 r' \@ifundefined{linggloss}{%',
1767 r' \newenvironment{linggloss}[2][]{',
1768 r' \def\glosstr{\glt #1}%',
1770 r' {\glosstr\glend}}{}',
1773 r' ResetsFont true',
1775 r' Decoration conglomerate',
1776 r' LabelString "Translation"',
1777 r' MenuString "Glosse Translation|s"',
1778 r' Tooltip "Add a translation for the glosse"',
1783 glosss_inset_def = [
1784 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1785 r'InsetLayout Flex:Tri-Glosse',
1787 r' LabelString "Tri-Gloss (old version)"',
1788 r' MenuString "Tri-Gloss (old version)"',
1789 r' LatexType environment',
1790 r' LatexName lingglosss',
1791 r' Decoration minimalistic',
1796 r' CustomPars false',
1797 r' ForcePlain true',
1798 r' ParbreakIsNewline true',
1799 r' FreeSpacing true',
1801 r' Requires covington',
1804 r' \@ifundefined{lingglosss}{%',
1805 r' \newenvironment{lingglosss}[2][]{',
1806 r' \def\glosstr{\glt #1}%',
1808 r' {\glosstr\glend}}{}',
1810 r' ResetsFont true',
1812 r' Decoration conglomerate',
1813 r' LabelString "Translation"',
1814 r' MenuString "Glosse Translation|s"',
1815 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    " Move old ling glosses to local layout "
    # The deprecated Glosse / Tri-Glosse flex insets are no longer shipped
    # with the linguistics module; append their old definitions as local
    # layout whenever such insets occur in the body, so documents keep
    # compiling after conversion.
    if find_token(document.body, '\\begin_inset Flex Glosse', 0) != -1:
        document.append_local_layout(gloss_inset_def)
    if find_token(document.body, '\\begin_inset Flex Tri-Glosse', 0) != -1:
        document.append_local_layout(glosss_inset_def)
def _revert_gloss_arg(document, token, i, j, argname):
    """Extract and delete one gloss Argument inset.

    Searches for `token` within body range [i, j], returns the content lines
    of its Plain Layout, and removes the inset (together with its wrapping
    paragraph when the inset is the paragraph's only content).
    Returns [] if the argument is absent, None if the inset is malformed
    (a warning is issued in that case).
    """
    arg = find_token(document.body, token, i, j)
    if arg == -1:
        return []
    endarg = find_end_of_inset(document.body, arg)
    argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
    if argbeginPlain == -1:
        document.warning("Malformed LyX document: Can't find %s plain Layout" % argname)
        return None
    argendPlain = find_end_of_inset(document.body, argbeginPlain)
    content = document.body[argbeginPlain + 1 : argendPlain - 2]

    # remove Arg insets and paragraph, if it only contains this inset
    if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
        del document.body[arg - 1 : endarg + 4]
    else:
        del document.body[arg : endarg + 1]
    return content


def revert_linggloss(document):
    " Revert to old ling gloss definitions "
    if not "linguistics" in document.get_module_list():
        return
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    cov_req = False
    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i+1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue

            # Collect the optional argument and the up-to-three gloss lines.
            # (The extraction/deletion pattern was duplicated four times;
            # factored into _revert_gloss_arg with identical semantics.)
            optargcontent = _revert_gloss_arg(document, "\\begin_inset Argument 1", i, j, "optarg")
            if optargcontent is None:
                continue
            marg1content = _revert_gloss_arg(document, "\\begin_inset Argument post:1", i, j, "arg 1")
            if marg1content is None:
                continue
            marg2content = _revert_gloss_arg(document, "\\begin_inset Argument post:2", i, j, "arg 2")
            if marg2content is None:
                continue
            marg3content = _revert_gloss_arg(document, "\\begin_inset Argument post:3", i, j, "arg 3")
            if marg3content is None:
                continue

            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"

            # Rebuild the inset as ERT: \digloss[opt]{l1}{l2} or
            # \trigloss[opt]{l1}{l2}{l3}.
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")

            document.body[endPlain:endInset + 1] = postcontent
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]

            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
def revert_subexarg(document):
    " Revert linguistic subexamples with argument to ERT "

    if not "linguistics" in document.get_module_list():
        return

    cov_req = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Subexample", i+1)
        if i == -1:
            break
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Extend j over any directly following Subexample paragraphs.
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if arg == -1:
            continue
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            continue
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        else:
            del document.body[arg : endarg + 1]

        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")

        # re-find end of layout
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Turn every consecutive Subexample paragraph into a Standard
        # paragraph starting with an ERT \item.
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
            j = find_end_of_layout(document.body, k)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Subexample layout")
                continue

        endev = put_cmd_in_ert("\\end{subexamples}")

        # Close the environment after the last paragraph and open it (plus
        # the first \item) in place of the first Subexample layout.
        document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
            + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")

        if not cov_req:
            document.append_local_layout("Requires covington")
            cov_req = True
# Purpose (apparent): revert the linguistics module's DRS flex insets
# (Discourse Representation Structures, drs/covington LaTeX packages) to raw
# LaTeX (ERT).  Each inset variant maps to a LaTeX command (assignments of
# `cmd` per variant are among the elided lines); up to two "pre" arguments and
# four "post" arguments are extracted and spliced around the inset content.
# NOTE(review): mangled extract -- embedded original line numbers, stripped
# indentation, and numbering gaps showing elided statements (loop headers,
# guards, `cmd = ...` branch bodies).  Not runnable as-is.
2014 def revert_drs(document):
2015 " Revert DRS insets (linguistics) to ERT "
# Guard: linguistics module only (guard body elided).
2017 if not "linguistics" in document.get_module_list():
# All DRS inset variants handled by this revert routine.
2021 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2022 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2023 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2024 "\\begin_inset Flex SDRS"]
# Per-variant scan loop (outer `for drs in drses` / inner `while` elided).
2028 i = find_token(document.body, drs, i+1)
2031 j = find_end_of_inset(document.body, i)
2033 document.warning("Malformed LyX document: Can't find end of DRS inset")
2036 # Check for arguments
# --- Argument 1 (mandatory pre-argument) ---
2037 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2038 endarg = find_end_of_inset(document.body, arg)
2041 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2042 if argbeginPlain == -1:
2043 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2045 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2046 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2048 # remove Arg insets and paragraph, if it only contains this inset
2049 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2050 del document.body[arg - 1 : endarg + 4]
2052 del document.body[arg : endarg + 1]
# Re-find the inset end after each deletion shifts indices.
2055 j = find_end_of_inset(document.body, i)
2057 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument 2 (second pre-argument; used by SDRS, see line 2189 below) ---
2060 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2061 endarg = find_end_of_inset(document.body, arg)
2064 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2065 if argbeginPlain == -1:
2066 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2068 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2069 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2071 # remove Arg insets and paragraph, if it only contains this inset
2072 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2073 del document.body[arg - 1 : endarg + 4]
2075 del document.body[arg : endarg + 1]
2078 j = find_end_of_inset(document.body, i)
2080 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:1 (defaults to [] when the inset is absent) ---
2083 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2084 endarg = find_end_of_inset(document.body, arg)
2085 postarg1content = []
2087 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2088 if argbeginPlain == -1:
2089 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2091 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2092 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2094 # remove Arg insets and paragraph, if it only contains this inset
2095 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2096 del document.body[arg - 1 : endarg + 4]
2098 del document.body[arg : endarg + 1]
2101 j = find_end_of_inset(document.body, i)
2103 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:2 ---
2106 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2107 endarg = find_end_of_inset(document.body, arg)
2108 postarg2content = []
2110 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2111 if argbeginPlain == -1:
2112 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2114 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2115 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2117 # remove Arg insets and paragraph, if it only contains this inset
2118 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2119 del document.body[arg - 1 : endarg + 4]
2121 del document.body[arg : endarg + 1]
2124 j = find_end_of_inset(document.body, i)
2126 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:3 ---
2129 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2130 endarg = find_end_of_inset(document.body, arg)
2131 postarg3content = []
2133 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2134 if argbeginPlain == -1:
2135 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2137 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2138 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2140 # remove Arg insets and paragraph, if it only contains this inset
2141 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2142 del document.body[arg - 1 : endarg + 4]
2144 del document.body[arg : endarg + 1]
2147 j = find_end_of_inset(document.body, i)
2149 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:4 ---
2152 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2153 endarg = find_end_of_inset(document.body, arg)
2154 postarg4content = []
2156 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2157 if argbeginPlain == -1:
2158 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2160 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2161 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2163 # remove Arg insets and paragraph, if it only contains this inset
2164 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2165 del document.body[arg - 1 : endarg + 4]
2167 del document.body[arg : endarg + 1]
2169 # The respective LaTeX command
# NOTE(review): the `cmd = "\\..."` assignments inside these branches are
# elided in this extract; `cmd` values "\\qdrs", "\\condrs", "\\ifdrs" are
# implied by the comparisons at line 2194 below.
2171 if drs == "\\begin_inset Flex DRS*":
2173 elif drs == "\\begin_inset Flex IfThen-DRS":
2175 elif drs == "\\begin_inset Flex Cond-DRS":
2177 elif drs == "\\begin_inset Flex QDRS":
2179 elif drs == "\\begin_inset Flex NegDRS":
2181 elif drs == "\\begin_inset Flex SDRS":
# Splice ERT: command + pre-arguments before the inset content, post-arguments
# and closing braces after it, then drop the inset framing itself.
2184 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2185 endInset = find_end_of_inset(document.body, i)
2186 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2187 precontent = put_cmd_in_ert(cmd)
2188 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2189 if drs == "\\begin_inset Flex SDRS":
2190 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2191 precontent += put_cmd_in_ert("{")
2194 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2195 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2196 if cmd == "\\condrs" or cmd == "\\qdrs":
2197 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2199 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2201 postcontent = put_cmd_in_ert("}")
2203 document.body[endPlain:endInset + 1] = postcontent
2204 document.body[beginPlain + 1:beginPlain] = precontent
2205 del document.body[i : beginPlain + 1]
# Local layout provides covington; the preamble loads drs + covington.
2207 document.append_local_layout("Provides covington 1")
2208 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Purpose (apparent): when the document uses non-TeX fonts with the babel
# language package, move roman/sans/typewriter font choices out of the LyX
# header (resetting them to "default") and emit equivalent \babelfont lines
# into the user preamble, honoring osf and the sf/tt scale factors.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided guards/returns (see numbering gaps).  Not runnable as-is.
2214 def revert_babelfont(document):
2215 " Reverts the use of \\babelfont to user preamble "
# Preconditions: \use_non_tex_fonts must be true and \language_package babel;
# otherwise nothing to do (the early-return bodies are elided here).
2217 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2219 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2221 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2223 i = find_token(document.header, '\\language_package', 0)
2225 document.warning("Malformed LyX document: Missing \\language_package.")
2227 if get_value(document.header, "\\language_package", 0) != "babel":
2230 # check font settings
# Defaults used when a header entry is missing or unset.
2232 roman = sans = typew = "default"
2234 sf_scale = tt_scale = 100.0
# Roman font: read the quoted name, then reset the header entry to "default".
2236 j = find_token(document.header, "\\font_roman", 0)
2238 document.warning("Malformed LyX document: Missing \\font_roman.")
2240 # We need to use this regex since split() does not handle quote protection
2241 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2242 roman = romanfont[2].strip('"')
2243 romanfont[2] = '"default"'
2244 document.header[j] = " ".join(romanfont)
# Sans font: same pattern.
2246 j = find_token(document.header, "\\font_sans", 0)
2248 document.warning("Malformed LyX document: Missing \\font_sans.")
2250 # We need to use this regex since split() does not handle quote protection
2251 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2252 sans = sansfont[2].strip('"')
2253 sansfont[2] = '"default"'
2254 document.header[j] = " ".join(sansfont)
# Typewriter font: same pattern.
2256 j = find_token(document.header, "\\font_typewriter", 0)
2258 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2260 # We need to use this regex since split() does not handle quote protection
2261 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2262 typew = ttfont[2].strip('"')
2263 ttfont[2] = '"default"'
2264 document.header[j] = " ".join(ttfont)
# Old-style figures flag.
2266 i = find_token(document.header, "\\font_osf", 0)
2268 document.warning("Malformed LyX document: Missing \\font_osf.")
2270 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans scale: parse the percentage (value extraction line elided between
# 2276 and 2282; `val` presumably comes from the split fields).
2272 j = find_token(document.header, "\\font_sf_scale", 0)
2274 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2276 sfscale = document.header[j].split()
2279 document.header[j] = " ".join(sfscale)
2282 sf_scale = float(val)
2284 document.warning("Invalid font_sf_scale value: " + val)
# Typewriter scale: same pattern.
2286 j = find_token(document.header, "\\font_tt_scale", 0)
2288 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2290 ttscale = document.header[j].split()
2293 document.header[j] = " ".join(ttscale)
2296 tt_scale = float(val)
2298 document.warning("Invalid font_tt_scale value: " + val)
2300 # set preamble stuff
# \babelfont requires xelatex/lualatex; warn the user in the preamble.
2301 pretext = ['%% This document must be processed with xelatex or lualatex!']
2302 pretext.append('\\AtBeginDocument{%')
2303 if roman != "default":
2304 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2305 if sans != "default":
2306 sf = '\\babelfont{sf}['
2307 if sf_scale != 100.0:
2308 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2309 sf += 'Mapping=tex-text]{' + sans + '}'
2311 if typew != "default":
2312 tw = '\\babelfont{tt}'
2313 if tt_scale != 100.0:
2314 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2315 tw += '{' + typew + '}'
# Old-style figures applied document-wide via fontspec default features.
2318 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2320 insert_to_preamble(document, pretext)
# Purpose (apparent): for TeX-fonts documents whose roman font is "minionpro"
# and which carry a \font_roman_opts header entry, reset the header to
# "default" and emit \usepackage[<opts>...]{MinionPro} to the preamble,
# folding in the osf flag.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided guards and option-string assembly (gaps 2355->2358, 2360->2364).
2323 def revert_minionpro(document):
2324 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies to TeX fonts: bail out when non-TeX fonts are on
# (the branch body is elided here).
2326 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2328 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2330 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Locate the extra roman font options; nothing to do without them.
2333 regexp = re.compile(r'(\\font_roman_opts)')
2334 x = find_re(document.header, regexp, 0)
2338 # We need to use this regex since split() does not handle quote protection
2339 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2340 opts = romanopts[1].strip('"')
2342 i = find_token(document.header, "\\font_roman", 0)
2344 document.warning("Malformed LyX document: Missing \\font_roman.")
2347 # We need to use this regex since split() does not handle quote protection
2348 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2349 roman = romanfont[1].strip('"')
# Only the "minionpro" roman font is handled by this revert routine.
2350 if roman != "minionpro":
2352 romanfont[1] = '"default"'
2353 document.header[i] = " ".join(romanfont)
# osf true maps to a MinionPro package option (assembly elided); the header
# flag is consumed and reset to false.
2355 j = find_token(document.header, "\\font_osf true", 0)
2358 preamble = "\\usepackage["
2360 document.header[j] = "\\font_osf false"
2364 preamble += "]{MinionPro}"
2365 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2366 del document.header[x]
# Purpose (apparent): revert per-family font options (\font_roman_opts,
# \font_sans_opts, \font_typewriter_opts) by deleting them from the header,
# resetting the font to "default", and emitting either \babelfont{rm|sf|tt}
# (babel + non-TeX fonts) or \setmainfont/\setsansfont/\setmonofont lines to
# the preamble, with Scale= options derived from the sf/tt scale headers.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided branch/guard lines and the `opts`/font-name concatenations between
# the visible preamble-building fragments.  Not runnable as-is.
2369 def revert_font_opts(document):
2370 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# Gather the two settings that pick between \babelfont and \set...font.
2372 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2374 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2376 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2377 i = find_token(document.header, '\\language_package', 0)
2379 document.warning("Malformed LyX document: Missing \\language_package.")
2381 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman ---
2384 regexp = re.compile(r'(\\font_roman_opts)')
2385 i = find_re(document.header, regexp, 0)
2387 # We need to use this regex since split() does not handle quote protection
2388 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2389 opts = romanopts[1].strip('"')
2390 del document.header[i]
2392 regexp = re.compile(r'(\\font_roman)')
2393 i = find_re(document.header, regexp, 0)
2395 # We need to use this regex since split() does not handle quote protection
2396 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2397 font = romanfont[2].strip('"')
2398 romanfont[2] = '"default"'
2399 document.header[i] = " ".join(romanfont)
2400 if font != "default":
# Babel + non-TeX fonts -> \babelfont; otherwise fontspec's \setmainfont
# (the selecting condition line is elided between 2400 and 2402).
2402 preamble = "\\babelfont{rm}["
2404 preamble = "\\setmainfont["
2407 preamble += "Mapping=tex-text]{"
2410 add_to_preamble(document, [preamble])
# --- sans ---
2413 regexp = re.compile(r'(\\font_sans_opts)')
2414 i = find_re(document.header, regexp, 0)
2417 # We need to use this regex since split() does not handle quote protection
2418 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2419 opts = sfopts[1].strip('"')
2420 del document.header[i]
# Scale percentage, e.g. "\font_sf_scale 100 100" -> second field.
2422 regexp = re.compile(r'(\\font_sf_scale)')
2423 i = find_re(document.header, regexp, 0)
2425 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2426 regexp = re.compile(r'(\\font_sans)')
2427 i = find_re(document.header, regexp, 0)
2429 # We need to use this regex since split() does not handle quote protection
2430 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2431 font = sffont[2].strip('"')
2432 sffont[2] = '"default"'
2433 document.header[i] = " ".join(sffont)
2434 if font != "default":
2436 preamble = "\\babelfont{sf}["
2438 preamble = "\\setsansfont["
# "Scale=0.<pct>" string building; the guard (presumably scaleval != "100")
# is elided between 2438 and 2442.
2442 preamble += "Scale=0."
2443 preamble += scaleval
2445 preamble += "Mapping=tex-text]{"
2448 add_to_preamble(document, [preamble])
# --- typewriter --- (same shape as sans)
2451 regexp = re.compile(r'(\\font_typewriter_opts)')
2452 i = find_re(document.header, regexp, 0)
2455 # We need to use this regex since split() does not handle quote protection
2456 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2457 opts = ttopts[1].strip('"')
2458 del document.header[i]
2460 regexp = re.compile(r'(\\font_tt_scale)')
2461 i = find_re(document.header, regexp, 0)
2463 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2464 regexp = re.compile(r'(\\font_typewriter)')
2465 i = find_re(document.header, regexp, 0)
2467 # We need to use this regex since split() does not handle quote protection
2468 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2469 font = ttfont[2].strip('"')
2470 ttfont[2] = '"default"'
2471 document.header[i] = " ".join(ttfont)
2472 if font != "default":
2474 preamble = "\\babelfont{tt}["
2476 preamble = "\\setmonofont["
2480 preamble += "Scale=0."
2481 preamble += scaleval
2483 preamble += "Mapping=tex-text]{"
2486 add_to_preamble(document, [preamble])
# Purpose (apparent): when all three families are the plain Noto TeX fonts
# (roman "NotoSerif-TLF", with sans/tt checks below), revert any extra
# \font_roman_opts and/or osf to a single \usepackage[<opts>]{noto} preamble
# line and reset the header entries.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided guards (e.g. the `osf = ...` assignment implied by line 2506, the
# sans/tt name comparisons after 2535/2545, and the option assembly before
# 2555).  Not runnable as-is.
2489 def revert_plainNotoFonts_xopts(document):
2490 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# TeX fonts only; bail when non-TeX fonts are enabled (branch body elided).
2492 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2494 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2496 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2500 y = find_token(document.header, "\\font_osf true", 0)
# Without extra options and without osf there is nothing to revert.
2504 regexp = re.compile(r'(\\font_roman_opts)')
2505 x = find_re(document.header, regexp, 0)
2506 if x == -1 and not osf:
2511 # We need to use this regex since split() does not handle quote protection
2512 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2513 opts = romanopts[1].strip('"')
# This revert only fires when roman/sans/tt together form the "complete"
# plain Noto family (the non-matching early returns are elided).
2519 i = find_token(document.header, "\\font_roman", 0)
2523 # We need to use this regex since split() does not handle quote protection
2524 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2525 roman = romanfont[1].strip('"')
2526 if roman != "NotoSerif-TLF":
2529 j = find_token(document.header, "\\font_sans", 0)
2533 # We need to use this regex since split() does not handle quote protection
2534 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2535 sf = sffont[1].strip('"')
2539 j = find_token(document.header, "\\font_typewriter", 0)
2543 # We need to use this regex since split() does not handle quote protection
2544 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2545 tt = ttfont[1].strip('"')
2549 # So we have noto as "complete font"
2550 romanfont[1] = '"default"'
2551 document.header[i] = " ".join(romanfont)
2553 preamble = "\\usepackage["
2555 preamble += "]{noto}"
2556 add_to_preamble(document, [preamble])
# Consume the osf flag and the opts header line once reverted.
2558 document.header[y] = "\\font_osf false"
2560 del document.header[x]
# Purpose (apparent): revert the extended Noto TeX-font variants via the
# shared fontmap machinery (createFontMapping/revert_fonts, defined elsewhere
# in this file) and dump the collected packages into the preamble.
# NOTE(review): mangled extract -- the `fontmap = dict()` initialization and
# early-return guards are among the elided lines (numbering gaps).
2563 def revert_notoFonts_xopts(document):
2564 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# TeX fonts only (branch body elided).
2566 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2568 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2570 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2574 fm = createFontMapping(['Noto'])
# True -> also revert the extra x-options; fontmap collects package/options.
2575 if revert_fonts(document, fm, fontmap, True):
2576 add_preamble_fonts(document, fontmap)
# Purpose (apparent): revert the native IBM (Plex) TeX-font variants via the
# shared fontmap machinery, mirroring the Noto/Adobe siblings around it.
# NOTE(review): mangled extract -- the `fontmap = dict()` initialization and
# early-return guards are among the elided lines (numbering gaps).
2579 def revert_IBMFonts_xopts(document):
2580 " Revert native IBM font definition (with extra options) to LaTeX "
# TeX fonts only (branch body elided).
2582 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2584 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2586 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2590 fm = createFontMapping(['IBM'])
2592 if revert_fonts(document, fm, fontmap, True):
2593 add_preamble_fonts(document, fontmap)
# Purpose (apparent): revert the native Adobe (Source) TeX-font variants via
# the shared fontmap machinery, mirroring the Noto/IBM siblings above.
# NOTE(review): mangled extract -- the `fontmap = dict()` initialization and
# early-return guards are among the elided lines (numbering gaps).
2596 def revert_AdobeFonts_xopts(document):
2597 " Revert native Adobe font definition (with extra options) to LaTeX "
# TeX fonts only (branch body elided).
2599 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2601 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2603 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2607 fm = createFontMapping(['Adobe'])
2609 if revert_fonts(document, fm, fontmap, True):
2610 add_preamble_fonts(document, fontmap)
# Purpose (apparent): split the single \font_osf header flag into the new
# per-family flags \font_roman_osf / \font_sans_osf / \font_typewriter_osf
# (format 581).  For TeX fonts, sans/tt get "true" only when the selected
# font is one of the known osf-capable fonts in `osfsf`/`osftt`.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided branch lines (e.g. the NonTeXFonts split and `if sf in osfsf:` /
# `if tt in osftt:` guards implied by the insert pairs).  Not runnable as-is.
2613 def convert_osf(document):
2614 " Convert \\font_osf param to new format "
2617 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2619 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2621 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2623 i = find_token(document.header, '\\font_osf', 0)
2625 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans / typewriter variants support old-style figures.
2628 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2629 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
# The old flag becomes the roman flag in place.
2631 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2632 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# (Branch, apparently NonTeXFonts:) sans/tt flags default to false.
2635 document.header.insert(i, "\\font_sans_osf false")
2636 document.header.insert(i + 1, "\\font_typewriter_osf false")
# (TeX-fonts branch:) derive the sans flag from the selected sans font.
2640 x = find_token(document.header, "\\font_sans", 0)
2642 document.warning("Malformed LyX document: Missing \\font_sans.")
2644 # We need to use this regex since split() does not handle quote protection
2645 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2646 sf = sffont[1].strip('"')
2648 document.header.insert(i, "\\font_sans_osf true")
2650 document.header.insert(i, "\\font_sans_osf false")
# Derive the typewriter flag from the selected typewriter font.
2652 x = find_token(document.header, "\\font_typewriter", 0)
2654 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2656 # We need to use this regex since split() does not handle quote protection
2657 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2658 tt = ttfont[1].strip('"')
# NOTE(review): inserting "\font_sans_osf" here, in the typewriter branch,
# looks like a copy-paste slip -- "\font_typewriter_osf" is expected by
# symmetry with lines 2635/2636 and with revert_osf; confirm against the
# format-581 spec / upstream history before changing (cannot fix confidently
# here because the surrounding guard lines are elided in this extract).
2660 document.header.insert(i + 1, "\\font_sans_osf true")
2662 document.header.insert(i + 1, "\\font_sans_osf false")
# (Fallback branch, apparently when \font_osf was absent.)
2665 document.header.insert(i, "\\font_sans_osf false")
2666 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Purpose (apparent): collapse the per-family flags \font_roman_osf,
# \font_sans_osf, \font_typewriter_osf back into the single old \font_osf
# flag: the roman entry is renamed in place, the sans/tt entries are deleted,
# and (apparently for non-TeX fonts) \font_osf is forced true when any of
# the three was true.
# NOTE(review): mangled extract -- embedded line numbers, lost indentation,
# elided guards (e.g. the NonTeXFonts / `if osfval:` conditions implied by
# the numbering gaps around 2686, 2702-2708).  Not runnable as-is.
2669 def revert_osf(document):
2670 " Revert \\font_*_osf params "
2673 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2675 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2677 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
# Roman flag: renamed back to \font_osf in place, keeping its value.
2679 i = find_token(document.header, '\\font_roman_osf', 0)
2681 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2684 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2685 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# Sans flag: folded into osfval, header entry removed.
# NOTE(review): plain `=` here vs `|=` for typewriter at 2700 would discard
# the roman value -- but an elided line between 2685 and 2687 (e.g. a
# NonTeXFonts branch re-reading osfval) may explain it; verify upstream.
2687 i = find_token(document.header, '\\font_sans_osf', 0)
2689 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2692 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2693 del document.header[i]
# Typewriter flag: OR-ed into osfval, header entry removed.
2695 i = find_token(document.header, '\\font_typewriter_osf', 0)
2697 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2700 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2701 del document.header[i]
# (Apparently when osfval:) force the merged flag true.
2704 i = find_token(document.header, '\\font_osf', 0)
2706 document.warning("Malformed LyX document: Missing \\font_osf.")
2708 document.header[i] = "\\font_osf true"
# Module-level conversion tables consumed by the lyx2lyx driver: for each
# target file-format number, the list of functions to run.  `convert` steps
# formats upward toward 2.4; `revert` steps downward (it must mirror
# `convert` in reverse order).
# NOTE(review): mangled extract -- the `convert = [` opener and many table
# entries (formats 546-549, 551-556, 559-560, 562-564, 567-571, 576,
# 578-580) are among the elided lines; the visible rows are verbatim.
2715 supported_versions = ["2.4.0", "2.4"]
2717 [545, [convert_lst_literalparam]],
2722 [550, [convert_fontenc]],
2729 [557, [convert_vcsinfo]],
2730 [558, [removeFrontMatterStyles]],
2733 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2737 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2738 [566, [convert_hebrew_parentheses]],
2744 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2745 [573, [convert_inputencoding_namechange]],
2746 [574, [convert_ruby_module, convert_utf8_japanese]],
2747 [575, [convert_lineno]],
2749 [577, [convert_linggloss]],
2753 [581, [convert_osf]]
# Revert chain: highest format first; entries undo the matching convert step.
2756 revert = [[580, [revert_osf]],
2757 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2758 [578, [revert_babelfont]],
2759 [577, [revert_drs]],
2760 [576, [revert_linggloss, revert_subexarg]],
2761 [575, [revert_new_languages]],
2762 [574, [revert_lineno]],
2763 [573, [revert_ruby_module, revert_utf8_japanese]],
2764 [572, [revert_inputencoding_namechange]],
2765 [571, [revert_notoFonts]],
2766 [570, [revert_cmidruletrimming]],
2767 [569, [revert_bibfileencodings]],
2768 [568, [revert_tablestyle]],
2769 [567, [revert_soul]],
2770 [566, [revert_malayalam]],
2771 [565, [revert_hebrew_parentheses]],
2772 [564, [revert_AdobeFonts]],
2773 [563, [revert_lformatinfo]],
2774 [562, [revert_listpargs]],
2775 [561, [revert_l7ninfo]],
2776 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2777 [559, [revert_timeinfo, revert_namenoextinfo]],
2778 [558, [revert_dateinfo]],
2779 [557, [addFrontMatterStyles]],
2780 [556, [revert_vcsinfo]],
2781 [555, [revert_bibencoding]],
2782 [554, [revert_vcolumns]],
2783 [553, [revert_stretchcolumn]],
2784 [552, [revert_tuftecite]],
2785 [551, [revert_floatpclass, revert_floatalignment]],
2786 [550, [revert_nospellcheck]],
2787 [549, [revert_fontenc]],
2788 [548, []],# dummy format change
2789 [547, [revert_lscape]],
2790 [546, [revert_xcharter]],
2791 [545, [revert_paratype]],
2792 [544, [revert_lst_literalparam]]
2796 if __name__ == "__main__":