1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages, with their options, to the user preamble.

    fontmap: dict mapping a LaTeX package name to a list of package options
    (filled by revert_fonts()).  One \\usepackage line is emitted per package.
    """
    # Defect in the visible source: `pkg` (and `xoption` on the no-options
    # path) were referenced but never bound, raising NameError.  Restore the
    # iteration over the package map and the empty-options default.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    " Create a map key from a package name and its option list "
    # Key shape: '<package>:<opt1>-<opt2>-...'; an empty option list
    # yields '<package>:'.
    joined_options = "-".join(options)
    return "{}:{}".format(pkg, joined_options)
# NOTE(review): sampled dump — the embedded numbers are the original file's
# line numbers.  This fragment is the attribute-default section of a
# font-info class initializer whose `class`/`def __init__` headers fall in
# the missing lines (65-66), plus one line (78) that presumably belongs to a
# separate key-building method (it derives pkgkey from self.package and
# self.options via createkey()) — confirm against the full file.
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
78 self.pkgkey = createkey(self.package, self.options)
# NOTE(review): fragment of a font-mapping class initializer; the enclosing
# `class`/`def __init__` lines (numbered 80-81) are missing from this sample.
# Three lookup tables: fontname -> entry, package-key -> fontname, and a
# presence map for packages.
82 self.font2pkgmap = dict()
83 self.pkg2fontmap = dict()
84 self.pkginmap = dict() # defines, if a map for package exists
# NOTE(review): sampled dump — numbered lines 98-99, 102-103, 105, 107-108
# and 110-112 are missing (presumably the loop over font_list, construction
# of the per-font entry `fe`, the ','-split that yields `font_name` and the
# option tail, the osfopt assignment, and the else-branch assigning
# fe.package = pkg).  The visible lines are kept byte-identical; do not
# assume they form a complete body.
86 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
87 " Expand fontinfo mapping"
89 # fontlist: list of fontnames, each element
90 # may contain a ','-separated list of needed options
91 # like e.g. 'IBMPlexSansCondensed,condensed'
92 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
93 # scale_type: one of None, 'sf', 'tt'
94 # pkg: package defining the font. Defaults to fontname if None
95 # scaleopt: one of None, 'scale', 'scaled', or some other string
96 # to be used in scale option (e.g. scaled=0.7)
97 # osfopt: None or some other string to be used in osf option
100 fe.fonttype = font_type
101 fe.scaletype = scale_type
104 fe.fontname = font_name
106 fe.scaleopt = scaleopt
109 fe.package = font_name
# Register the entry under both the font name and the package key, and
# sanity-check that a repeated package key always maps to the same font.
113 self.font2pkgmap[font_name] = fe
114 if fe.pkgkey in self.pkg2fontmap:
115 # Repeated the same entry? Check content
116 if self.pkg2fontmap[fe.pkgkey] != font_name:
117 document.error("Something is wrong in pkgname+options <-> fontname mapping")
118 self.pkg2fontmap[fe.pkgkey] = font_name
119 self.pkginmap[fe.package] = 1
# NOTE(review): sampled dump — numbered lines 122, 125, 129 and 131-133 are
# missing (presumably the docstring and the return statements for the
# not-found and found cases).  Only the lookups and the consistency check
# are visible; kept byte-identical.
121 def getfontname(self, pkg, options):
123 pkgkey = createkey(pkg, options)
124 if not pkgkey in self.pkg2fontmap:
126 fontname = self.pkg2fontmap[pkgkey]
127 if not fontname in self.font2pkgmap:
128 document.error("Something is wrong in pkgname+options <-> fontname mapping")
130 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# NOTE(review): sampled dump — the mapping-object construction (numbered
# line 143), the branch headers for the non-Adobe families (gaps at 145,
# 149, 166; presumably `if font == 'DejaVu':` / `elif font == 'IBM':` /
# `elif font == 'Noto':`, matching the visible `elif font == 'Adobe':`) and
# the trailing `return` (176 ff.) are missing.  Kept byte-identical.
134 def createFontMapping(fontlist):
135 # Create info for known fonts for the use in
136 # convert_latexFonts() and
137 # revert_latexFonts()
139 # * Would be more handy to parse latexFonts file,
140 # but the path to this file is unknown
141 # * For now, add DejaVu and IBMPlex only.
142 # * Expand, if desired
144 for font in fontlist:
146 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
147 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
148 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
150 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
151 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
152 'IBMPlexSerifSemibold,semibold'],
153 "roman", None, "plex-serif")
154 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
155 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
156 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
157 "sans", "sf", "plex-sans", "scale")
158 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
159 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
160 'IBMPlexMonoSemibold,semibold'],
161 "typewriter", "tt", "plex-mono", "scale")
162 elif font == 'Adobe':
163 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
164 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled")
165 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled")
167 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
168 'NotoSerifThin,thin', 'NotoSerifLight,light',
169 'NotoSerifExtralight,extralight'],
170 "roman", None, "noto-serif", None, "osf")
171 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
172 'NotoSansThin,thin', 'NotoSansLight,light',
173 'NotoSansExtralight,extralight'],
174 "sans", "sf", "noto-sans", "scaled")
175 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
# NOTE(review): sampled dump — many numbered lines are missing from this
# body (183-184, 187-188, 191-192, 194-198, 201-203, 205-211, 213-214,
# 216-217, 221-222, 225, 228, 230, 234, 237, 239-241, 246), presumably loop
# initialization, -1 guards, the option-scanning loop body, `pkg` extraction
# from the regex match, and the osf/scale bookkeeping.  Kept byte-identical.
178 def convert_fonts(document, fm):
179 " Handle font definition (LaTeX preamble -> native) "
# Matches \usepackage[opts]{pkg} lines in the preamble; group(2) = options,
# group(3) = package name.
181 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
182 rscaleopt = re.compile(r'^scaled?=(.*)')
185 while i < len(document.preamble):
186 i = find_re(document.preamble, rpkg, i+1)
189 mo = rpkg.search(document.preamble[i])
190 if mo == None or mo.group(2) == None:
193 options = mo.group(2).replace(' ', '').split(",")
199 while o < len(options):
200 if options[o] == osfoption:
204 mo = rscaleopt.search(options[o])
212 if not pkg in fm.pkginmap:
215 fn = fm.getfontname(pkg, options)
218 del document.preamble[i]
219 fontinfo = fm.font2pkgmap[fn]
220 if fontinfo.scaletype == None:
223 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
224 fontinfo.scaleval = oscale
226 if fontinfo.osfopt == None:
# NOTE(review): extend() with a plain string appends the characters
# 'o', 's', 'f' individually — presumably meant options.extend(["osf"])
# or options.append("osf"); confirm against the full file.
227 options.extend("osf")
229 osf = find_token(document.header, "\\font_osf false")
231 document.header[osf] = "\\font_osf true"
# Drop the marker comment lyx2lyx itself added above the removed line.
232 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
233 del document.preamble[i-1]
235 if fontscale != None:
236 j = find_token(document.header, fontscale, 0)
238 val = get_value(document.header, fontscale, j)
242 scale = "%03d" % int(float(oscale) * 100)
243 document.header[j] = fontscale + " " + scale + " " + vals[1]
244 ft = "\\font_" + fontinfo.fonttype
245 j = find_token(document.header, ft, 0)
247 val = get_value(document.header, ft, j)
248 words = val.split() # ! splits also values like '"DejaVu Sans"'
249 words[0] = '"' + fn + '"'
250 document.header[j] = ft + ' ' + ' '.join(words)
# NOTE(review): sampled dump — numbered lines 258, 261, 264-265, 267-268,
# 274, 278-280, 282, 289-291, 302-305, 310, 315-316 are missing (guards,
# loop initialization, the fontmap[val] initialization, and parts of the
# branch structure around \font_math/\font_roman_opts — the visible
# `if ft == "\\font_sans":` without a leading `elif` suggests an enclosing
# structure lost in the gaps; confirm against the full file).
252 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
253 " Revert native font definition to LaTeX "
254 # fonlist := list of fonts created from the same package
255 # Empty package means that the font-name is the same as the package-name
256 # fontmap (key = package, val += found options) will be filled
257 # and used later in add_preamble_fonts() to be added to user-preamble
259 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
260 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
262 while i < len(document.header):
263 i = find_re(document.header, rfontscale, i+1)
266 mo = rfontscale.search(document.header[i])
269 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
270 val = get_value(document.header, ft, i)
271 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
272 font = words[0].strip('"') # TeX font name has no whitespace
273 if not font in fm.font2pkgmap:
275 fontinfo = fm.font2pkgmap[font]
276 val = fontinfo.package
277 if not val in fontmap:
281 if ft == "\\font_math":
283 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
284 if ft == "\\font_sans":
285 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
286 elif ft == "\\font_typewriter":
287 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
288 x = find_re(document.header, regexp, 0)
292 # We need to use this regex since split() does not handle quote protection
293 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
294 opts = xopts[1].strip('"').split(",")
295 fontmap[val].extend(opts)
296 del document.header[x]
# Reset the header entry to the default font and collect scale/osf/options
# for the preamble reconstruction.
297 words[0] = '"default"'
298 document.header[i] = ft + ' ' + ' '.join(words)
299 if fontinfo.scaleopt != None:
300 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
301 mo = rscales.search(xval)
306 # set correct scale option
307 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
308 if fontinfo.osfopt != None and fontinfo.fonttype == "roman":
309 osf = find_token(document.header, "\\font_osf true")
311 document.header[osf] = "\\font_osf false"
312 fontmap[val].extend([fontinfo.osfopt])
313 if len(fontinfo.options) > 0:
314 fontmap[val].extend(fontinfo.options)
317 ###############################################################################
319 ### Conversion and reversion routines
321 ###############################################################################
def convert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # find_token returns -1 when no \inputencoding line exists; without this
    # guard the code below would rewrite document.header[-1] (the last line).
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    " Rename inputencoding settings. "
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard the not-found case (-1 would silently rewrite header[-1]).
    if i == -1:
        return
    # Order matters: replace the longer name first so "auto-legacy" is not
    # clobbered by the "auto-legacy" -> "auto" substitution.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "
    # Only applies when TeX fonts are in use (non-TeX fonts disabled).
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    " Revert native Noto font definition to LaTeX "
    # Only applies when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Defect in the visible source: fontmap was consumed by
        # revert_fonts()/add_preamble_fonts() without ever being bound.
        fontmap = dict()  # package -> list of collected options
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "
    # Only applies when TeX fonts are in use (non-TeX fonts disabled).
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    " Revert native DejaVu font definition to LaTeX "
    # Only applies when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Defect in the visible source: fontmap was never initialized
        # before being passed to revert_fonts()/add_preamble_fonts().
        fontmap = dict()  # package -> list of collected options
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "
    # Only applies when TeX fonts are in use (non-TeX fonts disabled).
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    " Revert Adobe Source font definition to LaTeX "
    # Only applies when TeX fonts are in use.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Defect in the visible source: fontmap was never initialized
        # before being passed to revert_fonts()/add_preamble_fonts().
        fontmap = dict()  # package -> list of collected options
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# NOTE(review): sampled dump — numbered lines 392-393, 395-396, 399, 401,
# 403 and 405 are missing (presumably the scan-loop header, the -1 guards,
# the `continue` after the not-in-layouts test, and the j increment that
# swallows trailing blank lines).  Kept byte-identical.
387 def removeFrontMatterStyles(document):
388 " Remove styles Begin/EndFrontmatter"
390 layouts = ['BeginFrontmatter', 'EndFrontmatter']
391 tokenend = len('\\begin_layout ')
394 i = find_token_exact(document.body, '\\begin_layout ', i+1)
397 layout = document.body[i][tokenend:].strip()
398 if layout not in layouts:
400 j = find_end_of_layout(document.body, i)
402 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Also remove blank lines following the layout before deleting the span.
404 while document.body[j+1].strip() == '':
406 document.body[i:j+1] = []
# NOTE(review): sampled dump — numbered lines 410, 412-413, 415, 417-418,
# 420, 423, 425-426, 428-429, 433-435, 437-438, 441, 443, 445-450 are
# missing (the early return, parts of the inserted-lines list, the scan
# loop, and the first/k bookkeeping that brackets the frontmatter block).
# Kept byte-identical.
408 def addFrontMatterStyles(document):
409 " Use styles Begin/EndFrontmatter for elsarticle"
411 if document.textclass != "elsarticle":
# Local helper: wraps the frontmatter region with Begin/EndFrontmatter
# layouts (plus an explanatory LyX note), trimming surrounding blank lines.
414 def insertFrontmatter(prefix, line):
416 while above > 0 and document.body[above-1].strip() == '':
419 while document.body[below].strip() == '':
421 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
422 '\\begin_inset Note Note',
424 '\\begin_layout Plain Layout',
427 '\\end_inset', '', '',
430 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
431 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
432 tokenend = len('\\begin_layout ')
436 i = find_token_exact(document.body, '\\begin_layout ', i+1)
439 layout = document.body[i][tokenend:].strip()
440 if layout not in layouts:
442 k = find_end_of_layout(document.body, i)
444 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert the End marker first so the Begin insertion does not shift k.
451 insertFrontmatter('End', k+1)
452 insertFrontmatter('Begin', first)
# NOTE(review): sampled dump — numbered lines 457-459, 461-462, 464, 466,
# 468 and 470-471 are missing (presumably loop initialization, -1 guards,
# and the increment that advances i to the insertion point before a blank
# line).  Kept byte-identical.
455 def convert_lst_literalparam(document):
456 " Add param literal to include inset "
460 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
463 j = find_end_of_inset(document.body, i)
465 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip past the inset's parameter lines, then add the new parameter.
467 while i < j and document.body[i].strip() != '':
469 document.body.insert(i, 'literal "true"')
# NOTE(review): sampled dump — numbered lines 474-476, 478-479, 481 and 483
# are missing (presumably loop initialization and the -1 guards).  Kept
# byte-identical.
472 def revert_lst_literalparam(document):
473 " Remove param literal from include inset "
477 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
480 j = find_end_of_inset(document.body, i)
482 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
484 del_token(document.body, 'literal', i, j)
# NOTE(review): sampled dump — numbered lines 489, 491, 497-500, 504-507,
# 511-512, 515-516, 518, 521-522 and 524 are missing (presumably the early
# return for non-TeX fonts, defaults for sfoption/ttoption, the
# scale-value == "100" short-circuits, and the if/else structure selecting
# between the scaled and unscaled \usepackage variants).  Kept
# byte-identical.
487 def revert_paratype(document):
488 " Revert ParaType font definitions to LaTeX "
490 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
# Locate which of the three ParaType families are selected in the header.
492 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
493 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
494 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
495 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
496 sfval = get_value(document.header, "\\font_sf_scale", 0)
501 sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
502 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
503 ttval = get_value(document.header, "\\font_tt_scale", 0)
508 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set -> the combined paratype package suffices.
509 if i1 != -1 and i2 != -1 and i3!= -1:
510 add_to_preamble(document, ["\\usepackage{paratype}"])
513 add_to_preamble(document, ["\\usepackage{PTSerif}"])
514 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
517 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
519 add_to_preamble(document, ["\\usepackage{PTSans}"])
520 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
523 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
525 add_to_preamble(document, ["\\usepackage{PTMono}"])
526 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# NOTE(review): sampled dump — numbered lines 531, 533-535, 540-541, 544-545
# and 547-549 are missing (presumably the -1 guard after the header lookup,
# the early return for system fonts, and the construction of `options` from
# the osf setting).  Kept byte-identical.
529 def revert_xcharter(document):
530 " Revert XCharter font definitions to LaTeX "
532 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
536 # replace unsupported font setting
537 document.header[i] = document.header[i].replace("xcharter", "default")
538 # no need for preamble code with system fonts
539 if get_bool_value(document.header, "\\use_non_tex_fonts"):
542 # transfer old style figures setting to package options
543 j = find_token(document.header, "\\font_osf true")
546 document.header[j] = "\\font_osf false"
550 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# NOTE(review): sampled dump — numbered lines 555, 557-560, 562-563, 565,
# 567-568, 573 and 576 are missing (presumably the early return when the
# module is absent, loop initialization, -1 guards, and the else branch
# pairing with the Floating variant).  Kept byte-identical.
553 def revert_lscape(document):
554 " Reverts the landscape environment (Landscape module) to TeX-code "
556 if not "landscape" in document.get_module_list():
561 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
564 j = find_end_of_inset(document.body, i)
566 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant wraps the environment in \afterpage{...}; note the
# matching extra closing brace in the \end replacement.
569 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
570 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
571 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
572 add_to_preamble(document, ["\\usepackage{afterpage}"])
574 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
575 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
577 add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    " Convert default fontenc setting "
    i = find_token(document.header, "\\fontencoding global", 0)
    # find_token returns -1 when no such header line exists; without this
    # guard the replace below would rewrite document.header[-1].
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    " Revert default fontenc setting "
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard the not-found case (-1 would silently rewrite header[-1]).
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
# NOTE(review): sampled dump — most of this body is missing (numbered lines
# 602-604 and 606-610), presumably the scan loop, the -1 termination guard
# and the deletion of the found token.  Only the header and one lookup line
# are visible; kept byte-identical.
600 def revert_nospellcheck(document):
601 " Remove nospellcheck font info param "
605 i = find_token(document.body, '\\nospellcheck', i)
# NOTE(review): sampled dump — numbered lines 613, 615-617, 619-620, 623 and
# 625-630 are missing (presumably the matching del_token for "document",
# loop initialization, -1 guards and the per-inset placement rewriting).
# Kept byte-identical.
611 def revert_floatpclass(document):
612 " Remove float placement params 'document' and 'class' "
614 del_token(document.header, "\\float_placement class")
618 i = find_token(document.body, '\\begin_inset Float', i+1)
621 j = find_end_of_inset(document.body, i)
# Placement parameter sits within the first two lines of the inset.
622 k = find_token(document.body, 'placement class', i, i + 2)
624 k = find_token(document.body, 'placement document', i, i + 2)
# NOTE(review): sampled dump — numbered lines 633, 635-637, 639-640, 642,
# 644, 646-648, 652, 654, 656-657 and 664 are missing (presumably loop
# initialization, -1 guards, deletion of the alignment line, and the
# fall-through when no alignment applies).  Kept byte-identical.
631 def revert_floatalignment(document):
632 " Remove float alignment params "
# Global (document-wide) alignment; removed from the header here and used
# below as fallback for per-float "document" alignment.
634 galignment = get_value(document.header, "\\float_alignment", delete=True)
638 i = find_token(document.body, '\\begin_inset Float', i+1)
641 j = find_end_of_inset(document.body, i)
643 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
645 k = find_token(document.body, 'alignment', i, i+4)
649 alignment = get_value(document.body, "alignment", k)
650 if alignment == "document":
651 alignment = galignment
653 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
655 document.warning("Can't find float layout!")
# Translate the alignment to the equivalent LaTeX command in ERT.
658 if alignment == "left":
659 alcmd = put_cmd_in_ert("\\raggedright{}")
660 elif alignment == "center":
661 alcmd = put_cmd_in_ert("\\centering{}")
662 elif alignment == "right":
663 alcmd = put_cmd_in_ert("\\raggedleft{}")
665 document.body[l+1:l+1] = alcmd
668 def revert_tuftecite(document):
669 " Revert \cite commands in tufte classes "
671 tufte = ["tufte-book", "tufte-handout"]
672 if document.textclass not in tufte:
677 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
680 j = find_end_of_inset(document.body, i)
682 document.warning("Can't find end of citation inset at line %d!!" %(i))
684 k = find_token(document.body, "LatexCommand", i, j)
686 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
689 cmd = get_value(document.body, "LatexCommand", k)
693 pre = get_quoted_value(document.body, "before", i, j)
694 post = get_quoted_value(document.body, "after", i, j)
695 key = get_quoted_value(document.body, "key", i, j)
697 document.warning("Citation inset at line %d does not have a key!" %(i))
699 # Replace command with ERT
702 res += "[" + pre + "]"
704 res += "[" + post + "]"
707 res += "{" + key + "}"
708 document.body[i:j+1] = put_cmd_in_ert([res])
# NOTE(review): sampled dump — numbered lines 714-715, 717-718, 720 and 722
# are missing (presumably loop initialization and the -1 guards after the
# two lookups).  Kept byte-identical.
712 def revert_stretchcolumn(document):
713 " We remove the column varwidth flags or everything else will become a mess. "
716 i = find_token(document.body, "\\begin_inset Tabular", i+1)
719 j = find_end_of_inset(document.body, i+1)
721 document.warning("Malformed LyX document: Could not find end of tabular.")
723 for k in range(i, j):
724 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
725 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
726 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# NOTE(review): sampled dump — numbered lines 731-735, 737-738, 740,
# 742-743, 745, 748, 756-759, 768, 770, 772, 774, 776, 779, 782, 789-790,
# 792, 796, 798, 800, 802-804, 807, 809, 811-815 are missing (presumably
# loop/flag initialization for needarray/needvarwidth/vcand/col_info/m/
# begcell, guards, increments, and the per-cell begcell tracking).  Kept
# byte-identical.
729 def revert_vcolumns(document):
730 " Revert standard columns with line breaks etc. "
736 i = find_token(document.body, "\\begin_inset Tabular", i+1)
739 j = find_end_of_inset(document.body, i)
741 document.warning("Malformed LyX document: Could not find end of tabular.")
744 # Collect necessary column information
# Row/column counts come from the quoted attributes on the <features> line.
746 nrows = int(document.body[i+1].split('"')[3])
747 ncols = int(document.body[i+1].split('"')[5])
749 for k in range(ncols):
750 m = find_token(document.body, "<column", m)
751 width = get_option_value(document.body[m], 'width')
752 varwidth = get_option_value(document.body[m], 'varwidth')
753 alignment = get_option_value(document.body[m], 'alignment')
754 special = get_option_value(document.body[m], 'special')
755 col_info.append([width, varwidth, alignment, special, m])
# Walk every cell to decide whether a varwidth column is needed.
760 for row in range(nrows):
761 for col in range(ncols):
762 m = find_token(document.body, "<cell", m)
763 multicolumn = get_option_value(document.body[m], 'multicolumn')
764 multirow = get_option_value(document.body[m], 'multirow')
765 width = get_option_value(document.body[m], 'width')
766 rotate = get_option_value(document.body[m], 'rotate')
767 # Check for: linebreaks, multipars, non-standard environments
769 endcell = find_token(document.body, "</cell>", begcell)
771 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
773 elif count_pars_in_inset(document.body, begcell + 2) > 1:
775 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
777 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
778 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
780 alignment = col_info[col][2]
781 col_line = col_info[col][4]
783 if alignment == "center":
784 vval = ">{\\centering}"
785 elif alignment == "left":
786 vval = ">{\\raggedright}"
787 elif alignment == "right":
788 vval = ">{\\raggedleft}"
791 vval += "V{\\linewidth}"
793 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
794 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
795 # with newlines, and we do not want that)
797 endcell = find_token(document.body, "</cell>", begcell)
799 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
801 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
805 nle = find_end_of_inset(document.body, nl)
806 del(document.body[nle:nle+1])
808 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
810 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
# Emit required preamble packages once per document.
816 if needarray == True:
817 add_to_preamble(document, ["\\usepackage{array}"])
818 if needvarwidth == True:
819 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): sampled dump — numbered lines 824-826, 828, 830, 832-834,
# 836-837, 839-841, 856, 858-873, 875-880, 882-883, 885, 887, 889-890,
# 893-894, 896, 899-900, 902, 906, 908, 911, 913 and 916-918 are missing.
# In particular most of the encodings dict entries (cp-era codepages, koi8,
# pt154, utf8 variants etc.) were dropped, as were the loop header, guards,
# the `del document.body[k]`, the `continue` for "default", and the biblatex
# branch structure.  Kept byte-identical.
822 def revert_bibencoding(document):
823 " Revert bibliography encoding "
827 i = find_token(document.header, "\\cite_engine", 0)
829 document.warning("Malformed document! Missing \\cite_engine")
831 engine = get_value(document.header, "\\cite_engine", i)
# biblatex takes the encoding as a package option; other engines get an
# ERT \bgroup\inputencoding{...}...\egroup wrapper around the inset.
835 if engine in ["biblatex", "biblatex-natbib"]:
838 # Map lyx to latex encoding names
842 "armscii8" : "armscii8",
843 "iso8859-1" : "latin1",
844 "iso8859-2" : "latin2",
845 "iso8859-3" : "latin3",
846 "iso8859-4" : "latin4",
847 "iso8859-5" : "iso88595",
848 "iso8859-6" : "8859-6",
849 "iso8859-7" : "iso-8859-7",
850 "iso8859-8" : "8859-8",
851 "iso8859-9" : "latin5",
852 "iso8859-13" : "latin7",
853 "iso8859-15" : "latin9",
854 "iso8859-16" : "latin10",
855 "applemac" : "applemac",
857 "cp437de" : "cp437de",
874 "utf8-platex" : "utf8",
881 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
884 j = find_end_of_inset(document.body, i)
886 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
888 encoding = get_quoted_value(document.body, "encoding", i, j)
891 # remove encoding line
892 k = find_token(document.body, "encoding", i, j)
895 if encoding == "default":
897 # Re-find inset end line
898 j = find_end_of_inset(document.body, i)
901 h = find_token(document.header, "\\biblio_options", 0)
903 biblio_options = get_value(document.header, "\\biblio_options", h)
904 if not "bibencoding" in biblio_options:
905 document.header[h] += ",bibencoding=%s" % encodings[encoding]
907 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
909 # this should not happen
910 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# Insert a fresh \biblio_options line just before \biblatex_bibstyle.
912 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
914 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
915 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# NOTE(review): sampled dump — numbered lines 923-924, 928-932, 934-935,
# 937, 939, 942-943 and 947 are missing (presumably the dict literal opener
# and remaining vcs-* entries, loop initialization, -1 guards, and the
# type == "buffer" filter).  Kept byte-identical.
921 def convert_vcsinfo(document):
922 " Separate vcs Info inset from buffer Info inset. "
# Map old buffer-inset argument names to the new vcs-inset argument names.
925 "vcs-revision" : "revision",
926 "vcs-tree-revision" : "tree-revision",
927 "vcs-author" : "author",
933 i = find_token(document.body, "\\begin_inset Info", i+1)
936 j = find_end_of_inset(document.body, i+1)
938 document.warning("Malformed LyX document: Could not find end of Info inset.")
940 tp = find_token(document.body, 'type', i, j)
941 tpv = get_quoted_value(document.body, "type", tp)
944 arg = find_token(document.body, 'arg', i, j)
945 argv = get_quoted_value(document.body, "arg", arg)
946 if argv not in list(types.keys()):
948 document.body[tp] = "type \"vcs\""
949 document.body[arg] = "arg \"" + types[argv] + "\""
# NOTE(review): sampled dump — numbered lines 954, 956-957, 959-960, 962,
# 964, 967-968, 971 and 973 are missing (presumably loop initialization,
# -1 guards, the type == "vcs" filter, and the `continue` after the
# invalid-arg warning).  Kept byte-identical.
952 def revert_vcsinfo(document):
953 " Merge vcs Info inset to buffer Info inset. "
# Valid argument names of the vcs Info inset; prefixed with "vcs-" below
# when folded back into the buffer Info inset.
955 args = ["revision", "tree-revision", "author", "time", "date" ]
958 i = find_token(document.body, "\\begin_inset Info", i+1)
961 j = find_end_of_inset(document.body, i+1)
963 document.warning("Malformed LyX document: Could not find end of Info inset.")
965 tp = find_token(document.body, 'type', i, j)
966 tpv = get_quoted_value(document.body, "type", tp)
969 arg = find_token(document.body, 'arg', i, j)
970 argv = get_quoted_value(document.body, "arg", arg)
972 document.warning("Malformed Info inset. Invalid vcs arg.")
974 document.body[tp] = "type \"buffer\""
975 document.body[arg] = "arg \"vcs-" + argv + "\""
978 def revert_dateinfo(document):
979 " Revert date info insets to static text. "
981 # FIXME This currently only considers the main language and uses the system locale
982 # Ideally, it should honor context languages and switch the locale accordingly.
984 # The date formats for each language using strftime syntax:
985 # long, short, loclong, locmedium, locshort
987 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
988 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
989 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
990 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
991 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
992 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
993 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
994 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
995 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
996 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
997 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
998 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
999 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1000 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1001 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1002 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1003 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1004 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1005 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1006 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1007 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1008 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1009 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1010 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1011 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1012 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1013 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1014 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1015 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1016 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1017 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1018 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1019 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1020 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1021 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1022 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1023 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1024 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1025 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1026 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1027 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1028 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1029 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1030 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1031 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1032 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1033 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1034 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1035 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1036 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1037 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1038 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1039 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1040 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1041 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1042 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1043 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1044 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1045 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1046 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1047 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1048 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1049 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1050 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1051 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1052 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1053 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1054 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1055 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1056 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1057 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1058 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1059 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1060 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1061 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1062 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1063 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1064 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1065 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1066 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1067 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1068 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1069 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1070 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1071 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1072 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1073 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1074 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1075 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1076 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1077 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1078 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1079 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1080 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1081 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1082 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1083 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1084 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1085 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1086 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1087 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1088 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1091 types = ["date", "fixdate", "moddate" ]
1092 lang = get_value(document.header, "\\language")
1094 document.warning("Malformed LyX document! No \\language header found!")
1099 i = find_token(document.body, "\\begin_inset Info", i+1)
1102 j = find_end_of_inset(document.body, i+1)
1104 document.warning("Malformed LyX document: Could not find end of Info inset.")
1106 tp = find_token(document.body, 'type', i, j)
1107 tpv = get_quoted_value(document.body, "type", tp)
1108 if tpv not in types:
1110 arg = find_token(document.body, 'arg', i, j)
1111 argv = get_quoted_value(document.body, "arg", arg)
1114 if tpv == "fixdate":
1115 datecomps = argv.split('@')
1116 if len(datecomps) > 1:
1118 isodate = datecomps[1]
1119 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1121 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1122 # FIXME if we had the path to the original document (not the one in the tmp dir),
1123 # we could use the mtime.
1124 # elif tpv == "moddate":
1125 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1128 result = dte.isodate()
1129 elif argv == "long":
1130 result = dte.strftime(dateformats[lang][0])
1131 elif argv == "short":
1132 result = dte.strftime(dateformats[lang][1])
1133 elif argv == "loclong":
1134 result = dte.strftime(dateformats[lang][2])
1135 elif argv == "locmedium":
1136 result = dte.strftime(dateformats[lang][3])
1137 elif argv == "locshort":
1138 result = dte.strftime(dateformats[lang][4])
1140 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1141 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1142 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1143 fmt = re.sub('[^\'%]d', '%d', fmt)
1144 fmt = fmt.replace("'", "")
1145 result = dte.strftime(fmt)
1146 if sys.version_info < (3,0):
1147 # In Python 2, datetime module works with binary strings,
1148 # our dateformat strings are utf8-encoded:
1149 result = result.decode('utf-8')
1150 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """ Revert time info insets to static text.

    FIXME: This currently only considers the main language and uses
    the system locale. Ideally, it should honor context languages and
    switch the locale accordingly. Also, the time object is "naive",
    i.e., it does not know of timezones (%Z will be empty).
    """

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Default to the current time; "fixtime" insets override this
        # with the time stored after "@" in the inset argument.
        tme = datetime.now().time()
        if tpv == "fixtime":
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one
        # in the tmp dir), we could use the mtime for "modtime".
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt-style format tokens to strftime placeholders
            # and drop quoting. NOTE(review): the translation is only
            # approximate — e.g. "HH" first becomes "%H", whose "H" is
            # then hit by the following single-letter replacement.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # Bug fix: this previously called dte.strftime(fmt), but no
            # name `dte` exists in this function (NameError at runtime).
            result = tme.strftime(fmt)
        # Bug fix: wrap in a one-element list. Assigning a bare string to
        # a slice splices it in character by character; the sibling
        # date-reverting routine correctly uses [result].
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """ Merge buffer Info inset type name-noext to name.

    NOTE(review): this copy of the function appears to have lost several
    control-flow lines (the `while True` scan loop, the `== -1` guards
    after each find call, the type filter, and `continue` statements);
    the remaining statements are annotated as they stand.
    """
    # Locate the next Info inset in the body.
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    # Read the inset's "type" parameter.
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    # Read the inset's "arg" parameter; only "name-noext" is merged.
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv != "name-noext":
    # Rewrite the argument line so the inset reads the plain file name.
    document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """ Revert l7n Info inset to text.

    NOTE(review): the scan loop, `== -1` guards and the l7n type filter
    appear elided in this copy; annotated as it stands.
    """
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
    argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
    # NOTE(review): assigning a bare string to a body slice splices it in
    # character by character — presumably [argv] was intended; verify
    # against upstream before relying on this line.
    document.body[i : j+1] = argv
def revert_listpargs(document):
    """ Reverts listpreamble arguments to TeX-code.

    NOTE(review): the scan loop and `== -1` guards appear elided in this
    copy, and `parbeg` (presumably the paragraph start taken from the
    get_containing_layout result) is used without a visible assignment —
    confirm against upstream.
    """
    i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
    j = find_end_of_inset(document.body, i)
    # Find containing paragraph layout
    parent = get_containing_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find parent paragraph layout")
    # Extract the argument's contents from its Plain Layout.
    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
    endPlain = find_end_of_layout(document.body, beginPlain)
    content = document.body[beginPlain + 1 : endPlain]
    # Remove the Argument inset and re-insert its contents wrapped in
    # braces inside an ERT at the start of the paragraph.
    del document.body[i:j+1]
    subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
             "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
    document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """ Revert layout format Info inset to text.

    NOTE(review): the scan loop, `== -1` guards and the `continue`
    statements under the two filters appear elided in this copy.
    """
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    # Only lyxinfo insets with the "layoutformat" argument are handled.
    if tpv != "lyxinfo":
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv != "layoutformat":
    # Replace the whole inset by the literal layout format string.
    document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Single-pass mirror map: "(" <-> ")".
    swap_parens = str.maketrans('()', ')(')
    # Stack of active languages, one entry per nested layout; the top
    # entry is the language governing the current line.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Bug fix: the previous code used line.lstrip('\\lang '),
            # which strips *characters* from the set {\, l, a, n, g, ' '}
            # and mangles names such as "ngerman" (-> "erman").
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # A nested layout starts in the language of the enclosing one.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # str.translate swaps both characters in one pass, avoiding
            # the previous three-step replace() dance via a sentinel.
            document.body[i] = line.translate(swap_parens)
def revert_hebrew_parentheses(document):
    " Store parentheses in Hebrew text reversed"
    # Swapping parentheses is an involution, so reverting is the very
    # same operation as converting; this wrapper only exists to keep
    # the convert/revert naming convention of this module.
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "
    # Delegate to the shared language-reversion helper: no babel name,
    # polyglossia name "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """ Revert soul module flex insets to ERT.

    NOTE(review): the loop over `flexes` (which binds `flex`) and the
    `if i != -1:` guards around the preamble additions appear elided in
    this copy; annotated as it stands.
    """
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
    # Add \usepackage{soul} to the preamble if any flex inset is used.
    i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
    add_to_preamble(document, ["\\usepackage{soul}"])
    # Highlighting additionally requires the color package.
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    add_to_preamble(document, ["\\usepackage{color}"])
    # Rewrite each flex inset to the corresponding soul command.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """ Remove tablestyle params.

    NOTE(review): the customary `if i != -1:` guard before the deletion
    appears elided in this copy — as written, a missing \\tablestyle
    line would delete header[-1].
    """
    # Drop the \tablestyle line from the document header.
    i = find_token(document.header, "\\tablestyle")
    del document.header[i]
def revert_bibfileencodings(document):
    """ Revert individual Biblatex bibliography encodings.

    Moves per-file encodings (the LyX 2.4 "file_encodings" inset
    parameter) into explicit \\addbibresource[bibencoding=...] preamble
    lines and wraps the bibtex inset into a note followed by an ERT
    \\printbibliography.

    NOTE(review): this copy is visibly incomplete — the `== -1` guards,
    the opening/closing braces of the lyx->latex encoding map, the loop
    that fills `encmap` from `enclist` (binding `pp`), and several
    `continue`/`return` statements appear elided.
    """
    # Determine the citation engine; only biblatex variants are affected.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",
        # Scan for bibtex command insets carrying file_encodings.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
        document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # "file_encodings" is a tab-separated list of "<file> <encoding>".
        enclist = encodings.split("\t")
        ppp = pp.split(" ", 1)
        encmap[ppp[0]] = ppp[1]
        # Emit one \addbibresource per file, with bibencoding if known.
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
        pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """ Remove \\cmidrule trimming.

    NOTE(review): the scan loop (`while True`), the `== -1` break and
    the "no trim attribute" `continue` appear elided in this copy.
    """
    # FIXME: Revert to TeX code?
    # first, let's find out if we need to do anything
    i = find_token(document.body, '<cell ', i+1)
    # Cheap substring test before paying for the regex substitution.
    j = document.body[i].find('trim="')
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    # remove trim option
    document.body[i] = rgx.sub('', document.body[i])
1610 r'### Inserted by lyx2lyx (ruby inset) ###',
1611 r'InsetLayout Flex:Ruby',
1612 r' LyxType charstyle',
1613 r' LatexType command',
1617 r' HTMLInnerTag rb',
1618 r' HTMLInnerAttr ""',
1620 r' LabelString "Ruby"',
1621 r' Decoration Conglomerate',
1623 r' \ifdefined\kanjiskip',
1624 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1625 r' \else \ifdefined\luatexversion',
1626 r' \usepackage{luatexja-ruby}',
1627 r' \else \ifdefined\XeTeXversion',
1628 r' \usepackage{ruby}%',
1630 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1632 r' Argument post:1',
1633 r' LabelString "ruby text"',
1634 r' MenuString "Ruby Text|R"',
1635 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1636 r' Decoration Conglomerate',
def convert_ruby_module(document):
    " Use ruby module instead of local module definition "
    # del_local_layout reports whether the local ruby definition was
    # actually present; only then is the module substituted for it.
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    " Replace ruby module with local module definition "
    # del_module returns True only if the module was in use; in that
    # case re-create the equivalent local layout definition.
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """ Use generic utf8 with Japanese documents.

    NOTE(review): the early `return` under the startswith guard appears
    elided in this copy.
    """
    lang = get_value(document.header, "\\language")
    # Only Japanese documents are affected.
    if not lang.startswith("japanese"):
    inputenc = get_value(document.header, "\\inputencoding")
    # Replace the Japanese-specific utf8 variants by plain utf8.
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """ Use Japanese utf8 variants with Japanese documents.

    NOTE(review): the early `return` under the inputenc check appears
    elided in this copy.
    """
    inputenc = get_value(document.header, "\\inputencoding")
    # Only generic-utf8 documents are affected.
    if inputenc != "utf8":
    lang = get_value(document.header, "\\language")
    # pLaTeX variant for "japanese", CJK variant for "japanese-cjk".
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """ Replace lineno setting with user-preamble code.

    NOTE(review): this copy is visibly truncated — the get_quoted_value
    and add_to_preamble calls are missing their continuation lines, the
    early return when lineno is unused is elided, and the bracketing of
    `options` presumably sat under an "if options:" guard.
    """
    # Read (and delete) the stored lineno options from the header.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
    options = "[" + options + "]"
    # Emit the equivalent \usepackage[...]{lineno} preamble code.
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    """ Replace user-preamble code with native lineno support.

    NOTE(review): the guards and the assignment of `use_lineno` appear
    elided in this copy; the two header insertions were presumably
    alternatives of an if/else on whether `options` is empty — confirm
    against upstream.
    """
    # Look for a \linenumbers command in the user preamble ...
    i = find_token(document.preamble, "\\linenumbers", 1)
    # ... preceded by the \usepackage{lineno} line it belongs to.
    usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
    options = usepkg.group(1).strip("[]")
    # Remove both preamble lines plus the lyx2lyx marker comment.
    del(document.preamble[i-1:i+1])
    del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
    # Insert the native header settings before the \index block.
    k = find_token(document.header, "\\index ")
    document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                            "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography).

    NOTE(review): the scan loop/guards and the closing brace of the
    `new_languages` dict appear elided in this copy. Two further points
    to verify against upstream: the body scan adds `document.language`
    rather than the language actually found on the \\lang line, and the
    final loop calls an apparently undefined name `revert` (probably
    meant to be revert_language(document, lang, ...)).
    """
    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
    # Collect which of the new languages the document actually uses.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = find_token(document.body, "\\lang", i+1)
    if document.body[i][6:].strip() in new_languages:
        used_languages.add(document.language)
    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")
    # Emulate each remaining language via the generic helper.
    for lang in used_languages:
        revert(lang, *new_languages[lang])
1748 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1749 r'InsetLayout Flex:Glosse',
1751 r' LabelString "Gloss (old version)"',
1752 r' MenuString "Gloss (old version)"',
1753 r' LatexType environment',
1754 r' LatexName linggloss',
1755 r' Decoration minimalistic',
1760 r' CustomPars false',
1761 r' ForcePlain true',
1762 r' ParbreakIsNewline true',
1763 r' FreeSpacing true',
1764 r' Requires covington',
1767 r' \@ifundefined{linggloss}{%',
1768 r' \newenvironment{linggloss}[2][]{',
1769 r' \def\glosstr{\glt #1}%',
1771 r' {\glosstr\glend}}{}',
1774 r' ResetsFont true',
1776 r' Decoration conglomerate',
1777 r' LabelString "Translation"',
1778 r' MenuString "Glosse Translation|s"',
1779 r' Tooltip "Add a translation for the glosse"',
1784 glosss_inset_def = [
1785 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1786 r'InsetLayout Flex:Tri-Glosse',
1788 r' LabelString "Tri-Gloss (old version)"',
1789 r' MenuString "Tri-Gloss (old version)"',
1790 r' LatexType environment',
1791 r' LatexName lingglosss',
1792 r' Decoration minimalistic',
1797 r' CustomPars false',
1798 r' ForcePlain true',
1799 r' ParbreakIsNewline true',
1800 r' FreeSpacing true',
1802 r' Requires covington',
1805 r' \@ifundefined{lingglosss}{%',
1806 r' \newenvironment{lingglosss}[2][]{',
1807 r' \def\glosstr{\glt #1}%',
1809 r' {\glosstr\glend}}{}',
1811 r' ResetsFont true',
1813 r' Decoration conglomerate',
1814 r' LabelString "Translation"',
1815 r' MenuString "Glosse Translation|s"',
1816 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    " Move old ling glosses to local layout "
    # For each deprecated gloss inset that occurs in the body, append
    # the corresponding compatibility definition to the local layout.
    for marker, layout_def in (("\\begin_inset Flex Glosse", gloss_inset_def),
                               ("\\begin_inset Flex Tri-Glosse", glosss_inset_def)):
        if find_token(document.body, marker, 0) != -1:
            document.append_local_layout(layout_def)
def revert_linggloss(document):
    """ Revert to old ling gloss definitions.

    NOTE(review): numerous guard lines (`== -1` checks with `continue`,
    the inner scan-loop header, and the assignments of `cmd` to
    "\\gloss"/"\\trigloss") appear elided in this copy; the remaining
    statements are annotated as they stand.
    """
    if not "linguistics" in document.get_module_list():
    # Drop the local-layout copies of the deprecated gloss insets.
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)
    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        i = find_token(document.body, glosse, i+1)
        j = find_end_of_inset(document.body, i)
        document.warning("Malformed LyX document: Can't find end of Gloss inset")
        # Extract the optional argument (Argument 1), if any.
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        del document.body[arg : endarg + 1]
        # Extract the first mandatory argument (post:1).
        arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        del document.body[arg : endarg + 1]
        # Extract the second mandatory argument (post:2).
        arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        del document.body[arg : endarg + 1]
        # Extract the third argument (post:3), used by the 3-line gloss.
        arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        del document.body[arg : endarg + 1]
        # Select the ERT command per inset flavor (assignments elided).
        if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
        # Replace the inset by ERT: \gloss[opt]{l1}{l2} or \trigloss{...}{l3}.
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endInset = find_end_of_inset(document.body, i)
        endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
        precontent = put_cmd_in_ert(cmd)
        if len(optargcontent) > 0:
            precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
        precontent += put_cmd_in_ert("{")
        postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
        if cmd == "\\trigloss":
            postcontent += put_cmd_in_ert("}{") + marg3content
        postcontent += put_cmd_in_ert("}")
        # Splice post- and pre-content in back-to-front order so earlier
        # indices stay valid, then drop the inset header lines.
        document.body[endPlain:endInset + 1] = postcontent
        document.body[beginPlain + 1:beginPlain] = precontent
        del document.body[i : beginPlain + 1]
    # The generated ERT relies on the covington package.
    document.append_local_layout("Requires covington")
def revert_subexarg(document):
    """ Revert linguistic subexamples with argument to ERT.

    NOTE(review): the scan loop and the `== -1` guards (with their
    `continue`/`break` statements) appear elided in this copy; the
    dangling `if` filters below originally guarded loop-control
    statements. Annotated as it stands.
    """
    if not "linguistics" in document.get_module_list():
    # Find the next Subexample layout.
    i = find_token(document.body, "\\begin_layout Subexample", i+1)
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Subexample layout")
    # check for consecutive layouts
    k = find_token(document.body, "\\begin_layout", j)
    if k == -1 or document.body[k] != "\\begin_layout Subexample":
    j = find_end_of_layout(document.body, k)
    document.warning("Malformed LyX document: Can't find end of Subexample layout")
    # Extract the optional argument (Argument 1) as LaTeX.
    arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
    endarg = find_end_of_inset(document.body, arg)
    argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
    if argbeginPlain == -1:
        document.warning("Malformed LyX document: Can't find optarg plain Layout")
    argendPlain = find_end_of_inset(document.body, argbeginPlain)
    optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
    # remove Arg insets and paragraph, if it only contains this inset
    if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
        del document.body[arg - 1 : endarg + 4]
    del document.body[arg : endarg + 1]
    # The environment opener carries the extracted optional argument.
    cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
    # re-find end of layout
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Subexample layout")
    # check for consecutive layouts
    k = find_token(document.body, "\\begin_layout", j)
    if k == -1 or document.body[k] != "\\begin_layout Subexample":
    # Each consecutive Subexample becomes a Standard paragraph + \item.
    document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
    j = find_end_of_layout(document.body, k)
    document.warning("Malformed LyX document: Can't find end of Subexample layout")
    # Close the environment after the last consecutive layout, then
    # rewrite the first layout with the opener and the first \item.
    endev = put_cmd_in_ert("\\end{subexamples}")
    document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
    document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
        + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
    # The generated ERT relies on the covington package.
    document.append_local_layout("Requires covington")
# NOTE(review): numbering gaps indicate dropped lines (the per-flavour loop
# header, returns/continues, and the assignments that choose `cmd` per DRS
# flavour). Code left byte-identical; comments only.
2015 def revert_drs(document):
2016 " Revert DRS insets (linguistics) to ERT "
2018 if not "linguistics" in document.get_module_list():
# All DRS inset flavours handled by this revert routine.
2022 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2023 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2024 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2025 "\\begin_inset Flex SDRS"]
2029 i = find_token(document.body, drs, i+1)
2032 j = find_end_of_inset(document.body, i)
2034 document.warning("Malformed LyX document: Can't find end of DRS inset")
2037 # Check for arguments
# Argument 1: first pre-argument of the DRS command.
2038 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2039 endarg = find_end_of_inset(document.body, arg)
2042 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2043 if argbeginPlain == -1:
2044 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2046 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2047 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2049 # remove Arg insets and paragraph, if it only contains this inset
2050 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2051 del document.body[arg - 1 : endarg + 4]
2053 del document.body[arg : endarg + 1]
# Deletions above shift indices, so the inset end is re-located each time.
2056 j = find_end_of_inset(document.body, i)
2058 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2: second pre-argument (consumed only by SDRS below).
2061 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2062 endarg = find_end_of_inset(document.body, arg)
2065 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2066 if argbeginPlain == -1:
2067 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2069 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2070 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2072 # remove Arg insets and paragraph, if it only contains this inset
2073 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2074 del document.body[arg - 1 : endarg + 4]
2076 del document.body[arg : endarg + 1]
2079 j = find_end_of_inset(document.body, i)
2081 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Post-arguments post:1 .. post:4 are harvested the same way; each defaults
# to an empty list so the ERT assembly below can use it unconditionally.
2084 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2085 endarg = find_end_of_inset(document.body, arg)
2086 postarg1content = []
2088 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2089 if argbeginPlain == -1:
2090 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2092 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2093 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2095 # remove Arg insets and paragraph, if it only contains this inset
2096 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2097 del document.body[arg - 1 : endarg + 4]
2099 del document.body[arg : endarg + 1]
2102 j = find_end_of_inset(document.body, i)
2104 document.warning("Malformed LyX document: Can't find end of DRS inset")
2107 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2108 endarg = find_end_of_inset(document.body, arg)
2109 postarg2content = []
2111 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2112 if argbeginPlain == -1:
2113 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2115 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2116 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2118 # remove Arg insets and paragraph, if it only contains this inset
2119 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2120 del document.body[arg - 1 : endarg + 4]
2122 del document.body[arg : endarg + 1]
2125 j = find_end_of_inset(document.body, i)
2127 document.warning("Malformed LyX document: Can't find end of DRS inset")
2130 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2131 endarg = find_end_of_inset(document.body, arg)
2132 postarg3content = []
2134 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2135 if argbeginPlain == -1:
2136 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2138 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2139 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2141 # remove Arg insets and paragraph, if it only contains this inset
2142 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2143 del document.body[arg - 1 : endarg + 4]
2145 del document.body[arg : endarg + 1]
2148 j = find_end_of_inset(document.body, i)
2150 document.warning("Malformed LyX document: Can't find end of DRS inset")
2153 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2154 endarg = find_end_of_inset(document.body, arg)
2155 postarg4content = []
2157 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2158 if argbeginPlain == -1:
2159 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2161 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2162 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2164 # remove Arg insets and paragraph, if it only contains this inset
2165 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2166 del document.body[arg - 1 : endarg + 4]
2168 del document.body[arg : endarg + 1]
2170 # The respective LaTeX command
# NOTE(review): the `cmd = ...` assignment per flavour is elided in this
# excerpt; only the branch conditions remain visible.
2172 if drs == "\\begin_inset Flex DRS*":
2174 elif drs == "\\begin_inset Flex IfThen-DRS":
2176 elif drs == "\\begin_inset Flex Cond-DRS":
2178 elif drs == "\\begin_inset Flex QDRS":
2180 elif drs == "\\begin_inset Flex NegDRS":
2182 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT replacement around the inset's main content.
2185 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2186 endInset = find_end_of_inset(document.body, i)
2187 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2188 precontent = put_cmd_in_ert(cmd)
2189 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# Only SDRS takes a second pre-argument.
2190 if drs == "\\begin_inset Flex SDRS":
2191 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2192 precontent += put_cmd_in_ert("{")
# \qdrs, \condrs and \ifdrs take trailing arguments; \condrs and \qdrs more.
2195 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2196 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2197 if cmd == "\\condrs" or cmd == "\\qdrs":
2198 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2200 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2202 postcontent = put_cmd_in_ert("}")
# Splice the ERT in place of the inset; tail first so indices stay valid.
2204 document.body[endPlain:endInset + 1] = postcontent
2205 document.body[beginPlain + 1:beginPlain] = precontent
2206 del document.body[i : beginPlain + 1]
# drs.sty and covington supply the reverted commands.
2208 document.append_local_layout("Provides covington 1")
2209 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# NOTE(review): numbering gaps show dropped lines (early returns, the try/except
# around the float conversions, the appends of sf/tw and the closing brace of
# the \AtBeginDocument group). Code left byte-identical; comments only.
2215 def revert_babelfont(document):
2216 " Reverts the use of \\babelfont to user preamble "
# Only relevant for non-TeX fonts (XeTeX/LuaTeX) with the babel language package.
2218 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2220 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2222 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2224 i = find_token(document.header, '\\language_package', 0)
2226 document.warning("Malformed LyX document: Missing \\language_package.")
2228 if get_value(document.header, "\\language_package", 0) != "babel":
2231 # check font settings
2233 roman = sans = typew = "default"
2235 sf_scale = tt_scale = 100.0
# Read each font family from the header, remember the real font, and reset the
# header entry to "default" (the font is re-added via \babelfont in the preamble).
2237 j = find_token(document.header, "\\font_roman", 0)
2239 document.warning("Malformed LyX document: Missing \\font_roman.")
2241 # We need to use this regex since split() does not handle quote protection
2242 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2243 roman = romanfont[2].strip('"')
2244 romanfont[2] = '"default"'
2245 document.header[j] = " ".join(romanfont)
2247 j = find_token(document.header, "\\font_sans", 0)
2249 document.warning("Malformed LyX document: Missing \\font_sans.")
2251 # We need to use this regex since split() does not handle quote protection
2252 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2253 sans = sansfont[2].strip('"')
2254 sansfont[2] = '"default"'
2255 document.header[j] = " ".join(sansfont)
2257 j = find_token(document.header, "\\font_typewriter", 0)
2259 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2261 # We need to use this regex since split() does not handle quote protection
2262 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2263 typew = ttfont[2].strip('"')
2264 ttfont[2] = '"default"'
2265 document.header[j] = " ".join(ttfont)
# Old-style-figures flag.
2267 i = find_token(document.header, "\\font_osf", 0)
2269 document.warning("Malformed LyX document: Missing \\font_osf.")
2271 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans / typewriter scaling percentages; 100.0 means unscaled.
2273 j = find_token(document.header, "\\font_sf_scale", 0)
2275 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2277 sfscale = document.header[j].split()
2280 document.header[j] = " ".join(sfscale)
2283 sf_scale = float(val)
2285 document.warning("Invalid font_sf_scale value: " + val)
2287 j = find_token(document.header, "\\font_tt_scale", 0)
2289 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2291 ttscale = document.header[j].split()
2294 document.header[j] = " ".join(ttscale)
2297 tt_scale = float(val)
2299 document.warning("Invalid font_tt_scale value: " + val)
2301 # set preamble stuff
2302 pretext = ['%% This document must be processed with xelatex or lualatex!']
2303 pretext.append('\\AtBeginDocument{%')
2304 if roman != "default":
2305 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2306 if sans != "default":
2307 sf = '\\babelfont{sf}['
# Scale option is only emitted when the font is actually scaled.
2308 if sf_scale != 100.0:
2309 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2310 sf += 'Mapping=tex-text]{' + sans + '}'
2312 if typew != "default":
2313 tw = '\\babelfont{tt}'
2314 if tt_scale != 100.0:
2315 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2316 tw += '{' + typew + '}'
# OSF is applied globally via fontspec's \defaultfontfeatures.
2319 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2321 insert_to_preamble(document, pretext)
# NOTE(review): numbering gaps show dropped lines (returns and the package
# option assembly between 2359 and 2365). Code left byte-identical.
2324 def revert_minionpro(document):
2325 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies to TeX fonts; with non-TeX fonts there is nothing to revert.
2327 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2329 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2331 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Extra roman font options are what makes this revert necessary.
2334 regexp = re.compile(r'(\\font_roman_opts)')
2335 x = find_re(document.header, regexp, 0)
2339 # We need to use this regex since split() does not handle quote protection
2340 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2341 opts = romanopts[1].strip('"')
2343 i = find_token(document.header, "\\font_roman", 0)
2345 document.warning("Malformed LyX document: Missing \\font_roman.")
2348 # We need to use this regex since split() does not handle quote protection
2349 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2350 roman = romanfont[1].strip('"')
# Bail out unless the roman font really is minionpro.
2351 if roman != "minionpro":
2353 romanfont[1] = '"default"'
2354 document.header[i] = " ".join(romanfont)
# Fold the osf setting into the MinionPro package options and reset the flag.
2356 j = find_token(document.header, "\\font_osf true", 0)
2359 preamble = "\\usepackage["
2361 document.header[j] = "\\font_osf false"
2365 preamble += "]{MinionPro}"
2366 add_to_preamble(document, [preamble])
# Drop the consumed \font_roman_opts header line.
2367 del document.header[x]
# NOTE(review): many lines are elided in this listing (returns, else-branches,
# the closing of each preamble string). Code left byte-identical; comments only.
2370 def revert_font_opts(document):
2371 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2373 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2375 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2377 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2378 i = find_token(document.header, '\\language_package', 0)
2380 document.warning("Malformed LyX document: Missing \\language_package.")
# With babel the fonts are set via \babelfont, otherwise via fontspec's \set*font.
2382 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman family ---
2385 regexp = re.compile(r'(\\font_roman_opts)')
2386 i = find_re(document.header, regexp, 0)
2388 # We need to use this regex since split() does not handle quote protection
2389 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2390 opts = romanopts[1].strip('"')
2391 del document.header[i]
2393 regexp = re.compile(r'(\\font_roman)')
2394 i = find_re(document.header, regexp, 0)
2396 # We need to use this regex since split() does not handle quote protection
2397 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2398 font = romanfont[2].strip('"')
2399 romanfont[2] = '"default"'
2400 document.header[i] = " ".join(romanfont)
2401 if font != "default":
2403 preamble = "\\babelfont{rm}["
2405 preamble = "\\setmainfont["
2408 preamble += "Mapping=tex-text]{"
2411 add_to_preamble(document, [preamble])
# --- sans family ---
2414 regexp = re.compile(r'(\\font_sans_opts)')
2415 i = find_re(document.header, regexp, 0)
2418 # We need to use this regex since split() does not handle quote protection
2419 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2420 opts = sfopts[1].strip('"')
2421 del document.header[i]
# Scale percentage; emitted below as "Scale=0.<val>".
2423 regexp = re.compile(r'(\\font_sf_scale)')
2424 i = find_re(document.header, regexp, 0)
2426 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2427 regexp = re.compile(r'(\\font_sans)')
2428 i = find_re(document.header, regexp, 0)
2430 # We need to use this regex since split() does not handle quote protection
2431 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2432 font = sffont[2].strip('"')
2433 sffont[2] = '"default"'
2434 document.header[i] = " ".join(sffont)
2435 if font != "default":
2437 preamble = "\\babelfont{sf}["
2439 preamble = "\\setsansfont["
2443 preamble += "Scale=0."
2444 preamble += scaleval
2446 preamble += "Mapping=tex-text]{"
2449 add_to_preamble(document, [preamble])
# --- typewriter family ---
2452 regexp = re.compile(r'(\\font_typewriter_opts)')
2453 i = find_re(document.header, regexp, 0)
2456 # We need to use this regex since split() does not handle quote protection
2457 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2458 opts = ttopts[1].strip('"')
2459 del document.header[i]
2461 regexp = re.compile(r'(\\font_tt_scale)')
2462 i = find_re(document.header, regexp, 0)
2464 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2465 regexp = re.compile(r'(\\font_typewriter)')
2466 i = find_re(document.header, regexp, 0)
2468 # We need to use this regex since split() does not handle quote protection
2469 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2470 font = ttfont[2].strip('"')
2471 ttfont[2] = '"default"'
2472 document.header[i] = " ".join(ttfont)
2473 if font != "default":
2475 preamble = "\\babelfont{tt}["
2477 preamble = "\\setmonofont["
2481 preamble += "Scale=0."
2482 preamble += scaleval
2484 preamble += "Mapping=tex-text]{"
2487 add_to_preamble(document, [preamble])
# NOTE(review): numbering gaps show dropped lines (returns, the osf flag setup,
# and the option-string assembly). Code left byte-identical; comments only.
2490 def revert_plainNotoFonts_xopts(document):
2491 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# TeX fonts only; with non-TeX fonts there is nothing to revert.
2493 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2495 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2497 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2501 y = find_token(document.header, "\\font_osf true", 0)
2505 regexp = re.compile(r'(\\font_roman_opts)')
2506 x = find_re(document.header, regexp, 0)
# Nothing to do when neither extra options nor osf are in play.
2507 if x == -1 and not osf:
2512 # We need to use this regex since split() does not handle quote protection
2513 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2514 opts = romanopts[1].strip('"')
# The plain-noto case requires all three families to be the Noto TLF variants;
# the sans/typewriter checks below are truncated in this excerpt.
2520 i = find_token(document.header, "\\font_roman", 0)
2524 # We need to use this regex since split() does not handle quote protection
2525 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2526 roman = romanfont[1].strip('"')
2527 if roman != "NotoSerif-TLF":
2530 j = find_token(document.header, "\\font_sans", 0)
2534 # We need to use this regex since split() does not handle quote protection
2535 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2536 sf = sffont[1].strip('"')
2540 j = find_token(document.header, "\\font_typewriter", 0)
2544 # We need to use this regex since split() does not handle quote protection
2545 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2546 tt = ttfont[1].strip('"')
2550 # So we have noto as "complete font"
2551 romanfont[1] = '"default"'
2552 document.header[i] = " ".join(romanfont)
2554 preamble = "\\usepackage["
2556 preamble += "]{noto}"
2557 add_to_preamble(document, [preamble])
# Reset osf in the header and drop the consumed \font_roman_opts line.
2559 document.header[y] = "\\font_osf false"
2561 del document.header[x]
# NOTE(review): the early-return lines and the fontmap initialisation are
# elided in this listing. Code left byte-identical; comments only.
2564 def revert_notoFonts_xopts(document):
2565 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# TeX fonts only; with non-TeX fonts there is nothing to revert.
2567 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2569 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2571 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared font-mapping machinery, then emit collected packages.
2575 fm = createFontMapping(['Noto'])
2576 if revert_fonts(document, fm, fontmap, True):
2577 add_preamble_fonts(document, fontmap)
# NOTE(review): the early-return lines and the fontmap initialisation are
# elided in this listing. Code left byte-identical; comments only.
2580 def revert_IBMFonts_xopts(document):
2581 " Revert native IBM font definition (with extra options) to LaTeX "
# TeX fonts only; with non-TeX fonts there is nothing to revert.
2584 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2586 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2588 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared font-mapping machinery, then emit collected packages.
2592 fm = createFontMapping(['IBM'])
2594 if revert_fonts(document, fm, fontmap, True):
2595 add_preamble_fonts(document, fontmap)
# NOTE(review): the early-return lines and the fontmap initialisation are
# elided in this listing. Code left byte-identical; comments only.
2598 def revert_AdobeFonts_xopts(document):
2599 " Revert native Adobe font definition (with extra options) to LaTeX "
# TeX fonts only; with non-TeX fonts there is nothing to revert.
2601 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2603 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2605 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared font-mapping machinery, then emit collected packages.
2609 fm = createFontMapping(['Adobe'])
2611 if revert_fonts(document, fm, fontmap, True):
2612 add_preamble_fonts(document, fontmap)
# lyx2lyx dispatch tables: each entry is [format_number, [functions]] run when
# converting up (convert) or down (revert) across that file-format step.
# NOTE(review): this excerpt is heavily truncated — the "convert = [" opener
# and many convert entries are missing (numbering gaps), and the __main__
# guard's body lies beyond this excerpt.
2619 supported_versions = ["2.4.0", "2.4"]
2621 [545, [convert_lst_literalparam]],
2626 [550, [convert_fontenc]],
2633 [557, [convert_vcsinfo]],
2634 [558, [removeFrontMatterStyles]],
2637 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2641 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2642 [566, [convert_hebrew_parentheses]],
2648 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2649 [573, [convert_inputencoding_namechange]],
2650 [574, [convert_ruby_module, convert_utf8_japanese]],
2651 [575, [convert_lineno]],
2653 [577, [convert_linggloss]],
# Revert table: newest format first; revert_font_opts must stay last in the
# 579 entry (see inline comment).
2659 revert = [[579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2660 [578, [revert_babelfont]],
2661 [577, [revert_drs]],
2662 [576, [revert_linggloss, revert_subexarg]],
2663 [575, [revert_new_languages]],
2664 [574, [revert_lineno]],
2665 [573, [revert_ruby_module, revert_utf8_japanese]],
2666 [572, [revert_inputencoding_namechange]],
2667 [571, [revert_notoFonts]],
2668 [570, [revert_cmidruletrimming]],
2669 [569, [revert_bibfileencodings]],
2670 [568, [revert_tablestyle]],
2671 [567, [revert_soul]],
2672 [566, [revert_malayalam]],
2673 [565, [revert_hebrew_parentheses]],
2674 [564, [revert_AdobeFonts]],
2675 [563, [revert_lformatinfo]],
2676 [562, [revert_listpargs]],
2677 [561, [revert_l7ninfo]],
2678 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2679 [559, [revert_timeinfo, revert_namenoextinfo]],
2680 [558, [revert_dateinfo]],
2681 [557, [addFrontMatterStyles]],
2682 [556, [revert_vcsinfo]],
2683 [555, [revert_bibencoding]],
2684 [554, [revert_vcolumns]],
2685 [553, [revert_stretchcolumn]],
2686 [552, [revert_tuftecite]],
2687 [551, [revert_floatpclass, revert_floatalignment]],
2688 [550, [revert_nospellcheck]],
2689 [549, [revert_fontenc]],
2690 [548, []],# dummy format change
2691 [547, [revert_lscape]],
2692 [546, [revert_xcharter]],
2693 [545, [revert_paratype]],
2694 [544, [revert_lst_literalparam]]
# Script entry point (body continues beyond this excerpt).
2698 if __name__ == "__main__":