1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages (with their options) to the user preamble.

    fontmap maps a LaTeX package name to the list of options collected for it;
    an empty option list yields a plain \\usepackage{pkg} line.
    """
    # Fix: the visible code referenced 'pkg'/'xoption' without the loop and
    # the empty-options branch that define them (NameError as written).
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the lookup key "<pkg>:<opt1>-<opt2>-..." for a package + options."""
    joined_opts = "-".join(options)
    return "%s:%s" % (pkg, joined_opts)
        # Fragment of the fontinfo record class: attribute defaults (from
        # __init__) followed by the key computation (from addkey). The
        # enclosing class/def lines are not visible in this view.
        self.fontname = None   # key into font2pkgmap
        self.fonttype = None   # one of: roman, sans, typewriter, math
        self.scaletype = None  # None, 'sf' or 'tt'
        self.scaleopt = None   # None, 'scaled' or 'scale'
        self.pkgkey = None     # key into pkg2fontmap
        self.osfopt = None     # None, or an option string (old-style figures)
        # addkey(): (re)derive the package lookup key from package + options.
        self.pkgkey = createkey(self.package, self.options)
        # fontmapping state (from __init__): bidirectional font <-> package
        # lookup tables filled by expandFontMapping().
        self.font2pkgmap = dict()  # LyX font name -> fontinfo record
        self.pkg2fontmap = dict()  # package key (see createkey) -> font name
        self.pkginmap = dict()  # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
        " Expand fontinfo mapping"
        #
        # fontlist:    list of fontnames, each element
        #              may contain a ','-separated list of needed options
        #              like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type:   one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type:  one of None, 'sf', 'tt'
        # pkg:         package defining the font. Defaults to fontname if None
        # scaleopt:    one of None, 'scale', 'scaled', or some other string
        #              to be used in scale option (e.g. scaled=0.7)
        # osfopt:      None or some other string to be used in osf option
        #
        # NOTE(review): the per-font loop, the fontinfo() construction and the
        # option parsing appear to be elided from this view; the assignments
        # below populate a single fontinfo entry 'fe' for 'font_name'.
        fe.fonttype = font_type
        fe.scaletype = scale_type
        fe.fontname = font_name
        fe.scaleopt = scaleopt
        # Fall back to the font name when no explicit package was given.
        fe.package = font_name
        self.font2pkgmap[font_name] = fe
        if fe.pkgkey in self.pkg2fontmap:
            # Repeated the same entry? Check content
            if self.pkg2fontmap[fe.pkgkey] != font_name:
                # NOTE(review): 'document' is not a parameter of this method;
                # this line would raise NameError if reached — confirm upstream.
                document.error("Something is wrong in pkgname+options <-> fontname mapping")
        self.pkg2fontmap[fe.pkgkey] = font_name
        self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        """Map a LaTeX package name + option list back to the LyX font name."""
        pkgkey = createkey(pkg, options)
        if not pkgkey in self.pkg2fontmap:
            # NOTE(review): an early 'return None' appears to be elided here.
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            # Inconsistent maps — should not happen.
            # NOTE(review): 'document' is not in scope in this method; confirm.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
            # NOTE(review): the success/failure return statements are elided.
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    #   convert_latexFonts() and
    #   revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    #
    # NOTE(review): the fontmapping() construction, the 'DejaVu'/'IBM'/'Noto'
    # branch headers and the final 'return fm' are not visible in this view.
    for font in fontlist:
            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                                  'IBMPlexSerifSemibold,semibold'],
                                 "roman", None, "plex-serif")
            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                                 "sans", "sf", "plex-sans", "scale")
            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                  'IBMPlexMonoSemibold,semibold'],
                                 "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
                                  'NotoSerifExtralight,extralight'],
                                 "roman", None, "noto-serif", None, "osf")
            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                                  'NotoSansThin,thin', 'NotoSansLight,light',
                                  'NotoSansExtralight,extralight'],
                                 "sans", "sf", "noto-sans", "scaled")
            fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
def convert_fonts(document, fm):
    " Handle font definition (LaTeX preamble -> native) "

    # Matches \usepackage[opts]{pkg} lines in the preamble.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    # Matches a scale=/scaled=<value> package option.
    rscaleopt = re.compile(r'^scaled?=(.*)')
    # NOTE(review): loop initialisation, 'continue' guards and parts of the
    # option bookkeeping appear to be elided from this view.
    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        if mo == None or mo.group(2) == None:
        # Normalise the option list: strip spaces, split on commas.
        options = mo.group(2).replace(' ', '').split(",")
        while o < len(options):
            if options[o] == osfoption:
            mo = rscaleopt.search(options[o])
        # Only packages registered in the font mapping are converted.
        if not pkg in fm.pkginmap:
        fn = fm.getfontname(pkg, options)
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
        fontscale = "\\font_" + fontinfo.scaletype + "_scale"
        fontinfo.scaleval = oscale
        if fontinfo.osfopt == None:
        # NOTE(review): list.extend on a str appends single characters
        # ('o','s','f') — looks like options.append("osf") was meant; confirm.
        options.extend("osf")
        osf = find_token(document.header, "\\font_osf false")
        document.header[osf] = "\\font_osf true"
        # Drop the marker comment lyx2lyx put above the package line.
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # Scale is stored as a zero-padded integer percentage (e.g. 070).
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
        # Finally point the \font_<type> header line at the new LyX font name.
        ft = "\\font_" + fontinfo.fonttype
        j = find_token(document.header, ft, 0)
        val = get_value(document.header, ft, j)
        words = val.split() # ! splits also values like '"DejaVu Sans"'
        words[0] = '"' + fn + '"'
        document.header[j] = ft + ' ' + ' '.join(words)
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    " Revert native font definition to LaTeX "
    # fontlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    # NOTE(review): loop initialisation and several 'continue'/guard lines
    # appear to be elided from this view.

    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # \font_roman, \font_sans, \font_typewriter or \font_math
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        # Optionally transfer extra package options stored in \font_*_opts.
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # Reset the header font to "default"; preamble code replaces it.
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        if fontinfo.osfopt != None:
            osf = find_token(document.header, "\\font_osf true")
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf true"
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf true"
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf true"
                osf = find_token(document.header, osftag)
            fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
328 ###############################################################################
330 ### Conversion and reversion routines
332 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings to the LyX 2.4 names.

    "auto" becomes "auto-legacy"; "default" becomes "auto-legacy-plain".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # Fix: without this guard, find_token's -1 sentinel would index and
        # rewrite the LAST header line.
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename LyX 2.4 inputencoding settings back to the old names.

    "auto-legacy-plain" becomes "default"; "auto-legacy" becomes "auto".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # Fix: guard against find_token's -1 sentinel corrupting the last
        # header line.
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    " Handle Noto fonts definition to LaTeX "

    # Only documents typeset with TeX fonts are affected.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Noto'])
    convert_fonts(document, mapping)
def revert_notoFonts(document):
    """Revert native Noto font definitions to LaTeX preamble code."""

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: 'fontmap' was used without being initialized (NameError).
        # It collects package -> options while reverting, consumed below.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    " Handle DejaVu and IBMPlex fonts definition to LaTeX "

    # Nothing to do for non-TeX (system) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    """Revert native DejaVu and IBMPlex font definitions to LaTeX."""

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: 'fontmap' was used without being initialized (NameError).
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    " Handle Adobe Source fonts definition to LaTeX "

    # Skip documents that use system (non-TeX) fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['Adobe'])
    convert_fonts(document, mapping)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definitions to LaTeX preamble code."""

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Fix: 'fontmap' was used without being initialized (NameError).
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"

    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan-loop header and its exit/continue guards appear
    # to be elided from this view.
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        # Layout name is whatever follows the '\begin_layout ' prefix.
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        j = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Also swallow trailing blank lines after the layout.
        while document.body[j+1].strip() == '':
        document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"

    # Only the elsarticle class has these styles.
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Insert a (Begin|End)Frontmatter layout at 'line', swallowing any
        # surrounding blank lines first.
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
                                      '\\begin_inset Note Note',
                                      '\\begin_layout Plain Layout',
                                      '\\end_inset', '', '',

    # Layouts that belong to the frontmatter block.
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan loop, the 'first' bookkeeping and several guard
    # lines appear to be elided from this view.
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
        k = find_end_of_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
        # Wrap the whole detected frontmatter range (End first so that the
        # earlier 'first' index stays valid).
        insertFrontmatter('End', k+1)
        insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "

    # NOTE(review): the scan-loop header and '-1' guards appear to be elided.
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
        # Skip to the blank line that terminates the parameter list, then
        # insert the new parameter there.
        while i < j and document.body[i].strip() != '':
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "

    # NOTE(review): the scan-loop header and '-1' guards appear to be elided.
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
        # Drop the 'literal' parameter line inside the inset, if present.
        del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Header positions of the ParaType roman/sans/typewriter settings.
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = find_token(document.header, "\\font_sf_scale", 0)
            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        sfscale = document.header[sfval].split()
        document.header[sfval] = " ".join(sfscale)
            sf_scale = float(val)
            document.warning("Invalid font_sf_scale value: " + val)
        # NOTE(review): comparing a float to the string "100.0" is always
        # True — probably 'sf_scale != 100.0' was intended; confirm upstream.
        if sf_scale != "100.0":
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
            ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # All three families set -> the umbrella 'paratype' package suffices;
        # otherwise emit per-family packages below.
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
                add_to_preamble(document, ["\\usepackage{PTSerif}"])
                document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
                add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
                add_to_preamble(document, ["\\usepackage{PTSans}"])
                document.header[j] = document.header[j].replace("PTSans-TLF", "default")
                add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
                add_to_preamble(document, ["\\usepackage{PTMono}"])
                document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "

    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
        document.header[j] = "\\font_osf false"
    # NOTE(review): the construction of 'options' (e.g. "[osf]") appears to
    # be elided from this view.
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "

    # Only act when the 'landscape' module is loaded.
    # NOTE(review): the early 'return' and the scan-loop header appear to be
    # elided from this view.
    if not "landscape" in document.get_module_list():
        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # Floating variant: wrap in \afterpage{...} so the rotation
            # starts on the next page.
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    """Convert the default font-encoding setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        # Fix: guard against find_token's -1 sentinel — without it the last
        # header line would be rewritten.
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert the default font-encoding setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        # Fix: guard against find_token's -1 sentinel — without it the last
        # header line would be rewritten.
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "

    # NOTE(review): the scan loop and the deletion of the matched line appear
    # to be elided from this view.
        i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "

    del_token(document.header, "\\float_placement class")
    # NOTE(review): the scan loop and the per-inset deletion/rewrite lines
    # appear to be elided from this view.
        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
        # Placement parameter sits within the first two lines of the inset.
        k = find_token(document.body, 'placement class', i, i + 2)
            k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "

    # Global (document-wide) alignment; deleted from the header.
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    # NOTE(review): the scan-loop header and several guards appear to be
    # elided from this view.
        i = find_token(document.body, '\\begin_inset Float', i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
        k = find_token(document.body, 'alignment', i, i+4)
        alignment = get_value(document.body, "alignment", k)
        # "document" means: inherit the document-wide setting.
        if alignment == "document":
            alignment = galignment
        l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
            document.warning("Can't find float layout!")
        # Emit the equivalent alignment command as ERT inside the float.
        if alignment == "left":
            alcmd = put_cmd_in_ert("\\raggedright{}")
        elif alignment == "center":
            alcmd = put_cmd_in_ert("\\centering{}")
        elif alignment == "right":
            alcmd = put_cmd_in_ert("\\raggedleft{}")
        document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    """Revert \\cite commands in tufte classes."""

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    # NOTE(review): the early 'return', the scan loop and the construction of
    # 'res' appear to be elided from this view.
        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of citation inset at line %d!!" %(i))
        k = find_token(document.body, "LatexCommand", i, j)
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
        cmd = get_value(document.body, "LatexCommand", k)
        # Optional pre/post text and the mandatory citation key.
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
            document.warning("Citation inset at line %d does not have a key!" %(i))
        # Replace command with ERT
            res += "[" + pre + "]"
            res += "[" + post + "]"
        res += "{" + key + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): the scan-loop header and '-1' guards appear to be elided.
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of tabular.")
        # Strip varwidth="true" from every <column ...> tag in this table.
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # NOTE(review): initialisation of i/needvarwidth/needarray/col_info/m,
    # the scan-loop header and several guards appear to be elided from this
    # view; structure below is reconstructed accordingly.
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Malformed LyX document: Could not find end of tabular.")

        # Collect necessary column information
        # Row/column counts come from the <features ...> line attributes.
        nrows = int(document.body[i+1].split('"')[3])
        ncols = int(document.body[i+1].split('"')[5])
        for k in range(ncols):
            m = find_token(document.body, "<column", m)
            width = get_option_value(document.body[m], 'width')
            varwidth = get_option_value(document.body[m], 'varwidth')
            alignment = get_option_value(document.body[m], 'alignment')
            special = get_option_value(document.body[m], 'special')
            col_info.append([width, varwidth, alignment, special, m])

        # Visit every cell and decide whether it needs a varwidth column.
        for row in range(nrows):
            for col in range(ncols):
                m = find_token(document.body, "<cell", m)
                multicolumn = get_option_value(document.body[m], 'multicolumn')
                multirow = get_option_value(document.body[m], 'multirow')
                width = get_option_value(document.body[m], 'width')
                rotate = get_option_value(document.body[m], 'rotate')
                # Check for: linebreaks, multipars, non-standard environments
                endcell = find_token(document.body, "</cell>", begcell)
                if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
                elif count_pars_in_inset(document.body, begcell + 2) > 1:
                elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                    if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                        alignment = col_info[col][2]
                        col_line = col_info[col][4]
                        if alignment == "center":
                            vval = ">{\\centering}"
                        elif alignment == "left":
                            vval = ">{\\raggedright}"
                        elif alignment == "right":
                            vval = ">{\\raggedleft}"
                        vval += "V{\\linewidth}"
                        # Rewrite the <column> tag with the special= preamble.
                        document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                    # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                    # with newlines, and we do not want that)
                    endcell = find_token(document.body, "</cell>", begcell)
                    nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                        nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                        nle = find_end_of_inset(document.body, nl)
                        del(document.body[nle:nle+1])
                        document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                        document.body[nl:nl+1] = put_cmd_in_ert("\\\\")

    # Emit the preamble packages required by the rewritten columns.
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "

    # Get cite engine
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    # NOTE(review): the dict opener ('encodings = {'), many entries, the
    # closing brace and the scan-loop scaffolding appear to be elided from
    # this view.
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "utf8-platex" : "utf8",
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        encoding = get_quoted_value(document.body, "encoding", i, j)
        # remove encoding line
        k = find_token(document.body, "encoding", i, j)
        # "default" needs no explicit bibencoding option.
        if encoding == "default":
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
            # biblatex branch: carry the encoding via \biblio_options.
            h = find_token(document.header, "\\biblio_options", 0)
                biblio_options = get_value(document.header, "\\biblio_options", h)
                if not "bibencoding" in biblio_options:
                    document.header[h] += ",bibencoding=%s" % encodings[encoding]
                bs = find_token(document.header, "\\biblatex_bibstyle", 0)
                    # this should not happen
                    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
                    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
            # bibtex branch: wrap the inset in a local \inputencoding group.
            document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
            document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "

    # Map old buffer-inset argument names to the new vcs-inset names.
    # NOTE(review): the dict opener ('types = {'), further entries and the
    # scan-loop scaffolding appear to be elided from this view.
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Only recognised vcs-* arguments are migrated.
        if argv not in list(types.keys()):
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "

    # Valid arguments of the new vcs Info inset.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    # NOTE(review): the scan-loop scaffolding and type check appear to be
    # elided from this view.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
            document.warning("Malformed Info inset. Invalid vcs arg.")
        # Fold back into a buffer-type inset with a "vcs-" prefixed argument.
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
1002 def revert_dateinfo(document):
1003 " Revert date info insets to static text. "
1005 # FIXME This currently only considers the main language and uses the system locale
1006 # Ideally, it should honor context languages and switch the locale accordingly.
1008 # The date formats for each language using strftime syntax:
1009 # long, short, loclong, locmedium, locshort
1011 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1012 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1013 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1014 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1015 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1016 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1017 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1018 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1019 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1020 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1021 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1022 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1023 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1024 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1025 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1026 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1027 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1028 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1029 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1030 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1031 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1032 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1033 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1034 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1035 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1036 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1037 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1038 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1039 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1040 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1041 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1042 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1043 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1044 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1045 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1046 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1047 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1048 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1049 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1050 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1051 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1052 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1053 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1054 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1055 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1056 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1057 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1058 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1059 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1060 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1061 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1062 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1063 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1064 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1065 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1066 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1067 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1068 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1069 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1070 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1071 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1072 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1073 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1074 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1075 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1076 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1077 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1078 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1079 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1080 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1081 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1082 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1083 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1084 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1085 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1086 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1087 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1088 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1089 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1090 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1091 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1092 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1093 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1094 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1095 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1096 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1097 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1098 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1099 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1100 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1101 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1102 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1103 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1104 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1105 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1106 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1107 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1108 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1109 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1110 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1111 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1112 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1115 types = ["date", "fixdate", "moddate" ]
1116 lang = get_value(document.header, "\\language")
1118 document.warning("Malformed LyX document! No \\language header found!")
1123 i = find_token(document.body, "\\begin_inset Info", i+1)
1126 j = find_end_of_inset(document.body, i+1)
1128 document.warning("Malformed LyX document: Could not find end of Info inset.")
1130 tp = find_token(document.body, 'type', i, j)
1131 tpv = get_quoted_value(document.body, "type", tp)
1132 if tpv not in types:
1134 arg = find_token(document.body, 'arg', i, j)
1135 argv = get_quoted_value(document.body, "arg", arg)
1138 if tpv == "fixdate":
1139 datecomps = argv.split('@')
1140 if len(datecomps) > 1:
1142 isodate = datecomps[1]
1143 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1145 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1146 # FIXME if we had the path to the original document (not the one in the tmp dir),
1147 # we could use the mtime.
1148 # elif tpv == "moddate":
1149 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1152 result = dte.isodate()
1153 elif argv == "long":
1154 result = dte.strftime(dateformats[lang][0])
1155 elif argv == "short":
1156 result = dte.strftime(dateformats[lang][1])
1157 elif argv == "loclong":
1158 result = dte.strftime(dateformats[lang][2])
1159 elif argv == "locmedium":
1160 result = dte.strftime(dateformats[lang][3])
1161 elif argv == "locshort":
1162 result = dte.strftime(dateformats[lang][4])
1164 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1165 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1166 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1167 fmt = re.sub('[^\'%]d', '%d', fmt)
1168 fmt = fmt.replace("'", "")
1169 result = dte.strftime(fmt)
1170 if sys.version_info < (3,0):
1171 # In Python 2, datetime module works with binary strings,
1172 # our dateformat strings are utf8-encoded:
1173 result = result.decode('utf-8')
1174 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """ Revert time info insets to static text.

    Replaces Info insets of type time/fixtime/modtime in document.body
    with a plain-text rendering of the time, localized via the
    per-language strftime patterns below.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # [long format, short format]
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)
    if lang not in timeformats:
        # Fall back to English rather than raising KeyError below.
        lang = "english"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Default: the current time (naive, no timezone).
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # fixtime args look like "<format>@<iso time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search('(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
# FIXME if we had the path to the original document (not the one in the tmp dir),
# we could use the mtime.
#        elif tpv == "modtime":
#            tme = datetime.fromtimestamp(os.path.getmtime(document.dir)).time()
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt-style time format characters to strftime syntax.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # was `dte.strftime(fmt)`: `dte` is never defined in this
            # function (NameError); the time object here is `tme`.
            result = tme.strftime(fmt)
        # was `= result`: assigning a bare string to a list slice splices it
        # in character by character; wrap in a list to insert one line
        # (matches revert_dateinfo's `[result]`).
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """ Merge buffer Info inset type name-noext to name.

    Scans document.body for Info insets of type "buffer" whose argument
    is "name-noext" and rewrites the argument to "name".
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # Only buffer-type insets carry the name-noext argument; without
        # this guard the first Info inset would be rewritten regardless.
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """ Revert l7n Info inset to text.

    Replaces each Info inset of type "l7n" with its (cleaned-up)
    localization string.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # was `= argv`: a bare string in a slice assignment splices the text
        # character by character; insert a single line instead (consistent
        # with revert_dateinfo's `[result]`).
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    """ Reverts listpreamble arguments to TeX-code.

    Each `Argument listpreamble:` inset is removed and its contents are
    re-inserted as an ERT group at the start of the containing paragraph.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        # start of the layout's content -- assumes get_containing_layout
        # returns it as the fourth tuple element (TODO confirm against
        # parser_tools)
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # Remove the argument inset first, then prepend the ERT at the
        # paragraph start (parbeg < i, so it stays valid after the delete).
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """ Revert layout format Info inset to text.

    Replaces `lyxinfo` Info insets with argument "layoutformat" by the
    literal layout format number.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded layout format number; was `= "69"`, which would splice
        # the two characters "6" and "9" as separate body lines.
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    # Stack of the active language per open layout; starts with the
    # document language.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # was `line.lstrip('\\lang ')`: lstrip strips a *character set*,
            # not a prefix, so it also ate leading l/a/n/g characters of the
            # language name (e.g. "ngerman" -> "erman"). Slice off the
            # prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # New layout inherits the current language.
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap ( and ) via a NUL placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    Swapping "(" and ")" is its own inverse, so the convert routine also
    performs the reversion; this wrapper only exists to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """ Revert soul module flex insets to ERT.

    Adds the needed LaTeX packages to the preamble and turns the module's
    flex insets into the corresponding soul commands.
    """
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
    # soul is needed as soon as any of the insets is used
    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # \hl (Highlight) additionally needs the color package
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """ Remove tablestyle params from the header. """
    i = find_token(document.header, "\\tablestyle")
    # Guard the delete: find_token returns -1 on a miss, and
    # `del document.header[-1]` would silently drop the last header line.
    if i != -1:
        del document.header[i]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "
    # NOTE(review): this copy of the function is visibly incomplete -- loop
    # headers, `== -1` guards, the encoding-map opener and several of its
    # entries, and the `encmap` initialisation are missing. The inline
    # notes below mark the evident gaps; restore the missing glue from
    # upstream lyx2lyx before relying on this function. The logic is
    # order-sensitive, so it is annotated rather than rewritten here.

    # Get cite engine
    i = find_token(document.header, "\\cite_engine", 0)
    # (gap: an `if i == -1:` guard around this warning is evidently missing)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)

    # Only biblatex(-natbib) documents get the preamble treatment below.
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
    # (gap: the mapping-dict opener, e.g. `encodings = {`, is missing here,
    #  as are several entries, e.g. between "applemac" and "cp1250" and the
    #  closing brace)
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "cp1250" : "cp1250",
    "cp1251" : "cp1251",
    "cp1252" : "cp1252",
    "cp1255" : "cp1255",
    "cp1256" : "cp1256",
    "cp1257" : "cp1257",
    "koi8-r" : "koi8-r",
    "koi8-u" : "koi8-u",
    "utf8-platex" : "utf8",

    # (gap: `i = 0` / `while True:` loop header missing)
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    # (gap: `if i == -1: break` missing)
    j = find_end_of_inset(document.body, i)
    # (gap: `if j == -1:` guard missing)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    # tab-separated "<bibfile> <encoding>" pairs
    encodings = get_quoted_value(document.body, "file_encodings", i, j)
    # (gap: a `if not encodings: continue` style guard is evidently missing)
    bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
    opts = get_quoted_value(document.body, "biblatexopts", i, j)
    if len(bibfiles) == 0:
        document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
    # remove encoding line
    k = find_token(document.body, "file_encodings", i, j)
    # (gap: an `if k != -1:` guard is evidently missing)
    del document.body[k]
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    enclist = encodings.split("\t")
    # (gap: `encmap = dict()` and the `for pp in enclist:` header are missing)
    ppp = pp.split(" ", 1)
    encmap[ppp[0]] = ppp[1]
    # Register each bibfile as a biblatex resource, carrying its encoding.
    for bib in bibfiles:
        pr = "\\addbibresource"
        if bib in encmap.keys():
            pr += "[bibencoding=" + encmap[bib] + "]"
        pr += "{" + bib + "}"
        add_to_preamble(document, [pr])
    # Insert ERT \\printbibliography and wrap bibtex inset to a Note
    pcmd = "printbibliography"
    # (gap: an `if opts:` guard is evidently missing)
    pcmd += "[" + opts + "]"
    repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
    "status open", "", "\\begin_layout Plain Layout" ]
    repl += document.body[i:j+1]
    repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
    document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """ Remove \\cmidrule trimming options from table cells. """
    # FIXME: Revert to TeX code?
    # Compile once, outside the scan loop.
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# NOTE(review): the raw strings below are the interior of the module-level
# `ruby_inset_def` local-layout definition consumed by convert_ruby_module()
# and revert_ruby_module() below. The opening `ruby_inset_def = [`, the
# closing `]`, and a number of entries are missing from this copy -- restore
# them from upstream lyx2lyx before editing. Whitespace inside these strings
# is significant to the layout parser, so the lines are kept byte-identical.
r'### Inserted by lyx2lyx (ruby inset) ###',
r'InsetLayout Flex:Ruby',
r' LyxType charstyle',
r' LatexType command',
r' HTMLInnerTag rb',
r' HTMLInnerAttr ""',
r' LabelString "Ruby"',
r' Decoration Conglomerate',
r' \ifdefined\kanjiskip',
r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
r' \else \ifdefined\luatexversion',
r' \usepackage{luatexja-ruby}',
r' \else \ifdefined\XeTeXversion',
r' \usepackage{ruby}%',
r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
r' Argument post:1',
r' LabelString "ruby text"',
r' MenuString "Ruby Text|R"',
r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    # Only switch to the module if the local layout was actually present
    # (del_local_layout reports whether it removed anything).
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # Re-insert the local layout only if the module was actually in use
    # (del_module reports whether it removed anything).
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """ Use generic utf8 with Japanese documents. """
    lang = get_value(document.header, "\\language")
    # Bail out early for non-Japanese documents; in this copy the guard
    # had lost its `return` and fell through.
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """ Use Japanese utf8 variants with Japanese documents. """
    inputenc = get_value(document.header, "\\inputencoding")
    # Only generic utf8 needs reverting; in this copy the guard had lost
    # its `return` and fell through.
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """ Replace lineno setting with user-preamble code. """

    # Read (and remove) the header parameters.
    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        # Line numbering disabled: nothing to emit.
        return
    # Only bracket the options when there are any.
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    """ Replace user-preamble code with native lineno support. """
    # Defaults when no lineno usage is found in the preamble.
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        # \usepackage[<options>]{lineno} is expected on the preceding line.
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            # Drop the two preamble lines and the lyx2lyx marker above them.
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # Insert the native header parameters before \index.
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        # was `used_languages.add(document.language)`: record the language
        # actually named by this \lang token, not the document language.
        lyxname = document.body[i][6:].strip()
        if lyxname in new_languages:
            used_languages.add(lyxname)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")

    for lang in used_languages:
        # was `revert(lang, ...)`: `revert` is undefined here (NameError);
        # the helper is revert_language and needs the document.
        revert_language(document, lang, *new_languages[lang])
# NOTE(review): the raw strings below are the interior of the module-level
# `gloss_inset_def` local-layout definition used by convert_linggloss() and
# revert_linggloss() below. The opening `gloss_inset_def = [`, the closing
# `]`, and several entries are missing from this copy -- restore them from
# upstream lyx2lyx before editing. Whitespace inside these strings is
# significant to the layout parser, so the lines are kept byte-identical.
r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
r'InsetLayout Flex:Glosse',
r' LabelString "Gloss (old version)"',
r' MenuString "Gloss (old version)"',
r' LatexType environment',
r' LatexName linggloss',
r' Decoration minimalistic',
r' CustomPars false',
r' ForcePlain true',
r' ParbreakIsNewline true',
r' FreeSpacing true',
r' Requires covington',
r' \@ifundefined{linggloss}{%',
r' \newenvironment{linggloss}[2][]{',
r' \def\glosstr{\glt #1}%',
r' {\glosstr\glend}}{}',
r' ResetsFont true',
r' Decoration conglomerate',
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
# Local-layout definition for the deprecated Tri-Glosse inset; appended to
# documents by convert_linggloss() and removed by revert_linggloss().
# NOTE(review): the closing `]` and several entries are missing from this
# copy -- restore them from upstream lyx2lyx before editing. Whitespace
# inside these strings is significant to the layout parser, so the lines
# are kept byte-identical.
glosss_inset_def = [
r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
r'InsetLayout Flex:Tri-Glosse',
r' LabelString "Tri-Gloss (old version)"',
r' MenuString "Tri-Gloss (old version)"',
r' LatexType environment',
r' LatexName lingglosss',
r' Decoration minimalistic',
r' CustomPars false',
r' ForcePlain true',
r' ParbreakIsNewline true',
r' FreeSpacing true',
r' Requires covington',
r' \@ifundefined{lingglosss}{%',
r' \newenvironment{lingglosss}[2][]{',
r' \def\glosstr{\glt #1}%',
r' {\glosstr\glend}}{}',
r' ResetsFont true',
r' Decoration conglomerate',
r' LabelString "Translation"',
r' MenuString "Glosse Translation|s"',
r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # Append the compatibility layout for each deprecated inset that is
    # actually present in the body.
    for token, layout in (("\\begin_inset Flex Glosse", gloss_inset_def),
                          ("\\begin_inset Flex Tri-Glosse", glosss_inset_def)):
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout)
def revert_linggloss(document):
    " Revert to old ling gloss definitions "
    # NOTE(review): several lines are missing from this copy (the inner
    # `i = 0` / `while True:` loop header, `if i == -1: break`, the
    # `if arg != -1:` blocks, `else:` branches, `continue`s, and the `cmd`
    # initialisation). The inline notes mark the evident gaps. The splice
    # logic is order-sensitive, so it is annotated here rather than
    # rewritten; restore the missing glue from upstream lyx2lyx.
    if not "linguistics" in document.get_module_list():
        # (gap: a `return` is evidently missing here)
    # Drop the compatibility layouts inserted by convert_linggloss.
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        # (gap: `i = 0` / `while True:` loop header missing; `i+1` below
        #  implies a scan loop)
        i = find_token(document.body, glosse, i+1)
        # (gap: `if i == -1: break` missing)
        j = find_end_of_inset(document.body, i)
        # (gap: `if j == -1:` guard missing around this warning)
        document.warning("Malformed LyX document: Can't find end of Gloss inset")

        # Extract the optional argument (Argument 1)
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # (gap: `optargcontent = []` / `if arg != -1:` evidently missing)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            # (gap: `continue` missing)
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            # (gap: an `else:` evidently belongs before the next delete)
        del document.body[arg : endarg + 1]

        # First mandatory argument (Argument post:1)
        arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # (gap: `marg1content = []` / `if arg != -1:` evidently missing)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
            # (gap: `continue` missing)
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            # (gap: an `else:` evidently belongs before the next delete)
        del document.body[arg : endarg + 1]

        # Second mandatory argument (Argument post:2)
        arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
            # (gap: `continue` missing)
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            # (gap: an `else:` evidently belongs before the next delete)
        del document.body[arg : endarg + 1]

        # Third argument (Argument post:3), only present for the 3-line gloss
        arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
        endarg = find_end_of_inset(document.body, arg)
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
            # (gap: `continue` missing)
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
            # (gap: an `else:` evidently belongs before the next delete)
        del document.body[arg : endarg + 1]

        # (gap: a default `cmd` assignment is evidently missing; `cmd` is
        #  compared against "\\trigloss" below)
        if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
            # (gap: the `cmd = ...` assignment for the 3-line case is missing)

        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endInset = find_end_of_inset(document.body, i)
        endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
        # Build the ERT wrapper: \cmd[opt]{arg1}{arg2}[{arg3}]
        precontent = put_cmd_in_ert(cmd)
        if len(optargcontent) > 0:
            precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
        precontent += put_cmd_in_ert("{")

        postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
        if cmd == "\\trigloss":
            postcontent += put_cmd_in_ert("}{") + marg3content
        postcontent += put_cmd_in_ert("}")

        # Splice order matters: rewrite the tail first so the earlier
        # indices (beginPlain, i) stay valid.
        document.body[endPlain:endInset + 1] = postcontent
        document.body[beginPlain + 1:beginPlain] = precontent
        del document.body[i : beginPlain + 1]
        # (gap: upstream presumably guards this with a once-only flag --
        #  TODO confirm)
        document.append_local_layout("Requires covington")
# revert_subexarg: revert linguistic "Subexample" layouts (linguistics module)
# to raw LaTeX ERT: \begin{subexamples}[<optarg>] \item ... \end{subexamples}.
# NOTE(review): the embedded line numbers (1966, 1967, 1969, ...) are not
# contiguous — this dump is missing lines (module check body, `if j == -1:`
# guards, `continue` statements, loop header), so the listing below is
# partial and not runnable as-is; recover the full text from upstream lyx2lyx.
1966 def revert_subexarg(document):
1967 " Revert linguistic subexamples with argument to ERT "
# Only documents loading the "linguistics" module can contain this layout.
1969 if not "linguistics" in document.get_module_list():
1975 i = find_token(document.body, "\\begin_layout Subexample", i+1)
1978 j = find_end_of_layout(document.body, i)
1980 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Consecutive Subexample layouts belong to the same subexamples environment:
# extend j over each following Subexample paragraph.
1983 # check for consecutive layouts
1984 k = find_token(document.body, "\\begin_layout", j)
1985 if k == -1 or document.body[k] != "\\begin_layout Subexample":
1987 j = find_end_of_layout(document.body, k)
1989 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument (Argument 1 inset) as LaTeX.
1992 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1996 endarg = find_end_of_inset(document.body, arg)
1998 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1999 if argbeginPlain == -1:
2000 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2002 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2003 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2005 # remove Arg insets and paragraph, if it only contains this inset
2006 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2007 del document.body[arg - 1 : endarg + 4]
2009 del document.body[arg : endarg + 1]
2011 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
# Deleting the argument inset shifted indices; recompute the layout end.
2013 # re-find end of layout
2014 j = find_end_of_layout(document.body, i)
2016 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2019 # check for consecutive layouts
2020 k = find_token(document.body, "\\begin_layout", j)
2021 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each subsequent Subexample paragraph becomes a Standard paragraph
# starting with an ERT \item.
2023 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2024 j = find_end_of_layout(document.body, k)
2026 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2029 endev = put_cmd_in_ert("\\end{subexamples}")
# Close the environment after the last paragraph, then rewrite the first
# paragraph: \begin{subexamples}[...] in its own paragraph plus \item.
2031 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2032 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2033 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# The subexamples environment is provided by the covington package.
2035 document.append_local_layout("Requires covington")
# revert_drs: revert the linguistics DRS (Discourse Representation Structure)
# Flex insets to ERT calls of the drs.sty commands (\drs, \sdrs, \ifdrs,
# \condrs, \qdrs, \negdrs, ...). Each DRS flavour takes a different set of
# pre-/post-arguments, harvested below from Argument 1/2 and post:1..post:4.
# NOTE(review): this dump is missing lines (loop headers, `if x == -1:`
# guards, `continue`/`break`, the `cmd = ...` assignments in the if/elif
# chain at 2196-2207) — treat as a partial listing, not runnable code.
2039 def revert_drs(document):
2040 " Revert DRS insets (linguistics) to ERT "
2042 if not "linguistics" in document.get_module_list():
# All DRS inset variants that must be reverted.
2046 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2047 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2048 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2049 "\\begin_inset Flex SDRS"]
2053 i = find_token(document.body, drs, i+1)
2056 j = find_end_of_inset(document.body, i)
2058 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument 1 (pre-argument, e.g. the referent list) ---
2061 # Check for arguments
2062 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2063 endarg = find_end_of_inset(document.body, arg)
2066 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2067 if argbeginPlain == -1:
2068 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2070 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2071 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2073 # remove Arg insets and paragraph, if it only contains this inset
2074 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2075 del document.body[arg - 1 : endarg + 4]
2077 del document.body[arg : endarg + 1]
# Indices shifted by the deletion above: re-find the inset end
# before harvesting the next argument (same pattern repeats below).
2080 j = find_end_of_inset(document.body, i)
2082 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument 2 (second pre-argument, used by SDRS) ---
2085 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2086 endarg = find_end_of_inset(document.body, arg)
2089 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2090 if argbeginPlain == -1:
2091 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2093 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2094 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2096 # remove Arg insets and paragraph, if it only contains this inset
2097 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2098 del document.body[arg - 1 : endarg + 4]
2100 del document.body[arg : endarg + 1]
2103 j = find_end_of_inset(document.body, i)
2105 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- post:1 .. post:4 (trailing arguments of the conditional variants) ---
2108 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2109 endarg = find_end_of_inset(document.body, arg)
2110 postarg1content = []
2112 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2113 if argbeginPlain == -1:
2114 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2116 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2117 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2119 # remove Arg insets and paragraph, if it only contains this inset
2120 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2121 del document.body[arg - 1 : endarg + 4]
2123 del document.body[arg : endarg + 1]
2126 j = find_end_of_inset(document.body, i)
2128 document.warning("Malformed LyX document: Can't find end of DRS inset")
2131 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2132 endarg = find_end_of_inset(document.body, arg)
2133 postarg2content = []
2135 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2136 if argbeginPlain == -1:
2137 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2139 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2140 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2142 # remove Arg insets and paragraph, if it only contains this inset
2143 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2144 del document.body[arg - 1 : endarg + 4]
2146 del document.body[arg : endarg + 1]
2149 j = find_end_of_inset(document.body, i)
2151 document.warning("Malformed LyX document: Can't find end of DRS inset")
2154 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2155 endarg = find_end_of_inset(document.body, arg)
2156 postarg3content = []
2158 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2159 if argbeginPlain == -1:
2160 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2162 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2163 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2165 # remove Arg insets and paragraph, if it only contains this inset
2166 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2167 del document.body[arg - 1 : endarg + 4]
2169 del document.body[arg : endarg + 1]
2172 j = find_end_of_inset(document.body, i)
2174 document.warning("Malformed LyX document: Can't find end of DRS inset")
2177 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2178 endarg = find_end_of_inset(document.body, arg)
2179 postarg4content = []
2181 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2182 if argbeginPlain == -1:
2183 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2185 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2186 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2188 # remove Arg insets and paragraph, if it only contains this inset
2189 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2190 del document.body[arg - 1 : endarg + 4]
2192 del document.body[arg : endarg + 1]
# Map the inset variant to its drs.sty command. NOTE(review): the actual
# `cmd = "\\..."` assignment lines are missing from this dump.
2194 # The respective LaTeX command
2196 if drs == "\\begin_inset Flex DRS*":
2198 elif drs == "\\begin_inset Flex IfThen-DRS":
2200 elif drs == "\\begin_inset Flex Cond-DRS":
2202 elif drs == "\\begin_inset Flex QDRS":
2204 elif drs == "\\begin_inset Flex NegDRS":
2206 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT replacement: command + braced pre-arguments before the
# inset content, closing braces and post-arguments after it.
2209 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2210 endInset = find_end_of_inset(document.body, i)
2211 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2212 precontent = put_cmd_in_ert(cmd)
2213 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# SDRS takes a second pre-argument.
2214 if drs == "\\begin_inset Flex SDRS":
2215 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2216 precontent += put_cmd_in_ert("{")
# The conditional variants carry extra trailing brace groups.
2219 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2220 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2221 if cmd == "\\condrs" or cmd == "\\qdrs":
2222 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2224 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2226 postcontent = put_cmd_in_ert("}")
# Splice post- then pre-content (in that order so indices stay valid),
# then drop the inset header.
2228 document.body[endPlain:endInset + 1] = postcontent
2229 document.body[beginPlain + 1:beginPlain] = precontent
2230 del document.body[i : beginPlain + 1]
2232 document.append_local_layout("Provides covington 1")
2233 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# revert_babelfont: move non-TeX font selections into the user preamble as
# \babelfont commands (babel + XeTeX/LuaTeX only), resetting the header
# font settings to "default".
# NOTE(review): dump is missing lines (`if i == -1:`/`return` guards, the
# `else:` arms, `pretext.append(...)`/closing-brace lines around 2335-2344,
# scale-value extraction at 2302-2306 and 2316-2320) — partial listing.
2239 def revert_babelfont(document):
2240 " Reverts the use of \\babelfont to user preamble "
# Bail out unless the document uses non-TeX fonts with the babel package.
2242 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2244 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2246 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2248 i = find_token(document.header, '\\language_package', 0)
2250 document.warning("Malformed LyX document: Missing \\language_package.")
2252 if get_value(document.header, "\\language_package", 0) != "babel":
2255 # check font settings
# Defaults: nothing to revert unless a header sets a concrete font/scale.
2257 roman = sans = typew = "default"
2259 sf_scale = tt_scale = 100.0
# Harvest the roman font name and reset the header entry to "default".
2261 j = find_token(document.header, "\\font_roman", 0)
2263 document.warning("Malformed LyX document: Missing \\font_roman.")
2265 # We need to use this regex since split() does not handle quote protection
2266 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2267 roman = romanfont[2].strip('"')
2268 romanfont[2] = '"default"'
2269 document.header[j] = " ".join(romanfont)
# Same for the sans-serif font.
2271 j = find_token(document.header, "\\font_sans", 0)
2273 document.warning("Malformed LyX document: Missing \\font_sans.")
2275 # We need to use this regex since split() does not handle quote protection
2276 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2277 sans = sansfont[2].strip('"')
2278 sansfont[2] = '"default"'
2279 document.header[j] = " ".join(sansfont)
# Same for the typewriter font.
2281 j = find_token(document.header, "\\font_typewriter", 0)
2283 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2285 # We need to use this regex since split() does not handle quote protection
2286 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2287 typew = ttfont[2].strip('"')
2288 ttfont[2] = '"default"'
2289 document.header[j] = " ".join(ttfont)
# Old-style-figures flag.
2291 i = find_token(document.header, "\\font_osf", 0)
2293 document.warning("Malformed LyX document: Missing \\font_osf.")
2295 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans and typewriter scaling percentages.
2297 j = find_token(document.header, "\\font_sf_scale", 0)
2299 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2301 sfscale = document.header[j].split()
2304 document.header[j] = " ".join(sfscale)
2307 sf_scale = float(val)
2309 document.warning("Invalid font_sf_scale value: " + val)
2311 j = find_token(document.header, "\\font_tt_scale", 0)
2313 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2315 ttscale = document.header[j].split()
2318 document.header[j] = " ".join(ttscale)
2321 tt_scale = float(val)
2323 document.warning("Invalid font_tt_scale value: " + val)
# Emit the collected settings as \babelfont preamble code, wrapped in
# \AtBeginDocument so babel is set up first.
2325 # set preamble stuff
2326 pretext = ['%% This document must be processed with xelatex or lualatex!']
2327 pretext.append('\\AtBeginDocument{%')
2328 if roman != "default":
2329 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2330 if sans != "default":
2331 sf = '\\babelfont{sf}['
2332 if sf_scale != 100.0:
2333 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2334 sf += 'Mapping=tex-text]{' + sans + '}'
2336 if typew != "default":
2337 tw = '\\babelfont{tt}'
2338 if tt_scale != 100.0:
2339 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2340 tw += '{' + typew + '}'
2343 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2345 insert_to_preamble(document, pretext)
# revert_minionpro: revert the native MinionPro roman-font selection (with
# \font_roman_opts) to a \usepackage[...]{MinionPro} preamble line.
# NOTE(review): dump is missing lines (`if x == -1:`/`return` guards, the
# osf-handling around 2381-2388) — partial listing, not runnable as-is.
2348 def revert_minionpro(document):
2349 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2351 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2353 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2355 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Extract the extra roman font options (quoted string after the token).
2358 regexp = re.compile(r'(\\font_roman_opts)')
2359 x = find_re(document.header, regexp, 0)
2363 # We need to use this regex since split() does not handle quote protection
2364 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2365 opts = romanopts[1].strip('"')
2367 i = find_token(document.header, "\\font_roman", 0)
2369 document.warning("Malformed LyX document: Missing \\font_roman.")
2372 # We need to use this regex since split() does not handle quote protection
2373 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2374 roman = romanfont[1].strip('"')
# Only act on the minionpro font; reset the header entry to default.
2375 if roman != "minionpro":
2377 romanfont[1] = '"default"'
2378 document.header[i] = " ".join(romanfont)
# Fold the osf setting into the package options and disable it in the header.
2380 j = find_token(document.header, "\\font_osf true", 0)
2383 preamble = "\\usepackage["
2385 document.header[j] = "\\font_osf false"
2389 preamble += "]{MinionPro}"
2390 add_to_preamble(document, [preamble])
# The \font_roman_opts line is no longer needed.
2391 del document.header[x]
# revert_font_opts: revert the \font_*_opts header params by emitting
# \setmainfont/\setsansfont/\setmonofont (or \babelfont with babel) lines
# to the preamble, for roman, sans and typewriter in turn.
# NOTE(review): dump is missing lines (`if i == -1:` guards, `else:` arms,
# the `preamble += opts + ','` and closing lines around 2430-2434,
# 2464-2472, 2502-2510) — partial listing, not runnable as-is.
2394 def revert_font_opts(document):
2395 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# Determine whether non-TeX fonts and/or babel are in use; this selects
# between \setXfont (fontspec) and \babelfont output below.
2397 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2399 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2401 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2402 i = find_token(document.header, '\\language_package', 0)
2404 document.warning("Malformed LyX document: Missing \\language_package.")
2406 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman ---
2409 regexp = re.compile(r'(\\font_roman_opts)')
2410 i = find_re(document.header, regexp, 0)
2412 # We need to use this regex since split() does not handle quote protection
2413 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2414 opts = romanopts[1].strip('"')
2415 del document.header[i]
2417 regexp = re.compile(r'(\\font_roman)')
2418 i = find_re(document.header, regexp, 0)
2420 # We need to use this regex since split() does not handle quote protection
2421 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2422 font = romanfont[2].strip('"')
2423 romanfont[2] = '"default"'
2424 document.header[i] = " ".join(romanfont)
2425 if font != "default":
2427 preamble = "\\babelfont{rm}["
2429 preamble = "\\setmainfont["
2432 preamble += "Mapping=tex-text]{"
2435 add_to_preamble(document, [preamble])
# --- sans, including the \font_sf_scale percentage ---
2438 regexp = re.compile(r'(\\font_sans_opts)')
2439 i = find_re(document.header, regexp, 0)
2442 # We need to use this regex since split() does not handle quote protection
2443 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2444 opts = sfopts[1].strip('"')
2445 del document.header[i]
2447 regexp = re.compile(r'(\\font_sf_scale)')
2448 i = find_re(document.header, regexp, 0)
2450 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2451 regexp = re.compile(r'(\\font_sans)')
2452 i = find_re(document.header, regexp, 0)
2454 # We need to use this regex since split() does not handle quote protection
2455 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2456 font = sffont[2].strip('"')
2457 sffont[2] = '"default"'
2458 document.header[i] = " ".join(sffont)
2459 if font != "default":
2461 preamble = "\\babelfont{sf}["
2463 preamble = "\\setsansfont["
# Scale is stored as a percentage, emitted as "Scale=0.NN".
2467 preamble += "Scale=0."
2468 preamble += scaleval
2470 preamble += "Mapping=tex-text]{"
2473 add_to_preamble(document, [preamble])
# --- typewriter, including \font_tt_scale ---
2476 regexp = re.compile(r'(\\font_typewriter_opts)')
2477 i = find_re(document.header, regexp, 0)
2480 # We need to use this regex since split() does not handle quote protection
2481 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2482 opts = ttopts[1].strip('"')
2483 del document.header[i]
2485 regexp = re.compile(r'(\\font_tt_scale)')
2486 i = find_re(document.header, regexp, 0)
2488 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2489 regexp = re.compile(r'(\\font_typewriter)')
2490 i = find_re(document.header, regexp, 0)
2492 # We need to use this regex since split() does not handle quote protection
2493 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2494 font = ttfont[2].strip('"')
2495 ttfont[2] = '"default"'
2496 document.header[i] = " ".join(ttfont)
2497 if font != "default":
2499 preamble = "\\babelfont{tt}["
2501 preamble = "\\setmonofont["
2505 preamble += "Scale=0."
2506 preamble += scaleval
2508 preamble += "Mapping=tex-text]{"
2511 add_to_preamble(document, [preamble])
# revert_plainNotoFonts_xopts: revert the plain Noto "complete font" setup
# (roman NotoSerif-TLF with sans/tt unset, plus extra options / osf) to a
# \usepackage[...]{noto} preamble line.
# NOTE(review): dump is missing lines (`return` guards, the osf/opts
# assembly around 2522-2547 and 2579) — partial listing, not runnable as-is.
2514 def revert_plainNotoFonts_xopts(document):
2515 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2517 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2519 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2521 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2525 y = find_token(document.header, "\\font_osf true", 0)
# Nothing to do if there are neither extra roman options nor osf.
2529 regexp = re.compile(r'(\\font_roman_opts)')
2530 x = find_re(document.header, regexp, 0)
2531 if x == -1 and not osf:
2536 # We need to use this regex since split() does not handle quote protection
2537 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2538 opts = romanopts[1].strip('"')
2544 i = find_token(document.header, "\\font_roman", 0)
2548 # We need to use this regex since split() does not handle quote protection
2549 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2550 roman = romanfont[1].strip('"')
# The plain-noto case requires exactly NotoSerif-TLF as roman...
2551 if roman != "NotoSerif-TLF":
# ...with sans and typewriter left at their defaults.
2554 j = find_token(document.header, "\\font_sans", 0)
2558 # We need to use this regex since split() does not handle quote protection
2559 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2560 sf = sffont[1].strip('"')
2564 j = find_token(document.header, "\\font_typewriter", 0)
2568 # We need to use this regex since split() does not handle quote protection
2569 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2570 tt = ttfont[1].strip('"')
# Reset the header to default and emit the equivalent package load.
2574 # So we have noto as "complete font"
2575 romanfont[1] = '"default"'
2576 document.header[i] = " ".join(romanfont)
2578 preamble = "\\usepackage["
2580 preamble += "]{noto}"
2581 add_to_preamble(document, [preamble])
# osf is now handled by the package options; clear the header flag and
# drop the obsolete \font_roman_opts line.
2583 document.header[y] = "\\font_osf false"
2585 del document.header[x]
# revert_notoFonts_xopts: revert the extended Noto font family (with extra
# options) to preamble code via the generic font-mapping machinery.
# NOTE(review): dump is missing lines (`return` guards and the
# `fontmap = dict()` initializer around 2598) — partial listing.
2588 def revert_notoFonts_xopts(document):
2589 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2591 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2593 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2595 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared Noto mapping; collected packages/options are
# written to the preamble afterwards.
2599 fm = createFontMapping(['Noto'])
2600 if revert_fonts(document, fm, fontmap, True):
2601 add_preamble_fonts(document, fontmap)
# revert_IBMFonts_xopts: revert native IBM (Plex) font definitions (with
# extra options) to preamble code via the generic font-mapping machinery.
# NOTE(review): dump is missing lines (`return` guards, `fontmap = dict()`
# initializer around 2616) — partial listing.
2604 def revert_IBMFonts_xopts(document):
2605 " Revert native IBM font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2607 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2609 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2611 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared IBM mapping.
2615 fm = createFontMapping(['IBM'])
2617 if revert_fonts(document, fm, fontmap, True):
2618 add_preamble_fonts(document, fontmap)
# revert_AdobeFonts_xopts: revert native Adobe (Source) font definitions
# (with extra options) to preamble code via the font-mapping machinery.
# NOTE(review): dump is missing lines (`return` guards, `fontmap = dict()`
# initializer around 2633) — partial listing.
2621 def revert_AdobeFonts_xopts(document):
2622 " Revert native Adobe font definition (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2624 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2626 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2628 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Delegate to the shared Adobe mapping.
2632 fm = createFontMapping(['Adobe'])
2634 if revert_fonts(document, fm, fontmap, True):
2635 add_preamble_fonts(document, fontmap)
# convert_osf: split the single \font_osf header param into the new
# per-family params \font_roman_osf / \font_sans_osf / \font_typewriter_osf.
# For non-TeX fonts the sans/tt values are derived from whether the chosen
# font is one of the known osf variants (osfsf/osftt lists).
# NOTE(review): dump is missing lines (`return` guards, `else:` arms, the
# NonTeXFonts branch header around 2658-2664) — partial listing.
2638 def convert_osf(document):
2639 " Convert \\font_osf param to new format "
2642 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2644 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2646 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2648 i = find_token(document.header, '\\font_osf', 0)
2650 document.warning("Malformed LyX document: Missing \\font_osf.")
# Sans/typewriter font names that imply old-style figures.
2653 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2654 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
# Rename the old param in place; it becomes the roman setting.
2656 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2657 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
2660 document.header.insert(i, "\\font_sans_osf false")
2661 document.header.insert(i + 1, "\\font_typewriter_osf false")
# TeX-fonts path: decide sans osf from the configured sans font.
2665 x = find_token(document.header, "\\font_sans", 0)
2667 document.warning("Malformed LyX document: Missing \\font_sans.")
2669 # We need to use this regex since split() does not handle quote protection
2670 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2671 sf = sffont[1].strip('"')
2673 document.header.insert(i, "\\font_sans_osf true")
2675 document.header.insert(i, "\\font_sans_osf false")
# ...and typewriter osf from the configured typewriter font.
2677 x = find_token(document.header, "\\font_typewriter", 0)
2679 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2681 # We need to use this regex since split() does not handle quote protection
2682 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2683 tt = ttfont[1].strip('"')
2685 document.header.insert(i + 1, "\\font_sans_osf true")
2687 document.header.insert(i + 1, "\\font_sans_osf false")
# Fallback: insert both new params as false.
2690 document.header.insert(i, "\\font_sans_osf false")
2691 document.header.insert(i + 1, "\\font_typewriter_osf false")
# revert_osf: collapse the per-family \font_*_osf params back into the old
# single \font_osf param. The roman value is kept; for non-TeX fonts the
# sans/typewriter values are OR-ed in before their lines are dropped.
# NOTE(review): dump is missing lines (`return` guards, the NonTeXFonts
# branch conditions around 2711 and 2727-2732) — partial listing.
2694 def revert_osf(document):
2695 " Revert \\font_*_osf params "
2698 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2700 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2702 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
# Rename roman osf back to the legacy param name, keeping its value.
2704 i = find_token(document.header, '\\font_roman_osf', 0)
2706 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2709 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2710 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# The sans and typewriter params have no legacy equivalent: fold their
# values into osfval and delete the lines.
2712 i = find_token(document.header, '\\font_sans_osf', 0)
2714 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2717 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2718 del document.header[i]
2720 i = find_token(document.header, '\\font_typewriter_osf', 0)
2722 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2725 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2726 del document.header[i]
# If any family requested osf, force the legacy param to true.
2729 i = find_token(document.header, '\\font_osf', 0)
2731 document.warning("Malformed LyX document: Missing \\font_osf.")
2733 document.header[i] = "\\font_osf true"
# revert_texfontopts: revert native TeX font selections that carry extra
# options (\font_sans_opts / \font_roman_opts) to explicit
# \usepackage[...]{<pkg>} preamble lines — biolinum for sans, then the
# known roman fonts (rmfonts list), folding osf/sc settings into options.
# NOTE(review): dump is missing lines (`return` guards, `else:` arms, the
# osf-option assignments in the 2826-2838 chain, option concatenation at
# 2848-2849 and 2784/2786) — partial listing, not runnable as-is.
2736 def revert_texfontopts(document):
2737 " Revert native TeX font definitions (with extra options) to LaTeX "
# Only relevant for TeX fonts.
2739 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2741 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2743 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman fonts whose package takes the collected options.
2746 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2748 # First the sf (biolinum only)
2749 regexp = re.compile(r'(\\font_sans_opts)')
2750 x = find_re(document.header, regexp, 0)
2752 # We need to use this regex since split() does not handle quote protection
2753 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2754 opts = sfopts[1].strip('"')
2755 i = find_token(document.header, "\\font_sans", 0)
2757 document.warning("Malformed LyX document: Missing \\font_sans.")
2759 # We need to use this regex since split() does not handle quote protection
2760 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2761 sans = sffont[1].strip('"')
2762 if sans == "biolinum":
2764 sffont[1] = '"default"'
2765 document.header[i] = " ".join(sffont)
2767 j = find_token(document.header, "\\font_sans_osf true", 0)
# Sans scaling becomes the package's "scaled=" option.
2770 k = find_token(document.header, "\\font_sf_scale", 0)
2772 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2774 sfscale = document.header[k].split()
2777 document.header[k] = " ".join(sfscale)
2780 sf_scale = float(val)
2782 document.warning("Invalid font_sf_scale value: " + val)
2783 preamble = "\\usepackage["
2785 document.header[j] = "\\font_sans_osf false"
2787 if sf_scale != 100.0:
2788 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2790 preamble += "]{biolinum}"
2791 add_to_preamble(document, [preamble])
2792 del document.header[x]
# --- roman fonts with extra options ---
2794 regexp = re.compile(r'(\\font_roman_opts)')
2795 x = find_re(document.header, regexp, 0)
2799 # We need to use this regex since split() does not handle quote protection
2800 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2801 opts = romanopts[1].strip('"')
2803 i = find_token(document.header, "\\font_roman", 0)
2805 document.warning("Malformed LyX document: Missing \\font_roman.")
2808 # We need to use this regex since split() does not handle quote protection
2809 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2810 roman = romanfont[1].strip('"')
2811 if not roman in rmfonts:
2813 romanfont[1] = '"default"'
2814 document.header[i] = " ".join(romanfont)
# Some fonts are loaded via a differently-named package.
2816 if roman == "utopia":
2818 elif roman == "palatino":
2819 package = "mathpazo"
2820 elif roman == "times":
2821 package = "mathptmx"
2822 elif roman == "xcharter":
2823 package = "XCharter"
# Per-font osf option names differ; collected into `osf` below.
2825 j = find_token(document.header, "\\font_roman_osf true", 0)
2827 if roman == "cochineal":
2828 osf = "proportional,osf,"
2829 elif roman == "utopia":
2831 elif roman == "garamondx":
2833 elif roman == "libertine":
2835 elif roman == "palatino":
2837 elif roman == "xcharter":
2839 document.header[j] = "\\font_roman_osf false"
# Small-caps setting also maps to package options for some fonts.
2840 k = find_token(document.header, "\\font_sc true", 0)
2842 if roman == "utopia":
2844 if roman == "palatino" and osf == "":
2846 document.header[k] = "\\font_sc false"
2847 preamble = "\\usepackage["
2850 preamble += "]{" + package + "}"
2851 add_to_preamble(document, [preamble])
2852 del document.header[x]
def convert_CantarellFont(document):
    """Handle the Cantarell font definition in LaTeX documents.

    Converts a preamble-based Cantarell setup to LyX's native font
    support. Only applies when non-TeX fonts are disabled, i.e. the
    header contains "\\use_non_tex_fonts false".

    Relies on the module-level helpers createFontMapping() and
    convert_fonts() defined elsewhere in this file.
    """
    # Native TeX-font handling only applies when non-TeX fonts are off.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Cantarell'])
        convert_fonts(document, fm)
def revert_CantarellFont(document):
    """Revert LyX's native Cantarell font definition to LaTeX preamble code.

    Only applies when non-TeX fonts are disabled ("\\use_non_tex_fonts
    false" present in the header). Collected packages/options are written
    to the user preamble via add_preamble_fonts().

    NOTE(review): the source dump referenced `fontmap` without the
    `fontmap = dict()` initializer (a NameError as written); restored here.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fontmap = dict()
        fm = createFontMapping(['Cantarell'])
        # revert_fonts fills fontmap with package -> options entries.
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
# Module-level conversion tables mapping LyX file-format numbers to the
# convert/revert routines defined above.
# NOTE(review): this dump is truncated — the `convert = [` opening line
# (original line 2877) and many intermediate `[NNN, [...]],` entries are
# missing, as are the closing brackets; the lists below are partial.
2876 supported_versions = ["2.4.0", "2.4"]
# Forward conversions: each entry is [target_format, [functions]].
2878 [545, [convert_lst_literalparam]],
2883 [550, [convert_fontenc]],
2890 [557, [convert_vcsinfo]],
2891 [558, [removeFrontMatterStyles]],
2894 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2898 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2899 [566, [convert_hebrew_parentheses]],
2905 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2906 [573, [convert_inputencoding_namechange]],
2907 [574, [convert_ruby_module, convert_utf8_japanese]],
2908 [575, [convert_lineno]],
2910 [577, [convert_linggloss]],
2914 [581, [convert_osf]],
2915 [582, [convert_CantarellFont]],
# Backward conversions, newest format first: [target_format, [functions]].
2918 revert = [[581, [revert_CantarellFont]],
2919 [580, [revert_texfontopts,revert_osf]],
2920 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2921 [578, [revert_babelfont]],
2922 [577, [revert_drs]],
2923 [576, [revert_linggloss, revert_subexarg]],
2924 [575, [revert_new_languages]],
2925 [574, [revert_lineno]],
2926 [573, [revert_ruby_module, revert_utf8_japanese]],
2927 [572, [revert_inputencoding_namechange]],
2928 [571, [revert_notoFonts]],
2929 [570, [revert_cmidruletrimming]],
2930 [569, [revert_bibfileencodings]],
2931 [568, [revert_tablestyle]],
2932 [567, [revert_soul]],
2933 [566, [revert_malayalam]],
2934 [565, [revert_hebrew_parentheses]],
2935 [564, [revert_AdobeFonts]],
2936 [563, [revert_lformatinfo]],
2937 [562, [revert_listpargs]],
2938 [561, [revert_l7ninfo]],
2939 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2940 [559, [revert_timeinfo, revert_namenoextinfo]],
2941 [558, [revert_dateinfo]],
2942 [557, [addFrontMatterStyles]],
2943 [556, [revert_vcsinfo]],
2944 [555, [revert_bibencoding]],
2945 [554, [revert_vcolumns]],
2946 [553, [revert_stretchcolumn]],
2947 [552, [revert_tuftecite]],
2948 [551, [revert_floatpclass, revert_floatalignment]],
2949 [550, [revert_nospellcheck]],
2950 [549, [revert_fontenc]],
2951 [548, []],# dummy format change
2952 [547, [revert_lscape]],
2953 [546, [revert_xcharter]],
2954 [545, [revert_paratype]],
2955 [544, [revert_lst_literalparam]]
2959 if __name__ == "__main__":