1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    " Add collected font-packages with their option to user-preamble"
    # fontmap: dict mapping LaTeX package name -> list of package options,
    # as collected by revert_fonts().
    # NOTE(review): the loop header binding `pkg` (iteration over fontmap)
    # is not visible in this excerpt -- confirm against the full source.
    if len(fontmap[pkg]) > 0:
        # Build the "[opt1,opt2,...]" option string for \usepackage.
        xoption = "[" + ",".join(fontmap[pkg]) + "]"
    preamble = "\\usepackage%s{%s}" % (xoption, pkg)
    add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the lookup key '<package>:<opt1>-<opt2>-...' used by pkg2fontmap."""
    joined_options = "-".join(options)
    return "%s:%s" % (pkg, joined_options)
        # NOTE(review): the enclosing class/def headers for these attribute
        # initialisations (fontinfo.__init__, fontinfo.computekey,
        # fontmapping.__init__) are not visible in this excerpt.
        self.fontname = None    # key into font2pkgmap
        self.fonttype = None    # roman,sans,typewriter,math
        self.scaletype = None   # None,sf,tt
        self.scaleopt = None    # None, 'scaled', 'scale'
        self.pkgkey = None      # key into pkg2fontmap
        # Recompute the package key from package name + options (see createkey()).
        self.pkgkey = createkey(self.package, self.options)
        # Lookup tables of the font-mapping registry:
        self.font2pkgmap = dict()   # font name -> fontinfo entry
        self.pkg2fontmap = dict()   # package key -> font name
        self.pkginmap = dict()  # defines, if a map for package exists
    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None):
        " Expand fontinfo mapping"
        #
        # fontlist: list of fontnames, each element
        # may contain a ','-separated list of needed options
        # like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        # to be used in scale option (e.g. scaled=0.7)
        #
        # NOTE(review): the per-entry loop that creates `fe` (a fontinfo
        # instance) and derives `font_name` (plus its options) from each
        # font_list element is not visible in this excerpt.
        fe.fonttype = font_type
        fe.scaletype = scale_type
        fe.fontname = font_name
        fe.scaleopt = scaleopt
        fe.package = font_name
        # Register entry under its font name.
        self.font2pkgmap[font_name] = fe
        if fe.pkgkey in self.pkg2fontmap:
            # Repeated the same entry? Check content
            if self.pkg2fontmap[fe.pkgkey] != font_name:
                # NOTE(review): `document` is not defined in this scope;
                # reaching this line would raise NameError -- verify upstream.
                document.error("Something is wrong in pkgname+options <-> fontname mapping")
        self.pkg2fontmap[fe.pkgkey] = font_name
        self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        """Map a (package, options) pair back to its registered font name,
        validating the registry's consistency on the way."""
        pkgkey = createkey(pkg, options)
        # NOTE(review): the early return for an unknown key is elided from
        # this excerpt.
        if not pkgkey in self.pkg2fontmap:
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            # NOTE(review): `document` is not defined in this scope;
            # reaching this line would raise NameError -- verify upstream.
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        # NOTE(review): the final return statements (fontname vs. None) are
        # elided from this excerpt.
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    """Build and return the fontmapping registry for the requested families."""
    # Create info for known fonts for the use in
    # convert_latexFonts() and
    # revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    #
    # NOTE(review): the `fm = fontmapping()` initialisation, the
    # `if font == 'DejaVu':` / `elif font == 'IBM':` / `elif font == 'Noto':`
    # branch headers and the final `return fm` are elided from this excerpt.
    for font in fontlist:
            # DejaVu family (roman has no scale option; sans/mono use 'scaled').
            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
            # IBM Plex family (weights passed as per-font options).
            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                                  'IBMPlexSerifSemibold,semibold'],
                                 "roman", None, "plex-serif")
            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                                 "sans", "sf", "plex-sans", "scale")
            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                  'IBMPlexMonoSemibold,semibold'],
                                 "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            # Adobe Source Pro family.
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled")
            # Noto family.
            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
                                  'NotoSerifExtralight,extralight'],
                                 "roman", None, "noto-serif")
            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                                  'NotoSansThin,thin', 'NotoSansLight,light',
                                  'NotoSansExtralight,extralight'],
                                 "sans", "sf", "noto-sans", "scaled")
            fm.expandFontMapping(['NotoMonoRegular'], "typewriter", "tt", "noto-mono", "scaled")
def convert_fonts(document, fm):
    " Handle font definition (LaTeX preamble -> native) "
    # Scans the LaTeX preamble for \usepackage lines loading a font package
    # known to the fontmapping `fm`, removes them, and writes the equivalent
    # native \font_* settings into the document header.
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    rscaleopt = re.compile(r'^scaled?=(.*)')
    # NOTE(review): loop initialisation, several guards/continues and the
    # extraction of `pkg`, `o` and `oscale` are elided from this excerpt.
    while i < len(document.preamble):
        i = find_re(document.preamble, rpkg, i+1)
        mo = rpkg.search(document.preamble[i])
        # group(2) is the option list inside [...] (may be absent).
        if mo == None or mo.group(2) == None:
        options = mo.group(2).replace(' ', '').split(",")
        # Pull a scale / scaled= option out of the option list, if any.
        while o < len(options):
            mo = rscaleopt.search(options[o])
        # Only convert packages we actually know how to map.
        if not pkg in fm.pkginmap:
        fn = fm.getfontname(pkg, options)
        del document.preamble[i]
        fontinfo = fm.font2pkgmap[fn]
        if fontinfo.scaletype == None:
        fontscale = "\\font_" + fontinfo.scaletype + "_scale"
        fontinfo.scaleval = oscale
        # Drop the marker comment lyx2lyx left above the removed line.
        if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
            del document.preamble[i-1]
        if fontscale != None:
            j = find_token(document.header, fontscale, 0)
            val = get_value(document.header, fontscale, j)
            # Header stores the scale as an integer percentage.
            scale = "%03d" % int(float(oscale) * 100)
            document.header[j] = fontscale + " " + scale + " " + vals[1]
    # Point the \font_<type> header line at the converted font name.
    ft = "\\font_" + fontinfo.fonttype
    j = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, j)
    words = val.split() # ! splits also values like '"DejaVu Sans"'
    words[0] = '"' + fn + '"'
    document.header[j] = ft + ' ' + ' '.join(words)
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
    " Revert native font definition to LaTeX "
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    # NOTE(review): loop initialisation and several guards (including the
    # OnlyWithXOpts branch structure) are elided from this excerpt.
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        # Only revert fonts this mapping knows about.
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
            # Pick the \font_<type>_opts line matching the current slot.
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # Reset the header slot to the default font.
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
296 ###############################################################################
298 ### Conversion and reversion routines
300 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    In the \\inputencoding header line, 'auto' becomes 'auto-legacy' and
    'default' becomes 'auto-legacy-plain'.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding line: nothing to rename.  Without this guard,
        # find_token's -1 would silently rewrite document.header[-1].
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back.

    In the \\inputencoding header line, 'auto-legacy-plain' becomes
    'default' and 'auto-legacy' becomes 'auto'.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No \inputencoding line: nothing to rename.  Without this guard,
        # find_token's -1 would silently rewrite document.header[-1].
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Convert Noto font LaTeX preamble code to native header settings."""
    # Only act when the document is typeset with TeX fonts.
    using_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if not using_tex_fonts:
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definitions to LaTeX preamble code.

    Only applies when TeX fonts are in use; the package options collected
    by revert_fonts() are written to the user preamble.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collector dict (package -> options); must exist before revert_fonts
        # fills it, otherwise the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        revert_fonts(document, fm, fontmap)
        add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Convert DejaVu and IBM Plex preamble font code to native settings."""
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        # System (non-TeX) fonts in use: nothing to convert.
        return
    mapping = createFontMapping(['DejaVu', 'IBM'])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    """Revert native DejaVu and IBM Plex font definitions to LaTeX.

    Only applies when TeX fonts are in use; the package options collected
    by revert_fonts() are written to the user preamble.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collector dict (package -> options); must exist before revert_fonts
        # fills it, otherwise the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        revert_fonts(document, fm, fontmap)
        add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Convert Adobe Source Pro preamble font code to native settings.

    (The previous docstring wrongly referred to DejaVu/IBMPlex.)
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Adobe'])
        convert_fonts(document, fm)
def revert_AdobeFonts(document):
    """Revert native Adobe Source Pro font definitions to LaTeX.

    Only applies when TeX fonts are in use; the package options collected
    by revert_fonts() are written to the user preamble.
    (The previous docstring wrongly referred to DejaVu.)
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # Collector dict (package -> options); must exist before revert_fonts
        # fills it, otherwise the calls below raise NameError.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        revert_fonts(document, fm, fontmap)
        add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    " Remove styles Begin/EndFrontmatter"
    # Deletes every BeginFrontmatter/EndFrontmatter layout (plus trailing
    # blank lines) from the body.
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan-loop header and the -1 / error guards are
    # elided from this excerpt.
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Swallow empty lines following the layout before removing it.
    while document.body[j+1].strip() == '':
    document.body[i:j+1] = []
def addFrontMatterStyles(document):
    " Use styles Begin/EndFrontmatter for elsarticle"
    # Wraps the run of front-matter layouts (Title ... Keywords) in
    # Begin/EndFrontmatter layouts; elsarticle only.
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Inserts a (Begin|End)Frontmatter layout at `line`, normalising the
        # surrounding blank lines.
        # NOTE(review): initialisation of `above`/`below` is elided from
        # this excerpt.
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
                                      '\\begin_inset Note Note',
                                      '\\begin_layout Plain Layout',
                                      '\\end_inset', '', '',

    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
               'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    # NOTE(review): the scan-loop header, `first` bookkeeping and guards are
    # elided from this excerpt.
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Insert End first so the earlier index `first` stays valid.
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    " Add param literal to include inset "
    # Inserts 'literal "true"' into every CommandInset include, preserving
    # pre-2.4 behaviour for listing parameters.
    # NOTE(review): the scan-loop header and -1 guards are elided from this
    # excerpt.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
    # Skip to the end of the parameter block (first blank line) before
    # inserting the new parameter.
    while i < j and document.body[i].strip() != '':
    document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    " Remove param literal from include inset "
    # Inverse of convert_lst_literalparam(): drops the 'literal' parameter
    # from every CommandInset include.
    # NOTE(review): the scan-loop header and -1 guards are elided from this
    # excerpt.
    i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
    del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    " Revert ParaType font definitions to LaTeX "
    # Replaces native PTSerif/PTSans/PTMono header settings with the
    # corresponding \usepackage lines (paratype meta-package when all three
    # are set, individual packages otherwise).
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # NOTE(review): several guards and the branch structure between
        # these statements are elided from this excerpt.
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = get_value(document.header, "\\font_sf_scale", 0)
        # Scale is stored as an integer percentage; emit e.g. scaled=0.90.
        sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # All three families set: load the paratype meta-package.
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
            add_to_preamble(document, ["\\usepackage{PTSerif}"])
            document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
            add_to_preamble(document, ["\\usepackage{PTSans}"])
            document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
            add_to_preamble(document, ["\\usepackage{PTMono}"])
            document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    " Revert XCharter font definitions to LaTeX "
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # NOTE(review): the `i == -1` early return is elided from this excerpt.
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    # NOTE(review): the assembly of `options` (e.g. "[osf]") is elided from
    # this excerpt.
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    " Reverts the landscape environment (Landscape module) to TeX-code "
    # Requires the 'landscape' module; floating variant uses afterpage,
    # plain variant uses pdflscape.
    if not "landscape" in document.get_module_list():
    # NOTE(review): the scan-loop header and -1 guards are elided from this
    # excerpt.
    i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
        # Close first so the earlier index i stays valid, then open.
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
        document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{afterpage}"])
        # NOTE(review): the `else:` branch header is elided from this excerpt.
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
        document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    """Convert default fontenc setting ('global' -> 'auto')."""
    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        # Header line absent: nothing to convert.  Guards against
        # find_token's -1 rewriting document.header[-1].
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ('auto' -> 'global')."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        # Header line absent: nothing to revert.  Guards against
        # find_token's -1 rewriting document.header[-1].
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    " Remove nospellcheck font info param "
    # NOTE(review): the enclosing scan loop, -1 guard and the deletion of
    # the matched \nospellcheck line are elided from this excerpt.
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    " Remove float placement params 'document' and 'class' "
    # Header-level token is removed unconditionally.
    del_token(document.header, "\\float_placement class")
    # NOTE(review): the scan-loop header, -1 guards and the handling of the
    # found `k` lines are elided from this excerpt.
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    # Placement params live within the first two lines of the inset.
    k = find_token(document.body, 'placement class', i, i + 2)
    k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    " Remove float alignment params "
    # Global default alignment; the header token is deleted on read.
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    # NOTE(review): the scan-loop header and several -1 guards are elided
    # from this excerpt.
    i = find_token(document.body, '\\begin_inset Float', i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    # Alignment param lives within the first few lines of the inset.
    k = find_token(document.body, 'alignment', i, i+4)
    alignment = get_value(document.body, "alignment", k)
    # "document" means: fall back to the global default.
    if alignment == "document":
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    # Emit the matching LaTeX alignment macro as ERT.
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    " Revert \cite commands in tufte classes "
    # Tufte classes define their own \cite; rewrite citation insets as ERT.
    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    # NOTE(review): the scan-loop header, -1 guards/continues and the
    # initialisation of `res` are elided from this excerpt.
    i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" %(i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
    cmd = get_value(document.body, "LatexCommand", k)
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" %(i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    " We remove the column varwidth flags or everything else will become a mess. "
    # NOTE(review): the scan-loop header and -1 guards are elided from this
    # excerpt.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Strip varwidth="true" from every <column ...> tag of the tabular.
    for k in range(i, j):
        if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
            document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
            document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    " Revert standard columns with line breaks etc. "
    # Converts columns that need vertical/varwidth behaviour into special
    # column declarations (array's >{...} and varwidth's V{...}), and turns
    # automatic newlines/linebreaks into explicit ERT.
    # NOTE(review): loop setup, `needarray`/`needvarwidth`/`vcand` flag
    # initialisation and several guards are elided from this excerpt.
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    # Row/column counts are attributes on the <lyxtabular ...> line.
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
        m = find_token(document.body, "<column", m)
        width = get_option_value(document.body[m], 'width')
        varwidth = get_option_value(document.body[m], 'varwidth')
        alignment = get_option_value(document.body[m], 'alignment')
        special = get_option_value(document.body[m], 'special')
        # [width, varwidth, alignment, special, line index]
        col_info.append([width, varwidth, alignment, special, m])
    for row in range(nrows):
        for col in range(ncols):
            m = find_token(document.body, "<cell", m)
            multicolumn = get_option_value(document.body[m], 'multicolumn')
            multirow = get_option_value(document.body[m], 'multirow')
            width = get_option_value(document.body[m], 'width')
            rotate = get_option_value(document.body[m], 'rotate')
            # Check for: linebreaks, multipars, non-standard environments
            endcell = find_token(document.body, "</cell>", begcell)
            if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
            elif count_pars_in_inset(document.body, begcell + 2) > 1:
            elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
            # Candidate cells must not be rotated, and either be plain or
            # have no width set.
            if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                    alignment = col_info[col][2]
                    col_line = col_info[col][4]
                    if alignment == "center":
                        vval = ">{\\centering}"
                    elif alignment == "left":
                        vval = ">{\\raggedright}"
                    elif alignment == "right":
                        vval = ">{\\raggedleft}"
                    vval += "V{\\linewidth}"
                    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                # with newlines, and we do not want that)
                endcell = find_token(document.body, "</cell>", begcell)
                nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                nle = find_end_of_inset(document.body, nl)
                del(document.body[nle:nle+1])
                document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    # Load the support packages only if actually needed.
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    " Revert bibliography encoding "
    # For biblatex: encoding becomes a bibencoding=... biblio option.
    # Otherwise: the bibtex inset is wrapped in \bgroup\inputencoding{...}
    # ... \egroup ERT.
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    # NOTE(review): the `encodings = {` opening, many further entries and
    # the closing `}` are elided from this excerpt.
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "utf8-platex" : "utf8",
    # NOTE(review): the scan-loop header and -1 guards are elided from this
    # excerpt.
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    # "default" needs no reverted markup at all.
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # biblatex: merge into (or create) the \biblio_options header line.
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
        document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # non-biblatex: wrap the inset in grouped \inputencoding ERT.
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    " Separate vcs Info inset from buffer Info inset. "
    # Info insets of type "buffer" with a vcs-* argument are rewritten to
    # the dedicated type "vcs" with the prefix stripped.
    # NOTE(review): the `types = {` opening, further entries, the closing
    # `}`, the scan-loop header and several guards are elided from this
    # excerpt.
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # Only known vcs-* arguments are converted.
    if argv not in list(types.keys()):
    document.body[tp] = "type \"vcs\""
    document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    " Merge vcs Info inset to buffer Info inset. "
    # Inverse of convert_vcsinfo(): type "vcs" insets become type "buffer"
    # with the vcs- prefix restored on the argument.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    # NOTE(review): the scan-loop header and several guards/continues are
    # elided from this excerpt.
    i = find_token(document.body, "\\begin_inset Info", i+1)
    j = find_end_of_inset(document.body, i+1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    document.warning("Malformed Info inset. Invalid vcs arg.")
    document.body[tp] = "type \"buffer\""
    document.body[arg] = "arg \"vcs-" + argv + "\""
957 def revert_dateinfo(document):
958 " Revert date info insets to static text. "
960 # FIXME This currently only considers the main language and uses the system locale
961 # Ideally, it should honor context languages and switch the locale accordingly.
963 # The date formats for each language using strftime syntax:
964 # long, short, loclong, locmedium, locshort
966 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
967 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
968 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
969 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
970 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
971 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
972 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
973 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
974 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
975 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
976 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
977 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
978 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
979 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
980 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
981 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
982 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
983 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
984 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
985 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
986 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
987 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
988 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
989 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
990 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
991 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
992 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
993 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
994 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
995 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
996 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
997 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
998 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
999 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1000 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1001 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1002 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1003 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1004 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1005 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1006 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1007 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1008 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1009 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1010 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1011 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1012 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1013 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1014 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1015 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1016 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1017 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1018 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1019 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1020 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1021 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1022 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1023 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1024 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1025 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1026 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1027 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1028 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1029 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1030 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1031 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1032 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1033 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1034 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1035 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1036 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1037 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1038 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1039 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1040 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1041 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1042 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1043 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1044 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1045 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1046 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1047 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1048 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1049 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1050 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1051 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1052 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1053 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1054 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1055 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1056 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1057 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1058 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1059 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1060 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1061 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1062 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1063 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1064 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1065 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1066 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1067 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1070 types = ["date", "fixdate", "moddate" ]
1071 lang = get_value(document.header, "\\language")
1073 document.warning("Malformed LyX document! No \\language header found!")
1078 i = find_token(document.body, "\\begin_inset Info", i+1)
1081 j = find_end_of_inset(document.body, i+1)
1083 document.warning("Malformed LyX document: Could not find end of Info inset.")
1085 tp = find_token(document.body, 'type', i, j)
1086 tpv = get_quoted_value(document.body, "type", tp)
1087 if tpv not in types:
1089 arg = find_token(document.body, 'arg', i, j)
1090 argv = get_quoted_value(document.body, "arg", arg)
1093 if tpv == "fixdate":
1094 datecomps = argv.split('@')
1095 if len(datecomps) > 1:
1097 isodate = datecomps[1]
1098 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1100 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1101 # FIXME if we had the path to the original document (not the one in the tmp dir),
1102 # we could use the mtime.
1103 # elif tpv == "moddate":
1104 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1107 result = dte.isodate()
1108 elif argv == "long":
1109 result = dte.strftime(dateformats[lang][0])
1110 elif argv == "short":
1111 result = dte.strftime(dateformats[lang][1])
1112 elif argv == "loclong":
1113 result = dte.strftime(dateformats[lang][2])
1114 elif argv == "locmedium":
1115 result = dte.strftime(dateformats[lang][3])
1116 elif argv == "locshort":
1117 result = dte.strftime(dateformats[lang][4])
1119 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1120 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1121 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1122 fmt = re.sub('[^\'%]d', '%d', fmt)
1123 fmt = fmt.replace("'", "")
1124 result = dte.strftime(fmt)
1125 if sys.version_info < (3,0):
1126 # In Python 2, datetime module works with binary strings,
1127 # our dateformat strings are utf8-encoded:
1128 result = result.decode('utf-8')
1129 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time Info insets to static text.

    Each time-type Info inset is replaced by a literal string: the stored
    time for "fixtime" (encoded as "format@HH:MM[:SS]" in the argument),
    otherwise the current time, rendered with the per-language strftime
    patterns below.
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # [long, short]
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # Argument is "<format>@<stored ISO time>"
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
# FIXME if we had the path to the original document (not the one in the tmp dir),
# we could use the mtime.
#        elif tpv == "moddate":
#            dte = date.fromtimestamp(os.path.getmtime(document.dir))
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt time-format letters to strftime codes.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # BUGFIX: was `dte.strftime(fmt)` -- `dte` is never defined in this
            # function (NameError); the time object is `tme`.
            result = tme.strftime(fmt)
        # BUGFIX: was `= result`; assigning a plain string to a list slice
        # splices its individual characters in as separate lines.
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name.

    NOTE(review): the loop header and `if ... == -1` guards were lost in
    extraction and have been reconstructed here; verify against upstream.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # only buffer-type infos carry the name-noext argument
        # (guard value reconstructed -- TODO confirm)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info inset to its argument text.

    NOTE(review): loop header and guards were lost in extraction and have
    been reconstructed here; verify against upstream.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # BUGFIX: was `= argv`; assigning a plain string to a list slice
        # splices its individual characters in as separate lines.
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code.

    NOTE(review): loop header, guards and the `parbeg` binding were lost in
    extraction and have been reconstructed here; verify against upstream.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        # start of the paragraph body, where the ERT will be re-inserted
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to text.

    NOTE(review): loop header and guards were lost in extraction and have
    been reconstructed here; verify against upstream.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # layoutformat for LyX 2.3 is 69 (hardcoded)
        # BUGFIX: was `= "69"`; assigning a string to a list slice splices
        # the characters "6" and "9" in as two separate lines.
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Stack of paragraph languages; the innermost active one is at the end.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: was `line.lstrip('\\lang ')` -- lstrip's argument is a
            # *character set* ({\, l, a, n, g, space}), which mangles names
            # such as "greek" -> "reek".  Slice off the fixed prefix instead.
            current_languages[-1] = line[len('\\lang '):].strip()
        elif line.startswith('\\begin_layout'):
            # nested layout inherits the current language
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap "(" and ")" via a NUL placeholder so the two replaces
            # do not interfere with each other.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The swap operation is its own inverse, so this simply delegates to the
    forward converter; it exists only to keep the convert/revert naming
    convention of lyx2lyx.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT.

    NOTE(review): the `for` loop and `if i != -1` guards were lost in
    extraction and have been reconstructed here; verify against upstream.
    """
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # \usepackage{soul} is needed as soon as any soul inset occurs
    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlight additionally needs the color package
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove tablestyle params from the header.

    NOTE(review): the `if i != -1:` guard was lost in extraction and has
    been reconstructed here.
    """
    i = find_token(document.header, "\\tablestyle")
    if i != -1:
        del document.header[i]
def revert_bibfileencodings(document):
    " Revert individual Biblatex bibliography encodings "
    # NOTE(review): many lines of this function were lost in extraction
    # (guards, the inset-scan loop header, the encoding dict's braces and a
    # number of its entries, `encmap` initialization and its `for` loop).
    # The surviving statements are preserved verbatim; the indentation below
    # is a best-effort reconstruction -- compare with upstream lyx2lyx
    # before trusting control flow.
    i = find_token(document.header, "\\cite_engine", 0)
    # [lost: `if i == -1:` guard around the warning]
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # only biblatex engines support per-file encodings
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    # [lost: `encodings = {` opener and the leading utf8 entries]
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",
    # [lost: remaining entries, closing `}`, and the scan-loop header]
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    # [lost: `if i == -1: break` guard]
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encodings = get_quoted_value(document.body, "file_encodings", i, j)
    # [lost: `if not encodings:` skip]
    bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
    opts = get_quoted_value(document.body, "biblatexopts", i, j)
    if len(bibfiles) == 0:
        document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
    # remove encoding line
    k = find_token(document.body, "file_encodings", i, j)
    # [lost: `if k != -1:` guard]
    del document.body[k]
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # encodings is a tab-separated list of "<bibfile> <encoding>" pairs
    enclist = encodings.split("\t")
    # [lost: `encmap` dict initialization and `for pp in enclist:` header]
    ppp = pp.split(" ", 1)
    encmap[ppp[0]] = ppp[1]
    for bib in bibfiles:
        pr = "\\addbibresource"
        if bib in encmap.keys():
            pr += "[bibencoding=" + encmap[bib] + "]"
        pr += "{" + bib + "}"
        add_to_preamble(document, [pr])
    # Insert ERT \\printbibliography and wrap bibtex inset to a Note
    pcmd = "printbibliography"
    # [lost: `if opts:` guard]
    pcmd += "[" + opts + "]"
    repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
            "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
            "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
            "status open", "", "\\begin_layout Plain Layout" ]
    repl += document.body[i:j+1]
    repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
    document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming attributes from table cells.

    NOTE(review): the loop header and guards were lost in extraction and
    have been reconstructed here; verify against upstream.
    """
    # FIXME: Revert to TeX code?
    # hoisted out of the loop: the pattern is loop-invariant
    rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# Local layout definition for the Ruby (furigana) inset, written into
# documents that predate the ruby module.
# NOTE(review): the `ruby_inset_def = [` opener, several layout lines
# (e.g. the InsetLayout `End`, Preamble delimiters and the closing `]`),
# and the original in-string indentation were lost in extraction; the
# surviving entries are preserved verbatim below.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module in place of a local module definition."""
    # Only switch to the module if the local definition was actually present
    # (del_local_layout reports whether it removed anything).
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # del_module reports whether the module was in use; only then do we
    # need to re-insert the equivalent local layout.
    module_was_used = document.del_module("ruby")
    if module_was_used:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    NOTE(review): the early `return` after the language check was lost in
    extraction and has been reconstructed here.
    """
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    NOTE(review): the early `return` after the encoding check was lost in
    extraction and has been reconstructed here.
    """
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """Replace lineno setting with user-preamble code.

    NOTE(review): continuation lines, the early `return` and the
    `if options:` guard were lost in extraction and have been
    reconstructed here; verify against upstream.
    """
    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    """Replace user-preamble lineno code with native lineno support.

    NOTE(review): initialization of `use_lineno`/`options` and the
    `if i > -1:`/`if usepkg:`/`if options == "":` guards were lost in
    extraction and have been reconstructed here; verify against upstream.
    """
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # insert the native header settings before \index
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography).

    NOTE(review): the body-scan loop header and break guard were lost in
    extraction and have been reconstructed here.
    """
    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        body_lang = document.body[i][6:].strip()
        if body_lang in new_languages:
            # BUGFIX: was `used_languages.add(document.language)`, which
            # records the *document* language instead of the language found
            # in the body and can later raise KeyError on new_languages[lang].
            used_languages.add(body_lang)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")

    for lang in used_languages:
        # BUGFIX: was `revert(lang, *new_languages[lang])` -- `revert` is
        # undefined; the helper is revert_language and needs the document.
        revert_language(document, lang, *new_languages[lang])
# Local layout definition for the deprecated two-line Gloss inset,
# written back into documents when the linguistics glosses are reverted.
# NOTE(review): the `gloss_inset_def = [` opener, several layout lines
# (MultiPar/InToc flags, Preamble delimiters, post-argument block, closing
# `]`) and the original in-string indentation were lost in extraction; the
# surviving entries are preserved verbatim below.
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Local layout definition for the deprecated three-line Tri-Gloss inset
# (note the triple-s "glosss" naming mirrors the LaTeX environment name).
# NOTE(review): several layout lines and the closing `]` were lost in
# extraction, as was the original in-string indentation; the surviving
# entries are preserved verbatim below.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout."""
    # Attach the matching local layout for each deprecated inset that
    # actually occurs in the document body.
    deprecated_insets = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for marker, layout_def in deprecated_insets:
        if find_token(document.body, marker, 0) != -1:
            document.append_local_layout(layout_def)
def revert_linggloss(document):
    " Revert to old ling gloss definitions "
    # NOTE(review): many control-flow lines of this function were lost in
    # extraction (early `return`, loop headers, `if ... == -1` guards,
    # `else:` branches, `cmd` initialization).  The surviving statements are
    # preserved verbatim; indentation below is a best-effort reconstruction
    # -- compare with upstream lyx2lyx before trusting control flow.
    if not "linguistics" in document.get_module_list():
    # [lost: `return` -- without linguistics module there is nothing to do]
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        # [lost: `i = 0` / `while True:` inner scan loop]
        i = find_token(document.body, glosse, i+1)
        # [lost: `if i == -1: break`]
        j = find_end_of_inset(document.body, i)
        # [lost: `if j == -1:` guard]
        document.warning("Malformed LyX document: Can't find end of Gloss inset")

        # optional argument 1: pre-gloss material
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # [lost: `optargcontent = []` init and `if arg != -1:` guard]
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            # [lost: `continue`]
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        # [lost: `else:` branch]
            del document.body[arg : endarg + 1]

        # mandatory argument post:1: first gloss line
        arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # [lost: `marg1content = []` init and `if arg != -1:` guard]
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
            # [lost: `continue`]
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        # [lost: `else:` branch]
            del document.body[arg : endarg + 1]

        # mandatory argument post:2: second gloss line
        arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # [lost: `marg2content = []` init and `if arg != -1:` guard]
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
            # [lost: `continue`]
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        # [lost: `else:` branch]
            del document.body[arg : endarg + 1]

        # argument post:3: third gloss line (three-line variant only)
        arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # [lost: `marg3content = []` init and `if arg != -1:` guard]
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
            # [lost: `continue`]
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        # [lost: `else:` branch]
            del document.body[arg : endarg + 1]

        # [lost: `cmd = "\\digloss"` default]
        if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
        # [lost: `cmd = "\\trigloss"`]

        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endInset = find_end_of_inset(document.body, i)
        endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
        precontent = put_cmd_in_ert(cmd)
        if len(optargcontent) > 0:
            precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
        precontent += put_cmd_in_ert("{")

        postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
        if cmd == "\\trigloss":
            postcontent += put_cmd_in_ert("}{") + marg3content
        postcontent += put_cmd_in_ert("}")

        # splice ERT around the remaining gloss body
        document.body[endPlain:endInset + 1] = postcontent
        document.body[beginPlain + 1:beginPlain] = precontent
        del document.body[i : beginPlain + 1]
        # [lost: guard checking whether covington is already required]
        document.append_local_layout("Requires covington")
def revert_subexarg(document):
    " Revert linguistic subexamples with argument to ERT "
    # NOTE(review): several control-flow lines were lost in extraction
    # (early `return`, the outer scan loop, `if ... == -1` guards, inner
    # `while` headers and `break`/`continue` statements).  Surviving
    # statements are preserved verbatim; indentation below is a best-effort
    # reconstruction -- compare with upstream lyx2lyx before trusting
    # control flow.
    if not "linguistics" in document.get_module_list():
    # [lost: `return` -- nothing to do without the linguistics module]

    # [lost: `i = 0` / `while True:` outer scan loop]
    i = find_token(document.body, "\\begin_layout Subexample", i+1)
    # [lost: `if i == -1: break`]
    j = find_end_of_layout(document.body, i)
    # [lost: `if j == -1:` guard]
    document.warning("Malformed LyX document: Can't find end of Subexample layout")

    # check for consecutive layouts
    k = find_token(document.body, "\\begin_layout", j)
    if k == -1 or document.body[k] != "\\begin_layout Subexample":
    # [lost: `break` -- run of consecutive Subexamples ends here]
    j = find_end_of_layout(document.body, k)
    # [lost: `if j == -1:` guard]
    document.warning("Malformed LyX document: Can't find end of Subexample layout")

    # optional argument carries the subexamples environment options
    arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
    # [lost: `if arg == -1: continue` guard]
    endarg = find_end_of_inset(document.body, arg)
    argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
    if argbeginPlain == -1:
        document.warning("Malformed LyX document: Can't find optarg plain Layout")
        # [lost: `continue`]
    argendPlain = find_end_of_inset(document.body, argbeginPlain)
    optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])

    # remove Arg insets and paragraph, if it only contains this inset
    if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
        del document.body[arg - 1 : endarg + 4]
    # [lost: `else:` branch]
        del document.body[arg : endarg + 1]

    cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")

    # re-find end of layout
    j = find_end_of_layout(document.body, i)
    # [lost: `if j == -1:` guard]
    document.warning("Malformed LyX document: Can't find end of Subexample layout")

    # [lost: inner `while True:` loop header]
    # check for consecutive layouts
    k = find_token(document.body, "\\begin_layout", j)
    if k == -1 or document.body[k] != "\\begin_layout Subexample":
    # [lost: `break`; each consecutive Subexample becomes an \item]
        document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
    j = find_end_of_layout(document.body, k)
    # [lost: `if j == -1:` guard]
    document.warning("Malformed LyX document: Can't find end of Subexample layout")

    endev = put_cmd_in_ert("\\end{subexamples}")

    document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
    document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
        + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
    # [lost: guard checking whether covington is already required]
    document.append_local_layout("Requires covington")
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; loop headers, `== -1` guards, `cmd = ...` assignments in the
# if/elif chain, and `return`/`continue` lines were elided by the extraction.
# Comments only; code tokens untouched.
#
# Purpose (from visible code): replace the various Flex DRS insets of the
# linguistics module (DRS, DRS*, IfThen-DRS, Cond-DRS, QDRS, NegDRS, SDRS)
# by equivalent raw ERT commands, harvesting up to two pre-arguments and up
# to four post-arguments from Argument insets.
1994 def revert_drs(document):
1995 " Revert DRS insets (linguistics) to ERT "
1997 if not "linguistics" in document.get_module_list():
# All Flex inset variants to be reverted.
2001 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2002 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2003 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2004 "\\begin_inset Flex SDRS"]
# (outer loop over `drses` and inner scan loop elided from this excerpt)
2008 i = find_token(document.body, drs, i+1)
2011 j = find_end_of_inset(document.body, i)
2013 document.warning("Malformed LyX document: Can't find end of DRS inset")
2016 # Check for arguments
# --- Argument 1 -> prearg1content ---
2017 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2018 endarg = find_end_of_inset(document.body, arg)
2021 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2022 if argbeginPlain == -1:
2023 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2025 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2026 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2028 # remove Arg insets and paragraph, if it only contains this inset
2029 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2030 del document.body[arg - 1 : endarg + 4]
2032 del document.body[arg : endarg + 1]
# Deleting lines shifts indices, so the inset end is re-found before each
# following argument extraction.
2035 j = find_end_of_inset(document.body, i)
2037 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument 2 -> prearg2content (used by SDRS below) ---
2040 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2041 endarg = find_end_of_inset(document.body, arg)
2044 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2045 if argbeginPlain == -1:
2046 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2048 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2049 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2051 # remove Arg insets and paragraph, if it only contains this inset
2052 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2053 del document.body[arg - 1 : endarg + 4]
2055 del document.body[arg : endarg + 1]
2058 j = find_end_of_inset(document.body, i)
2060 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:1 -> postarg1content ---
2063 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2064 endarg = find_end_of_inset(document.body, arg)
2065 postarg1content = []
2067 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2068 if argbeginPlain == -1:
2069 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2071 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2072 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2074 # remove Arg insets and paragraph, if it only contains this inset
2075 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2076 del document.body[arg - 1 : endarg + 4]
2078 del document.body[arg : endarg + 1]
2081 j = find_end_of_inset(document.body, i)
2083 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:2 -> postarg2content ---
2086 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2087 endarg = find_end_of_inset(document.body, arg)
2088 postarg2content = []
2090 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2091 if argbeginPlain == -1:
2092 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2094 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2095 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2097 # remove Arg insets and paragraph, if it only contains this inset
2098 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2099 del document.body[arg - 1 : endarg + 4]
2101 del document.body[arg : endarg + 1]
2104 j = find_end_of_inset(document.body, i)
2106 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:3 -> postarg3content ---
2109 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2110 endarg = find_end_of_inset(document.body, arg)
2111 postarg3content = []
2113 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2114 if argbeginPlain == -1:
2115 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2117 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2118 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2120 # remove Arg insets and paragraph, if it only contains this inset
2121 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2122 del document.body[arg - 1 : endarg + 4]
2124 del document.body[arg : endarg + 1]
2127 j = find_end_of_inset(document.body, i)
2129 document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:4 -> postarg4content ---
2132 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2133 endarg = find_end_of_inset(document.body, arg)
2134 postarg4content = []
2136 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2137 if argbeginPlain == -1:
2138 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2140 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2141 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2143 # remove Arg insets and paragraph, if it only contains this inset
2144 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2145 del document.body[arg - 1 : endarg + 4]
2147 del document.body[arg : endarg + 1]
2149 # The respective LaTeX command
# NOTE(review): the `cmd = "\\..."` assignment lines inside this chain were
# elided by the extraction; later tests against "\\qdrs", "\\condrs",
# "\\ifdrs" show `cmd` holds the per-variant LaTeX command name.
2151 if drs == "\\begin_inset Flex DRS*":
2153 elif drs == "\\begin_inset Flex IfThen-DRS":
2155 elif drs == "\\begin_inset Flex Cond-DRS":
2157 elif drs == "\\begin_inset Flex QDRS":
2159 elif drs == "\\begin_inset Flex NegDRS":
2161 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT that surrounds the inset's own (Plain Layout) content.
2164 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2165 endInset = find_end_of_inset(document.body, i)
2166 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2167 precontent = put_cmd_in_ert(cmd)
2168 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2169 if drs == "\\begin_inset Flex SDRS":
2170 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2171 precontent += put_cmd_in_ert("{")
# qdrs/condrs/ifdrs take two extra brace groups; condrs/qdrs a third, and
# (per the line below, branch guard elided) a fourth group as well.
2174 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2175 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2176 if cmd == "\\condrs" or cmd == "\\qdrs":
2177 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2179 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2181 postcontent = put_cmd_in_ert("}")
# Splice the ERT in and drop the Flex inset wrapper itself.
2183 document.body[endPlain:endInset + 1] = postcontent
2184 document.body[beginPlain + 1:beginPlain] = precontent
2185 del document.body[i : beginPlain + 1]
# Declare covington provided locally and load the needed LaTeX packages.
2187 document.append_local_layout("Provides covington 1")
2188 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; `if j == -1:` guards, `return` lines, `else:` branches and some
# list-index manipulations were elided. Comments only; code untouched.
#
# Purpose (from visible code): when non-TeX fonts are used together with the
# babel language package, move the header font selections into explicit
# `\babelfont{...}` preamble lines and reset the header fonts to "default".
2194 def revert_babelfont(document):
2195 " Reverts the use of \\babelfont to user preamble "
# Bail-out checks: non-TeX fonts must be enabled and babel must be the
# language package (guard `return`s elided from this excerpt).
2197 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2199 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2201 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2203 i = find_token(document.header, '\\language_package', 0)
2205 document.warning("Malformed LyX document: Missing \\language_package.")
2207 if get_value(document.header, "\\language_package", 0) != "babel":
2210 # check font settings
# Defaults when a header entry is missing or unset.
2212 roman = sans = typew = "default"
2214 sf_scale = tt_scale = 100.0
# Roman font: capture the name, then reset the header entry to "default".
2216 j = find_token(document.header, "\\font_roman", 0)
2218 document.warning("Malformed LyX document: Missing \\font_roman.")
2220 # We need to use this regex since split() does not handle quote protection
2221 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2222 roman = romanfont[2].strip('"')
2223 romanfont[2] = '"default"'
2224 document.header[j] = " ".join(romanfont)
# Sans font: same capture-and-reset pattern.
2226 j = find_token(document.header, "\\font_sans", 0)
2228 document.warning("Malformed LyX document: Missing \\font_sans.")
2230 # We need to use this regex since split() does not handle quote protection
2231 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2232 sans = sansfont[2].strip('"')
2233 sansfont[2] = '"default"'
2234 document.header[j] = " ".join(sansfont)
# Typewriter font: same capture-and-reset pattern.
2236 j = find_token(document.header, "\\font_typewriter", 0)
2238 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2240 # We need to use this regex since split() does not handle quote protection
2241 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2242 typew = ttfont[2].strip('"')
2243 ttfont[2] = '"default"'
2244 document.header[j] = " ".join(ttfont)
# Old-style-figures flag, emitted as \defaultfontfeatures below.
2246 i = find_token(document.header, "\\font_osf", 0)
2248 document.warning("Malformed LyX document: Missing \\font_osf.")
2250 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans scale: parse and reset (value extraction lines partly elided;
# `val` presumably holds the scale string — see warning below).
2252 j = find_token(document.header, "\\font_sf_scale", 0)
2254 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2256 sfscale = document.header[j].split()
2259 document.header[j] = " ".join(sfscale)
2262 sf_scale = float(val)
2264 document.warning("Invalid font_sf_scale value: " + val)
# Typewriter scale: same pattern.
2266 j = find_token(document.header, "\\font_tt_scale", 0)
2268 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2270 ttscale = document.header[j].split()
2273 document.header[j] = " ".join(ttscale)
2276 tt_scale = float(val)
2278 document.warning("Invalid font_tt_scale value: " + val)
2280 # set preamble stuff
# The emitted preamble requires a Unicode TeX engine.
2281 pretext = ['%% This document must be processed with xelatex or lualatex!']
2282 pretext.append('\\AtBeginDocument{%')
2283 if roman != "default":
2284 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2285 if sans != "default":
2286 sf = '\\babelfont{sf}['
2287 if sf_scale != 100.0:
2288 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2289 sf += 'Mapping=tex-text]{' + sans + '}'
2291 if typew != "default":
2292 tw = '\\babelfont{tt}'
2293 if tt_scale != 100.0:
2294 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2295 tw += '{' + typew + '}'
2298 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2300 insert_to_preamble(document, pretext)
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; guards, `return`s and the osf/opts option-joining lines were
# elided. Comments only; code untouched.
#
# Purpose (from visible code): for TeX-font documents whose roman font is
# "minionpro" and which carry \font_roman_opts, emit a
# `\usepackage[<options>]{MinionPro}` preamble line instead, resetting the
# header roman font to "default" and dropping the opts header line.
2303 def revert_minionpro(document):
2304 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies to TeX-font (non-Unicode-font) documents.
2306 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2308 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2310 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Nothing to do unless extra roman options are present.
2313 regexp = re.compile(r'(\\font_roman_opts)')
2314 x = find_re(document.header, regexp, 0)
2318 # We need to use this regex since split() does not handle quote protection
2319 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2320 opts = romanopts[1].strip('"')
2322 i = find_token(document.header, "\\font_roman", 0)
2324 document.warning("Malformed LyX document: Missing \\font_roman.")
2327 # We need to use this regex since split() does not handle quote protection
2328 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2329 roman = romanfont[1].strip('"')
# Only the MinionPro font is handled by this revert routine.
2330 if roman != "minionpro":
2332 romanfont[1] = '"default"'
2333 document.header[i] = " ".join(romanfont)
# Old-style figures: if \font_osf is true it is switched off in the header
# (the corresponding package option handling is elided from this excerpt).
2335 j = find_token(document.header, "\\font_osf true", 0)
2338 preamble = "\\usepackage["
2340 document.header[j] = "\\font_osf false"
2344 preamble += "]{MinionPro}"
2345 add_to_preamble(document, [preamble])
# Remove the now-reverted \font_roman_opts header line.
2346 del document.header[x]
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; `if i != -1:` guards, `else:` branches, `return`s and several
# option-concatenation lines were elided. Comments only; code untouched.
#
# Purpose (from visible code): revert the per-font "opts" header entries by
# emitting `\babelfont{rm|sf|tt}[...]` (babel) or
# `\setmainfont/\setsansfont/\setmonofont[...]` preamble commands, resetting
# the header fonts to "default" and deleting the *_opts lines.
2349 def revert_font_opts(document):
2350 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2352 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2354 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2356 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2357 i = find_token(document.header, '\\language_package', 0)
2359 document.warning("Malformed LyX document: Missing \\language_package.")
# Babel selects \babelfont; otherwise fontspec's \set...font is used below.
2361 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman ---
2364 regexp = re.compile(r'(\\font_roman_opts)')
2365 i = find_re(document.header, regexp, 0)
2367 # We need to use this regex since split() does not handle quote protection
2368 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2369 opts = romanopts[1].strip('"')
2370 del document.header[i]
2372 regexp = re.compile(r'(\\font_roman)')
2373 i = find_re(document.header, regexp, 0)
2375 # We need to use this regex since split() does not handle quote protection
2376 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2377 font = romanfont[2].strip('"')
2378 romanfont[2] = '"default"'
2379 document.header[i] = " ".join(romanfont)
2380 if font != "default":
2382 preamble = "\\babelfont{rm}["
2384 preamble = "\\setmainfont["
# (option string and font name are appended here; some lines elided)
2387 preamble += "Mapping=tex-text]{"
2390 add_to_preamble(document, [preamble])
# --- sans ---
2393 regexp = re.compile(r'(\\font_sans_opts)')
2394 i = find_re(document.header, regexp, 0)
2397 # We need to use this regex since split() does not handle quote protection
2398 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2399 opts = sfopts[1].strip('"')
2400 del document.header[i]
2402 regexp = re.compile(r'(\\font_sf_scale)')
2403 i = find_re(document.header, regexp, 0)
2405 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2406 regexp = re.compile(r'(\\font_sans)')
2407 i = find_re(document.header, regexp, 0)
2409 # We need to use this regex since split() does not handle quote protection
2410 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2411 font = sffont[2].strip('"')
2412 sffont[2] = '"default"'
2413 document.header[i] = " ".join(sffont)
2414 if font != "default":
2416 preamble = "\\babelfont{sf}["
2418 preamble = "\\setsansfont["
# Scale option: "Scale=0.<scaleval>" presumably encodes a percentage as a
# decimal fraction — guard for scaleval == 100 is elided; verify upstream.
2422 preamble += "Scale=0."
2423 preamble += scaleval
2425 preamble += "Mapping=tex-text]{"
2428 add_to_preamble(document, [preamble])
# --- typewriter ---
2431 regexp = re.compile(r'(\\font_typewriter_opts)')
2432 i = find_re(document.header, regexp, 0)
2435 # We need to use this regex since split() does not handle quote protection
2436 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2437 opts = ttopts[1].strip('"')
2438 del document.header[i]
2440 regexp = re.compile(r'(\\font_tt_scale)')
2441 i = find_re(document.header, regexp, 0)
2443 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2444 regexp = re.compile(r'(\\font_typewriter)')
2445 i = find_re(document.header, regexp, 0)
2447 # We need to use this regex since split() does not handle quote protection
2448 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2449 font = ttfont[2].strip('"')
2450 ttfont[2] = '"default"'
2451 document.header[i] = " ".join(ttfont)
2452 if font != "default":
2454 preamble = "\\babelfont{tt}["
2456 preamble = "\\setmonofont["
2460 preamble += "Scale=0."
2461 preamble += scaleval
2463 preamble += "Mapping=tex-text]{"
2466 add_to_preamble(document, [preamble])
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; `== -1` guards and `return` lines were elided. Comments only.
#
# Purpose (from visible code): when roman/sans/typewriter are all set to the
# plain Noto family (roman "NotoSerif-TLF") with extra roman options,
# revert to `\usepackage[<options>]{noto}` and reset the roman header font.
2469 def revert_plainNotoFonts_xopts(document):
2470 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# Only applies to TeX-font documents.
2472 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2474 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2476 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Extra roman options must be present for this revert to apply.
2479 regexp = re.compile(r'(\\font_roman_opts)')
2480 x = find_re(document.header, regexp, 0)
2484 # We need to use this regex since split() does not handle quote protection
2485 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2486 opts = romanopts[1].strip('"')
2488 i = find_token(document.header, "\\font_roman", 0)
2492 # We need to use this regex since split() does not handle quote protection
2493 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2494 roman = romanfont[1].strip('"')
2495 if roman != "NotoSerif-TLF":
# Sans and typewriter must also be Noto variants (comparison lines elided).
2498 j = find_token(document.header, "\\font_sans", 0)
2502 # We need to use this regex since split() does not handle quote protection
2503 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2504 sf = sffont[1].strip('"')
2508 j = find_token(document.header, "\\font_typewriter", 0)
2512 # We need to use this regex since split() does not handle quote protection
2513 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2514 tt = ttfont[1].strip('"')
2518 # So we have noto as "complete font"
2519 romanfont[1] = '"default"'
2520 document.header[i] = " ".join(romanfont)
2522 preamble = "\\usepackage["
2524 preamble += "]{noto}"
2525 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2526 del document.header[x]
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; the `fontmap` initialisation and guard/return lines were elided.
#
# Purpose (from visible code): revert extended Noto font definitions (with
# extra options) via the generic font-mapping machinery, then emit the
# collected packages to the preamble.
2529 def revert_notoFonts_xopts(document):
2530 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# Only applies to TeX-font documents (guard return elided here).
2532 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2534 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2536 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Build the Noto font mapping and collect reverted packages into fontmap.
2540 fm = createFontMapping(['Noto'])
2541 revert_fonts(document, fm, fontmap, True)
2542 add_preamble_fonts(document, fontmap)
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; the `fontmap` initialisation and guard/return lines were elided.
#
# Purpose (from visible code): revert native IBM (Plex) font definitions
# with extra options via the generic font-mapping machinery.
2545 def revert_IBMFonts_xopts(document):
2546 " Revert native IBM font definition (with extra options) to LaTeX "
# Only applies to TeX-font documents (guard return elided here).
2549 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2551 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2553 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Build the IBM font mapping and collect reverted packages into fontmap.
2557 fm = createFontMapping(['IBM'])
2559 revert_fonts(document, fm, fontmap, True)
2560 add_preamble_fonts(document, fontmap)
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; the `fontmap` initialisation and guard/return lines were elided.
#
# Purpose (from visible code): revert native Adobe (Source) font definitions
# with extra options via the generic font-mapping machinery.
2563 def revert_AdobeFonts_xopts(document):
2564 " Revert native Adobe font definition (with extra options) to LaTeX "
# Only applies to TeX-font documents (guard return elided here).
2566 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2568 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2570 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Build the Adobe font mapping and collect reverted packages into fontmap.
2574 fm = createFontMapping(['Adobe'])
2576 revert_fonts(document, fm, fontmap, True)
2577 add_preamble_fonts(document, fontmap)
# NOTE(review): sparse excerpt — leading numbers are original-file line
# numbers; the `convert = [` list opener and many step entries were elided,
# so only part of each dispatch table is visible. Comments only.
#
# Module-level lyx2lyx dispatch tables: `supported_versions` names the LyX
# releases this module targets; the numbered entries map each file-format
# step to the list of conversion (forward) or reversion (backward) routines
# run for that step.
2584 supported_versions = ["2.4.0", "2.4"]
# Forward conversion steps (partial view; list opener elided above).
2586 [545, [convert_lst_literalparam]],
2591 [550, [convert_fontenc]],
2598 [557, [convert_vcsinfo]],
2599 [558, [removeFrontMatterStyles]],
2602 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2606 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2607 [566, [convert_hebrew_parentheses]],
2613 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2614 [573, [convert_inputencoding_namechange]],
2615 [574, [convert_ruby_module, convert_utf8_japanese]],
2616 [575, [convert_lineno]],
2618 [577, [convert_linggloss]],
# Backward (revert) steps, newest format first; each entry undoes one step.
2624 revert = [[579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
2625 [578, [revert_babelfont]],
2626 [577, [revert_drs]],
2627 [576, [revert_linggloss, revert_subexarg]],
2628 [575, [revert_new_languages]],
2629 [574, [revert_lineno]],
2630 [573, [revert_ruby_module, revert_utf8_japanese]],
2631 [572, [revert_inputencoding_namechange]],
2632 [571, [revert_notoFonts]],
2633 [570, [revert_cmidruletrimming]],
2634 [569, [revert_bibfileencodings]],
2635 [568, [revert_tablestyle]],
2636 [567, [revert_soul]],
2637 [566, [revert_malayalam]],
2638 [565, [revert_hebrew_parentheses]],
2639 [564, [revert_AdobeFonts]],
2640 [563, [revert_lformatinfo]],
2641 [562, [revert_listpargs]],
2642 [561, [revert_l7ninfo]],
2643 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2644 [559, [revert_timeinfo, revert_namenoextinfo]],
2645 [558, [revert_dateinfo]],
2646 [557, [addFrontMatterStyles]],
2647 [556, [revert_vcsinfo]],
2648 [555, [revert_bibencoding]],
2649 [554, [revert_vcolumns]],
2650 [553, [revert_stretchcolumn]],
2651 [552, [revert_tuftecite]],
2652 [551, [revert_floatpclass, revert_floatalignment]],
2653 [550, [revert_nospellcheck]],
2654 [549, [revert_fontenc]],
2655 [548, []],# dummy format change
2656 [547, [revert_lscape]],
2657 [546, [revert_xcharter]],
2658 [545, [revert_paratype]],
2659 [544, [revert_lst_literalparam]]
2663 if __name__ == "__main__":