1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
# Private helper: emit one \usepackage line per collected font package,
# carrying any accumulated options as a bracketed option list.
49 def add_preamble_fonts(document, fontmap):
50 " Add collected font-packages with their option to user-preamble"
# NOTE(review): this listing is line-sampled — the loop header iterating
# over fontmap's packages and the empty-options branch are elided here;
# verify against the full file before editing.
53 if len(fontmap[pkg]) > 0:
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
61 def createkey(pkg, options):
# Build the "pkg:opt1-opt2" key shared by font2pkgmap/pkg2fontmap lookups.
63 return pkg + ':' + "-".join(options)
# Fragment of class fontinfo's attribute initialisation (the class header
# and method defs are elided from this line-sampled listing).
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
# pkgkey is derived from the package name plus its options via createkey().
77 self.pkgkey = createkey(self.package, self.options)
# Fragment of class fontmapping's __init__ (def line elided from listing):
# two mirrored lookup tables plus a "package has a mapping" flag dict.
81 self.font2pkgmap = dict()
82 self.pkg2fontmap = dict()
83 self.pkginmap = dict() # defines, if a map for package exists
85 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None):
86 " Expand fontinfo mapping"
88 # fontlist: list of fontnames, each element
89 # may contain a ','-separated list of needed options
90 # like e.g. 'IBMPlexSansCondensed,condensed'
91 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
92 # scale_type: one of None, 'sf', 'tt'
93 # pkg: package defining the font. Defaults to fontname if None
94 # scaleopt: one of None, 'scale', 'scaled', or some other string
95 # to be used in scale option (e.g. scaled=0.7)
# NOTE(review): listing is line-sampled — the loop over font_list, the
# fontinfo() construction and the option-splitting lines are elided here.
98 fe.fonttype = font_type
99 fe.scaletype = scale_type
102 fe.fontname = font_name
104 fe.scaleopt = scaleopt
# When no explicit package is given, the font name doubles as package name.
106 fe.package = font_name
# Register the entry in both directions and flag the package as mapped.
110 self.font2pkgmap[font_name] = fe
111 if fe.pkgkey in self.pkg2fontmap:
112 # Repeated the same entry? Check content
113 if self.pkg2fontmap[fe.pkgkey] != font_name:
114 document.error("Something is wrong in pkgname+options <-> fontname mapping")
115 self.pkg2fontmap[fe.pkgkey] = font_name
116 self.pkginmap[fe.package] = 1
# Map a LaTeX package name + option list back to the LyX font name,
# using the pkg2fontmap built by expandFontMapping().
118 def getfontname(self, pkg, options):
120 pkgkey = createkey(pkg, options)
121 if not pkgkey in self.pkg2fontmap:
# NOTE(review): the early-return branches for "no mapping" and the final
# return of the font name are elided from this line-sampled listing.
123 fontname = self.pkg2fontmap[pkgkey]
124 if not fontname in self.font2pkgmap:
125 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Sanity check: the round-trip through font2pkgmap must yield the same key.
127 if pkgkey == self.font2pkgmap[fontname].pkgkey:
131 def createFontMapping(fontlist):
132 # Create info for known fonts for the use in
133 # convert_latexFonts() and
134 # revert_latexFonts()
136 # * Would be more handy to parse latexFonts file,
137 # but the path to this file is unknown
# Updated: the table now covers DejaVu, IBMPlex, Adobe Source and Noto.
139 # * Expand, if desired
# NOTE(review): the fontmapping() construction, the per-font dispatch
# branches ("if font == ...") and the final return are elided from this
# line-sampled listing.
141 for font in fontlist:
143 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
144 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
145 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
147 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
148 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
149 'IBMPlexSerifSemibold,semibold'],
150 "roman", None, "plex-serif")
151 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
152 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
153 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
154 "sans", "sf", "plex-sans", "scale")
155 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
156 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
157 'IBMPlexMonoSemibold,semibold'],
158 "typewriter", "tt", "plex-mono", "scale")
159 elif font == 'Adobe':
160 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro")
161 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled")
162 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled")
164 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
165 'NotoSerifThin,thin', 'NotoSerifLight,light',
166 'NotoSerifExtralight,extralight'],
167 "roman", None, "noto-serif")
168 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
169 'NotoSansThin,thin', 'NotoSansLight,light',
170 'NotoSansExtralight,extralight'],
171 "sans", "sf", "noto-sans", "scaled")
172 fm.expandFontMapping(['NotoMonoRegular'], "typewriter", "tt", "noto-mono", "scaled")
175 def convert_fonts(document, fm):
176 " Handle font definition (LaTeX preamble -> native) "
# Scan the preamble for \usepackage[opts]{pkg} lines that match a known
# font package, delete them, and set the corresponding native \font_*
# header entries (including scale values parsed from scaled=... options).
178 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
179 rscaleopt = re.compile(r'^scaled?=(.*)')
# NOTE(review): listing is line-sampled — loop initialisation, several
# continue/break branches and the oscale extraction are elided here.
182 while i < len(document.preamble):
183 i = find_re(document.preamble, rpkg, i+1)
186 mo = rpkg.search(document.preamble[i])
187 if mo == None or mo.group(2) == None:
190 options = mo.group(2).replace(' ', '').split(",")
194 while o < len(options):
195 mo = rscaleopt.search(options[o])
# Skip packages we have no mapping for.
203 if not pkg in fm.pkginmap:
206 fn = fm.getfontname(pkg, options)
# Drop the now-converted \usepackage line from the preamble.
209 del document.preamble[i]
210 fontinfo = fm.font2pkgmap[fn]
211 if fontinfo.scaletype == None:
214 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
215 fontinfo.scaleval = oscale
# Also remove the marker comment lyx2lyx put above the package line.
217 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
218 del document.preamble[i-1]
220 if fontscale != None:
221 j = find_token(document.header, fontscale, 0)
223 val = get_value(document.header, fontscale, j)
# Scale is stored as an integer percentage (e.g. 0.7 -> "070").
227 scale = "%03d" % int(float(oscale) * 100)
228 document.header[j] = fontscale + " " + scale + " " + vals[1]
229 ft = "\\font_" + fontinfo.fonttype
230 j = find_token(document.header, ft, 0)
232 val = get_value(document.header, ft, j)
233 words = val.split() # ! splits also values like '"DejaVu Sans"'
234 words[0] = '"' + fn + '"'
235 document.header[j] = ft + ' ' + ' '.join(words)
237 def revert_fonts(document, fm, fontmap):
238 " Revert native font definition to LaTeX "
239 # fonlist := list of fonts created from the same package
240 # Empty package means that the font-name is the same as the package-name
241 # fontmap (key = package, val += found options) will be filled
242 # and used later in add_preamble_fonts() to be added to user-preamble
# Walk the header's \font_* lines, reset known fonts to "default" and
# record the package + options needed to re-create them via the preamble.
244 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
245 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
# NOTE(review): listing is line-sampled — loop initialisation and several
# continue branches are elided here.
247 while i < len(document.header):
248 i = find_re(document.header, rfontscale, i+1)
251 mo = rfontscale.search(document.header[i])
254 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
255 val = get_value(document.header, ft, i)
256 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
257 font = words[0].strip('"') # TeX font name has no whitespace
258 if not font in fm.font2pkgmap:
260 fontinfo = fm.font2pkgmap[font]
261 val = fontinfo.package
262 if not val in fontmap:
# Reset the header entry to the default font.
264 words[0] = '"default"'
265 document.header[i] = ft + ' ' + ' '.join(words)
266 if fontinfo.scaleopt != None:
267 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
268 mo = rscales.search(xval)
273 # set correct scale option
274 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
275 if len(fontinfo.options) > 0:
276 fontmap[val].extend(fontinfo.options)
278 ###############################################################################
280 ### Conversion and reversion routines
282 ###############################################################################
284 def convert_inputencoding_namechange(document):
285 " Rename inputencoding settings. "
# "auto" -> "auto-legacy", "default" -> "auto-legacy-plain" (new 2.4 names).
286 i = find_token(document.header, "\\inputencoding", 0)
289 s = document.header[i].replace("auto", "auto-legacy")
290 document.header[i] = s.replace("default", "auto-legacy-plain")
292 def revert_inputencoding_namechange(document):
293 " Rename inputencoding settings. "
# Inverse of convert_inputencoding_namechange: restore the pre-2.4 names.
294 i = find_token(document.header, "\\inputencoding", 0)
297 s = document.header[i].replace("auto-legacy-plain", "default")
298 document.header[i] = s.replace("auto-legacy", "auto")
300 def convert_notoFonts(document):
301 " Handle Noto fonts definition to LaTeX "
# Only applies when TeX fonts are in use (non-TeX fonts keep their names).
303 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
304 fm = createFontMapping(['Noto'])
305 convert_fonts(document, fm)
307 def revert_notoFonts(document):
308 " Revert native Noto font definition to LaTeX "
# NOTE(review): the fontmap initialisation line is elided from this
# line-sampled listing; fontmap is filled by revert_fonts() and then
# written to the preamble.
310 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
312 fm = createFontMapping(['Noto'])
313 revert_fonts(document, fm, fontmap)
314 add_preamble_fonts(document, fontmap)
316 def convert_latexFonts(document):
317 " Handle DejaVu and IBMPlex fonts definition to LaTeX "
# Only applies when TeX fonts are in use.
319 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
320 fm = createFontMapping(['DejaVu', 'IBM'])
321 convert_fonts(document, fm)
323 def revert_latexFonts(document):
324 " Revert native DejaVu and IBMPlex font definitions to LaTeX "
# NOTE(review): the fontmap initialisation line is elided from this
# line-sampled listing.
326 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
328 fm = createFontMapping(['DejaVu', 'IBM'])
329 revert_fonts(document, fm, fontmap)
330 add_preamble_fonts(document, fontmap)
332 def convert_AdobeFonts(document):
333 " Handle Adobe Source fonts definition to LaTeX "
# Only applies when TeX fonts are in use.
335 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
336 fm = createFontMapping(['Adobe'])
337 convert_fonts(document, fm)
339 def revert_AdobeFonts(document):
340 " Revert native Adobe Source font definition to LaTeX "
# NOTE(review): the fontmap initialisation line is elided from this
# line-sampled listing.
342 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
344 fm = createFontMapping(['Adobe'])
345 revert_fonts(document, fm, fontmap)
346 add_preamble_fonts(document, fontmap)
348 def removeFrontMatterStyles(document):
349 " Remove styles Begin/EndFrontmatter"
# Delete every Begin/EndFrontmatter layout (plus trailing blank lines).
351 layouts = ['BeginFrontmatter', 'EndFrontmatter']
352 tokenend = len('\\begin_layout ')
# NOTE(review): listing is line-sampled — loop initialisation, the break
# on find failure and the blank-line skip increment are elided here.
355 i = find_token_exact(document.body, '\\begin_layout ', i+1)
358 layout = document.body[i][tokenend:].strip()
359 if layout not in layouts:
361 j = find_end_of_layout(document.body, i)
363 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
365 while document.body[j+1].strip() == '':
367 document.body[i:j+1] = []
369 def addFrontMatterStyles(document):
370 " Use styles Begin/EndFrontmatter for elsarticle"
# elsarticle-specific: wrap title-area layouts between Begin/EndFrontmatter.
372 if document.textclass != "elsarticle":
# Local helper: insert a (Begin|End)Frontmatter layout (with an advisory
# note inset) at the given body line, trimming surrounding blank lines.
375 def insertFrontmatter(prefix, line):
377 while above > 0 and document.body[above-1].strip() == '':
380 while document.body[below].strip() == '':
382 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
383 '\\begin_inset Note Note',
385 '\\begin_layout Plain Layout',
388 '\\end_inset', '', '',
# NOTE(review): listing is line-sampled — the note text, \end_layout lines
# and the first/last layout bookkeeping are elided here.
391 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
392 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
393 tokenend = len('\\begin_layout ')
397 i = find_token_exact(document.body, '\\begin_layout ', i+1)
400 layout = document.body[i][tokenend:].strip()
401 if layout not in layouts:
403 k = find_end_of_layout(document.body, i)
405 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so the earlier Begin insertion index stays valid.
412 insertFrontmatter('End', k+1)
413 insertFrontmatter('Begin', first)
416 def convert_lst_literalparam(document):
417 " Add param literal to include inset "
# Insert 'literal "true"' into every CommandInset include, preserving
# the old verbatim behavior of listing parameters.
421 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
424 j = find_end_of_inset(document.body, i)
426 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip past the existing parameter lines to the first blank line.
428 while i < j and document.body[i].strip() != '':
430 document.body.insert(i, 'literal "true"')
433 def revert_lst_literalparam(document):
434 " Remove param literal from include inset "
# Inverse of convert_lst_literalparam: drop the 'literal' parameter line.
438 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
441 j = find_end_of_inset(document.body, i)
443 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
445 del_token(document.body, 'literal', i, j)
448 def revert_paratype(document):
449 " Revert ParaType font definitions to LaTeX "
# Reset PT Serif/Sans/Mono header entries to "default" and re-create them
# via preamble \usepackage lines, carrying over sf/tt scale factors.
451 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
453 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
454 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
455 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
456 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
457 sfval = get_value(document.header, "\\font_sf_scale", 0)
# NOTE(review): listing is line-sampled — the scale-value splitting and
# several conditional branches are elided here.
462 sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
463 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
464 ttval = get_value(document.header, "\\font_tt_scale", 0)
469 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set -> the umbrella 'paratype' package suffices.
470 if i1 != -1 and i2 != -1 and i3!= -1:
471 add_to_preamble(document, ["\\usepackage{paratype}"])
474 add_to_preamble(document, ["\\usepackage{PTSerif}"])
475 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
478 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
480 add_to_preamble(document, ["\\usepackage{PTSans}"])
481 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
484 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
486 add_to_preamble(document, ["\\usepackage{PTMono}"])
487 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
490 def revert_xcharter(document):
491 " Revert XCharter font definitions to LaTeX "
493 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
497 # replace unsupported font setting
498 document.header[i] = document.header[i].replace("xcharter", "default")
499 # no need for preamble code with system fonts
500 if get_bool_value(document.header, "\\use_non_tex_fonts"):
503 # transfer old style figures setting to package options
504 j = find_token(document.header, "\\font_osf true")
507 document.header[j] = "\\font_osf false"
# NOTE(review): the options-assembly lines are elided from this
# line-sampled listing; options presumably becomes "[osf]" or "".
511 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
514 def revert_lscape(document):
515 " Reverts the landscape environment (Landscape module) to TeX-code "
517 if not "landscape" in document.get_module_list():
# NOTE(review): loop initialisation and break-on-failure branches are
# elided from this line-sampled listing.
522 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
525 j = find_end_of_inset(document.body, i)
527 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant needs afterpage{} so the rotation starts on a new page.
530 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
531 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
532 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
533 add_to_preamble(document, ["\\usepackage{afterpage}"])
535 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
536 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
538 add_to_preamble(document, ["\\usepackage{pdflscape}"])
541 def convert_fontenc(document):
542 " Convert default fontenc setting "
# 2.4 renamed the default \fontencoding value "global" to "auto".
544 i = find_token(document.header, "\\fontencoding global", 0)
548 document.header[i] = document.header[i].replace("global", "auto")
551 def revert_fontenc(document):
552 " Revert default fontenc setting "
# Inverse of convert_fontenc: "auto" back to the pre-2.4 name "global".
554 i = find_token(document.header, "\\fontencoding auto", 0)
558 document.header[i] = document.header[i].replace("auto", "global")
561 def revert_nospellcheck(document):
562 " Remove nospellcheck font info param "
# NOTE(review): loop scaffolding and the deletion line are elided from
# this line-sampled listing; only the token search is visible.
566 i = find_token(document.body, '\\nospellcheck', i)
572 def revert_floatpclass(document):
573 " Remove float placement params 'document' and 'class' "
# Strip the header-level "class" placement and per-float placement params
# that older LyX versions do not understand.
575 del_token(document.header, "\\float_placement class")
579 i = find_token(document.body, '\\begin_inset Float', i+1)
582 j = find_end_of_inset(document.body, i)
583 k = find_token(document.body, 'placement class', i, i + 2)
585 k = find_token(document.body, 'placement document', i, i + 2)
# NOTE(review): the deletion/rewrite branches are elided from this
# line-sampled listing.
592 def revert_floatalignment(document):
593 " Remove float alignment params "
# Global default from the header (deleted in the same step):
595 galignment = get_value(document.header, "\\float_alignment", delete=True)
599 i = find_token(document.body, '\\begin_inset Float', i+1)
602 j = find_end_of_inset(document.body, i)
604 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
606 k = find_token(document.body, 'alignment', i, i+4)
610 alignment = get_value(document.body, "alignment", k)
# "document" means: inherit the document-wide default alignment.
611 if alignment == "document":
612 alignment = galignment
614 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
616 document.warning("Can't find float layout!")
# Map the alignment to the equivalent LaTeX command, emitted as ERT.
619 if alignment == "left":
620 alcmd = put_cmd_in_ert("\\raggedright{}")
621 elif alignment == "center":
622 alcmd = put_cmd_in_ert("\\centering{}")
623 elif alignment == "right":
624 alcmd = put_cmd_in_ert("\\raggedleft{}")
626 document.body[l+1:l+1] = alcmd
629 def revert_tuftecite(document):
630 " Revert \\cite commands in tufte classes "
# Tufte classes define their own cite commands; rebuild them as raw ERT.
632 tufte = ["tufte-book", "tufte-handout"]
633 if document.textclass not in tufte:
638 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
641 j = find_end_of_inset(document.body, i)
643 document.warning("Can't find end of citation inset at line %d!!" %(i))
645 k = find_token(document.body, "LatexCommand", i, j)
647 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
650 cmd = get_value(document.body, "LatexCommand", k)
# NOTE(review): the cmd filter and the res initialisation ("\\" + cmd)
# are elided from this line-sampled listing.
654 pre = get_quoted_value(document.body, "before", i, j)
655 post = get_quoted_value(document.body, "after", i, j)
656 key = get_quoted_value(document.body, "key", i, j)
658 document.warning("Citation inset at line %d does not have a key!" %(i))
660 # Replace command with ERT
663 res += "[" + pre + "]"
665 res += "[" + post + "]"
668 res += "{" + key + "}"
669 document.body[i:j+1] = put_cmd_in_ert([res])
673 def revert_stretchcolumn(document):
674 " We remove the column varwidth flags or everything else will become a mess. "
677 i = find_token(document.body, "\\begin_inset Tabular", i+1)
680 j = find_end_of_inset(document.body, i+1)
682 document.warning("Malformed LyX document: Could not find end of tabular.")
# Drop the varwidth attribute from every <column ...> tag in the table.
684 for k in range(i, j):
685 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
686 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
687 document.body[k] = document.body[k].replace(' varwidth="true"', '')
690 def revert_vcolumns(document):
691 " Revert standard columns with line breaks etc. "
# Converts cells containing line breaks / multiple paragraphs into
# varwidth "V{...}" columns and rewrites in-cell newlines as ERT.
697 i = find_token(document.body, "\\begin_inset Tabular", i+1)
700 j = find_end_of_inset(document.body, i)
702 document.warning("Malformed LyX document: Could not find end of tabular.")
705 # Collect necessary column information
# Rows/cols counts are attribute values 3 and 5 on the <features> line.
707 nrows = int(document.body[i+1].split('"')[3])
708 ncols = int(document.body[i+1].split('"')[5])
710 for k in range(ncols):
711 m = find_token(document.body, "<column", m)
712 width = get_option_value(document.body[m], 'width')
713 varwidth = get_option_value(document.body[m], 'varwidth')
714 alignment = get_option_value(document.body[m], 'alignment')
715 special = get_option_value(document.body[m], 'special')
716 col_info.append([width, varwidth, alignment, special, m])
# Walk every cell and decide whether it needs a varwidth column.
721 for row in range(nrows):
722 for col in range(ncols):
723 m = find_token(document.body, "<cell", m)
724 multicolumn = get_option_value(document.body[m], 'multicolumn')
725 multirow = get_option_value(document.body[m], 'multirow')
726 width = get_option_value(document.body[m], 'width')
727 rotate = get_option_value(document.body[m], 'rotate')
728 # Check for: linebreaks, multipars, non-standard environments
730 endcell = find_token(document.body, "</cell>", begcell)
732 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
734 elif count_pars_in_inset(document.body, begcell + 2) > 1:
736 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
# Only plain, unrotated, width-less cells qualify for conversion.
738 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
739 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
741 alignment = col_info[col][2]
742 col_line = col_info[col][4]
744 if alignment == "center":
745 vval = ">{\\centering}"
746 elif alignment == "left":
747 vval = ">{\\raggedright}"
748 elif alignment == "right":
749 vval = ">{\\raggedleft}"
752 vval += "V{\\linewidth}"
# Inject the computed special value into the <column ...> tag.
754 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
755 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
756 # with newlines, and we do not want that)
758 endcell = find_token(document.body, "</cell>", begcell)
760 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
762 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
766 nle = find_end_of_inset(document.body, nl)
767 del(document.body[nle:nle+1])
769 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
771 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
# Add the support packages only if actually needed.
777 if needarray == True:
778 add_to_preamble(document, ["\\usepackage{array}"])
779 if needvarwidth == True:
780 add_to_preamble(document, ["\\usepackage{varwidth}"])
783 def revert_bibencoding(document):
784 " Revert bibliography encoding "
# For biblatex: move the per-bibliography encoding into \biblio_options;
# for bibtex: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT.
788 i = find_token(document.header, "\\cite_engine", 0)
790 document.warning("Malformed document! Missing \\cite_engine")
792 engine = get_value(document.header, "\\cite_engine", i)
796 if engine in ["biblatex", "biblatex-natbib"]:
799 # Map lyx to latex encoding names
# NOTE(review): this encodings table is abridged in this line-sampled
# listing — many entries (cp*, koi8, etc.) are elided.
803 "armscii8" : "armscii8",
804 "iso8859-1" : "latin1",
805 "iso8859-2" : "latin2",
806 "iso8859-3" : "latin3",
807 "iso8859-4" : "latin4",
808 "iso8859-5" : "iso88595",
809 "iso8859-6" : "8859-6",
810 "iso8859-7" : "iso-8859-7",
811 "iso8859-8" : "8859-8",
812 "iso8859-9" : "latin5",
813 "iso8859-13" : "latin7",
814 "iso8859-15" : "latin9",
815 "iso8859-16" : "latin10",
816 "applemac" : "applemac",
818 "cp437de" : "cp437de",
835 "utf8-platex" : "utf8",
842 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
845 j = find_end_of_inset(document.body, i)
847 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
849 encoding = get_quoted_value(document.body, "encoding", i, j)
852 # remove encoding line
853 k = find_token(document.body, "encoding", i, j)
856 if encoding == "default":
858 # Re-find inset end line
859 j = find_end_of_inset(document.body, i)
862 h = find_token(document.header, "\\biblio_options", 0)
864 biblio_options = get_value(document.header, "\\biblio_options", h)
865 if not "bibencoding" in biblio_options:
866 document.header[h] += ",bibencoding=%s" % encodings[encoding]
868 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
870 # this should not happen
871 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
# No existing \biblio_options: insert a fresh one before the bibstyle line.
873 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# bibtex branch: scope the encoding switch around the inset with ERT.
875 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
876 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
882 def convert_vcsinfo(document):
883 " Separate vcs Info inset from buffer Info inset. "
# Old "buffer" Info insets with vcs-* args become the new "vcs" type,
# with the "vcs-" prefix stripped from the argument.
886 "vcs-revision" : "revision",
887 "vcs-tree-revision" : "tree-revision",
888 "vcs-author" : "author",
# NOTE(review): the dict opener and the vcs-time/vcs-date entries appear
# elided from this line-sampled listing.
894 i = find_token(document.body, "\\begin_inset Info", i+1)
897 j = find_end_of_inset(document.body, i+1)
899 document.warning("Malformed LyX document: Could not find end of Info inset.")
901 tp = find_token(document.body, 'type', i, j)
902 tpv = get_quoted_value(document.body, "type", tp)
905 arg = find_token(document.body, 'arg', i, j)
906 argv = get_quoted_value(document.body, "arg", arg)
907 if argv not in list(types.keys()):
909 document.body[tp] = "type \"vcs\""
910 document.body[arg] = "arg \"" + types[argv] + "\""
913 def revert_vcsinfo(document):
914 " Merge vcs Info inset to buffer Info inset. "
# Inverse of convert_vcsinfo: fold "vcs" Info insets back into the
# "buffer" type with a "vcs-" argument prefix.
916 args = ["revision", "tree-revision", "author", "time", "date" ]
919 i = find_token(document.body, "\\begin_inset Info", i+1)
922 j = find_end_of_inset(document.body, i+1)
924 document.warning("Malformed LyX document: Could not find end of Info inset.")
926 tp = find_token(document.body, 'type', i, j)
927 tpv = get_quoted_value(document.body, "type", tp)
930 arg = find_token(document.body, 'arg', i, j)
931 argv = get_quoted_value(document.body, "arg", arg)
933 document.warning("Malformed Info inset. Invalid vcs arg.")
935 document.body[tp] = "type \"buffer\""
936 document.body[arg] = "arg \"vcs-" + argv + "\""
939 def revert_dateinfo(document):
940 " Revert date info insets to static text. "
942 # FIXME This currently only considers the main language and uses the system locale
943 # Ideally, it should honor context languages and switch the locale accordingly.
945 # The date formats for each language using strftime syntax:
946 # long, short, loclong, locmedium, locshort
948 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
949 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
950 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
951 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
952 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
953 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
954 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
955 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
956 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
957 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
958 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
959 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
960 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
961 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
962 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
963 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
964 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
965 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
966 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
967 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
968 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
969 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
970 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
971 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
972 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
973 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
974 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
975 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
976 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
977 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
978 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
979 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
980 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
981 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
982 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
983 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
984 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
985 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
986 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
987 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
988 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
989 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
990 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
991 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
992 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
993 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
994 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
995 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
996 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
997 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
998 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
999 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1000 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1001 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1002 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1003 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1004 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1005 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1006 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1007 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1008 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1009 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1010 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1011 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1012 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1013 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1014 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1015 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1016 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1017 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1018 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1019 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1020 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1021 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1022 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1023 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1024 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1025 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1026 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1027 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1028 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1029 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1030 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1031 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1032 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1033 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1034 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1035 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1036 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1037 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1038 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1039 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1040 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1041 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1042 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1043 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1044 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1045 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1046 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1047 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1048 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1049 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1052 types = ["date", "fixdate", "moddate" ]
1053 lang = get_value(document.header, "\\language")
1055 document.warning("Malformed LyX document! No \\language header found!")
1060 i = find_token(document.body, "\\begin_inset Info", i+1)
1063 j = find_end_of_inset(document.body, i+1)
1065 document.warning("Malformed LyX document: Could not find end of Info inset.")
1067 tp = find_token(document.body, 'type', i, j)
1068 tpv = get_quoted_value(document.body, "type", tp)
1069 if tpv not in types:
1071 arg = find_token(document.body, 'arg', i, j)
1072 argv = get_quoted_value(document.body, "arg", arg)
1075 if tpv == "fixdate":
1076 datecomps = argv.split('@')
1077 if len(datecomps) > 1:
1079 isodate = datecomps[1]
1080 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1082 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1083 # FIXME if we had the path to the original document (not the one in the tmp dir),
1084 # we could use the mtime.
1085 # elif tpv == "moddate":
1086 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1089 result = dte.isodate()
1090 elif argv == "long":
1091 result = dte.strftime(dateformats[lang][0])
1092 elif argv == "short":
1093 result = dte.strftime(dateformats[lang][1])
1094 elif argv == "loclong":
1095 result = dte.strftime(dateformats[lang][2])
1096 elif argv == "locmedium":
1097 result = dte.strftime(dateformats[lang][3])
1098 elif argv == "locshort":
1099 result = dte.strftime(dateformats[lang][4])
1101 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1102 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1103 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1104 fmt = re.sub('[^\'%]d', '%d', fmt)
1105 fmt = fmt.replace("'", "")
1106 result = dte.strftime(fmt)
1107 if sys.version_info < (3,0):
1108 # In Python 2, datetime module works with binary strings,
1109 # our dateformat strings are utf8-encoded:
1110 result = result.decode('utf-8')
1111 document.body[i : j+1] = [result]
# Revert "time" Info insets (time/fixtime/modtime) to static text, formatted
# per-language with strftime.
# NOTE(review): this listing embeds original line numbers and elides some
# lines (visible as gaps in the embedded numbering); the code below is kept
# byte-identical to the dump.
1114 def revert_timeinfo(document):
1115     " Revert time info insets to static text. "
1117 # FIXME This currently only considers the main language and uses the system locale
1118 # Ideally, it should honor context languages and switch the locale accordingly.
1119 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1122 # The time formats for each language using strftime syntax:
# Mapping: LyX language name -> [long time format, short time format].
1125 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
1126 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
1127 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1128 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1129 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
1130 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1131 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1132 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
1133 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
1134 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1135 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
1136 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
1137 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1138 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
1139 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
1140 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
1141 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
1142 "breton" : ["%H:%M:%S %Z", "%H:%M"],
1143 "british" : ["%H:%M:%S %Z", "%H:%M"],
1144 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
1145 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1146 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
1147 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
1148 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
1149 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1150 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
1151 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
1152 "czech" : ["%H:%M:%S %Z", "%H:%M"],
1153 "danish" : ["%H.%M.%S %Z", "%H.%M"],
1154 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
1155 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
1156 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1157 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
1158 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
1159 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
1160 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
1161 "french" : ["%H:%M:%S %Z", "%H:%M"],
1162 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
1163 "galician" : ["%H:%M:%S %Z", "%H:%M"],
1164 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
1165 "german" : ["%H:%M:%S %Z", "%H:%M"],
1166 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
1167 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
1168 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1169 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
1170 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1171 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
1172 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
1173 "irish" : ["%H:%M:%S %Z", "%H:%M"],
1174 "italian" : ["%H:%M:%S %Z", "%H:%M"],
1175 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
1176 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
1177 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1178 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
1179 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1180 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1181 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
1182 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1183 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
1184 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1185 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1186 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1187 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1188 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1189 "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1190 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1191 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1192 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1193 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1194 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1195 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1196 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1197 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1198 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1199 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1200 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1201 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1202 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1203 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1204 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1205 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1206 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1207 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1208 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1209 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1210 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1211 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1212 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1213 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1214 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1215 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1216 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1217 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1218 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1219 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1220 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1221 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1222 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1223 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1224 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1225 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1226 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
# Only these Info inset types carry a time value.
1229 types = ["time", "fixtime", "modtime" ]
1231 i = find_token(document.header, "\\language", 0)
1233 # this should not happen
1234 document.warning("Malformed LyX document! No \\language header found!")
1236 lang = get_value(document.header, "\\language", i)
# Iterate over all Info insets in the body.
1240 i = find_token(document.body, "\\begin_inset Info", i+1)
1243 j = find_end_of_inset(document.body, i+1)
1245 document.warning("Malformed LyX document: Could not find end of Info inset.")
1247 tp = find_token(document.body, 'type', i, j)
1248 tpv = get_quoted_value(document.body, "type", tp)
1249 if tpv not in types:
1251 arg = find_token(document.body, 'arg', i, j)
1252 argv = get_quoted_value(document.body, "arg", arg)
# Default time source: now. For "fixtime", the fixed time is encoded in the
# argument after '@' in ISO form.
1254 dtme = datetime.now()
1256 if tpv == "fixtime":
1257 timecomps = argv.split('@')
1258 if len(timecomps) > 1:
1260 isotime = timecomps[1]
1261 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1263 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
# Fall back to HH:MM when no seconds are given.
1265 m = re.search('(\d\d):(\d\d)', isotime)
1267 tme = time(int(m.group(1)), int(m.group(2)))
1268 # FIXME if we had the path to the original document (not the one in the tmp dir),
1269 # we could use the mtime.
1270 # elif tpv == "moddate":
1271 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1274 result = tme.isoformat()
1275 elif argv == "long":
1276 result = tme.strftime(timeformats[lang][0])
1277 elif argv == "short":
1278 result = tme.strftime(timeformats[lang][1])
# Custom Qt-style format string: translate Qt tokens to strftime tokens.
1280 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1281 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1282 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1283 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1284 fmt = fmt.replace("'", "")
# NOTE(review): `dte` looks wrong here — every other branch of this function
# formats `tme` (cf. line 1274); `dte` belongs to revert_dateinfo. Confirm
# against upstream and rename if so.
1285 result = dte.strftime(fmt)
# NOTE(review): revert_dateinfo assigns `[result]` here; assigning a bare
# string to a list slice splices it character-by-character — verify.
1286 document.body[i : j+1] = result
# Rewrite Info insets of type "name-noext" (new in 2.4) back to type "name".
# NOTE(review): loop scaffolding (counter init, break/continue lines) is
# elided in this dump; code lines are kept byte-identical.
1289 def revert_namenoextinfo(document):
1290     " Merge buffer Info inset type name-noext to name. "
1294 i = find_token(document.body, "\\begin_inset Info", i+1)
1297 j = find_end_of_inset(document.body, i+1)
1299 document.warning("Malformed LyX document: Could not find end of Info inset.")
1301 tp = find_token(document.body, 'type', i, j)
1302 tpv = get_quoted_value(document.body, "type", tp)
1305 arg = find_token(document.body, 'arg', i, j)
1306 argv = get_quoted_value(document.body, "arg", arg)
1307 if argv != "name-noext":
# Replace the argument line so the inset becomes a plain "name" Info inset.
1309 document.body[arg] = "arg \"name\""
# Replace "l7n" (localization) Info insets with their plain-text argument.
# NOTE(review): loop scaffolding is elided in this dump; code lines are kept
# byte-identical.
1312 def revert_l7ninfo(document):
1313     " Revert l7n Info inset to text. "
1317 i = find_token(document.body, "\\begin_inset Info", i+1)
1320 j = find_end_of_inset(document.body, i+1)
1322 document.warning("Malformed LyX document: Could not find end of Info inset.")
1324 tp = find_token(document.body, 'type', i, j)
1325 tpv = get_quoted_value(document.body, "type", tp)
1328 arg = find_token(document.body, 'arg', i, j)
1329 argv = get_quoted_value(document.body, "arg", arg)
1330 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# The " & " -> "</amp;>" round-trip is a temporary placeholder so that a
# standalone ampersand can be stripped without losing literal " & ".
1331 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# NOTE(review): assigning a bare string to a list slice splices it
# character-by-character; sibling reverters use a one-element list — verify.
1332 document.body[i : j+1] = argv
# Turn "listpreamble" Argument insets into an ERT inset ({...}) placed at the
# start of the containing paragraph.
# NOTE(review): loop scaffolding is elided in this dump; in particular the
# line defining `parbeg` (presumably taken from `parent`) is not visible.
1335 def revert_listpargs(document):
1336     " Reverts listpreamble arguments to TeX-code "
1339 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1342 j = find_end_of_inset(document.body, i)
1343 # Find containing paragraph layout
1344 parent = get_containing_layout(document.body, i)
1346 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's Plain Layout content, drop the Argument inset, and
# re-insert the content wrapped in a collapsed ERT group.
1349 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1350 endPlain = find_end_of_layout(document.body, beginPlain)
1351 content = document.body[beginPlain + 1 : endPlain]
1352 del document.body[i:j+1]
1353 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1354 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1355 document.body[parbeg : parbeg] = subst
# Replace "lyxinfo layoutformat" Info insets with a literal format number.
# NOTE(review): loop scaffolding is elided in this dump.
1358 def revert_lformatinfo(document):
1359     " Revert layout format Info inset to text. "
1363 i = find_token(document.body, "\\begin_inset Info", i+1)
1366 j = find_end_of_inset(document.body, i+1)
1368 document.warning("Malformed LyX document: Could not find end of Info inset.")
1370 tp = find_token(document.body, 'type', i, j)
1371 tpv = get_quoted_value(document.body, "type", tp)
1372 if tpv != "lyxinfo":
1374 arg = find_token(document.body, 'arg', i, j)
1375 argv = get_quoted_value(document.body, "arg", arg)
1376 if argv != "layoutformat":
# "69" is presumably the layout format current for the target (pre-2.4)
# file format — confirm against lib/scripts/layout2layout.py.
1379 document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Stack of the language active in each nested layout; the document
    # language is the outermost entry.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: lstrip('\\lang ') strips any of the *characters*
            # "\lang " from the left, mangling names such as "galician"
            # (-> "ician"). Slice off the literal prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # A nested layout inherits the current language until
            # an explicit \lang switch.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' via a NUL placeholder (cannot occur in
            # LyX source text).
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    " Store parentheses in Hebrew text reversed"
    # Swapping '(' and ')' is an involution, so reverting is the same
    # operation as converting; this wrapper only exists to satisfy the
    # convert/revert naming convention of the conversion tables.
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    " Set the document language to English but assure Malayalam output "
    # No babel name is passed (empty string): older formats support
    # Malayalam only through the generic language-reversion machinery.
    revert_language(document, "malayalam", "", "malayalam")
# Replace the soul module's character styles with raw soul.sty commands.
# NOTE(review): loop/conditional scaffolding is elided in this dump; code
# lines are kept byte-identical.
1414 def revert_soul(document):
1415     " Revert soul module flex insets to ERT "
1417 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# If any soul flex inset is present, load soul.sty in the preamble.
1420 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1422 add_to_preamble(document, ["\\usepackage{soul}"])
# Highlight additionally needs the color package.
1424 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1426 add_to_preamble(document, ["\\usepackage{color}"])
# Rewrite each flex inset to the corresponding soul macro.
1428 revert_flex_inset(document.body, "Spaceletters", "\\so")
1429 revert_flex_inset(document.body, "Strikethrough", "\\st")
1430 revert_flex_inset(document.body, "Underline", "\\ul")
1431 revert_flex_inset(document.body, "Highlight", "\\hl")
1432 revert_flex_inset(document.body, "Capitalize", "\\caps")
# Drop the (2.4-only) \tablestyle header parameter.
# NOTE(review): the guard around the deletion is elided in this dump.
1435 def revert_tablestyle(document):
1436     " Remove tablestyle params "
1439 i = find_token(document.header, "\\tablestyle")
1441 del document.header[i]
# Revert per-bibliography-file encodings (biblatex): emit \addbibresource
# lines with bibencoding options in the preamble, replace the bibtex inset
# with an ERT \printbibliography, and keep the original inset in a Note.
# NOTE(review): control-flow scaffolding is elided in this dump; code lines
# are kept byte-identical.
1444 def revert_bibfileencodings(document):
1445     " Revert individual Biblatex bibliography encodings "
1449 i = find_token(document.header, "\\cite_engine", 0)
1451 document.warning("Malformed document! Missing \\cite_engine")
1453 engine = get_value(document.header, "\\cite_engine", i)
# Only biblatex engines support per-file encodings.
1457 if engine in ["biblatex", "biblatex-natbib"]:
1460 # Map lyx to latex encoding names
1464 "armscii8" : "armscii8",
1465 "iso8859-1" : "latin1",
1466 "iso8859-2" : "latin2",
1467 "iso8859-3" : "latin3",
1468 "iso8859-4" : "latin4",
1469 "iso8859-5" : "iso88595",
1470 "iso8859-6" : "8859-6",
1471 "iso8859-7" : "iso-8859-7",
1472 "iso8859-8" : "8859-8",
1473 "iso8859-9" : "latin5",
1474 "iso8859-13" : "latin7",
1475 "iso8859-15" : "latin9",
1476 "iso8859-16" : "latin10",
1477 "applemac" : "applemac",
1479 "cp437de" : "cp437de",
1487 "cp1250" : "cp1250",
1488 "cp1251" : "cp1251",
1489 "cp1252" : "cp1252",
1490 "cp1255" : "cp1255",
1491 "cp1256" : "cp1256",
1492 "cp1257" : "cp1257",
1493 "koi8-r" : "koi8-r",
1494 "koi8-u" : "koi8-u",
1496 "utf8-platex" : "utf8",
# Iterate over all bibtex command insets.
1503 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1506 j = find_end_of_inset(document.body, i)
1508 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1510 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1514 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1515 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1516 if len(bibfiles) == 0:
1517 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1518 # remove encoding line
1519 k = find_token(document.body, "file_encodings", i, j)
1521 del document.body[k]
1522 # Re-find inset end line
1523 j = find_end_of_inset(document.body, i)
# file_encodings stores "file enc" pairs separated by tabs.
1525 enclist = encodings.split("\t")
1528 ppp = pp.split(" ", 1)
1529 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per file, with bibencoding where known.
1530 for bib in bibfiles:
1531 pr = "\\addbibresource"
1532 if bib in encmap.keys():
1533 pr += "[bibencoding=" + encmap[bib] + "]"
1534 pr += "{" + bib + "}"
1535 add_to_preamble(document, [pr])
1536 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1537 pcmd = "printbibliography"
1539 pcmd += "[" + opts + "]"
1540 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1541 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1542 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1543 "status open", "", "\\begin_layout Plain Layout" ]
1544 repl += document.body[i:j+1]
1545 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1546 document.body[i:j+1] = repl
# Strip the (2.4-only) cmidrule trim attributes from table cell tags.
# NOTE(review): loop scaffolding is elided in this dump.
1552 def revert_cmidruletrimming(document):
1553     " Remove \\cmidrule trimming "
1555 # FIXME: Revert to TeX code?
1558 # first, let's find out if we need to do anything
1559 i = find_token(document.body, '<cell ', i+1)
# Cheap substring probe before paying for the regex substitution.
1562 j = document.body[i].find('trim="')
1565 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1566 # remove trim option
1567 document.body[i] = rgx.sub('', document.body[i])
# Visible portion of the `ruby_inset_def` local-layout definition (the
# opening `ruby_inset_def = [` line and several entries are elided in this
# dump). These raw strings are the layout text inserted/removed by
# convert_ruby_module / revert_ruby_module below.
1571 r'### Inserted by lyx2lyx (ruby inset) ###',
1572 r'InsetLayout Flex:Ruby',
1573 r' LyxType charstyle',
1574 r' LatexType command',
1578 r' HTMLInnerTag rb',
1579 r' HTMLInnerAttr ""',
1581 r' LabelString "Ruby"',
1582 r' Decoration Conglomerate',
# LaTeX preamble: pick a ruby implementation per engine (pTeX, LuaTeX,
# XeTeX) with a \shortstack fallback.
1584 r' \ifdefined\kanjiskip',
1585 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1586 r' \else \ifdefined\luatexversion',
1587 r' \usepackage{luatexja-ruby}',
1588 r' \else \ifdefined\XeTeXversion',
1589 r' \usepackage{ruby}%',
1591 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1593 r' Argument post:1',
1594 r' LabelString "ruby text"',
1595 r' MenuString "Ruby Text|R"',
1596 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1597 r' Decoration Conglomerate',
def convert_ruby_module(document):
    " Use ruby module instead of local module definition "
    # del_local_layout presumably returns a truthy value only when the
    # local definition was actually found and removed — in that case the
    # document switches to the packaged "ruby" module instead.
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    " Replace ruby module with local module definition "
    # Inverse of convert_ruby_module: if the "ruby" module was in use,
    # re-insert the equivalent local layout definition.
    module_removed = document.del_module("ruby")
    if module_removed:
        document.append_local_layout(ruby_inset_def)
# Japanese documents used dedicated utf8 variants (utf8-platex / utf8-cjk);
# in 2.4 they can use generic utf8.
# NOTE(review): the early-return line of the language guard is elided in
# this dump; code lines are kept byte-identical.
1620 def convert_utf8_japanese(document):
1621     " Use generic utf8 with Japanese documents."
1622 lang = get_value(document.header, "\\language")
1623 if not lang.startswith("japanese"):
1625 inputenc = get_value(document.header, "\\inputencoding")
# Only the matching language/encoding pairs are converted.
1626 if ((lang == "japanese" and inputenc == "utf8-platex")
1627 or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
1628 document.set_parameter("inputencoding", "utf8")
# Inverse of convert_utf8_japanese: map generic utf8 back to the Japanese
# engine-specific utf8 variants.
# NOTE(review): the early-return line of the encoding guard is elided in
# this dump; code lines are kept byte-identical.
1630 def revert_utf8_japanese(document):
1631     " Use Japanese utf8 variants with Japanese documents."
1632 inputenc = get_value(document.header, "\\inputencoding")
1633 if inputenc != "utf8":
1635 lang = get_value(document.header, "\\language")
1636 if lang == "japanese":
1637 document.set_parameter("inputencoding", "utf8-platex")
1638 if lang == "japanese-cjk":
1639 document.set_parameter("inputencoding", "utf8-cjk")
# Remove the native \use_lineno / \lineno_options header settings and emit
# equivalent lineno.sty preamble code instead.
# NOTE(review): several lines (the continuation of the get_quoted_value and
# add_to_preamble calls, and intervening guards) are elided in this dump.
1642 def revert_lineno(document):
1643     " Replace lineno setting with user-preamble code."
1645 options = get_quoted_value(document.header, "\\lineno_options",
1647 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1650 options = "[" + options + "]"
1651 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# Detect "\usepackage[...]{lineno} ... \linenumbers" in the user preamble,
# remove it, and set the native \use_lineno (and \lineno_options) header
# parameters instead.
# NOTE(review): guards and the `use_lineno` assignment are elided in this
# dump; code lines are kept byte-identical.
1654 def convert_lineno(document):
1655     " Replace user-preamble code with native lineno support."
1658 i = find_token(document.preamble, "\\linenumbers", 1)
1660 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1663 options = usepkg.group(1).strip("[]")
# Remove the two preamble lines and the lyx2lyx marker comment before them.
1664 del(document.preamble[i-1:i+1])
1665 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the header parameters right before the \index entries.
1667 k = find_token(document.header, "\\index ")
1669 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1671 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1672 "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography).

    These languages are new in the 2.4 file format; for older formats
    they are emulated through babel/polyglossia preamble code emitted by
    revert_language().
    """

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                     }

    # Collect which of the new languages are actually used: the main
    # document language plus any \lang switches in the body.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        # BUGFIX: the original added document.language here instead of
        # the language actually found on this body line.
        body_lang = document.body[i][6:].strip()
        if body_lang in new_languages:
            used_languages.add(body_lang)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")

    for lang in used_languages:
        # BUGFIX: the original called the undefined name `revert(...)`;
        # the helper is revert_language(document, lyxname, babel, polyglossia).
        revert_language(document, lang, *new_languages[lang])
# Visible portion of the `gloss_inset_def` local-layout definition (the
# opening `gloss_inset_def = [` line and some entries are elided in this
# dump). It re-creates the deprecated two-line "Glosse" flex inset for
# documents reverted by convert_linggloss/revert_linggloss below.
1709 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1710 r'InsetLayout Flex:Glosse',
1712 r' LabelString "Gloss (old version)"',
1713 r' MenuString "Gloss (old version)"',
1714 r' LatexType environment',
1715 r' LatexName linggloss',
1716 r' Decoration minimalistic',
1721 r' CustomPars false',
1722 r' ForcePlain true',
1723 r' ParbreakIsNewline true',
1724 r' FreeSpacing true',
1725 r' Requires covington',
# Preamble: define the linggloss environment unless covington provides it.
1728 r' \@ifundefined{linggloss}{%',
1729 r' \newenvironment{linggloss}[2][]{',
1730 r' \def\glosstr{\glt #1}%',
1732 r' {\glosstr\glend}}{}',
1735 r' ResetsFont true',
1737 r' Decoration conglomerate',
1738 r' LabelString "Translation"',
1739 r' MenuString "Glosse Translation|s"',
1740 r' Tooltip "Add a translation for the glosse"',
# Local-layout definition for the deprecated three-line "Tri-Glosse" flex
# inset (the closing bracket of the list and some entries are elided in
# this dump). Companion to gloss_inset_def.
1745 glosss_inset_def = [
1746 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1747 r'InsetLayout Flex:Tri-Glosse',
1749 r' LabelString "Tri-Gloss (old version)"',
1750 r' MenuString "Tri-Gloss (old version)"',
1751 r' LatexType environment',
1752 r' LatexName lingglosss',
1753 r' Decoration minimalistic',
1758 r' CustomPars false',
1759 r' ForcePlain true',
1760 r' ParbreakIsNewline true',
1761 r' FreeSpacing true',
1763 r' Requires covington',
# Preamble: define the lingglosss environment unless covington provides it.
1766 r' \@ifundefined{lingglosss}{%',
1767 r' \newenvironment{lingglosss}[2][]{',
1768 r' \def\glosstr{\glt #1}%',
1770 r' {\glosstr\glend}}{}',
1772 r' ResetsFont true',
1774 r' Decoration conglomerate',
1775 r' LabelString "Translation"',
1776 r' MenuString "Glosse Translation|s"',
1777 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    " Move old ling glosses to local layout "
    # The deprecated gloss insets are no longer provided by the
    # linguistics module; append the matching legacy definition for each
    # gloss type that actually occurs in the body.
    legacy_defs = (('\\begin_inset Flex Glosse', gloss_inset_def),
                   ('\\begin_inset Flex Tri-Glosse', glosss_inset_def))
    for inset_start, layout_def in legacy_defs:
        if find_token(document.body, inset_start, 0) != -1:
            document.append_local_layout(layout_def)
# Revert the new interlinear gloss insets to raw covington commands
# (\gloss / \trigloss) in ERT.
# NOTE(review): control-flow scaffolding (loop heads, guards, `cmd`
# selection) is elided in this dump; code lines are kept byte-identical.
1789 def revert_linggloss(document):
1790     " Revert to old ling gloss definitions "
1791 if not "linguistics" in document.get_module_list():
# Drop the legacy local-layout definitions that convert_linggloss added.
1793 document.del_local_layout(gloss_inset_def)
1794 document.del_local_layout(glosss_inset_def)
1797 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1798 for glosse in glosses:
1801 i = find_token(document.body, glosse, i+1)
1804 j = find_end_of_inset(document.body, i)
1806 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (gloss translation header).
1809 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1810 endarg = find_end_of_inset(document.body, arg)
1813 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1814 if argbeginPlain == -1:
1815 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1817 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1818 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1820 # remove Arg insets and paragraph, if it only contains this inset
1821 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1822 del document.body[arg - 1 : endarg + 4]
1824 del document.body[arg : endarg + 1]
# Mandatory argument post:1 (first gloss line).
1826 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1827 endarg = find_end_of_inset(document.body, arg)
1830 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1831 if argbeginPlain == -1:
1832 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1834 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1835 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1837 # remove Arg insets and paragraph, if it only contains this inset
1838 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1839 del document.body[arg - 1 : endarg + 4]
1841 del document.body[arg : endarg + 1]
# Mandatory argument post:2 (second gloss line).
1843 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1844 endarg = find_end_of_inset(document.body, arg)
1847 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1848 if argbeginPlain == -1:
1849 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1851 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1852 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1854 # remove Arg insets and paragraph, if it only contains this inset
1855 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1856 del document.body[arg - 1 : endarg + 4]
1858 del document.body[arg : endarg + 1]
# Mandatory argument post:3 (third gloss line, 3-line glosses only).
1860 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
1861 endarg = find_end_of_inset(document.body, arg)
1864 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1865 if argbeginPlain == -1:
1866 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
1868 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1869 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
1871 # remove Arg insets and paragraph, if it only contains this inset
1872 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1873 del document.body[arg - 1 : endarg + 4]
1875 del document.body[arg : endarg + 1]
# Choose \gloss or \trigloss depending on the inset variant (selection
# lines partly elided here).
1878 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
1881 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1882 endInset = find_end_of_inset(document.body, i)
1883 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
# Assemble ERT: \cmd[opt]{content}{arg1}{arg2}[{arg3}].
1884 precontent = put_cmd_in_ert(cmd)
1885 if len(optargcontent) > 0:
1886 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
1887 precontent += put_cmd_in_ert("{")
1889 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
1890 if cmd == "\\trigloss":
1891 postcontent += put_cmd_in_ert("}{") + marg3content
1892 postcontent += put_cmd_in_ert("}")
1894 document.body[endPlain:endInset + 1] = postcontent
1895 document.body[beginPlain + 1:beginPlain] = precontent
1896 del document.body[i : beginPlain + 1]
# The emitted commands rely on covington.sty.
1898 document.append_local_layout("Requires covington")
# Revert Subexample paragraphs carrying an Argument inset to a raw
# covington \begin{subexamples}[opt] ... \end{subexamples} block in ERT.
# NOTE(review): loop scaffolding and several guards are elided in this
# dump; code lines are kept byte-identical.
1903 def revert_subexarg(document):
1904     " Revert linguistic subexamples with argument to ERT "
1906 if not "linguistics" in document.get_module_list():
1912 i = find_token(document.body, "\\begin_layout Subexample", i+1)
1915 j = find_end_of_layout(document.body, i)
1917 document.warning("Malformed LyX document: Can't find end of Subexample layout")
1920 # check for consecutive layouts
1921 k = find_token(document.body, "\\begin_layout", j)
1922 if k == -1 or document.body[k] != "\\begin_layout Subexample":
1924 j = find_end_of_layout(document.body, k)
1926 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument content as LaTeX.
1929 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1933 endarg = find_end_of_inset(document.body, arg)
1935 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1936 if argbeginPlain == -1:
1937 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1939 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1940 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
1942 # remove Arg insets and paragraph, if it only contains this inset
1943 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1944 del document.body[arg - 1 : endarg + 4]
1946 del document.body[arg : endarg + 1]
1948 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
1950 # re-find end of layout
1951 j = find_end_of_layout(document.body, i)
1953 document.warning("Malformed LyX document: Can't find end of Subexample layout")
1956 # check for consecutive layouts
1957 k = find_token(document.body, "\\begin_layout", j)
1958 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample paragraph becomes a Standard paragraph
# starting with an ERT \item.
1960 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
1961 j = find_end_of_layout(document.body, k)
1963 document.warning("Malformed LyX document: Can't find end of Subexample layout")
1966 endev = put_cmd_in_ert("\\end{subexamples}")
1968 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
1969 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
1970 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# The subexamples environment is provided by covington.sty.
1972 document.append_local_layout("Requires covington")
1976 def revert_drs(document):
# NOTE(review): sampled extraction — embedded original line numbers, elided
# control-flow lines (loop headers, -1 guards, else branches, cmd assignments
# after each elif). Code left byte-identical; comments only.
1977 " Revert DRS insets (linguistics) to ERT "
1979 if not "linguistics" in document.get_module_list():
# All Flex DRS inset variants handled by this reversion.
1983 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
1984 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
1985 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
1986 "\\begin_inset Flex SDRS"]
# For each DRS variant, find every occurrence and replace the inset with the
# corresponding drs-package LaTeX command in ERT.
1990 i = find_token(document.body, drs, i+1)
1993 j = find_end_of_inset(document.body, i)
1995 document.warning("Malformed LyX document: Can't find end of DRS inset")
# The following six stanzas share one pattern: locate Argument N inside the
# inset, copy its Plain Layout content, then delete the Argument inset
# (together with its wrapping paragraph when it was the sole content).
# After each deletion, j is recomputed since indices shifted.
1998 # Check for arguments
1999 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2000 endarg = find_end_of_inset(document.body, arg)
2003 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2004 if argbeginPlain == -1:
2005 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2007 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2008 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2010 # remove Arg insets and paragraph, if it only contains this inset
2011 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2012 del document.body[arg - 1 : endarg + 4]
2014 del document.body[arg : endarg + 1]
2017 j = find_end_of_inset(document.body, i)
2019 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2 (second pre-argument, used by SDRS below).
2022 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2023 endarg = find_end_of_inset(document.body, arg)
2026 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2027 if argbeginPlain == -1:
2028 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2030 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2031 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2033 # remove Arg insets and paragraph, if it only contains this inset
2034 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2035 del document.body[arg - 1 : endarg + 4]
2037 del document.body[arg : endarg + 1]
2040 j = find_end_of_inset(document.body, i)
2042 document.warning("Malformed LyX document: Can't find end of DRS inset")
# post:1 .. post:4 are trailing arguments; each defaults to [] when absent.
2045 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2046 endarg = find_end_of_inset(document.body, arg)
2047 postarg1content = []
2049 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2050 if argbeginPlain == -1:
2051 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2053 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2054 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2056 # remove Arg insets and paragraph, if it only contains this inset
2057 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2058 del document.body[arg - 1 : endarg + 4]
2060 del document.body[arg : endarg + 1]
2063 j = find_end_of_inset(document.body, i)
2065 document.warning("Malformed LyX document: Can't find end of DRS inset")
2068 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2069 endarg = find_end_of_inset(document.body, arg)
2070 postarg2content = []
2072 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2073 if argbeginPlain == -1:
2074 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2076 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2077 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2079 # remove Arg insets and paragraph, if it only contains this inset
2080 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2081 del document.body[arg - 1 : endarg + 4]
2083 del document.body[arg : endarg + 1]
2086 j = find_end_of_inset(document.body, i)
2088 document.warning("Malformed LyX document: Can't find end of DRS inset")
2091 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2092 endarg = find_end_of_inset(document.body, arg)
2093 postarg3content = []
2095 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2096 if argbeginPlain == -1:
2097 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2099 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2100 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2102 # remove Arg insets and paragraph, if it only contains this inset
2103 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2104 del document.body[arg - 1 : endarg + 4]
2106 del document.body[arg : endarg + 1]
2109 j = find_end_of_inset(document.body, i)
2111 document.warning("Malformed LyX document: Can't find end of DRS inset")
2114 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2115 endarg = find_end_of_inset(document.body, arg)
2116 postarg4content = []
2118 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2119 if argbeginPlain == -1:
2120 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2122 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2123 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2125 # remove Arg insets and paragraph, if it only contains this inset
2126 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2127 del document.body[arg - 1 : endarg + 4]
2129 del document.body[arg : endarg + 1]
# Map the inset variant to its LaTeX command (the `cmd = ...` assignments
# after each branch are among the elided lines; the later comparisons against
# "\\qdrs"/"\\condrs"/"\\ifdrs" show the expected values — TODO confirm).
2131 # The respective LaTeX command
2133 if drs == "\\begin_inset Flex DRS*":
2135 elif drs == "\\begin_inset Flex IfThen-DRS":
2137 elif drs == "\\begin_inset Flex Cond-DRS":
2139 elif drs == "\\begin_inset Flex QDRS":
2141 elif drs == "\\begin_inset Flex NegDRS":
2143 elif drs == "\\begin_inset Flex SDRS":
# Assemble the ERT replacement: command + brace-wrapped pre-arguments before
# the inset body, and the post-arguments after it.
2146 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2147 endInset = find_end_of_inset(document.body, i)
2148 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2149 precontent = put_cmd_in_ert(cmd)
2150 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# SDRS takes a second pre-argument.
2151 if drs == "\\begin_inset Flex SDRS":
2152 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2153 precontent += put_cmd_in_ert("{")
# qdrs/condrs/ifdrs take extra trailing argument groups; qdrs and condrs
# additionally consume post:3 (and, per the elided branch, post:4).
2156 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2157 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2158 if cmd == "\\condrs" or cmd == "\\qdrs":
2159 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2161 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2163 postcontent = put_cmd_in_ert("}")
# Splice: replace the inset tail with postcontent, inject precontent after
# the opening Plain Layout, then drop the inset header lines.
2165 document.body[endPlain:endInset + 1] = postcontent
2166 document.body[beginPlain + 1:beginPlain] = precontent
2167 del document.body[i : beginPlain + 1]
# The generated LaTeX needs the drs and covington packages.
2169 document.append_local_layout("Provides covington 1")
2170 add_to_preamble(document, ["\\usepackage{drs,covington}"])
2176 def revert_babelfont(document):
# NOTE(review): sampled extraction — embedded original line numbers, elided
# guard/return/else lines. Code left byte-identical; comments only.
2177 " Reverts the use of \\babelfont to user preamble "
# Applies only to documents using non-TeX fonts with the babel language
# package; otherwise the (elided) early returns fire.
2179 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2181 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2183 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2185 i = find_token(document.header, '\\language_package', 0)
2187 document.warning("Malformed LyX document: Missing \\language_package.")
2189 if get_value(document.header, "\\language_package", 0) != "babel":
# Read the roman/sans/typewriter font names out of the header, resetting
# each header entry to "default" (the real font moves to the preamble).
2192 # check font settings
2194 roman = sans = typew = "default"
2196 sf_scale = tt_scale = 100.0
2198 j = find_token(document.header, "\\font_roman", 0)
2200 document.warning("Malformed LyX document: Missing \\font_roman.")
2202 # We need to use this regex since split() does not handle quote protection
2203 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2204 roman = romanfont[2].strip('"')
2205 romanfont[2] = '"default"'
2206 document.header[j] = " ".join(romanfont)
2208 j = find_token(document.header, "\\font_sans", 0)
2210 document.warning("Malformed LyX document: Missing \\font_sans.")
2212 # We need to use this regex since split() does not handle quote protection
2213 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2214 sans = sansfont[2].strip('"')
2215 sansfont[2] = '"default"'
2216 document.header[j] = " ".join(sansfont)
2218 j = find_token(document.header, "\\font_typewriter", 0)
2220 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2222 # We need to use this regex since split() does not handle quote protection
2223 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2224 typew = ttfont[2].strip('"')
2225 ttfont[2] = '"default"'
2226 document.header[j] = " ".join(ttfont)
# Old-style-figures flag and the sans/typewriter scale percentages.
2228 i = find_token(document.header, "\\font_osf", 0)
2230 document.warning("Malformed LyX document: Missing \\font_osf.")
2232 osf = str2bool(get_value(document.header, "\\font_osf", i))
2234 j = find_token(document.header, "\\font_sf_scale", 0)
2236 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2238 sfscale = document.header[j].split()
2241 document.header[j] = " ".join(sfscale)
2244 sf_scale = float(val)
2246 document.warning("Invalid font_sf_scale value: " + val)
2248 j = find_token(document.header, "\\font_tt_scale", 0)
2250 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2252 ttscale = document.header[j].split()
2255 document.header[j] = " ".join(ttscale)
2258 tt_scale = float(val)
2260 document.warning("Invalid font_tt_scale value: " + val)
# Re-emit the collected settings as \babelfont preamble code; the scales
# are converted from percent to a factor for the Scale= option.
2262 # set preamble stuff
2263 pretext = ['%% This document must be processed with xelatex or lualatex!']
2264 pretext.append('\\AtBeginDocument{%')
2265 if roman != "default":
2266 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2267 if sans != "default":
2268 sf = '\\babelfont{sf}['
2269 if sf_scale != 100.0:
2270 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2271 sf += 'Mapping=tex-text]{' + sans + '}'
2273 if typew != "default":
2274 tw = '\\babelfont{tt}'
2275 if tt_scale != 100.0:
2276 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2277 tw += '{' + typew + '}'
2280 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2282 insert_to_preamble(document, pretext)
# LyX versions whose documents this converter targets.
2290 supported_versions = ["2.4.0", "2.4"]
# Forward-conversion table: each entry pairs a target file-format number with
# the list of convert functions that produce it. (The `convert = [` opener
# and many entries sit on lines elided from this extraction — TODO confirm.)
2292 [545, [convert_lst_literalparam]],
2297 [550, [convert_fontenc]],
2304 [557, [convert_vcsinfo]],
2305 [558, [removeFrontMatterStyles]],
2308 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
2312 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
2313 [566, [convert_hebrew_parentheses]],
2319 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
2320 [573, [convert_inputencoding_namechange]],
2321 [574, [convert_ruby_module, convert_utf8_japanese]],
2322 [575, [convert_lineno]],
2324 [577, [convert_linggloss]],
# Backward-conversion table, walked from the newest format down: each entry
# pairs the format number reverted *to* with the revert functions applied.
2329 revert = [[578, [revert_babelfont]],
2330 [577, [revert_drs]],
2331 [576, [revert_linggloss, revert_subexarg]],
2332 [575, [revert_new_languages]],
2333 [574, [revert_lineno]],
2334 [573, [revert_ruby_module, revert_utf8_japanese]],
2335 [572, [revert_inputencoding_namechange]],
2336 [571, [revert_notoFonts]],
2337 [570, [revert_cmidruletrimming]],
2338 [569, [revert_bibfileencodings]],
2339 [568, [revert_tablestyle]],
2340 [567, [revert_soul]],
2341 [566, [revert_malayalam]],
2342 [565, [revert_hebrew_parentheses]],
2343 [564, [revert_AdobeFonts]],
2344 [563, [revert_lformatinfo]],
2345 [562, [revert_listpargs]],
2346 [561, [revert_l7ninfo]],
2347 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
2348 [559, [revert_timeinfo, revert_namenoextinfo]],
2349 [558, [revert_dateinfo]],
2350 [557, [addFrontMatterStyles]],
2351 [556, [revert_vcsinfo]],
2352 [555, [revert_bibencoding]],
2353 [554, [revert_vcolumns]],
2354 [553, [revert_stretchcolumn]],
2355 [552, [revert_tuftecite]],
2356 [551, [revert_floatpclass, revert_floatalignment]],
2357 [550, [revert_nospellcheck]],
2358 [549, [revert_fontenc]],
2359 [548, []],# dummy format change
2360 [547, [revert_lscape]],
2361 [546, [revert_xcharter]],
2362 [545, [revert_paratype]],
2363 [544, [revert_lst_literalparam]]
2367 if __name__ == "__main__":