1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, find_end_of_inset, find_end_of_layout,
30 find_token, find_re, get_bool_value, get_containing_layout,
31 get_option_value, get_value, get_quoted_value)
32 # del_token, del_value, del_complete_lines,
33 # find_complete_lines, find_end_of,
34 # find_re, find_substring, find_token_backwards,
35 # get_containing_inset,
36 # is_in_inset, set_bool_value
37 # find_tokens, find_token_exact, check_token
39 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble)
40 # revert_font_attrs, insert_to_preamble, latex_length
41 # get_ert, lyx2latex, lyx2verbatim, length_in_bp, convert_info_insets
42 # revert_flex_inset, hex2ratio, str2bool
44 ####################################################################
45 # Private helper functions
def convert_fonts(document, font_list, font_type, scale_type):
    """Convert a preamble \\usepackage-based font setup into native LyX
    header settings.

    Parameters:
      document   -- lyx2lyx document object (header/preamble lists)
      font_list  -- LaTeX package names that map to this font family
      font_type  -- header token to set, e.g. "\\font_roman"
      scale_type -- "sf"/"tt" for families with a scale header, or None

    NOTE(review): this chunked view elides guard/bookkeeping lines
    (e.g. "if i == -1: break", "i += 1", else-branches); the visible
    statements below are kept verbatim without the original nesting.
    """
    # Matches \usepackage[scaled=<factor>]{<package>}; group 2 is the
    # optional scale factor, group 3 the package name.
    rpkg = re.compile(r'^\\usepackage(\[scaled=([^\]]*)\])?\{([^\}]+)\}')
    if scale_type == None:  # NOTE(review): "scale_type is None" is idiomatic
    fontscale = "\\font_" + scale_type + "_scale"
    while i < len(document.preamble):
    i = find_re(document.preamble, rpkg, i)
    mo = rpkg.search(document.preamble[i])
    if not pkg in font_list:  # NOTE(review): prefer "pkg not in font_list"
    # Remove the \usepackage line, plus the marker comment lyx2lyx
    # added when it generated the line itself.
    del document.preamble[i]
    if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
    del document.preamble[i-1]
    # Transfer a [scaled=...] option into the \font_*_scale header value.
    j = find_token(document.header, fontscale, 0)
    val = get_value(document.header, fontscale, j)
    scale = "%03d" % int(float(option) * 100)
    document.header[j] = fontscale + " " + scale + " " + vals[1]
    # Point the font_type header at the package name.
    j = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, j)
    document.header[j] = ft + ' "' + pkg + '" ' + vals[1]
def revert_fonts(document, font_list):
    """Revert native LyX font settings for the packages in font_list to an
    explicit \\usepackage line in the preamble (TeX-fonts documents only).

    NOTE(review): this chunked view elides guard/bookkeeping lines; the
    visible statements are kept verbatim without the original nesting.
    """
    # Only act when the document uses TeX fonts.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
    # Each entry is "headertoken[,scaleabbrev]"; sans/typewriter carry
    # an additional \font_<abbrev>_scale header.
    font_types = ["\\font_roman", "\\font_sans,sf", "\\font_typewriter,tt"]
    for ft1 in font_types:
    i = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, i)
    val = words[0].replace('"', '')
    # Reset the header entry to the default font.
    document.header[i] = ft + ' "default" ' + words[1]
    # Turn a scale value != 100 into a [scaled=<f>] package option.
    xval = get_value(document.header, "\\font_" + fts[1] + "_scale", 0)
    xoption = "[scaled=" + format(float(xval) / 100, '.2f') + "]"
    preamble = "\\usepackage" + xoption + "{%s}" % val
    add_to_preamble(document, [preamble])
110 ###############################################################################
112 ### Conversion and reversion routines
114 ###############################################################################
def convert_dejavu(document):
    """Convert DejaVu LaTeX font packages to native LyX font settings.

    Delegates to convert_fonts once per font family.  The sans and
    typewriter families carry a scale header ("sf"/"tt"); roman does not.
    """
    family_table = (
        (['DejaVuSerif', 'DejaVuSerifCondensed'], "\\font_roman", None),
        (['DejaVuSans','DejaVuSansCondensed'], "\\font_sans", "sf"),
        (['DejaVuSansMono'], "\\font_typewriter", "tt"),
    )
    for packages, header_token, scale_abbrev in family_table:
        convert_fonts(document, packages, header_token, scale_abbrev)
def revert_dejavu(document):
    """Revert native DejaVu font settings to LaTeX \\usepackage code.

    Hands the full list of DejaVu package names to revert_fonts, which
    rewrites the header and emits the preamble lines.
    """
    dejavu_packages = ['DejaVuSerif', 'DejaVuSerifCondensed', 'DejaVuSans',
                       'DejaVuSansMono', 'DejaVuSansCondensed']
    revert_fonts(document, dejavu_packages)
def removeFrontMatterStyles(document):
    """Remove BeginFrontmatter/EndFrontmatter layouts from the body,
    collapsing surrounding blank lines into a single empty line.

    NOTE(review): loop guards and index bookkeeping are elided in this
    view; visible statements are kept verbatim without original nesting.
    """
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    for layout in layouts:
    i = find_token(document.body, '\\begin_layout ' + layout, i)
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Extend the deletion span over blank lines before and after.
    while i > 0 and document.body[i-1].strip() == '':
    while document.body[j+1].strip() == '':
    # Replace the whole layout (plus swallowed blanks) by one empty line.
    document.body[i:j+1] = ['']
def addFrontMatterStyles(document):
    """Wrap the elsarticle front-matter layouts in Begin/EndFrontmatter
    styles (elsarticle documents only).

    NOTE(review): several lines of the inserted-layout list and the loop
    guards are elided in this view; visible statements are kept verbatim.
    """
    # Helper: insert a (prefix)Frontmatter layout around the given line,
    # normalizing the blank lines on either side first.
    def insertFrontmatter(prefix, line):
    while above > 0 and document.body[above-1].strip() == '':
    while document.body[below].strip() == '':
    document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
        '\\begin_inset Note Note',
        '\\begin_layout Plain Layout',
        '\\end_inset', '', '',
    if document.textclass == "elsarticle":
    # Layouts that belong to the front matter in elsarticle.
    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
        'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    for layout in layouts:
    i = find_token(document.body, '\\begin_layout ' + layout, i)
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Track the first and last front-matter layout seen.
    if first == -1 or i < first:
    if last == -1 or last <= k:
    # Insert End before Begin so the earlier index stays valid.
    insertFrontmatter('End', last)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    """Add a 'literal "true"' parameter to every include inset.

    NOTE(review): loop guards are elided in this view; visible statements
    are kept verbatim without original nesting.
    """
    i = find_token(document.body, '\\begin_inset CommandInset include', i)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
    # Skip over the inset's existing parameter lines, then insert the
    # new parameter before the first blank line.
    while i < j and document.body[i].strip() != '':
    document.body.insert(i, "literal \"true\"")
def revert_lst_literalparam(document):
    """Remove the 'literal' parameter from include insets.

    NOTE(review): loop guards and the deletion statement are elided in
    this view; visible statements are kept verbatim.
    """
    i = find_token(document.body, '\\begin_inset CommandInset include', i)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
    # Locate the 'literal' parameter line inside the inset.
    k = find_token(document.body, 'literal', i, j)
def revert_paratype(document):
    """Revert ParaType font definitions to explicit LaTeX preamble code
    (TeX-fonts documents only).

    NOTE(review): guard/else lines are elided in this view; visible
    statements are kept verbatim without original nesting.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
    # Detect which of the three ParaType families are in use.
    i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
    i2 = find_token(document.header, "\\font_sans \"default\"", 0)
    i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
    j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
    # Sans scale != 100 becomes a [scaled=<f>] option.
    sfval = get_value(document.header, "\\font_sf_scale", 0)
    sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
    k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
    # Typewriter scale != 100 becomes a [scaled=<f>] option.
    ttval = get_value(document.header, "\\font_tt_scale", 0)
    ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
    # All three families ParaType -> load the umbrella package.
    if i1 != -1 and i2 != -1 and i3!= -1:
    add_to_preamble(document, ["\\usepackage{paratype}"])
    # Otherwise load per-family packages and reset each header entry.
    add_to_preamble(document, ["\\usepackage{PTSerif}"])
    document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
    add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
    add_to_preamble(document, ["\\usepackage{PTSans}"])
    document.header[j] = document.header[j].replace("PTSans-TLF", "default")
    add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
    add_to_preamble(document, ["\\usepackage{PTMono}"])
    document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    """Revert XCharter font definitions to LaTeX preamble code.

    NOTE(review): guard lines and the construction of `options` are
    elided in this view; visible statements are kept verbatim.
    """
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    # NOTE(review): this find_token call passes no start index — confirm
    # parser_tools.find_token defaults start to 0 in this tree.
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    """Revert the Landscape-module Flex insets to raw TeX
    \\begin{landscape}...\\end{landscape} code.

    NOTE(review): loop guards and the else keyword between the two
    variants are elided in this view; visible statements kept verbatim.
    """
    # Only applies when the landscape module is loaded.
    if not "landscape" in document.get_module_list():
    i = find_token(document.body, "\\begin_inset Flex Landscape", i)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    # Floating variant defers the environment with \afterpage{...}.
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
    document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{afterpage}"])
    # Plain variant: direct landscape environment.
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
    document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    """Rename the default \\fontencoding value "global" to "auto".

    NOTE(review): the "not found" guard is elided in this view.
    """
    i = find_token(document.header, "\\fontencoding global", 0)
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Rename the default \\fontencoding value "auto" back to "global".

    NOTE(review): the "not found" guard is elided in this view.
    """
    i = find_token(document.header, "\\fontencoding auto", 0)
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Strip the \\nospellcheck font-info parameter from the body.

    NOTE(review): the loop, its exit guard, and the deletion statement
    are elided in this view.
    """
    i = find_token(document.body, '\\nospellcheck', i)
def revert_floatpclass(document):
    """Remove the float placement values 'class' and 'document' from the
    header and from every Float inset.

    NOTE(review): loop guards and the per-inset deletions are elided in
    this view; visible statements kept verbatim.
    """
    # Drop the header-level setting.
    i = find_token(document.header, "\\float_placement class", 0)
    del document.header[i]
    # Walk all Float insets and look for the two placement values in the
    # first two lines of each inset.
    i = find_token(document.body, '\\begin_inset Float', i)
    j = find_end_of_inset(document.body, i)
    k = find_token(document.body, 'placement class', i, i + 2)
    k = find_token(document.body, 'placement document', i, i + 2)
def revert_floatalignment(document):
    """Remove float alignment parameters, emitting equivalent ERT
    (\\raggedright/\\centering/\\raggedleft) inside each float.

    NOTE(review): loop guards, increments, and several branch lines are
    elided in this view; visible statements kept verbatim.
    """
    # Global (document-level) alignment default from the header.
    i = find_token(document.header, "\\float_alignment", 0)
    galignment = get_value(document.header, "\\float_alignment", i)
    del document.header[i]
    i = find_token(document.body, '\\begin_inset Float', i)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    # Per-float alignment parameter within the inset's first lines.
    k = find_token(document.body, 'alignment', i, i + 4)
    alignment = get_value(document.body, "alignment", k)
    # "document" means: inherit the global default.
    if alignment == "document":
    alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    # Map the alignment keyword to the corresponding LaTeX command.
    if alignment == "left":
    alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
    alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
    alcmd = put_cmd_in_ert("\\raggedleft{}")
    # Insert the ERT right after the float's Plain Layout line.
    document.body[l+1:l+1] = alcmd
def revert_tuftecite(document):
    """Revert \\cite commands in tufte classes to ERT.

    NOTE(review): loop guards, increments, and the res initialisation are
    elided in this view; visible statements kept verbatim.  (Docstring
    rewritten: the original single-quoted "\\cite" contained an invalid
    escape sequence.)
    """
    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    i = find_token(document.body, "\\begin_inset CommandInset citation", i)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" %(i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
    cmd = get_value(document.body, "LatexCommand", k)
    # Collect pre/post notes and the citation key.
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" %(i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    """Strip varwidth flags from tabular columns (tabularx/xltabular
    tables become plain tables on reversion).

    NOTE(review): loop guards/increments are elided in this view.
    """
    i = find_token(document.body, "\\begin_inset Tabular", i)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Scan every line of the tabular for a varwidth column attribute.
    # NOTE(review): the pattern string is not raw; its escapes happen to
    # be benign here, but r'...' would be safer.
    for k in range(i, j):
    if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
    document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
    document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    """Revert standard tabular columns that contain line breaks or
    multiple paragraphs to varwidth (V) columns, converting embedded
    newline insets to ERT.

    NOTE(review): many guard/bookkeeping lines (flag initialisation,
    increments, break conditions) are elided in this view; visible
    statements kept verbatim without original nesting.
    """
    i = find_token(document.body, "\\begin_inset Tabular", i)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    # Row/column counts come from the <lyxtabular ...> attribute line.
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
    m = find_token(document.body, "<column", m)
    width = get_option_value(document.body[m], 'width')
    varwidth = get_option_value(document.body[m], 'varwidth')
    alignment = get_option_value(document.body[m], 'alignment')
    special = get_option_value(document.body[m], 'special')
    col_info.append([width, varwidth, alignment, special, m])
    # Walk every cell and decide whether its column must become V{}.
    for row in range(nrows):
    for col in range(ncols):
    m = find_token(document.body, "<cell", m)
    multicolumn = get_option_value(document.body[m], 'multicolumn')
    multirow = get_option_value(document.body[m], 'multirow')
    width = get_option_value(document.body[m], 'width')
    rotate = get_option_value(document.body[m], 'rotate')
    # Check for: linebreaks, multipars, non-standard environments
    endcell = find_token(document.body, "</cell>", begcell)
    if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
    elif count_pars_in_inset(document.body, begcell + 2) > 1:
    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
    # Candidate cells only qualify in plain (unrotated, non-multi) columns.
    if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
    if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
    alignment = col_info[col][2]
    col_line = col_info[col][4]
    # Build the >{...}V{\linewidth} special column preamble.
    if alignment == "center":
    vval = ">{\\centering}"
    elif alignment == "left":
    vval = ">{\\raggedright}"
    elif alignment == "right":
    vval = ">{\\raggedleft}"
    vval += "V{\\linewidth}"
    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
    # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
    # with newlines, and we do not want that)
    endcell = find_token(document.body, "</cell>", begcell)
    nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
    nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
    nle = find_end_of_inset(document.body, nl)
    # NOTE(review): del is a statement — "del document.body[...]" reads
    # better than the call-style parentheses.
    del(document.body[nle:nle+1])
    document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
    document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
    # Load the packages required by the generated column preamble.
    if needarray == True:
    add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
    add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    """Revert per-bibliography encoding: for biblatex add a bibencoding
    option, for BibTeX wrap the inset in \\bgroup\\inputencoding{...}
    ... \\egroup ERT.

    NOTE(review): many guard lines and part of the encodings table are
    elided in this view; visible statements kept verbatim.
    """
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # biblatex engines take the encoding as a package option instead of ERT.
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "utf8-platex" : "utf8",
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # biblatex: append bibencoding to existing \biblio_options, or create it.
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
    document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # BibTeX: wrap the inset in an \inputencoding group via ERT.
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    """Split VCS-related Info insets ("buffer" type with vcs-* args) into
    the dedicated "vcs" Info inset type.

    NOTE(review): loop guards/increments and part of the types mapping
    are elided in this view; visible statements kept verbatim.
    """
    # Maps old "vcs-*" buffer args to the new vcs-type arg names.
    "vcs-revision" : "revision",
    "vcs-tree-revision" : "tree-revision",
    "vcs-author" : "author",
    i = find_token(document.body, "\\begin_inset Info", i)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # Skip insets whose arg is not one of the known vcs-* values.
    if argv not in list(types.keys()):
    # Rewrite the inset in place: new type and translated arg.
    document.body[tp] = "type \"vcs\""
    document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    """Merge "vcs" Info insets back into "buffer" Info insets with a
    vcs- prefixed argument.

    NOTE(review): loop guards/increments are elided in this view;
    visible statements kept verbatim.
    """
    # Valid argument names of the vcs Info inset type.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = find_token(document.body, "\\begin_inset Info", i)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    document.warning("Malformed Info inset. Invalid vcs arg.")
    # Rewrite as the old buffer-type inset with a vcs- prefix.
    document.body[tp] = "type \"buffer\""
    document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_dateinfo(document):
    """Revert date Info insets to static text, formatted per the document
    language using the strftime tables below.

    NOTE(review): loop guards/increments and several branch lines are
    elided in this view; visible statements kept verbatim without the
    original nesting.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
    "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
    "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
    "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
    "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
    "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
    "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
    "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
    "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
    "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
    "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
    "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
    "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
    "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
    "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
    "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
    "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
    "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
    "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
    "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
    "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
    "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
    "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
    "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
    "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
    "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
    "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
    "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
    "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
    "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
    "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
    "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
    "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
    "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
    "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
    "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
    "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
    "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
    "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
    "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
    "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
    "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
    "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
    # NOTE(review): "%de" below looks like a typo for "de" — strftime
    # would substitute the embedded %d; confirm against upstream.
    "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
    "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
    "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
    "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
    "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
    "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
    "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
    "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
    "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
    "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
    "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
    "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    # The three date inset flavors handled below.
    types = ["date", "fixdate", "moddate" ]
    i = find_token(document.header, "\\language", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\language header found!")
    lang = get_value(document.header, "\\language", i)
    i = find_token(document.body, "\\begin_inset Info", i)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, 'type', i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, 'arg', i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # fixdate args look like "<format>@<ISO date>".
    datecomps = argv.split('@')
    if len(datecomps) > 1:
    isodate = datecomps[1]
    # NOTE(review): pattern is not a raw string; use r'...' for \d escapes.
    m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
    # FIXME if we had the path to the original document (not the one in the tmp dir),
    # we could use the mtime.
    # elif tpv == "moddate":
    #  dte = date.fromtimestamp(os.path.getmtime(document.dir))
    # NOTE(review): datetime.date has no isodate() method — this raises
    # AttributeError at runtime; it should be dte.isoformat().
    result = dte.isodate()
    result = dte.strftime(dateformats[lang][0])
    elif argv == "short":
    result = dte.strftime(dateformats[lang][1])
    elif argv == "loclong":
    result = dte.strftime(dateformats[lang][2])
    elif argv == "locmedium":
    result = dte.strftime(dateformats[lang][3])
    elif argv == "locshort":
    result = dte.strftime(dateformats[lang][4])
    # Translate Qt-style format letters to strftime codes.
    # NOTE(review): "MMMM" maps to "%b" (abbreviated) — full month name
    # would be "%B"; confirm intended behavior.
    fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
    fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
    fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
    # NOTE(review): this sub also consumes the character preceding 'd';
    # and the pattern is not raw — verify against upstream.
    fmt = re.sub('[^\'%]d', '%d', fmt)
    fmt = fmt.replace("'", "")
    result = dte.strftime(fmt)
    # NOTE(review): if result is a str, this slice assignment splices it
    # character-by-character — confirm [result] was intended.
    document.body[i : j+1] = result
929 def revert_timeinfo(document):
930 " Revert time info insets to static text. "
932 # FIXME This currently only considers the main language and uses the system locale
933 # Ideally, it should honor context languages and switch the locale accordingly.
934 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
937 # The time formats for each language using strftime syntax:
940 "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
941 "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
942 "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
943 "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
944 "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
945 "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
946 "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
947 "armenian" : ["%H:%M:%S %Z", "%H:%M"],
948 "asturian" : ["%H:%M:%S %Z", "%H:%M"],
949 "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
950 "austrian" : ["%H:%M:%S %Z", "%H:%M"],
951 "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
952 "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
953 "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
954 "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
955 "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
956 "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
957 "breton" : ["%H:%M:%S %Z", "%H:%M"],
958 "british" : ["%H:%M:%S %Z", "%H:%M"],
959 "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
960 "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
961 "canadien" : ["%H:%M:%S %Z", "%H h %M"],
962 "catalan" : ["%H:%M:%S %Z", "%H:%M"],
963 "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
964 "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
965 "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
966 "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
967 "czech" : ["%H:%M:%S %Z", "%H:%M"],
968 "danish" : ["%H.%M.%S %Z", "%H.%M"],
969 "divehi" : ["%H:%M:%S %Z", "%H:%M"],
970 "dutch" : ["%H:%M:%S %Z", "%H:%M"],
971 "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
972 "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
973 "estonian" : ["%H:%M:%S %Z", "%H:%M"],
974 "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
975 "finnish" : ["%H.%M.%S %Z", "%H.%M"],
976 "french" : ["%H:%M:%S %Z", "%H:%M"],
977 "friulan" : ["%H:%M:%S %Z", "%H:%M"],
978 "galician" : ["%H:%M:%S %Z", "%H:%M"],
979 "georgian" : ["%H:%M:%S %Z", "%H:%M"],
980 "german" : ["%H:%M:%S %Z", "%H:%M"],
981 "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
982 "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
983 "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
984 "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
985 "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
986 "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
987 "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
988 "irish" : ["%H:%M:%S %Z", "%H:%M"],
989 "italian" : ["%H:%M:%S %Z", "%H:%M"],
990 "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
991 "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
992 "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
993 "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
994 "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
995 "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
996 "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
997 "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
998 "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
999 "latvian" : ["%H:%M:%S %Z", "%H:%M"],
1000 "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
1001 "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
1002 "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
1003 "magyar" : ["%H:%M:%S %Z", "%H:%M"],
1004 "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1005 "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
1006 "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
1007 "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1008 "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
1009 "norsk" : ["%H:%M:%S %Z", "%H:%M"],
1010 "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
1011 "occitan" : ["%H:%M:%S %Z", "%H:%M"],
1012 "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
1013 "polish" : ["%H:%M:%S %Z", "%H:%M"],
1014 "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1015 "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
1016 "romanian" : ["%H:%M:%S %Z", "%H:%M"],
1017 "romansh" : ["%H:%M:%S %Z", "%H:%M"],
1018 "russian" : ["%H:%M:%S %Z", "%H:%M"],
1019 "samin" : ["%H:%M:%S %Z", "%H:%M"],
1020 "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
1021 "scottish" : ["%H:%M:%S %Z", "%H:%M"],
1022 "serbian" : ["%H:%M:%S %Z", "%H:%M"],
1023 "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
1024 "slovak" : ["%H:%M:%S %Z", "%H:%M"],
1025 "slovene" : ["%H:%M:%S %Z", "%H:%M"],
1026 "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
1027 "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
1028 "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
1029 "syriac" : ["%H:%M:%S %Z", "%H:%M"],
1030 "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
1031 "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1032 "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1033 "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1034 "turkish" : ["%H:%M:%S %Z", "%H:%M"],
1035 "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
1036 "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
1037 "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
1038 "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
1039 "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
1040 "welsh" : ["%H:%M:%S %Z", "%H:%M"]
1043 types = ["time", "fixtime", "modtime" ]
1045 i = find_token(document.header, "\\language", 0)
1047 # this should not happen
1048 document.warning("Malformed LyX document! No \\language header found!")
1050 lang = get_value(document.header, "\\language", i)
1054 i = find_token(document.body, "\\begin_inset Info", i)
1057 j = find_end_of_inset(document.body, i + 1)
1059 document.warning("Malformed LyX document: Could not find end of Info inset.")
1062 tp = find_token(document.body, 'type', i, j)
1063 tpv = get_quoted_value(document.body, "type", tp)
1064 if tpv not in types:
1067 arg = find_token(document.body, 'arg', i, j)
1068 argv = get_quoted_value(document.body, "arg", arg)
1070 dtme = datetime.now()
1072 if tpv == "fixtime":
1073 timecomps = argv.split('@')
1074 if len(timecomps) > 1:
1076 isotime = timecomps[1]
1077 m = re.search('(\d\d):(\d\d):(\d\d)', isotime)
1079 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1081 m = re.search('(\d\d):(\d\d)', isotime)
1083 tme = time(int(m.group(1)), int(m.group(2)))
1084 # FIXME if we had the path to the original document (not the one in the tmp dir),
1085 # we could use the mtime.
1086 # elif tpv == "moddate":
1087 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1090 result = tme.isoformat()
1091 elif argv == "long":
1092 result = tme.strftime(timeformats[lang][0])
1093 elif argv == "short":
1094 result = tme.strftime(timeformats[lang][1])
1096 fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
1097 fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
1098 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1099 fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
1100 fmt = fmt.replace("'", "")
1101 result = dte.strftime(fmt)
1102 document.body[i : j+1] = result
1106 def revert_namenoextinfo(document):
1107 " Merge buffer Info inset type name-noext to name. "
# NOTE(review): this listing elides lines (gaps in the embedded numbering);
# the usual lyx2lyx scan loop ("i = 0; while True: ...") and the
# "if i == -1 / tp == -1" early-exit guards are not visible here.
# Locate the next Info inset in the document body.
1111 i = find_token(document.body, "\\begin_inset Info", i)
# Find the matching \end_inset; warn on a malformed document.
1114 j = find_end_of_inset(document.body, i + 1)
1116 document.warning("Malformed LyX document: Could not find end of Info inset.")
# Read the inset's "type" parameter (an elided guard presumably filters on
# tpv before the arg is inspected -- TODO confirm against the full source).
1119 tp = find_token(document.body, 'type', i, j)
1120 tpv = get_quoted_value(document.body, "type", tp)
# Read the inset's "arg" parameter; only "name-noext" arguments are touched.
1124 arg = find_token(document.body, 'arg', i, j)
1125 argv = get_quoted_value(document.body, "arg", arg)
1126 if argv != "name-noext":
# Merge: rewrite the argument line so the inset uses the plain "name" arg.
1129 document.body[arg] = "arg \"name\""
1133 def revert_l7ninfo(document):
1134 " Revert l7n Info inset to text. "
# NOTE(review): this listing elides lines (gaps in the embedded numbering);
# the scan loop and the "-1" early-exit guards are not visible here.
# Locate the next Info inset in the document body.
1138 i = find_token(document.body, "\\begin_inset Info", i)
# Find the matching \end_inset; warn on a malformed document.
1141 j = find_end_of_inset(document.body, i + 1)
1143 document.warning("Malformed LyX document: Could not find end of Info inset.")
# Read the inset's "type" parameter (an elided guard presumably filters for
# l7n insets -- TODO confirm against the full source).
1146 tp = find_token(document.body, 'type', i, j)
1147 tpv = get_quoted_value(document.body, "type", tp)
# Read the localized string carried in the "arg" parameter.
1151 arg = find_token(document.body, 'arg', i, j)
1152 argv = get_quoted_value(document.body, "arg", arg)
1153 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# The "</amp;>" placeholder protects a literal " & " while every other
# single "&" (a Qt accelerator marker) is stripped.
1154 argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
# Replace the whole inset (lines i..j) with the cleaned plain-text string.
1155 document.body[i : j+1] = argv
1159 def revert_listpargs(document):
1160 " Reverts listpreamble arguments to TeX-code "
# NOTE(review): this listing elides lines (gaps in the embedded numbering);
# the scan loop, the "-1" guards, and notably the assignment of `parbeg`
# (used below) are not visible here -- verify against the full source.
# Locate the next listpreamble inset argument.
1163 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i)
# End of the argument inset.
1166 j = find_end_of_inset(document.body, i)
1167 # Find containing paragraph layout
1168 parent = get_containing_layout(document.body, i)
1170 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's contents (the Plain Layout body inside the inset).
1174 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1175 endPlain = find_end_of_layout(document.body, beginPlain)
1176 content = document.body[beginPlain + 1 : endPlain]
# Remove the argument inset itself ...
1177 del document.body[i:j+1]
# ... and re-insert its contents as a brace-wrapped ERT (TeX-code) inset
# at the start of the containing paragraph.
1178 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1179 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1180 document.body[parbeg : parbeg] = subst
# LyX versions whose documents this module produces/accepts.
1188 supported_versions = ["2.4.0", "2.4"]
# NOTE(review): the "convert = [" list opener is elided in this listing
# (gap in the embedded numbering), as are several intermediate entries.
# Each entry maps a target file-format number to the functions that
# upgrade a document to that format.
1190 [545, [convert_lst_literalparam]],
1195 [550, [convert_fontenc]],
1202 [557, [convert_vcsinfo]],
1203 [558, [removeFrontMatterStyles]],
1206 [561, [convert_dejavu]],
# NOTE(review): the "revert = [" list opener is likewise elided here.
# Entries run from the newest format downwards; each maps a format number
# to the functions that downgrade a document from the following format.
1212 [562, [revert_listpargs]],
1213 [561, [revert_l7ninfo]],
1214 [560, [revert_dejavu]],
1215 [559, [revert_timeinfo, revert_namenoextinfo]],
1216 [558, [revert_dateinfo]],
1217 [557, [addFrontMatterStyles]],
1218 [556, [revert_vcsinfo]],
1219 [555, [revert_bibencoding]],
1220 [554, [revert_vcolumns]],
1221 [553, [revert_stretchcolumn]],
1222 [552, [revert_tuftecite]],
1223 [551, [revert_floatpclass, revert_floatalignment]],
1224 [550, [revert_nospellcheck]],
1225 [549, [revert_fontenc]],
1226 [548, []],# dummy format change
1227 [547, [revert_lscape]],
1228 [546, [revert_xcharter]],
1229 [545, [revert_paratype]],
1230 [544, [revert_lst_literalparam]]
1234 if __name__ == "__main__":