1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, find_end_of_inset, find_end_of_layout,
30 find_token, find_re, get_bool_value, get_containing_layout,
31 get_option_value, get_value, get_quoted_value)
32 # del_token, del_value, del_complete_lines,
33 # find_complete_lines, find_end_of,
34 # find_re, find_substring, find_token_backwards,
35 # get_containing_inset,
36 # is_in_inset, set_bool_value
37 # find_tokens, find_token_exact, check_token
39 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble)
40 # revert_font_attrs, insert_to_preamble, latex_length
41 # get_ert, lyx2latex, lyx2verbatim, length_in_bp, convert_info_insets
42 # revert_flex_inset, hex2ratio, str2bool
44 ####################################################################
45 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add the collected font packages, with their options, to the user preamble.

    fontmap: dict mapping LaTeX package name -> list of package option strings
    (filled by revert_fonts()).  An empty option list produces a plain
    \\usepackage{pkg}; otherwise the options are joined with commas.
    """
    # NOTE(review): reconstructed from a mangled paste (embedded line numbers,
    # stripped indentation, elided lines).  The loop header and the else branch
    # were missing and have been restored from the visible control flow.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage" + xoption + "{%s}" % pkg
        add_to_preamble(document, [preamble])
# NOTE(review): this whole chunk is a mangled paste -- each line carries its
# original file line number, indentation is stripped, and gaps in that
# numbering show many lines were elided (map initialisation, the font-entry
# construction, loop bookkeeping).  Left byte-identical; restore this function
# from the upstream lyx2lyx sources before editing.
#
# Purpose (from the visible code): scan document.preamble for
# \usepackage[<options>]{<pkg>} lines matching known fonts, translate them to
# native \font_roman/\font_sans/\font_typewriter header settings (font_type),
# and transfer a scaled=/scale= package option into the matching
# \font_sf_scale / \font_tt_scale header line (scale_type).
59 def convert_fonts(document, font_list, font_type, scale_type, pkg):
60 " Handle font definition to LaTeX "
62 def createkey(pkg, options):
# Key is "pkg:opt1-opt2-..." -- used for both directions of the mapping below.
64 return pkg + ':' + "-".join(options)
66 def getfontname(pkg, options, pkg2fontmap, font2pkgmap):
69 pkgkey = createkey(pkg, options)
71 if not pkgkey in pkg2fontmap:
73 fontname = pkg2fontmap[pkgkey]
74 if not fontname in font2pkgmap:
# Sanity check: the two maps must stay mutually consistent.
75 document.warning("Something is wrong in pkgname+options <-> fontname conversion")
78 pkgkey2 = createkey(font2pkgmap[fontname].package, font2pkgmap[fontname].options)
83 # We need a mapping pkg+options => font_name
84 # and font_name => pkg+options
98 fe.package = font_name
101 font2pkgmap[font_name] = fe
102 pkgkey = createkey(fe.package, fe.options)
103 if pkgkey in pkg2fontmap:
104 # Repeated the same entry? Check content
105 if pkg2fontmap[pkgkey] != font_name:
107 pkg2fontmap[pkgkey] = font_name
108 pkgmap[fe.package] = 1
# Matches "\usepackage[opts]{pkg}"; group(2)=opts, group(3)=pkg.
110 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
111 rscaleopt = re.compile(r'^scaled?=(.*)')
113 if scale_type == None:
116 fontscale = "\\font_" + scale_type + "_scale"
118 while i < len(document.preamble):
119 i = find_re(document.preamble, rpkg, i)
122 mo = rpkg.search(document.preamble[i])
123 if mo == None or mo.group(2) == None:
126 options = mo.group(2).replace(' ', '').split(",")
130 while o < len(options):
131 mo = rscaleopt.search(options[o])
139 if not pkg in pkgmap:
143 fn = getfontname(pkg, options, pkg2fontmap, font2pkgmap)
# Remove the \usepackage line (and the lyx2lyx marker line above it, if any).
147 del document.preamble[i]
148 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
149 del document.preamble[i-1]
150 if fontscale != None:
151 j = find_token(document.header, fontscale, 0)
153 val = get_value(document.header, fontscale, j)
# Scale is stored as an integer percentage, zero-padded to 3 digits.
157 scale = "%03d" % int(float(oscale) * 100)
158 document.header[j] = fontscale + " " + scale + " " + vals[1]
159 j = find_token(document.header, ft, 0)
161 val = get_value(document.header, ft, j)
163 document.header[j] = ft + ' "' + fn + '" ' + vals[1]
# NOTE(review): mangled paste (embedded line numbers, stripped indentation,
# elided lines -- e.g. the loops' headers and several guards are missing).
# Left byte-identical; restore from upstream lyx2lyx before editing.
165 def revert_fonts(document, font_list, fontmap, package=None):
166 " Revert native font definition to LaTeX "
167 # font_list := list of fonts created from the same package
168 # Empty package means that the font-name is the same as the package-name
169 # fontmap (key = package, val += found options) will be filled
170 # and used later in add_preamble_fonts() to be added to user-preamble
# Only relevant for TeX fonts; with non-TeX (system) fonts nothing to revert.
172 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
# Each entry is "header-token[,scale-suffix]", split on ',' below.
173 font_types = ["\\font_roman", "\\font_sans,sf", "\\font_typewriter,tt", "\\font_math,math"]
175 for fontl1 in font_list:
# "FontName,opt1,opt2" -> font name plus its package options.
176 fontl = fontl1.split(",")
178 optmap[font] = fontl[1:]
179 for ft1 in font_types:
182 i = find_token(document.header, ft, 0)
185 val = get_value(document.header, ft, i)
187 font = words[0].replace('"', '')
188 if not font in optmap:
194 if not val in fontmap:
# Reset the header to the default font; the package takes over in LaTeX.
196 document.header[i] = ft + ' "default" ' + words[1]
198 xval = get_value(document.header, "\\font_" + fts[1] + "_scale", 0)
203 # set correct scale option
# DejaVu uses "scaled=", other packages apparently use a different option name
# (the else branch setting scale_par is elided here).
204 if re.match('Deja.*', val):
208 fontmap[val].extend([scale_par + "=" + format(float(xval) / 100, '.2f')])
209 if len(optmap[font]) > 0:
210 fontmap[val].extend(optmap[font])
212 ###############################################################################
214 ### Conversion and reversion routines
216 ###############################################################################
def convert_ibmplex(document):
    """Convert IBM Plex LaTeX font packages to native LyX font settings.

    The elided lines in the pasted source were blank lines only; the code has
    been reformatted with proper indentation.  Each list entry is
    "FontName[,package-option]"; the trailing argument is the LaTeX package
    (plex-serif / plex-sans / plex-mono) that convert_fonts() looks for.
    """
    ibmplex_fonts_roman = ['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                           'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                           'IBMPlexSerifSemibold,semibold']
    ibmplex_fonts_sans = ['IBMPlexSans','IBMPlexSansCondensed,condensed',
                          'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                          'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold']
    ibmplex_fonts_typewriter = ['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                'IBMPlexMonoSemibold,semibold']

    # Roman has no scale header; sans/typewriter use \font_sf_scale / \font_tt_scale.
    convert_fonts(document, ibmplex_fonts_roman, "\\font_roman", None, "plex-serif")
    convert_fonts(document, ibmplex_fonts_sans, "\\font_sans", "sf", "plex-sans")
    convert_fonts(document, ibmplex_fonts_typewriter, "\\font_typewriter", "tt", "plex-mono")
def revert_ibmplex(document):
    """Revert native IBM Plex font settings back to LaTeX package loads.

    Collects the needed packages/options into fontmap via revert_fonts() and
    emits the corresponding \\usepackage lines with add_preamble_fonts().
    The fontmap initialisation was elided in the pasted source and has been
    restored (revert_fonts() fills it, add_preamble_fonts() consumes it).
    """
    fontmap = dict()
    revert_fonts(document, ['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                            'IBMPlexSerifExtraLight,extralight',
                            'IBMPlexSerifLight,light', 'IBMPlexSerifSemibold,semibold'],
                 fontmap, "plex-serif")
    revert_fonts(document, ['IBMPlexSans','IBMPlexSansCondensed,condensed',
                            'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                            'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                 fontmap, "plex-sans")
    revert_fonts(document, ['IBMPlexMono', 'IBMPlexMonoThin,thin',
                            'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                            'IBMPlexMonoSemibold,semibold'],
                 fontmap, "plex-mono")
    add_preamble_fonts(document, fontmap)
def convert_dejavu(document):
    """Convert DejaVu LaTeX font packages to native LyX font settings.

    DejaVu package names equal the font names, hence pkg=None (convert_fonts()
    then uses the font name itself as the package to look for).
    """
    dejavu_fonts_roman = ['DejaVuSerif', 'DejaVuSerifCondensed']
    dejavu_fonts_sans = ['DejaVuSans','DejaVuSansCondensed']
    dejavu_fonts_typewriter = ['DejaVuSansMono']

    convert_fonts(document, dejavu_fonts_roman, "\\font_roman", None, None)
    convert_fonts(document, dejavu_fonts_sans, "\\font_sans", "sf", None)
    convert_fonts(document, dejavu_fonts_typewriter, "\\font_typewriter", "tt", None)
def revert_dejavu(document):
    """Revert native DejaVu font settings back to LaTeX package loads.

    The fontmap initialisation was elided in the pasted source and has been
    restored: revert_fonts() fills it, add_preamble_fonts() writes the
    resulting \\usepackage lines.  No package argument: for DejaVu the
    package name equals the font name.
    """
    dejavu_fonts = ['DejaVuSerif', 'DejaVuSerifCondensed', 'DejaVuSans',
                    'DejaVuSansMono', 'DejaVuSansCondensed']
    fontmap = dict()
    revert_fonts(document, dejavu_fonts, fontmap)
    add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    """Remove the Begin/EndFrontmatter layouts (plus surrounding blank lines).

    Reconstructed from a mangled paste: the loop headers and the
    find/end guards were elided and have been restored from the visible
    control flow.  Each found layout is replaced, together with adjacent
    empty lines, by a single empty line.
    """
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, '\\begin_layout ' + layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
                i += 1
                continue
            # Swallow blank lines before and after the layout, then collapse
            # the whole span to one empty line.
            while i > 0 and document.body[i-1].strip() == '':
                i -= 1
            while document.body[j+1].strip() == '':
                j += 1
            document.body[i:j+1] = ['']
# NOTE(review): mangled paste (embedded line numbers, stripped indentation,
# elided lines).  The body of insertFrontmatter and the first/last tracking
# statements are partially missing.  Left byte-identical; restore from
# upstream lyx2lyx before editing.
#
# Purpose: for elsarticle documents, wrap the title-matter layouts in
# BeginFrontmatter/EndFrontmatter marker layouts (inserted as invisible
# Note insets).
294 def addFrontMatterStyles(document):
295 " Use styles Begin/EndFrontmatter for elsarticle"
296 
297 def insertFrontmatter(prefix, line):
# Skip blank lines around the insertion point so the markers abut the content.
299 while above > 0 and document.body[above-1].strip() == '':
302 while document.body[below].strip() == '':
304 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
305 '\\begin_inset Note Note',
307 '\\begin_layout Plain Layout',
310 '\\end_inset', '', '',
313 if document.textclass == "elsarticle":
# These are the layouts that belong inside the frontmatter block.
314 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
315 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
318 for layout in layouts:
321 i = find_token(document.body, '\\begin_layout ' + layout, i)
324 k = find_end_of_layout(document.body, i)
326 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Track outermost span [first, last] over all frontmatter layouts found.
329 if first == -1 or i < first:
331 if last == -1 or last <= k:
# Insert End before Begin so the earlier line number stays valid.
336 insertFrontmatter('End', last)
337 insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    """Add the 'literal "true"' parameter to every include inset.

    Reconstructed from a mangled paste: the scan loop and its guards were
    elided and have been restored from the visible control flow.  The
    parameter is inserted at the end of the inset's parameter list (the
    first blank line inside the inset).
    """
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
            i += 1
            continue
        # Skip to the blank line terminating the parameter block.
        while i < j and document.body[i].strip() != '':
            i += 1
        document.body.insert(i, "literal \"true\"")
def revert_lst_literalparam(document):
    """Remove the 'literal' parameter from every include inset.

    Reconstructed from a mangled paste: the scan loop, its guards and the
    actual deletion were elided and have been restored from the visible
    control flow.
    """
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
            i += 1
            continue
        k = find_token(document.body, 'literal', i, j)
        if k == -1:
            i += 1
            continue
        del document.body[k]
# NOTE(review): mangled paste (embedded line numbers, stripped indentation,
# elided lines -- several guards and else branches are missing).  Left
# byte-identical; restore from upstream lyx2lyx before editing.
#
# Purpose: replace the native ParaType font headers (PTSerif-TLF /
# PTSans-TLF / PTMono-TLF) with "default" and load the corresponding LaTeX
# packages (paratype when all three are set, else the individual PT* packages,
# with a scaled= option derived from the \font_*_scale headers).
377 def revert_paratype(document):
378 " Revert ParaType font definitions to LaTeX "
379 
380 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
382 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
383 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
384 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
385 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
386 sfval = get_value(document.header, "\\font_sf_scale", 0)
# Scale headers store an integer percentage; the package wants a factor.
391 sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
392 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
393 ttval = get_value(document.header, "\\font_tt_scale", 0)
398 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set -> the umbrella package suffices.
399 if i1 != -1 and i2 != -1 and i3!= -1:
400 add_to_preamble(document, ["\\usepackage{paratype}"])
403 add_to_preamble(document, ["\\usepackage{PTSerif}"])
404 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
407 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
409 add_to_preamble(document, ["\\usepackage{PTSans}"])
410 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
413 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
415 add_to_preamble(document, ["\\usepackage{PTMono}"])
416 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# NOTE(review): mangled paste -- guards and the construction of `options`
# (presumably "[osf]" when \font_osf was true -- TODO confirm) are elided.
# Left byte-identical; restore from upstream lyx2lyx before editing.
419 def revert_xcharter(document):
420 " Revert XCharter font definitions to LaTeX "
421 
422 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
426 # replace unsupported font setting
427 document.header[i] = document.header[i].replace("xcharter", "default")
428 # no need for preamble code with system fonts
429 if get_bool_value(document.header, "\\use_non_tex_fonts"):
432 # transfer old style figures setting to package options
433 j = find_token(document.header, "\\font_osf true")
436 document.header[j] = "\\font_osf false"
440 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# NOTE(review): mangled paste -- the scan loop, its guards and the else
# keyword between the floating and plain variants are elided.  Left
# byte-identical; restore from upstream lyx2lyx before editing.
443 def revert_lscape(document):
444 " Reverts the landscape environment (Landscape module) to TeX-code "
445 
446 if not "landscape" in document.get_module_list():
451 i = find_token(document.body, "\\begin_inset Flex Landscape", i)
454 j = find_end_of_inset(document.body, i)
456 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# Floating variant wraps the environment in \afterpage{...}.
460 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
# Replace inset end first so the start index i stays valid.
461 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
462 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
463 add_to_preamble(document, ["\\usepackage{afterpage}"])
465 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
466 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
468 add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    """Convert the default fontenc setting: 'global' is renamed to 'auto'.

    The not-found guard was elided in the pasted source and has been
    restored; without it the replace would index document.header[-1].
    """
    i = find_token(document.header, "\\fontencoding global", 0)
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert the default fontenc setting: 'auto' is renamed back to 'global'.

    Mirror image of convert_fontenc(); the not-found guard was elided in the
    pasted source and has been restored.
    """
    i = find_token(document.header, "\\fontencoding auto", 0)
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Remove every \\nospellcheck font-info parameter line from the body.

    Reconstructed from a mangled paste: the scan loop, its exit guard and the
    deletion were elided and have been restored from the visible control flow.
    """
    i = 0
    while True:
        i = find_token(document.body, '\\nospellcheck', i)
        if i == -1:
            return
        del document.body[i]
# NOTE(review): mangled paste -- the body loop, guards and the per-inset
# deletions are elided.  Left byte-identical; restore from upstream lyx2lyx
# before editing.
503 def revert_floatpclass(document):
504 " Remove float placement params 'document' and 'class' "
505 
# Drop the header-level "class" placement, then strip per-float
# "placement class"/"placement document" parameter lines.
507 i = find_token(document.header, "\\float_placement class", 0)
509 del document.header[i]
513 i = find_token(document.body, '\\begin_inset Float', i)
516 j = find_end_of_inset(document.body, i)
# Placement parameter sits within the first two lines of the inset.
517 k = find_token(document.body, 'placement class', i, i + 2)
519 k = find_token(document.body, 'placement document', i, i + 2)
# NOTE(review): mangled paste -- loop headers, guards and the deletion of the
# per-float "alignment" line are elided.  Left byte-identical; restore from
# upstream lyx2lyx before editing.
527 def revert_floatalignment(document):
528 " Remove float alignment params "
529 
# Global alignment from the header; per-float "alignment document" falls
# back to it.
531 i = find_token(document.header, "\\float_alignment", 0)
534 galignment = get_value(document.header, "\\float_alignment", i)
535 del document.header[i]
539 i = find_token(document.body, '\\begin_inset Float', i)
542 j = find_end_of_inset(document.body, i)
544 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
546 k = find_token(document.body, 'alignment', i, i + 4)
550 alignment = get_value(document.body, "alignment", k)
551 if alignment == "document":
552 alignment = galignment
554 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
556 document.warning("Can't find float layout!")
# Emit the equivalent alignment macro as ERT at the top of the float content.
560 if alignment == "left":
561 alcmd = put_cmd_in_ert("\\raggedright{}")
562 elif alignment == "center":
563 alcmd = put_cmd_in_ert("\\centering{}")
564 elif alignment == "right":
565 alcmd = put_cmd_in_ert("\\raggedleft{}")
567 document.body[l+1:l+1] = alcmd
# NOTE(review): mangled paste -- the scan loop, guards and the construction
# of `res` (presumably "\\<cmd>" from the LatexCommand value -- TODO confirm)
# are elided.  Left byte-identical; restore from upstream lyx2lyx before
# editing.
571 def revert_tuftecite(document):
572 " Revert \cite commands in tufte classes "
573 
574 tufte = ["tufte-book", "tufte-handout"]
575 if document.textclass not in tufte:
580 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
583 j = find_end_of_inset(document.body, i)
585 document.warning("Can't find end of citation inset at line %d!!" %(i))
588 k = find_token(document.body, "LatexCommand", i, j)
590 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
593 cmd = get_value(document.body, "LatexCommand", k)
597 pre = get_quoted_value(document.body, "before", i, j)
598 post = get_quoted_value(document.body, "after", i, j)
599 key = get_quoted_value(document.body, "key", i, j)
601 document.warning("Citation inset at line %d does not have a key!" %(i))
603 # Replace command with ERT
# Optional arguments in \cite[pre][post]{key} order.
606 res += "[" + pre + "]"
608 res += "[" + post + "]"
611 res += "{" + key + "}"
612 document.body[i:j+1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    """Strip the column varwidth flags (tabularx/xltabular -> normal table).

    Reconstructed from a mangled paste: the scan loop and its guards were
    elided and have been restored from the visible control flow.  Leaving
    the flags in place would confuse older LyX versions, hence the plain
    removal with a user-visible warning.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            i += 1
            continue
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
        i += 1
# NOTE(review): mangled paste -- heavily elided (outer loop, needarray /
# needvarwidth / vcand initialisation, begcell bookkeeping, several branches).
# Left byte-identical; restore from upstream lyx2lyx before editing.
#
# Purpose: turn per-cell line breaks in fixed-width columns into LaTeX
# constructs older LyX understands -- a V{\linewidth} special column (varwidth
# package) plus ERT \\ / \linebreak{} replacements for Newline insets.
634 def revert_vcolumns(document):
635 " Revert standard columns with line breaks etc. "
641 i = find_token(document.body, "\\begin_inset Tabular", i)
644 j = find_end_of_inset(document.body, i)
646 document.warning("Malformed LyX document: Could not find end of tabular.")
650 # Collect necessary column information
# Row/column counts come from the <features ...> attribute line of the inset.
652 nrows = int(document.body[i+1].split('"')[3])
653 ncols = int(document.body[i+1].split('"')[5])
655 for k in range(ncols):
656 m = find_token(document.body, "<column", m)
657 width = get_option_value(document.body[m], 'width')
658 varwidth = get_option_value(document.body[m], 'varwidth')
659 alignment = get_option_value(document.body[m], 'alignment')
660 special = get_option_value(document.body[m], 'special')
# [width, varwidth, alignment, special, line-index] per column.
661 col_info.append([width, varwidth, alignment, special, m])
666 for row in range(nrows):
667 for col in range(ncols):
668 m = find_token(document.body, "<cell", m)
669 multicolumn = get_option_value(document.body[m], 'multicolumn')
670 multirow = get_option_value(document.body[m], 'multirow')
671 width = get_option_value(document.body[m], 'width')
672 rotate = get_option_value(document.body[m], 'rotate')
673 # Check for: linebreaks, multipars, non-standard environments
675 endcell = find_token(document.body, "</cell>", begcell)
677 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
679 elif count_pars_in_inset(document.body, begcell + 2) > 1:
681 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
683 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
# Only plain columns (no width/varwidth/special) get the V{} treatment.
684 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
686 alignment = col_info[col][2]
687 col_line = col_info[col][4]
689 if alignment == "center":
690 vval = ">{\\centering}"
691 elif alignment == "left":
692 vval = ">{\\raggedright}"
693 elif alignment == "right":
694 vval = ">{\\raggedleft}"
697 vval += "V{\\linewidth}"
# Rewrite the <column ...> tag in place, appending the special= attribute.
699 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
700 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
701 # with newlines, and we do not want that)
703 endcell = find_token(document.body, "</cell>", begcell)
705 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
707 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
711 nle = find_end_of_inset(document.body, nl)
712 del(document.body[nle:nle+1])
714 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
716 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
722 if needarray == True:
723 add_to_preamble(document, ["\\usepackage{array}"])
724 if needvarwidth == True:
725 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): mangled paste -- the encodings dict is mostly elided (entries
# 762-786 missing), as are the loop/guards.  Left byte-identical; do NOT try
# to reconstruct the missing encodings table by hand -- restore it from
# upstream lyx2lyx.
728 def revert_bibencoding(document):
729 " Revert bibliography encoding "
730 
733 i = find_token(document.header, "\\cite_engine", 0)
735 document.warning("Malformed document! Missing \\cite_engine")
737 engine = get_value(document.header, "\\cite_engine", i)
# biblatex gets the encoding as a bibencoding package option; other engines
# get \bgroup\inputencoding{...} ... \egroup ERT around the bibtex inset.
741 if engine in ["biblatex", "biblatex-natbib"]:
744 # Map lyx to latex encoding names
748 "armscii8" : "armscii8",
749 "iso8859-1" : "latin1",
750 "iso8859-2" : "latin2",
751 "iso8859-3" : "latin3",
752 "iso8859-4" : "latin4",
753 "iso8859-5" : "iso88595",
754 "iso8859-6" : "8859-6",
755 "iso8859-7" : "iso-8859-7",
756 "iso8859-8" : "8859-8",
757 "iso8859-9" : "latin5",
758 "iso8859-13" : "latin7",
759 "iso8859-15" : "latin9",
760 "iso8859-16" : "latin10",
761 "applemac" : "applemac",
763 "cp437de" : "cp437de",
780 "utf8-platex" : "utf8",
787 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
790 j = find_end_of_inset(document.body, i)
792 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
795 encoding = get_quoted_value(document.body, "encoding", i, j)
799 # remove encoding line
800 k = find_token(document.body, "encoding", i, j)
803 # Re-find inset end line
804 j = find_end_of_inset(document.body, i)
807 h = find_token(document.header, "\\biblio_options", 0)
809 biblio_options = get_value(document.header, "\\biblio_options", h)
810 if not "bibencoding" in biblio_options:
811 document.header[h] += ",bibencoding=%s" % encodings[encoding]
# No \biblio_options header yet: create one just before \biblatex_bibstyle.
813 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
815 # this should not happen
816 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
818 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Insert the trailing \egroup first so index i stays valid for the prefix.
820 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
821 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# NOTE(review): mangled paste -- the tail of the types dict (presumably
# vcs-date/vcs-time entries -- TODO confirm) and the scan loop are elided.
# Left byte-identical; restore from upstream lyx2lyx before editing.
827 def convert_vcsinfo(document):
828 " Separate vcs Info inset from buffer Info inset. "
# Maps old buffer-inset arg names to the new vcs-inset arg names.
831 "vcs-revision" : "revision",
832 "vcs-tree-revision" : "tree-revision",
833 "vcs-author" : "author",
839 i = find_token(document.body, "\\begin_inset Info", i)
842 j = find_end_of_inset(document.body, i + 1)
844 document.warning("Malformed LyX document: Could not find end of Info inset.")
847 tp = find_token(document.body, 'type', i, j)
848 tpv = get_quoted_value(document.body, "type", tp)
852 arg = find_token(document.body, 'arg', i, j)
853 argv = get_quoted_value(document.body, "arg", arg)
854 if argv not in list(types.keys()):
# Rewrite "type buffer / arg vcs-X" into "type vcs / arg X".
857 document.body[tp] = "type \"vcs\""
858 document.body[arg] = "arg \"" + types[argv] + "\""
# NOTE(review): mangled paste -- scan loop, type check ("vcs") and arg
# validation guard are elided.  Left byte-identical; restore from upstream
# lyx2lyx before editing.
862 def revert_vcsinfo(document):
863 " Merge vcs Info inset to buffer Info inset. "
864 
# Valid vcs-inset args; anything else triggers the warning below.
865 args = ["revision", "tree-revision", "author", "time", "date" ]
868 i = find_token(document.body, "\\begin_inset Info", i)
871 j = find_end_of_inset(document.body, i + 1)
873 document.warning("Malformed LyX document: Could not find end of Info inset.")
876 tp = find_token(document.body, 'type', i, j)
877 tpv = get_quoted_value(document.body, "type", tp)
881 arg = find_token(document.body, 'arg', i, j)
882 argv = get_quoted_value(document.body, "arg", arg)
884 document.warning("Malformed Info inset. Invalid vcs arg.")
# Inverse of convert_vcsinfo(): "type vcs / arg X" -> "type buffer / arg vcs-X".
887 document.body[tp] = "type \"buffer\""
888 document.body[arg] = "arg \"vcs-" + argv + "\""
# NOTE(review): mangled paste -- scan loop, guards, `result = ""` init and the
# argv/isodate bookkeeping are elided.  Left byte-identical; restore from
# upstream lyx2lyx before editing.  Two genuine bugs flagged inline below:
# the non-raw regex string and the nonexistent date.isodate() call.
892 def revert_dateinfo(document):
893 " Revert date info insets to static text. "
895 # FIXME This currently only considers the main language and uses the system locale
896 # Ideally, it should honor context languages and switch the locale accordingly.
898 # The date formats for each language using strftime syntax:
899 # long, short, loclong, locmedium, locshort
901 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
902 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
903 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
904 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
905 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
906 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
907 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
908 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
909 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
910 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
911 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
912 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
913 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
914 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
915 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
916 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
917 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
918 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
919 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
920 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
921 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
922 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
923 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
924 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
925 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
926 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
927 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
928 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
929 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
930 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
931 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
932 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
933 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
934 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
935 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
936 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
937 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
938 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
939 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
940 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
941 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
942 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
943 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
944 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
945 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
946 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
947 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
948 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
949 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
950 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
951 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
952 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
953 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
954 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
955 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
956 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
957 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
958 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
959 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
960 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
961 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
962 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
963 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
964 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
965 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
966 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
967 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
968 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
969 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
970 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
971 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
972 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
973 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
974 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
975 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
976 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
977 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
978 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
979 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
980 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
981 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
982 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
983 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
984 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
985 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
986 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
987 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
988 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
989 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
990 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
991 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
992 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
993 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
994 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
995 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
996 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
997 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
998 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
999 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1000 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1001 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1004 types = ["date", "fixdate", "moddate" ]
1006 i = find_token(document.header, "\\language", 0)
1008 # this should not happen
1009 document.warning("Malformed LyX document! No \\language header found!")
1011 lang = get_value(document.header, "\\language", i)
1015 i = find_token(document.body, "\\begin_inset Info", i)
1018 j = find_end_of_inset(document.body, i + 1)
1020 document.warning("Malformed LyX document: Could not find end of Info inset.")
1023 tp = find_token(document.body, 'type', i, j)
1024 tpv = get_quoted_value(document.body, "type", tp)
1025 if tpv not in types:
1028 arg = find_token(document.body, 'arg', i, j)
1029 argv = get_quoted_value(document.body, "arg", arg)
# fixdate insets carry "<format>@<ISO date>".
1032 if tpv == "fixdate":
1033 datecomps = argv.split('@')
1034 if len(datecomps) > 1:
1036 isodate = datecomps[1]
# BUG(review): the pattern should be a raw string, r'(\d\d\d\d)-(\d\d)-(\d\d)'
# -- '\d' only works here by accident of Python's lenient escape handling.
1037 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1039 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1040 # FIXME if we had the path to the original document (not the one in the tmp dir),
1041 # we could use the mtime.
1042 # elif tpv == "moddate":
1043 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
# BUG(review): datetime.date has no isodate() method -- this branch raises
# AttributeError at runtime; it should be dte.isoformat().
1046 result = dte.isodate()
1047 elif argv == "long":
1048 result = dte.strftime(dateformats[lang][0])
1049 elif argv == "short":
1050 result = dte.strftime(dateformats[lang][1])
1051 elif argv == "loclong":
1052 result = dte.strftime(dateformats[lang][2])
1053 elif argv == "locmedium":
1054 result = dte.strftime(dateformats[lang][3])
1055 elif argv == "locshort":
1056 result = dte.strftime(dateformats[lang][4])
# Otherwise argv is a Qt-style custom format: map it to strftime tokens.
1058 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1059 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1060 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1061 fmt = re.sub('[^\'%]d', '%d', fmt)
1062 fmt = fmt.replace("'", "")
1063 result = dte.strftime(fmt)
# NOTE(review): assigning a plain str to a list slice splices it character by
# character; presumably this should be [result] -- TODO confirm against upstream.
1064 document.body[i : j+1] = result
def revert_timeinfo(document):
    """ Revert time info insets to static text.

    Each Info inset of type time/fixtime/modtime is replaced by a plain-text
    time string, formatted according to the document's main language.
    For "fixtime" the stored time (after the '@' separator in the argument)
    is used; otherwise the current time is used ("modtime" falls through to
    the current time as well, see FIXME below).
    """

    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # [long, short]
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    }

    types = ["time", "fixtime", "modtime"]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            i += 1
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            i += 1
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # A fixed time is stored after an '@' separator in the argument,
            # as "HH:MM:SS" or "HH:MM".
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "modtime":
        #     tme = datetime.fromtimestamp(os.path.getmtime(document.dir)).time()
        result = ""
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt time format codes to strftime syntax.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # Bug fix: the original called dte.strftime(fmt), but 'dte' is never
            # defined in this function (NameError); the time object is 'tme'.
            result = tme.strftime(fmt)
        # Bug fix: assigning a bare string to a list slice splices it in
        # character by character; the inset must become a single body line.
        document.body[i : j+1] = [result]
        i += 1
def revert_namenoextinfo(document):
    """ Merge buffer Info inset type name-noext to name.

    Rewrites the argument line of buffer-type Info insets from
    'arg "name-noext"' to 'arg "name"'; all other insets are left alone.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            i += 1
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # Only buffer-type insets carry the name-noext argument (see docstring).
        if tpv != "buffer":
            i += 1
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            i += 1
            continue
        document.body[arg] = "arg \"name\""
        i += 1
def revert_l7ninfo(document):
    """ Revert l7n Info inset to text.

    The inset is replaced by its (localization) argument, stripped of
    trailing colons and of menu/Qt accelerator markup.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            i += 1
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            i += 1
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # Bug fix: assigning a bare string to a list slice splices it in
        # character by character; the inset must become a single body line.
        document.body[i : j+1] = [argv]
        i += 1
def revert_listpargs(document):
    """ Reverts listpreamble arguments to TeX-code.

    Each 'Argument listpreamble:' inset is removed and its Plain Layout
    content is re-inserted at the start of the containing paragraph as an
    ERT inset wrapped in braces.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            i += 1
            continue
        # parent[3] is the first line of the paragraph contents
        # (get_containing_layout returns (name, start, end, content-start)).
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # Remove the Argument inset, then splice the ERT replacement in at
        # the paragraph start (parbeg < i, so deletion does not shift it).
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
        i += 1
1327 supported_versions = ["2.4.0", "2.4"]
1329 [545, [convert_lst_literalparam]],
1334 [550, [convert_fontenc]],
1341 [557, [convert_vcsinfo]],
1342 [558, [removeFrontMatterStyles]],
1345 [561, [convert_dejavu, convert_ibmplex]],
1351 [562, [revert_listpargs]],
1352 [561, [revert_l7ninfo]],
1353 [560, [revert_ibmplex, revert_dejavu]],
1354 [559, [revert_timeinfo, revert_namenoextinfo]],
1355 [558, [revert_dateinfo]],
1356 [557, [addFrontMatterStyles]],
1357 [556, [revert_vcsinfo]],
1358 [555, [revert_bibencoding]],
1359 [554, [revert_vcolumns]],
1360 [553, [revert_stretchcolumn]],
1361 [552, [revert_tuftecite]],
1362 [551, [revert_floatpclass, revert_floatalignment]],
1363 [550, [revert_nospellcheck]],
1364 [549, [revert_fontenc]],
1365 [548, []],# dummy format change
1366 [547, [revert_lscape]],
1367 [546, [revert_xcharter]],
1368 [545, [revert_paratype]],
1369 [544, [revert_lst_literalparam]]
1373 if __name__ == "__main__":