1 # -*- coding: utf-8 -*-
# This file is part of lyx2lyx
4 # Copyright (C) 2015 The LyX team
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 """ Convert files to the file format generated by lyx 2.2"""
26 # Uncomment only what you need to import, please.
28 #from parser_tools import find_token, find_end_of, find_tokens, \
29 # find_token_exact, find_end_of_inset, find_end_of_layout, \
30 # find_token_backwards, is_in_inset, get_value, get_quoted_value, \
31 # del_token, check_token, get_option_value
33 from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert, lyx2latex, \
34 lyx2verbatim, length_in_bp, convert_info_insets
35 # insert_to_preamble, latex_length, revert_flex_inset, \
36 # revert_font_attrs, hex2ratio, str2bool
38 from parser_tools import find_token, find_token_backwards, find_re, \
39 find_end_of_inset, find_end_of_layout, find_nonempty_line, \
40 get_containing_layout, get_value, check_token
42 ####################################################################
43 # Private helper functions
def revert_Argument_to_TeX_brace(document, line, endline, n, nmax, environment, opt, nolastopt):
    """
    Reverts an InsetArgument to TeX-code.

    Usage:
    revert_Argument_to_TeX_brace(document, LineOfBegin, LineOfEnd, StartArgument, EndArgument, isEnvironment, isOpt, notLastOpt)
    LineOfBegin is the line of the \\begin_layout or \\begin_inset statement
    LineOfEnd is the line of the \\end_layout or \\end_inset statement, if "0" is given, the end of the file is used instead
    StartArgument is the number of the first argument that needs to be converted
    EndArgument is the number of the last argument that needs to be converted or the last defined one
    isEnvironment must be true, if the layout is for a LaTeX environment
    isOpt must be true, if the argument is an optional one
    notLastOpt must be true if the argument is mandatory and followed by optional ones
    """
    # NOTE(review): several original lines are elided from this excerpt
    # (initialisation, guards, else arms and loop advances); the visible
    # statements below are kept verbatim and elisions are marked.
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
        if lineArg > endline and endline != 0:
            # [elided in excerpt: early exit once past the layout's end]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
        # we have to assure that no other inset is in the Argument
        beginInset = find_token(document.body, "\\begin_inset", beginPlain)
        endInset = find_token(document.body, "\\end_inset", beginPlain)
        # [elided in excerpt: initialisation of the scan positions k and l]
        while beginInset < endInset and beginInset != -1:
            beginInset = find_token(document.body, "\\begin_inset", k)
            endInset = find_token(document.body, "\\end_inset", l)
        if environment == False:
            # [elided in excerpt: guard on `opt`]
            if nolastopt == False:
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
            # [elided in excerpt: else arm]
            document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
            del(document.body[lineArg : beginPlain + 1])
            # [elided in excerpt: optional-argument branch]
            document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("]")
            document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("[")
        # [elided in excerpt: environment branch]
        document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
        document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
        # [elided in excerpt: optional-argument branch of the environment case]
        document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("]")
        document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("[")
101 ###############################################################################
103 ### Conversion and reversion routines
105 ###############################################################################
def convert_longtable_label_internal(document, forward):
    """
    Convert a caption reference to "LongTableNoNumber" into "Unnumbered"
    if forward is True; with forward False the two strings swap roles
    (reversion).
    """
    # NOTE(review): loop scaffolding and the guard on `forward` are elided
    # from this excerpt; the visible statements are kept verbatim.
    old_reference = "\\begin_inset Caption LongTableNoNumber"
    new_reference = "\\begin_inset Caption Unnumbered"

    # if the purpose is to revert, swap the strings' roles
    # (in the full file this swap is presumably guarded by `forward` — the
    # guard line is elided here)
    old_reference, new_reference = new_reference, old_reference

    i = find_token(document.body, old_reference, i)
    document.body[i] = new_reference
def convert_longtable_label(document):
    """Relabel LongTableNoNumber captions as Unnumbered (forward direction)."""
    direction_forward = True
    convert_longtable_label_internal(document, direction_forward)
def revert_longtable_label(document):
    """Relabel Unnumbered captions back to LongTableNoNumber (reversion)."""
    direction_forward = False
    convert_longtable_label_internal(document, direction_forward)
def convert_separator(document):
    """
    Convert layout separators to separator insets and add (LaTeX) paragraph
    breaks in order to mimic previous LaTeX export.
    """
    # NOTE(review): loop scaffolding, several guards and part of the style
    # dictionary are elided from this excerpt; visible code is kept verbatim.

    parins = ["\\begin_inset Separator parbreak", "\\end_inset", ""]
    parlay = ["\\begin_layout Standard", "\\begin_inset Separator parbreak",
              "\\end_inset", "", "\\end_layout", ""]
    # [elided in excerpt: "sty_dict = {" opening, remaining entries and the
    # closing brace] — styles that must be reset before the inserted inset
    "family" : "default",
    "series" : "default",

    # pass over \begin_deeper statements
    i = find_token(document.body, "\\begin_deeper", i)
    j = find_token_backwards(document.body, "\\end_layout", i-1)
    # reset any text style before inserting the inset
    lay = get_containing_layout(document.body, j-1)
    content = "\n".join(document.body[lay[1]:lay[2]])
    for val in list(sty_dict.keys()):
        if content.find("\\%s" % val) != -1:
            document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
    document.body[j:j] = parins
    i = i + len(parins) + 1

    # pass over \align statements in Standard paragraphs
    i = find_token(document.body, "\\align", i)
    lay = get_containing_layout(document.body, i)
    if lay != False and lay[0] == "Plain Layout":
        # [elided in excerpt]
    j = find_token_backwards(document.body, "\\end_layout", i-1)
    lay = get_containing_layout(document.body, j-1)
    if lay != False and lay[0] == "Standard" \
       and find_token(document.body, "\\align", lay[1], lay[2]) == -1 \
       and find_token(document.body, "\\begin_inset VSpace", lay[1], lay[2]) == -1:
        # reset any text style before inserting the inset
        content = "\n".join(document.body[lay[1]:lay[2]])
        for val in list(sty_dict.keys()):
            if content.find("\\%s" % val) != -1:
                document.body[j:j] = ["\\%s %s" % (val, sty_dict[val])]
        document.body[j:j] = parins
        i = i + len(parins) + 1

    # pass over --Separator-- / EndOfSlide pseudo-layouts
    regexp = re.compile(r'^\\begin_layout (?:(-*)|(\s*))(Separator|EndOfSlide)(?:(-*)|(\s*))$', re.IGNORECASE)
    i = find_re(document.body, regexp, i)
    j = find_end_of_layout(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Missing `\\end_layout'.")
    lay = get_containing_layout(document.body, j-1)
    lines = document.body[lay[3]:lay[2]]
    document.body[i:j+1] = parlay
    document.body[i+1:i+1] = lines
    i = i + len(parlay) + len(lines) + 1
def revert_separator(document):
    " Revert separator insets to layout separators "
    # NOTE(review): loop scaffolding, index bookkeeping and several
    # guard/else lines are elided in this excerpt; visible code is verbatim.

    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass in beamer_classes:
        beglaysep = "\\begin_layout Separator"
    # [elided in excerpt: else arm]
    beglaysep = "\\begin_layout --Separator--"

    parsep = [beglaysep, "", "\\end_layout", ""]
    # ERT inset containing a lone "%" (comment) ...
    comert = ["\\begin_inset ERT", "status collapsed", "",
              "\\begin_layout Plain Layout", "%", "\\end_layout",
              "", "\\end_inset", ""]
    # ... and one containing a single space
    empert = ["\\begin_inset ERT", "status collapsed", "",
              "\\begin_layout Plain Layout", " ", "\\end_layout",
              "", "\\end_inset", ""]

    # [elided in excerpt: loop header]
    i = find_token(document.body, "\\begin_inset Separator", i)
    lay = get_containing_layout(document.body, i)
    # [elided in excerpt: lay == False guard]
    document.warning("Malformed LyX document: Can't convert separator inset at line " + str(i))
    # [elided in excerpt: layoutname / beg / end bookkeeping]
    kind = get_value(document.body, "\\begin_inset Separator", i, i+1, "plain").split()[1]
    before = document.body[beg+1:i]
    something_before = len(before) > 0 and len("".join(before)) > 0
    j = find_end_of_inset(document.body, i)
    after = document.body[j+1:end]
    something_after = len(after) > 0 and len("".join(after)) > 0
    # [elided in excerpt: kind == "plain" branch header]
    beg = beg + len(before) + 1
    elif something_before:
        document.body[i:i] = ["\\end_layout", ""]
    # [elided in excerpt: index bookkeeping and branch headers]
    document.body[beg:j+1] = empert
    document.body[beg:j+1] = comert
    if layoutname == "Standard":
        if not something_before:
            document.body[beg:j+1] = parsep
            document.body[i:i] = ["", "\\begin_layout Standard"]
        # [elided in excerpt: else arm]
        document.body[beg:j+1] = ["\\begin_layout Standard"]
    # [elided in excerpt: non-Standard branch header]
    document.body[beg:j+1] = ["\\begin_deeper"]
    end = end + 1 - (j + 1 - beg)
    if not something_before:
        document.body[i:i] = parsep
        end = end + len(parsep)
    document.body[i:i] = ["\\begin_layout Standard"]
    document.body[end+2:end+2] = ["", "\\end_deeper", ""]

    # nothing after the inset: decide whether a layout separator is needed
    next_par_is_aligned = False
    k = find_nonempty_line(document.body, end+1)
    if k != -1 and check_token(document.body[k], "\\begin_layout"):
        lay = get_containing_layout(document.body, k)
        next_par_is_aligned = lay != False and \
            find_token(document.body, "\\align", lay[1], lay[2]) != -1
    if k != -1 and not next_par_is_aligned \
       and not check_token(document.body[k], "\\end_deeper") \
       and not check_token(document.body[k], "\\begin_deeper"):
        if layoutname == "Standard":
            document.body[beg:j+1] = [beglaysep]
        # [elided in excerpt: else arm]
        document.body[beg:j+1] = ["\\begin_deeper", beglaysep]
        end = end + 2 - (j + 1 - beg)
        document.body[end+1:end+1] = ["", "\\end_deeper", ""]
    # [elided in excerpt: else — simply drop the separator]
    del document.body[i:end+1]
    del document.body[i:end-1]
def convert_parbreak(document):
    """
    Convert parbreak separators not specifically used to separate
    environments to latexpar separators.
    """
    # NOTE(review): loop scaffolding and some guards are elided in this
    # excerpt; visible code is kept verbatim.
    parbreakinset = "\\begin_inset Separator parbreak"
    i = find_token(document.body, parbreakinset, i)
    lay = get_containing_layout(document.body, i)
    # [elided in excerpt: lay == False guard]
    document.warning("Malformed LyX document: Can't convert separator inset at line " + str(i))
    if lay[0] == "Standard":
        # Convert only if not alone in the paragraph
        k1 = find_nonempty_line(document.body, lay[1] + 1, i + 1)
        k2 = find_nonempty_line(document.body, i + 1, lay[2])
        if (k1 < i) or (k2 > i + 1) or not check_token(document.body[i], parbreakinset):
            document.body[i] = document.body[i].replace("parbreak", "latexpar")
    # [elided in excerpt: else arm — non-Standard layouts]
    document.body[i] = document.body[i].replace("parbreak", "latexpar")
def revert_parbreak(document):
    """
    Revert latexpar separators to parbreak separators.
    """
    # NOTE(review): loop scaffolding is elided in this excerpt.
    i = find_token(document.body, "\\begin_inset Separator latexpar", i)
    document.body[i] = document.body[i].replace("latexpar", "parbreak")
def revert_smash(document):
    " Set amsmath to on if smash commands are used "
    # NOTE(review): loop scaffolding and the guards between the statements
    # below are elided in this excerpt; visible code is kept verbatim.
    commands = ["smash[t]", "smash[b]", "notag"]
    i = find_token(document.header, "\\use_package amsmath", 0)
    # [elided in excerpt: i == -1 guard]
    document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
    value = get_value(document.header, "\\use_package amsmath", i).split()[1]
    # nothing to do if package is not auto but on or off
    # [elided in excerpt: value check and formula-scan loop header]
    j = find_token(document.body, '\\begin_inset Formula', j)
    k = find_end_of_inset(document.body, j)
    # [elided in excerpt: k == -1 guard]
    document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(j))
    code = "\n".join(document.body[j:k])
    # [elided in excerpt: loop over `commands` binding c]
    if code.find("\\%s" % c) != -1:
        # set amsmath to on, since it is loaded by the newer format
        document.header[i] = "\\use_package amsmath 2"
def revert_swissgerman(document):
    " Set language german-ch-old to german "
    # NOTE(review): a few guard/loop lines are elided in this excerpt.
    if document.language == "german-ch-old":
        document.language = "german"
        i = find_token(document.header, "\\language", 0)
        # [elided in excerpt: i != -1 guard]
        document.header[i] = "\\language german"
        # [elided in excerpt: body-scan loop header]
        j = find_token(document.body, "\\lang german-ch-old", j)
        document.body[j] = document.body[j].replace("\\lang german-ch-old", "\\lang german")
def revert_use_package(document, pkg, commands, oldauto, supported):
    """
    Remove the \\use_package header line for `pkg` and, where necessary,
    load the package explicitly from the preamble instead.

    oldauto defines how the version we are reverting to behaves:
    if it is true, the old version uses the package automatically.
    if it is false, the old version never uses the package.
    If "supported" is true, the target version also supports this
    \\use_package possibility.
    """
    # NOTE(review): several guards and loop headers are elided in this
    # excerpt; visible code is kept verbatim.
    regexp = re.compile(r'(\\use_package\s+%s)' % pkg)
    p = find_re(document.header, regexp, 0)
    value = "1" # default is auto
    # [elided in excerpt: p != -1 guard]
    value = get_value(document.header, "\\use_package" , p).split()[1]
    del document.header[p]
    if value == "2" and not supported: # on
        add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
    elif value == "1" and not oldauto: # auto
        # scan all formulas for any of the package's commands
        i = find_token(document.body, '\\begin_inset Formula', i)
        j = find_end_of_inset(document.body, i)
        # [elided in excerpt: j == -1 guard]
        document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
        code = "\n".join(document.body[i:j])
        # [elided in excerpt: loop over `commands` binding c]
        if code.find("\\%s" % c) != -1:
            # [elided in excerpt: `supported` branch header]
            document.header[p] = "\\use_package " + pkg + " 2"
            # [elided in excerpt: else arm]
            add_to_preamble(document, ["\\usepackage{" + pkg + "}"])
# Commands provided by the mathtools package; used by revert_xarrow /
# revert_use_package to decide whether the package must be loaded explicitly.
# [elided in excerpt: the final entries and the closing bracket]
mathtools_commands = ["xhookrightarrow", "xhookleftarrow", "xRightarrow", \
    "xrightharpoondown", "xrightharpoonup", "xrightleftharpoons", \
    "xLeftarrow", "xleftharpoondown", "xleftharpoonup", \
    "xleftrightarrow", "xLeftrightarrow", "xleftrightharpoons", \
def revert_xarrow(document):
    "remove use_package mathtools"
    # oldauto=False: the old format never loaded mathtools automatically;
    # supported=True: the target version still knows \use_package mathtools.
    pkg, cmds = "mathtools", mathtools_commands
    revert_use_package(document, pkg, cmds, False, True)
def revert_beamer_lemma(document):
    " Reverts beamer lemma layout to ERT "
    # NOTE(review): loop scaffolding and several guard/else lines are elided
    # in this excerpt; visible code is kept verbatim.
    beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_classes:
        # [elided in excerpt: return]
    # [elided in excerpt: loop header]
    i = find_token(document.body, "\\begin_layout Lemma", i)
    j = find_end_of_layout(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Can't find end of Lemma layout")
    # locate the two optional argument insets of the layout
    arg1 = find_token(document.body, "\\begin_inset Argument 1", i, j)
    endarg1 = find_end_of_inset(document.body, arg1)
    arg2 = find_token(document.body, "\\begin_inset Argument 2", i, j)
    endarg2 = find_end_of_inset(document.body, arg2)
    # [elided in excerpt: subst defaults and arg1 != -1 guard]
    beginPlain1 = find_token(document.body, "\\begin_layout Plain Layout", arg1, endarg1)
    if beginPlain1 == -1:
        document.warning("Malformed LyX document: Can't find arg1 plain Layout")
        # [elided in excerpt: continue]
    endPlain1 = find_end_of_inset(document.body, beginPlain1)
    content1 = document.body[beginPlain1 + 1 : endPlain1 - 2]
    subst1 = put_cmd_in_ert("<") + content1 + put_cmd_in_ert(">")

    beginPlain2 = find_token(document.body, "\\begin_layout Plain Layout", arg2, endarg2)
    if beginPlain2 == -1:
        document.warning("Malformed LyX document: Can't find arg2 plain Layout")
        # [elided in excerpt: continue]
    endPlain2 = find_end_of_inset(document.body, beginPlain2)
    content2 = document.body[beginPlain2 + 1 : endPlain2 - 2]
    subst2 = put_cmd_in_ert("[") + content2 + put_cmd_in_ert("]")

    # remove the argument insets (deletion order branches are elided —
    # presumably chosen so earlier deletions don't shift later indices)
    del document.body[arg2 : endarg2 + 1]
    del document.body[arg1 : endarg1 + 1]
    del document.body[arg1 : endarg1 + 1]
    del document.body[arg2 : endarg2 + 1]

    # index of end layout has probably changed
    j = find_end_of_layout(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Can't find end of Lemma layout")

    # if this is not a consecutive env, add start command
    begcmd = put_cmd_in_ert("\\begin{lemma}")
    # has this a consecutive lemma?
    consecutive = document.body[j + 2] == "\\begin_layout Lemma"
    # if this is not followed by a consecutive env, add end command
    document.body[j : j + 1] = put_cmd_in_ert("\\end{lemma}") + ["\\end_layout"]
    document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd + subst1 + subst2
552 def revert_question_env(document):
554 Reverts question and question* environments of
555 theorems-ams-extended-bytype module to ERT
558 # Do we use theorems-ams-extended-bytype module?
559 if not "theorems-ams-extended-bytype" in document.get_module_list():
565 i = find_token(document.body, "\\begin_layout Question", i)
569 starred = document.body[i] == "\\begin_layout Question*"
571 j = find_end_of_layout(document.body, i)
573 document.warning("Malformed LyX document: Can't find end of Question layout")
577 # if this is not a consecutive env, add start command
581 begcmd = put_cmd_in_ert("\\begin{question*}")
583 begcmd = put_cmd_in_ert("\\begin{question}")
585 # has this a consecutive theorem of same type?
588 consecutive = document.body[j + 2] == "\\begin_layout Question*"
590 consecutive = document.body[j + 2] == "\\begin_layout Question"
592 # if this is not followed by a consecutive env, add end command
595 document.body[j : j + 1] = put_cmd_in_ert("\\end{question*}") + ["\\end_layout"]
597 document.body[j : j + 1] = put_cmd_in_ert("\\end{question}") + ["\\end_layout"]
599 document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd
601 add_to_preamble(document, "\\providecommand{\questionname}{Question}")
604 add_to_preamble(document, "\\theoremstyle{plain}\n" \
605 "\\newtheorem*{question*}{\\protect\\questionname}")
607 add_to_preamble(document, "\\theoremstyle{plain}\n" \
608 "\\newtheorem{question}{\\protect\\questionname}")
def convert_dashes(document):
    "convert -- and --- to \\twohyphens and \\threehyphens"
    # NOTE(review): loop advances and a few guards are elided in this
    # excerpt; visible code is kept verbatim.
    if document.backend != "latex":
        # [elided in excerpt: return]
    # [elided in excerpt: i = 0]
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", "Graphics", "IPA", "listings"]:
            # must not replace anything in insets that store LaTeX contents in .lyx files
            # (math and command insets without overridden read() and write() methods)
            # filtering out IPA makes Text::readParToken() more simple
            # skip ERT as well since it is not needed there
            j = find_end_of_inset(document.body, i)
            # [elided in excerpt: j == -1 guard]
            document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
            # [elided in excerpt: skip past the inset]
        if len(words) > 0 and words[0] in ["\\leftindent", "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
            # skip paragraph parameters (bug 10243)
            # [elided in excerpt: advance and continue]
        j = document.body[i].find("--")
        # [elided in excerpt: j == -1 handling]
        front = document.body[i][:j]
        back = document.body[i][j+2:]
        # We can have an arbitrary number of consecutive hyphens.
        # These must be split into the corresponding number of two and three hyphens
        # We must match what LaTeX does: First try emdash, then endash, then single hyphen
        if back.find("-") == 0:
            # [elided in excerpt: strip the third hyphen from `back`]
            document.body.insert(i+1, back)
            document.body[i] = front + "\\threehyphens"
        # [elided in excerpt: else arm — plain "--"]
        document.body.insert(i+1, back)
        document.body[i] = front + "\\twohyphens"
def revert_dashes(document):
    "convert \\twohyphens and \\threehyphens to -- and ---"
    # NOTE(review): loop advances and the `replaced` flag handling are
    # elided in this excerpt; visible code is kept verbatim.
    # [elided in excerpt: i = 0]
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "ERT", "External", "Formula", "Graphics", "IPA", "listings"]:
            # see convert_dashes for why these insets are skipped
            j = find_end_of_inset(document.body, i)
            # [elided in excerpt: j == -1 guard]
            document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
            # [elided in excerpt: skip past the inset]
        # [elided in excerpt: replaced = False]
        if document.body[i].find("\\twohyphens") >= 0:
            document.body[i] = document.body[i].replace("\\twohyphens", "--")
            # [elided in excerpt: replaced = True]
        if document.body[i].find("\\threehyphens") >= 0:
            document.body[i] = document.body[i].replace("\\threehyphens", "---")
            # [elided in excerpt: replaced = True]
        # re-join the short continuation lines created by convert_dashes
        if replaced and i+1 < len(document.body) and \
           (document.body[i+1].find("\\") != 0 or \
            document.body[i+1].find("\\twohyphens") == 0 or
            document.body[i+1].find("\\threehyphens") == 0) and \
           len(document.body[i]) + len(document.body[i+1]) <= 80:
            document.body[i] = document.body[i] + document.body[i+1]
            document.body[i+1:i+2] = []
        # [elided in excerpt: i advance]
# Phrases converted to/from \SpecialCharNoPassThru by the routines below.
# order is important for the last three!
# (presumably so "LaTeX2e" is handled before "LaTeX", and "LaTeX" before
# "TeX" — see is_part_of_converted_phrase)
phrases = ["LyX", "LaTeX2e", "LaTeX", "TeX"]
def is_part_of_converted_phrase(line, j, phrase):
    "is phrase part of an already converted phrase?"
    # NOTE(review): the enclosing for-loop over `phrases` (binding p) and
    # the return statements are elided in this excerpt.
    converted = "\\SpecialCharNoPassThru \\" + p
    # position where `converted` would have to start so that `phrase`
    # (found at offset j) is its tail
    pos = j + len(phrase) - len(converted)
    if line[pos:pos+len(converted)] == converted:
def convert_phrases(document):
    "convert special phrases from plain text to \\SpecialCharNoPassThru"
    # NOTE(review): loop advances and a few guards are elided in this
    # excerpt; visible code is kept verbatim.
    if document.backend != "latex":
        # [elided in excerpt: return]
    for phrase in phrases:
        # [elided in excerpt: i = 0]
        while i < len(document.body):
            words = document.body[i].split()
            if len(words) > 1 and words[0] == "\\begin_inset" and \
               words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
                # must not replace anything in insets that store LaTeX contents in .lyx files
                # (math and command insets without overridden read() and write() methods)
                j = find_end_of_inset(document.body, i)
                # [elided in excerpt: j == -1 guard]
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                # [elided in excerpt: skip past the inset]
            if document.body[i].find("\\") == 0:
                # [elided in excerpt: skip command lines]
            j = document.body[i].find(phrase)
            # [elided in excerpt: j == -1 handling]
            if not is_part_of_converted_phrase(document.body[i], j, phrase):
                front = document.body[i][:j]
                back = document.body[i][j+len(phrase):]
                document.body.insert(i+1, back)
                # We cannot use SpecialChar since we do not know whether we are outside passThru
                document.body[i] = front + "\\SpecialCharNoPassThru \\" + phrase
            # [elided in excerpt: i advance]
def revert_phrases(document):
    "convert special phrases to plain text"
    # NOTE(review): loop advances and the `replaced` flag handling are
    # elided in this excerpt; visible code is kept verbatim.
    # [elided in excerpt: i = 0]
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
            # see convert_phrases
            j = find_end_of_inset(document.body, i)
            # [elided in excerpt: j == -1 guard]
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            # [elided in excerpt: skip past the inset]
        # [elided in excerpt: replaced = False]
        for phrase in phrases:
            # we can replace SpecialChar since LyX ensures that it cannot be inserted into passThru parts
            if document.body[i].find("\\SpecialChar \\" + phrase) >= 0:
                document.body[i] = document.body[i].replace("\\SpecialChar \\" + phrase, phrase)
                # [elided in excerpt: replaced = True]
            if document.body[i].find("\\SpecialCharNoPassThru \\" + phrase) >= 0:
                document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru \\" + phrase, phrase)
                # [elided in excerpt: replaced = True]
        # re-join lines split by convert_phrases, when the result fits in 80 chars
        if replaced and i+1 < len(document.body) and \
           (document.body[i+1].find("\\") != 0 or \
            document.body[i+1].find("\\SpecialChar") == 0) and \
           len(document.body[i]) + len(document.body[i+1]) <= 80:
            document.body[i] = document.body[i] + document.body[i+1]
            document.body[i+1:i+2] = []
        # [elided in excerpt: i advance]
def convert_specialchar_internal(document, forward):
    """Translate SpecialChar tokens between old LaTeX-like keys and the
    new symbolic names; forward selects the direction."""
    # NOTE(review): relies on the dict preserving insertion order for the
    # "must be after LaTeX2e" constraint — guaranteed only on Python 3.7+
    # (CPython 3.6).  Loop scaffolding and the `forward` guard are elided
    # in this excerpt; visible code is kept verbatim.
    specialchars = {"\\-":"softhyphen", "\\textcompwordmark{}":"ligaturebreak", \
        "\\@.":"endofsentence", "\\ldots{}":"ldots", \
        "\\menuseparator":"menuseparator", "\\slash{}":"breakableslash", \
        "\\nobreakdash-":"nobreakdash", "\\LyX":"LyX", \
        "\\TeX":"TeX", "\\LaTeX2e":"LaTeX2e", \
        "\\LaTeX":"LaTeX" # must be after LaTeX2e
    # [elided in excerpt: closing brace and loop header]
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           words[1] in ["CommandInset", "External", "Formula", "Graphics", "listings"]:
            # see convert_phrases
            j = find_end_of_inset(document.body, i)
            # [elided in excerpt: j == -1 guard]
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            # [elided in excerpt: skip past the inset]
        for key, value in specialchars.items():
            # [elided in excerpt: `forward` guard — key -> value]
            document.body[i] = document.body[i].replace("\\SpecialChar " + key, "\\SpecialChar " + value)
            document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru " + key, "\\SpecialCharNoPassThru " + value)
            # [elided in excerpt: else arm — value -> key]
            document.body[i] = document.body[i].replace("\\SpecialChar " + value, "\\SpecialChar " + key)
            document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru " + value, "\\SpecialCharNoPassThru " + key)
        # [elided in excerpt: i advance]
def convert_specialchar(document):
    """Rewrite special characters into the new (2.2) syntax."""
    forward = True
    convert_specialchar_internal(document, forward)
def revert_specialchar(document):
    """Rewrite special characters back into the old syntax."""
    forward = False
    convert_specialchar_internal(document, forward)
def revert_georgian(document):
    "Set the document language to English but assure Georgian output"
    # NOTE(review): several `!= -1` guards and an else arm are elided in
    # this excerpt; visible code is kept verbatim.
    if document.language == "georgian":
        document.language = "english"
        i = find_token(document.header, "\\language georgian", 0)
        document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        document.header[j] = "\\language_package babel"
        # pass Georgian as a global class option instead
        k = find_token(document.header, "\\options", 0)
        document.header[k] = document.header[k].replace("\\options", "\\options georgian,")
        # [elided in excerpt: else arm — no \options line present yet]
        l = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(l + 1, "\\options georgian")
def revert_sigplan_doi(document):
    " Reverts sigplanconf DOI layout to ERT "
    # NOTE(review): loop scaffolding and a guard are elided in this excerpt.
    if document.textclass != "sigplanconf":
        # [elided in excerpt: return]
    # [elided in excerpt: loop header]
    i = find_token(document.body, "\\begin_layout DOI", i)
    j = find_end_of_layout(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Can't find end of DOI layout")
    # move the DOI contents into the preamble and drop the layout
    content = lyx2latex(document, document.body[i:j + 1])
    add_to_preamble(document, ["\\doi{" + content + "}"])
    del document.body[i:j + 1]
def revert_ex_itemargs(document):
    " Reverts \\item arguments of the example environments (Linguistics module) to TeX-code "
    # NOTE(review): loop scaffolding, guards and the `parbeg` binding are
    # elided in this excerpt; visible code is kept verbatim.
    if not "linguistics" in document.get_module_list():
        # [elided in excerpt: return]
    # [elided in excerpt: loop header]
    example_layouts = ["Numbered Examples (consecutive)", "Subexample"]
    i = find_token(document.body, "\\begin_inset Argument item:", i)
    j = find_end_of_inset(document.body, i)
    # Find containing paragraph layout
    parent = get_containing_layout(document.body, i)
    # [elided in excerpt: parent == False guard]
    document.warning("Malformed LyX document: Can't find parent paragraph layout")
    # [elided in excerpt: parbeg assignment from `parent`]
    layoutname = parent[0]
    if layoutname in example_layouts:
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
        document.body[parbeg : parbeg] = subst
def revert_forest(document):
    " Reverts the forest environment (Linguistics module) to TeX-code "
    # NOTE(review): loop scaffolding and a guard are elided in this excerpt.
    if not "linguistics" in document.get_module_list():
        # [elided in excerpt: return]
    # [elided in excerpt: loop header]
    i = find_token(document.body, "\\begin_inset Flex Structure Tree", i)
    j = find_end_of_inset(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Can't find end of Structure Tree inset")
    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
    endPlain = find_end_of_layout(document.body, beginPlain)
    content = lyx2latex(document, document.body[beginPlain : endPlain])
    # the reverted ERT needs the forest package loaded explicitly
    add_to_preamble(document, ["\\usepackage{forest}"])
    document.body[i:j + 1] = put_cmd_in_ert("\\begin{forest}" + content + "\\end{forest}")
def revert_glossgroup(document):
    " Reverts the GroupGlossedWords inset (Linguistics module) to TeX-code "
    # NOTE(review): loop scaffolding and a guard are elided in this excerpt.
    if not "linguistics" in document.get_module_list():
        # [elided in excerpt: return]
    # [elided in excerpt: loop header]
    i = find_token(document.body, "\\begin_inset Flex GroupGlossedWords", i)
    j = find_end_of_inset(document.body, i)
    # [elided in excerpt: j == -1 guard]
    document.warning("Malformed LyX document: Can't find end of GroupGlossedWords inset")
    beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
    endPlain = find_end_of_layout(document.body, beginPlain)
    content = lyx2verbatim(document, document.body[beginPlain : endPlain])
    # wrap the verbatim content in plain braces
    document.body[i:j + 1] = ["{", "", content, "", "}"]
def revert_newgloss(document):
    " Reverts the new Glosse insets (Linguistics module) to the old format "
    # NOTE(review): loop scaffolding and several guards are elided in this
    # excerpt; visible code is kept verbatim.
    if not "linguistics" in document.get_module_list():
        # [elided in excerpt: return]
    glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
    for glosse in glosses:
        # [elided in excerpt: loop header]
        i = find_token(document.body, glosse, i)
        j = find_end_of_inset(document.body, i)
        # [elided in excerpt: j == -1 guard]
        document.warning("Malformed LyX document: Can't find end of Glosse inset")
        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        endarg = find_end_of_inset(document.body, arg)
        # [elided in excerpt: arg != -1 guard]
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find arg plain Layout")
            # [elided in excerpt: continue]
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        argcontent = lyx2verbatim(document, document.body[argbeginPlain : argendPlain - 2])

        # re-emit the argument as an old-style "\glt" line at the inset end
        document.body[j:j] = ["", "\\begin_layout Plain Layout","\\backslash", "glt ",
                              argcontent, "\\end_layout"]

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        # [elided in excerpt: else arm]
        del document.body[arg : endarg + 1]

        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = lyx2verbatim(document, document.body[beginPlain : endPlain])
        document.body[beginPlain + 1:endPlain] = [content]
        # [elided in excerpt: i advance]

    # Dissolve ERT insets
    for glosse in glosses:
        # [elided in excerpt: loop header]
        i = find_token(document.body, glosse, i)
        j = find_end_of_inset(document.body, i)
        # [elided in excerpt: j == -1 guard]
        document.warning("Malformed LyX document: Can't find end of Glosse inset")
        # [elided in excerpt: inner scan loop]
        ert = find_token(document.body, "\\begin_inset ERT", i, j)
        ertend = find_end_of_inset(document.body, ert)
        # [elided in excerpt: ertend == -1 guard]
        document.warning("Malformed LyX document: Can't find end of ERT inset")
        ertcontent = get_ert(document.body, ert, True)
        document.body[ert : ertend + 1] = [ertcontent]
def convert_newgloss(document):
    " Converts Glosse insets (Linguistics module) to the new format "
    # NOTE(review): loop scaffolding (including the inner scan binding `k`)
    # and several guards are elided in this excerpt; visible code verbatim.
    if not "linguistics" in document.get_module_list():
        # [elided in excerpt: return]
    glosses = ("\\begin_inset Flex Glosse", "\\begin_inset Flex Tri-Glosse")
    for glosse in glosses:
        # [elided in excerpt: loop header]
        i = find_token(document.body, glosse, i)
        j = find_end_of_inset(document.body, i)
        # [elided in excerpt: j == -1 guard]
        document.warning("Malformed LyX document: Can't find end of Glosse inset")
        # [elided in excerpt: inner while-loop header, k initialisation]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", k, j)
        if beginPlain == -1:
            # [elided in excerpt: break]
        endPlain = find_end_of_layout(document.body, beginPlain)
        # [elided in excerpt: endPlain == -1 guard]
        document.warning("Malformed LyX document: Can't find end of Glosse layout")

        glt = find_token(document.body, "\\backslash", beginPlain, endPlain)
        if glt != -1 and document.body[glt + 1].startswith("glt"):
            # NOTE(review): lstrip("glt") strips any of the characters
            # g/l/t, not the literal prefix — verify this is intended.
            document.body[glt + 1] = document.body[glt + 1].lstrip("glt").lstrip()
            argcontent = document.body[glt + 1 : endPlain]
            # old "\glt" translation line becomes Argument 1 wrapped in ERT
            document.body[beginPlain + 1 : endPlain] = ["\\begin_inset Argument 1", "status open", "",
                "\\begin_layout Plain Layout", "\\begin_inset ERT", "status open", "",
                "\\begin_layout Plain Layout", ""] + argcontent + ["\\end_layout", "", "\\end_inset", "",
                "\\end_layout", "", "\\end_inset"]
        # [elided in excerpt: else arm — plain content wrapped in ERT]
        content = document.body[beginPlain + 1 : endPlain]
        document.body[beginPlain + 1 : endPlain] = ["\\begin_inset ERT", "status open", "",
            "\\begin_layout Plain Layout"] + content + ["\\end_layout", "", "\\end_inset"]

        # indices shifted by the insertions above — recompute
        endPlain = find_end_of_layout(document.body, beginPlain)
        j = find_end_of_inset(document.body, i)
        # [elided in excerpt: loop advance]
def convert_BoxFeatures(document):
    " adds new box features "
    # NOTE(review): loop scaffolding is elided in this excerpt.
    i = find_token(document.body, "height_special", i)
    # [elided in excerpt: i == -1 guard]
    # insert defaults for the new 2.2 box parameters right after height_special
    document.body[i+1:i+1] = ['thickness "0.4pt"', 'separation "3pt"', 'shadowsize "4pt"']
def revert_BoxFeatures(document):
    " outputs new box features as TeX code "
    # NOTE(review): loop scaffolding, the `defaultSep` definition and some
    # guards are elided in this excerpt; visible code is kept verbatim.
    defaultThick = "0.4pt"
    defaultShadow = "4pt"
    # [elided in excerpt: loop header]
    i = find_token(document.body, "height_special", i)
    # [elided in excerpt: i == -1 guard]
    # read out the values
    beg = document.body[i+1].find('"');
    end = document.body[i+1].rfind('"');
    thickness = document.body[i+1][beg+1:end];
    beg = document.body[i+2].find('"');
    end = document.body[i+2].rfind('"');
    separation = document.body[i+2][beg+1:end];
    beg = document.body[i+3].find('"');
    end = document.body[i+3].rfind('"');
    shadowsize = document.body[i+3][beg+1:end];
    # delete the specification
    del document.body[i+1:i+4]
    # emit ERT only when some value differs from its default:
    # first output the closing brace
    if shadowsize != defaultShadow or separation != defaultSep or thickness != defaultThick:
        document.body[i + 10 : i + 10] = put_cmd_in_ert("}")
    # now output the lengths
    if shadowsize != defaultShadow or separation != defaultSep or thickness != defaultThick:
        document.body[i - 10 : i - 10] = put_cmd_in_ert("{")
    if thickness != defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness]
    if separation != defaultSep and thickness == defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxsep " + separation]
    if separation != defaultSep and thickness != defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash fboxsep " + separation]
    if shadowsize != defaultShadow and separation == defaultSep and thickness == defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash shadowsize " + shadowsize]
    if shadowsize != defaultShadow and separation != defaultSep and thickness == defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxsep " + separation + "\\backslash shadowsize " + shadowsize]
    if shadowsize != defaultShadow and separation == defaultSep and thickness != defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash shadowsize " + shadowsize]
    if shadowsize != defaultShadow and separation != defaultSep and thickness != defaultThick:
        document.body[i - 5 : i - 4] = ["{\\backslash fboxrule " + thickness + "\\backslash fboxsep " + separation + "\\backslash shadowsize " + shadowsize]
# Insert the "\origin" header tag before "\textclass". The origin is either
# the document directory itself or, when the document lives under the system
# LyX directory, a "/systemlyxdir"-relative path; backslashes are normalized
# to forward slashes so the tag is platform-independent.
# NOTE(review): several branch bodies (e.g. after the "No \textclass" warning
# and the unsaved-document case at 1138) are absent from this listing.
1131 def convert_origin(document):
1132 " Insert the origin tag "
1134 i = find_token(document.header, "\\textclass ", 0)
1136 document.warning("Malformed LyX document: No \\textclass!!")
1138 if document.dir == u'':
1142 if document.systemlyxdir and document.systemlyxdir != u'':
# Normalize both directories to absolute, canonical paths before comparing.
1144 if os.path.isabs(document.dir):
1145 absdir = os.path.normpath(document.dir)
1147 absdir = os.path.normpath(os.path.abspath(document.dir))
1148 if os.path.isabs(document.systemlyxdir):
1149 abssys = os.path.normpath(document.systemlyxdir)
1151 abssys = os.path.normpath(os.path.abspath(document.systemlyxdir))
1152 relpath = os.path.relpath(absdir, abssys)
# A relpath starting with ".." means the document is NOT inside the system
# LyX directory.
1153 if relpath.find(u'..') == 0:
1158 origin = document.dir.replace(u'\\', u'/') + u'/'
1160 origin = os.path.join(u"/systemlyxdir", relpath).replace(u'\\', u'/') + u'/'
1161 document.header[i:i] = ["\\origin " + origin]
# Reversion: drop the "\origin" header tag introduced by convert_origin.
# NOTE(review): the `-1` check guarding the warning/deletion is not visible
# in this listing (numbering jumps 1167->1169->1171).
1164 def revert_origin(document):
1165 " Remove the origin tag "
1167 i = find_token(document.header, "\\origin ", 0)
1169 document.warning("Malformed LyX document: No \\origin!!")
1171 del document.header[i]
# Color names whose "\color" body lines are reverted to \textcolor ERT by
# revert_textcolor below (they require the xcolor package in older exports).
1174 color_names = ["brown", "darkgray", "gray", \
1175 "lightgray", "lime", "olive", "orange", \
1176 "pink", "purple", "teal", "violet"]
# Reversion: rewrite "\color <name>" (for the colors in color_names) as ERT
# "\textcolor{<name>}{...}", closing the group at the next \color or
# \end_layout, and make sure xcolor is loaded in the preamble.
# NOTE(review): the outer scan loop and the i == -1 exit are missing from
# this listing.
1178 def revert_textcolor(document):
1179 " revert new \\textcolor colors to TeX code "
1185 i = find_token(document.body, "\\color ", i)
1189 for color in list(color_names):
1190 if document.body[i] == "\\color " + color:
1191 # register that xcolor must be loaded in the preamble
# The \@ifundefined{rangeHsb} guard avoids loading xcolor twice.
1194 add_to_preamble(document, ["\\@ifundefined{rangeHsb}{\\usepackage{xcolor}}{}"])
1195 # find the next \\color and/or the next \\end_layout
1196 j = find_token(document.body, "\\color", i + 1)
1197 k = find_token(document.body, "\\end_layout", i + 1)
1198 if j == -1 and k != -1:
1201 # first output the closing brace
1203 document.body[k: k] = put_cmd_in_ert("}")
1205 document.body[j: j] = put_cmd_in_ert("}")
1206 # now output the \textcolor command
1207 document.body[i : i + 1] = put_cmd_in_ert("\\textcolor{" + color + "}{")
# Forward conversion: after each "shadowsize" line of a Box inset, insert the
# new LyX 2.2 framecolor/backgroundcolor parameters with default values.
# NOTE(review): the scan loop header and -1 check are absent from this listing.
1211 def convert_colorbox(document):
1212 " adds color settings for boxes "
1216 i = find_token(document.body, "shadowsize", i)
1219 document.body[i+1:i+1] = ['framecolor "black"', 'backgroundcolor "none"']
# Reversion: extract framecolor/backgroundcolor from each Box inset, delete
# the two parameter lines, and — when non-default — wrap the box in ERT
# \fcolorbox{frame}{back}{...} or \colorbox{back}{...}, loading xcolor once.
# NOTE(review): the outer while loop, several -1/continue branches and the
# end-of-inset adjustment body are missing from this listing.
1223 def revert_colorbox(document):
1224 " outputs color settings for boxes as TeX code "
1227 defaultframecolor = "black"
1228 defaultbackcolor = "none"
1230 binset = find_token(document.body, "\\begin_inset Box", binset)
1234 einset = find_end_of_inset(document.body, binset)
1236 document.warning("Malformed LyX document: Can't find end of box inset!")
1240 blay = find_token(document.body, "\\begin_layout", binset, einset)
1242 document.warning("Malformed LyX document: Can't find start of layout!")
1246 # doing it this way, we make sure only to find a framecolor option
1247 frame = find_token(document.body, "framecolor", binset, blay)
# Color values are quoted; extract between the first and last quote.
1252 beg = document.body[frame].find('"')
1253 end = document.body[frame].rfind('"')
1254 framecolor = document.body[frame][beg + 1 : end]
1256 # this should be on the next line
1258 beg = document.body[bgcolor].find('"')
1259 end = document.body[bgcolor].rfind('"')
1260 backcolor = document.body[bgcolor][beg + 1 : end]
1263 del document.body[frame : frame + 2]
1264 # adjust end of inset
# A Boxed box with a non-default frame color is demoted to Frameless, since
# the frame is now drawn by the ERT \fcolorbox instead.
1267 if document.body[binset] == "\\begin_inset Box Boxed" and \
1268 framecolor != defaultframecolor:
1269 document.body[binset] = "\\begin_inset Box Frameless"
1272 # first output the closing brace
1273 if framecolor == defaultframecolor and backcolor == defaultbackcolor:
1277 # we also neeed to load xcolor in the preamble but only once
1278 add_to_preamble(document, ["\\@ifundefined{rangeHsb}{\\usepackage{xcolor}}{}"])
1279 document.body[einset + 1 : einset + 1] = put_cmd_in_ert("}")
1280 if framecolor != defaultframecolor:
1281 document.body[binset:binset] = put_cmd_in_ert("\\fcolorbox{" + framecolor + "}{" + backcolor + "}{")
1283 document.body[binset:binset] = put_cmd_in_ert("\\colorbox{" + backcolor + "}{")
# Reversion: older LyX cannot display \multicolumn in math, so any Formula
# inset containing a non-degenerate \multicolumn (i.e. not \multicolumn{1})
# is converted wholesale to ERT.
# NOTE(review): the outer while loop and the inner search loop over k are
# missing from this listing.
1288 def revert_mathmulticol(document):
1289 " Convert formulas to ERT if they contain multicolumns "
1293 i = find_token(document.body, '\\begin_inset Formula', i)
1296 j = find_end_of_inset(document.body, i)
1298 document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
# Strip the inset header so only the LaTeX code of the formula remains.
1301 lines = document.body[i:j]
1302 lines[0] = lines[0].replace('\\begin_inset Formula', '').lstrip()
1303 code = "\n".join(lines)
1308 n = code.find("\\multicolumn", k)
1309 # no need to convert degenerated multicolumn cells,
1310 # they work in old LyX versions as "math ERT"
1311 if n != -1 and code.find("\\multicolumn{1}", k) != n:
1312 ert = put_cmd_in_ert(code)
1313 document.body[i:j+1] = ert
1319 i = find_end_of_inset(document.body, i)
# Reversion for the "jss" (Journal of Statistical Software) textclass:
# 1. flex insets (Pkg, Proglang, Code, E-mail, URL) become inline ERT macros;
# 2. In_Preamble layouts (Title, Author, Abstract, Keywords, ...) are
#    rendered to LaTeX and moved into the preamble;
# 3. Code/Code Input/Code Output layouts and the Code Chunk inset become
#    ERT \begin{...}/\end{...} environments inside Standard layouts.
# NOTE(review): the while-loop bodies here rely on -1 checks and index
# increments that are absent from this listing; the fixed offsets used when
# splicing (endh - 2, h + 4, endj + 13, ...) address the inset/layout framing
# lines — confirm against the full source.
1324 def revert_jss(document):
1325 " Reverts JSS In_Preamble commands to ERT in preamble "
1327 if document.textclass != "jss":
1336 # at first revert the inset layouts because they can be part of the In_Preamble layouts
1337 while m != -1 or j != -1 or h != -1 or k != -1 or n != -1:
1340 h = find_token(document.body, "\\begin_inset Flex Pkg", h)
1342 endh = find_end_of_inset(document.body, h)
1343 document.body[endh - 2 : endh + 1] = put_cmd_in_ert("}")
1344 document.body[h : h + 4] = put_cmd_in_ert("\\pkg{")
1348 m = find_token(document.body, "\\begin_inset Flex Proglang", m)
1350 endm = find_end_of_inset(document.body, m)
1351 document.body[endm - 2 : endm + 1] = put_cmd_in_ert("}")
1352 document.body[m : m + 4] = put_cmd_in_ert("\\proglang{")
1356 j = find_token(document.body, "\\begin_inset Flex Code", j)
1358 # assure that we are not in a Code Chunk inset
# "Flex Code" ends in 'e', "Flex Code Chunk" does not — cheap disambiguation.
1359 if document.body[j][-1] == "e":
1360 endj = find_end_of_inset(document.body, j)
1361 document.body[endj - 2 : endj + 1] = put_cmd_in_ert("}")
1362 document.body[j : j + 4] = put_cmd_in_ert("\\code{")
1368 k = find_token(document.body, "\\begin_inset Flex E-mail", k)
1370 endk = find_end_of_inset(document.body, k)
1371 document.body[endk - 2 : endk + 1] = put_cmd_in_ert("}")
1372 document.body[k : k + 4] = put_cmd_in_ert("\\email{")
1376 n = find_token(document.body, "\\begin_inset Flex URL", n)
1378 endn = find_end_of_inset(document.body, n)
1379 document.body[endn - 2 : endn + 1] = put_cmd_in_ert("}")
1380 document.body[n : n + 4] = put_cmd_in_ert("\\url{")
1382 # now revert the In_Preamble layouts
# Each layout below is converted to LaTeX with lyx2latex, added to the
# preamble as the corresponding jss macro, then deleted from the body.
1384 i = find_token(document.body, "\\begin_layout Title", 0)
1387 j = find_end_of_layout(document.body, i)
1389 document.warning("Malformed LyX document: Can't find end of Title layout")
1392 content = lyx2latex(document, document.body[i:j + 1])
1393 add_to_preamble(document, ["\\title{" + content + "}"])
1394 del document.body[i:j + 1]
1396 i = find_token(document.body, "\\begin_layout Author", 0)
1399 j = find_end_of_layout(document.body, i)
1401 document.warning("Malformed LyX document: Can't find end of Author layout")
1404 content = lyx2latex(document, document.body[i:j + 1])
1405 add_to_preamble(document, ["\\author{" + content + "}"])
1406 del document.body[i:j + 1]
1408 i = find_token(document.body, "\\begin_layout Plain Author", 0)
1411 j = find_end_of_layout(document.body, i)
1413 document.warning("Malformed LyX document: Can't find end of Plain Author layout")
1416 content = lyx2latex(document, document.body[i:j + 1])
1417 add_to_preamble(document, ["\\Plainauthor{" + content + "}"])
1418 del document.body[i:j + 1]
1420 i = find_token(document.body, "\\begin_layout Plain Title", 0)
1423 j = find_end_of_layout(document.body, i)
1425 document.warning("Malformed LyX document: Can't find end of Plain Title layout")
1428 content = lyx2latex(document, document.body[i:j + 1])
1429 add_to_preamble(document, ["\\Plaintitle{" + content + "}"])
1430 del document.body[i:j + 1]
1432 i = find_token(document.body, "\\begin_layout Short Title", 0)
1435 j = find_end_of_layout(document.body, i)
1437 document.warning("Malformed LyX document: Can't find end of Short Title layout")
1440 content = lyx2latex(document, document.body[i:j + 1])
1441 add_to_preamble(document, ["\\Shorttitle{" + content + "}"])
1442 del document.body[i:j + 1]
1444 i = find_token(document.body, "\\begin_layout Abstract", 0)
1447 j = find_end_of_layout(document.body, i)
1449 document.warning("Malformed LyX document: Can't find end of Abstract layout")
1452 content = lyx2latex(document, document.body[i:j + 1])
1453 add_to_preamble(document, ["\\Abstract{" + content + "}"])
1454 del document.body[i:j + 1]
1456 i = find_token(document.body, "\\begin_layout Keywords", 0)
1459 j = find_end_of_layout(document.body, i)
1461 document.warning("Malformed LyX document: Can't find end of Keywords layout")
1464 content = lyx2latex(document, document.body[i:j + 1])
1465 add_to_preamble(document, ["\\Keywords{" + content + "}"])
1466 del document.body[i:j + 1]
1468 i = find_token(document.body, "\\begin_layout Plain Keywords", 0)
1471 j = find_end_of_layout(document.body, i)
1473 document.warning("Malformed LyX document: Can't find end of Plain Keywords layout")
1476 content = lyx2latex(document, document.body[i:j + 1])
1477 add_to_preamble(document, ["\\Plainkeywords{" + content + "}"])
1478 del document.body[i:j + 1]
1480 i = find_token(document.body, "\\begin_layout Address", 0)
1483 j = find_end_of_layout(document.body, i)
1485 document.warning("Malformed LyX document: Can't find end of Address layout")
1488 content = lyx2latex(document, document.body[i:j + 1])
1489 add_to_preamble(document, ["\\Address{" + content + "}"])
1490 del document.body[i:j + 1]
1491 # finally handle the code layouts
1496 while m != -1 or j != -1 or h != -1 or k != -1:
1499 h = find_token(document.body, "\\begin_inset Flex Code Chunk", h)
1501 endh = find_end_of_inset(document.body, h)
1502 document.body[endh : endh + 1] = put_cmd_in_ert("\\end{CodeChunk}")
1503 document.body[h : h + 3] = put_cmd_in_ert("\\begin{CodeChunk}")
1504 document.body[h - 1 : h] = ["\\begin_layout Standard"]
1508 j = find_token(document.body, "\\begin_layout Code Input", j)
1510 endj = find_end_of_layout(document.body, j)
1511 document.body[endj : endj + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
1512 document.body[endj + 3 : endj + 4] = put_cmd_in_ert("\\end{CodeInput}")
1513 document.body[endj + 13 : endj + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
1514 document.body[j + 1 : j] = ["\\end_layout", "", "\\begin_layout Standard"]
1515 document.body[j : j + 1] = put_cmd_in_ert("\\begin{CodeInput}")
1519 k = find_token(document.body, "\\begin_layout Code Output", k)
1521 endk = find_end_of_layout(document.body, k)
1522 document.body[endk : endk + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
1523 document.body[endk + 3 : endk + 4] = put_cmd_in_ert("\\end{CodeOutput}")
1524 document.body[endk + 13 : endk + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
1525 document.body[k + 1 : k] = ["\\end_layout", "", "\\begin_layout Standard"]
1526 document.body[k : k + 1] = put_cmd_in_ert("\\begin{CodeOutput}")
1530 m = find_token(document.body, "\\begin_layout Code", m)
1532 endm = find_end_of_layout(document.body, m)
1533 document.body[endm : endm + 1] = ["\\end_layout", "", "\\begin_layout Standard"]
1534 document.body[endm + 3 : endm + 4] = put_cmd_in_ert("\\end{Code}")
1535 document.body[endm + 13 : endm + 13] = ["\\end_layout", "", "\\begin_layout Standard"]
1536 document.body[m + 1 : m] = ["\\end_layout", "", "\\begin_layout Standard"]
1537 document.body[m : m + 1] = put_cmd_in_ert("\\begin{Code}")
# Forward conversion: rename cross-reference label prefixes "sub:" to
# "subsec:" in both label insets (name "...") and ref insets (reference "...").
# NOTE(review): the loops' continuation/exit lines and the capture of the
# regex group into `label` are missing from this listing (m.group(1),
# presumably).
1541 def convert_subref(document):
1542 " converts sub: ref prefixes to subref: "
1545 rx = re.compile(r'^name \"sub:(.+)$')
1548 i = find_token(document.body, "\\begin_inset CommandInset label", i)
1551 j = find_end_of_inset(document.body, i)
1553 document.warning("Malformed LyX document: Can't find end of Label inset at line " + str(i))
1557 for p in range(i, j):
1558 m = rx.match(document.body[p])
1561 document.body[p] = "name \"subsec:" + label
1565 rx = re.compile(r'^reference \"sub:(.+)$')
1568 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
1571 j = find_end_of_inset(document.body, i)
1573 document.warning("Malformed LyX document: Can't find end of Ref inset at line " + str(i))
1577 for p in range(i, j):
1578 m = rx.match(document.body[p])
1581 document.body[p] = "reference \"subsec:" + label
# Reversion: the exact mirror of convert_subref — rename label/reference
# prefixes "subsec:" back to "sub:".
# NOTE(review): as in convert_subref, the loop plumbing and the `label`
# extraction from the regex match are not visible in this listing.
1587 def revert_subref(document):
1588 " reverts subref: ref prefixes to sub: "
1591 rx = re.compile(r'^name \"subsec:(.+)$')
1594 i = find_token(document.body, "\\begin_inset CommandInset label", i)
1597 j = find_end_of_inset(document.body, i)
1599 document.warning("Malformed LyX document: Can't find end of Label inset at line " + str(i))
1603 for p in range(i, j):
1604 m = rx.match(document.body[p])
1607 document.body[p] = "name \"sub:" + label
1612 rx = re.compile(r'^reference \"subsec:(.+)$')
1615 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
1618 j = find_end_of_inset(document.body, i)
1620 document.warning("Malformed LyX document: Can't find end of Ref inset at line " + str(i))
1624 for p in range(i, j):
1625 m = rx.match(document.body[p])
1628 document.body[p] = "reference \"sub:" + label
# Forward conversion: delete the obsolete "noUnzip" parameter line from every
# Graphics inset.
# NOTE(review): the outer scan loop and -1 handling are missing from this
# listing.
1633 def convert_nounzip(document):
1634 " remove the noUnzip parameter of graphics insets "
1636 rx = re.compile(r'\s*noUnzip\s*$')
1639 i = find_token(document.body, "\\begin_inset Graphics", i)
1642 j = find_end_of_inset(document.body, i)
1644 document.warning("Malformed LyX document: Can't find end of graphics inset at line " + str(i))
1648 k = find_re(document.body, rx, i, j)
1650 del document.body[k]
# Shared worker for the bounding box of External insets: the "boundingBox"
# line holds four coordinates; forward conversion appends units ("bp",
# presumably — the loop body at 1676/1677 is absent from this listing) while
# reversion strips them back to plain numbers via length_in_bp.
# Parameters: forward — True for convert, False for revert.
1655 def convert_revert_external_bbox(document, forward):
1656 " add units to bounding box of external insets "
1658 rx = re.compile(r'^\s*boundingBox\s+\S+\s+\S+\s+\S+\s+\S+\s*$')
1661 i = find_token(document.body, "\\begin_inset External", i)
1664 j = find_end_of_inset(document.body, i)
1666 document.warning("Malformed LyX document: Can't find end of external inset at line " + str(i))
1669 k = find_re(document.body, rx, i, j)
# tokens[0] is the "boundingBox" keyword; tokens[1..4] are the coordinates.
1673 tokens = document.body[k].split()
1675 for t in range(1, 5):
1678 for t in range(1, 5):
1679 tokens[t] = length_in_bp(tokens[t])
1680 document.body[k] = "\tboundingBox " + tokens[1] + " " + tokens[2] + " " + \
1681 tokens[3] + " " + tokens[4]
# Thin wrapper: forward direction of the shared bounding-box worker above.
1685 def convert_external_bbox(document):
1686 convert_revert_external_bbox(document, True)
# Thin wrapper: reverse direction of the shared bounding-box worker above.
1689 def revert_external_bbox(document):
1690 convert_revert_external_bbox(document, False)
# Reversion (tcolorbox module): Flex Subtitle inset -> ERT \tcbsubtitle,
# with arguments reverted to TeX braces first. The two insertion variants
# presumably correspond to with/without optional argument (the selecting
# condition between 1709 and 1711 is absent from this listing).
1693 def revert_tcolorbox_1(document):
1694 " Reverts the Flex:Subtitle inset of tcolorbox to TeX-code "
1697 i = find_token(document.header, "tcolorbox", i)
1703 flex = find_token(document.body, "\\begin_inset Flex Subtitle", flex)
1706 flexEnd = find_end_of_inset(document.body, flex)
1707 wasOpt = revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, False, True, False)
1708 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, False, False, False)
# Re-locate the inset end: the argument reversion above changed line indices.
1709 flexEnd = find_end_of_inset(document.body, flex)
1711 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\tcbsubtitle")
1713 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\tcbsubtitle{")
1714 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("}")
# Reversion (tcolorbox module): Flex Raster Color Box inset -> ERT
# \begin{tcbraster} ... \end{tcbraster}, with its optional argument reverted
# to TeX first. Scan-loop plumbing is absent from this listing.
1718 def revert_tcolorbox_2(document):
1719 " Reverts the Flex:Raster_Color_Box inset of tcolorbox to TeX-code "
1722 i = find_token(document.header, "tcolorbox", i)
1728 flex = find_token(document.body, "\\begin_inset Flex Raster Color Box", flex)
1731 flexEnd = find_end_of_inset(document.body, flex)
1732 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1733 flexEnd = find_end_of_inset(document.body, flex)
1734 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{tcbraster}")
1735 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("\\end{tcbraster}")
# Reversion (tcolorbox module): Flex Custom Color Box 1 -> ERT
# \begin{cBoxA} ... {}\end{cBoxA}; same pattern as revert_tcolorbox_2 plus a
# second (mandatory) argument reverted at position 2.
1739 def revert_tcolorbox_3(document):
1740 " Reverts the Flex:Custom_Color_Box_1 inset of tcolorbox to TeX-code "
1743 i = find_token(document.header, "tcolorbox", i)
1749 flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 1", flex)
1752 flexEnd = find_end_of_inset(document.body, flex)
1753 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1754 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
1755 flexEnd = find_end_of_inset(document.body, flex)
1756 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxA}")
1757 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxA}")
# Reversion (tcolorbox module): Flex Custom Color Box 2 -> ERT cBoxB.
# Identical structure to revert_tcolorbox_3 with a different environment name.
1761 def revert_tcolorbox_4(document):
1762 " Reverts the Flex:Custom_Color_Box_2 inset of tcolorbox to TeX-code "
1765 i = find_token(document.header, "tcolorbox", i)
1771 flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 2", flex)
1774 flexEnd = find_end_of_inset(document.body, flex)
1775 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1776 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
1777 flexEnd = find_end_of_inset(document.body, flex)
1778 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxB}")
1779 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxB}")
# Reversion (tcolorbox module): Flex Custom Color Box 3 -> ERT cBoxC.
# Identical structure to revert_tcolorbox_3 with a different environment name.
1783 def revert_tcolorbox_5(document):
1784 " Reverts the Flex:Custom_Color_Box_3 inset of tcolorbox to TeX-code "
1787 i = find_token(document.header, "tcolorbox", i)
1793 flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 3", flex)
1796 flexEnd = find_end_of_inset(document.body, flex)
1797 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1798 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
1799 flexEnd = find_end_of_inset(document.body, flex)
1800 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxC}")
1801 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxC}")
# Reversion (tcolorbox module): Flex Custom Color Box 4 -> ERT cBoxD.
# Identical structure to revert_tcolorbox_3 with a different environment name.
1805 def revert_tcolorbox_6(document):
1806 " Reverts the Flex:Custom_Color_Box_4 inset of tcolorbox to TeX-code "
1809 i = find_token(document.header, "tcolorbox", i)
1815 flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 4", flex)
1818 flexEnd = find_end_of_inset(document.body, flex)
1819 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1820 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
1821 flexEnd = find_end_of_inset(document.body, flex)
1822 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxD}")
1823 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxD}")
# Reversion (tcolorbox module): Flex Custom Color Box 5 -> ERT cBoxE.
# Identical structure to revert_tcolorbox_3 with a different environment name.
1827 def revert_tcolorbox_7(document):
1828 " Reverts the Flex:Custom_Color_Box_5 inset of tcolorbox to TeX-code "
1831 i = find_token(document.header, "tcolorbox", i)
1837 flex = find_token(document.body, "\\begin_inset Flex Custom Color Box 5", flex)
1840 flexEnd = find_end_of_inset(document.body, flex)
1841 revert_Argument_to_TeX_brace(document, flex, flexEnd, 1, 1, True, True, False)
1842 revert_Argument_to_TeX_brace(document, flex, 0, 2, 2, True, False, False)
1843 flexEnd = find_end_of_inset(document.body, flex)
1844 document.body[flex + 0 : flex + 4] = put_cmd_in_ert("\\begin{cBoxE}")
1845 document.body[flexEnd + 4 : flexEnd + 7] = put_cmd_in_ert("{}\\end{cBoxE}")
# Reversion (tcolorbox module): the "New Color Box Type" layout becomes a
# Standard layout with ERT \newtcolorbox[...]{...}{...}; arguments 1-4 are
# first reverted to TeX braces, then the remaining mandatory-argument braces
# are spliced in around the surviving insets.
# NOTE(review): the branch choosing between the "\newtcolorbox" and
# "\newtcolorbox{" variants (between 1862 and 1864) and one conditional around
# 1870 are absent from this listing; the repeated \end_inset hops skip over
# the already-reverted ERT insets — confirm against the full source.
1849 def revert_tcolorbox_8(document):
1850 " Reverts the layout New Color Box Type of tcolorbox to TeX-code "
1856 i = find_token(document.body, "\\begin_layout New Color Box Type", i)
1858 j = find_end_of_layout(document.body, i)
1859 wasOpt = revert_Argument_to_TeX_brace(document, i, j, 1, 1, False, True, False)
1860 revert_Argument_to_TeX_brace(document, i, 0, 2, 2, False, False, True)
1861 revert_Argument_to_TeX_brace(document, i, 0, 3, 4, False, True, False)
1862 document.body[i] = document.body[i].replace("\\begin_layout New Color Box Type", "\\begin_layout Standard")
1864 document.body[i + 1 : i + 1] = put_cmd_in_ert("\\newtcolorbox")
1866 document.body[i + 1 : i + 1] = put_cmd_in_ert("\\newtcolorbox{")
1867 k = find_end_of_inset(document.body, j)
1868 k = find_token(document.body, "\\end_inset", k + 1)
1869 k = find_token(document.body, "\\end_inset", k + 1)
1871 k = find_token(document.body, "\\end_inset", k + 1)
1872 document.body[k + 2 : j + 2] = put_cmd_in_ert("{")
1873 j = find_token(document.body, "\\begin_layout Standard", j + 1)
1874 document.body[j - 2 : j - 2] = put_cmd_in_ert("}")
# Reversion for the "moderncv" textclass:
# 1. CVIcons / CVColumnWidth layouts are rendered to LaTeX and moved into the
#    preamble (\moderncvicons, \setlength{\hintscolumnwidth});
# 2. the combined Name layout is split back into the obsolete FirstName /
#    FamilyName layouts, pulling the family name out of Argument inset 1.
# NOTE(review): the k/l bookkeeping inside the nested-inset scan (their
# initialization and increments) and several guard branches are absent from
# this listing; Arg2 is sliced at a fixed offset l + 5 — confirm against the
# full source.
1880 def revert_moderncv_1(document):
1881 " Reverts the new inset of moderncv to TeX-code in preamble "
1883 if document.textclass != "moderncv":
1889 # at first revert the new styles
1891 i = find_token(document.body, "\\begin_layout CVIcons", 0)
1894 j = find_end_of_layout(document.body, i)
1896 document.warning("Malformed LyX document: Can't find end of CVIcons layout")
1899 content = lyx2latex(document, document.body[i:j + 1])
1900 add_to_preamble(document, ["\\moderncvicons{" + content + "}"])
1901 del document.body[i:j + 1]
1903 i = find_token(document.body, "\\begin_layout CVColumnWidth", 0)
1906 j = find_end_of_layout(document.body, i)
1908 document.warning("Malformed LyX document: Can't find end of CVColumnWidth layout")
1911 content = lyx2latex(document, document.body[i:j + 1])
1912 add_to_preamble(document, ["\\setlength{\hintscolumnwidth}{" + content + "}"])
1913 del document.body[i:j + 1]
1914 # now change the new styles to the obsolete ones
1916 i = find_token(document.body, "\\begin_layout Name", 0)
1919 j = find_end_of_layout(document.body, i)
1921 document.warning("Malformed LyX document: Can't find end of Name layout")
1924 lineArg = find_token(document.body, "\\begin_inset Argument 1", i)
1925 if lineArg > j and j != 0:
1928 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
1929 # we have to assure that no other inset is in the Argument
1930 beginInset = find_token(document.body, "\\begin_inset", beginPlain)
1931 endInset = find_token(document.body, "\\end_inset", beginPlain)
# Skip over any nested insets until the matching \end_inset of the Argument.
1934 while beginInset < endInset and beginInset != -1:
1935 beginInset = find_token(document.body, "\\begin_inset", k)
1936 endInset = find_token(document.body, "\\end_inset", l)
1939 Arg2 = document.body[l + 5 : l + 6]
1941 document.body[i : i + 1]= ["\\begin_layout FirstName"]
1942 # delete the Argument inset
1943 del( document.body[endInset - 2 : endInset + 3])
1944 del( document.body[lineArg : beginPlain + 1])
1945 document.body[i + 4 : i + 4]= ["\\begin_layout FamilyName"] + Arg2 + ["\\end_layout"] + [""]
# Reversion for "moderncv": the Phone layout (with Argument 1 carrying the
# phone type) is mapped back to the obsolete Mobile or Fax layout; anything
# that is "mobile" becomes Mobile, otherwise Fax, and the Argument inset is
# removed.
# NOTE(review): as in revert_moderncv_1, the k/l initialization/increments of
# the nested-inset scan and some guard branches are not visible in this
# listing.
1948 def revert_moderncv_2(document):
1949 " Reverts the phone inset of moderncv to the obsoleted mobile or fax "
1951 if document.textclass != "moderncv":
1958 i = find_token(document.body, "\\begin_layout Phone", i)
1961 j = find_end_of_layout(document.body, i)
1963 document.warning("Malformed LyX document: Can't find end of Phone layout")
1966 lineArg = find_token(document.body, "\\begin_inset Argument 1", i)
1967 if lineArg > j and j != 0:
1971 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
1972 # we have to assure that no other inset is in the Argument
1973 beginInset = find_token(document.body, "\\begin_inset", beginPlain)
1974 endInset = find_token(document.body, "\\end_inset", beginPlain)
1977 while beginInset < endInset and beginInset != -1:
1978 beginInset = find_token(document.body, "\\begin_inset", k)
1979 endInset = find_token(document.body, "\\end_inset", l)
# The argument content (the phone type string) sits right after the Plain
# Layout opener.
1982 Arg = document.body[beginPlain + 1 : beginPlain + 2]
1984 if Arg[0] == "mobile":
1985 document.body[i : i + 1]= ["\\begin_layout Mobile"]
1987 document.body[i : i + 1]= ["\\begin_layout Fax"]
1988 # delete the Argument inset
1989 del(document.body[endInset - 2 : endInset + 1])
1990 del(document.body[lineArg : beginPlain + 3])
# Forward conversion for "moderncv": the obsolete Mobile/Fax layouts become
# the new Phone layout with an Argument 1 inset carrying the phone type,
# looked up in phone_dict.
# NOTE(review): phone_dict's opening brace and its "Fax" entry, the scan-loop
# plumbing, and the tail of the inserted-lines list (after line 2029) are not
# visible in this listing.
1994 def convert_moderncv_phone(document):
1995 " Convert the Fax and Mobile inset of moderncv to the new phone inset "
1997 if document.textclass != "moderncv":
2004 "Mobile" : "mobile",
2008 rx = re.compile(r'^\\begin_layout (\S+)$')
2010 # substitute \fax and \mobile by \phone[fax] and \phone[mobile], respectively
2011 i = find_token(document.body, "\\begin_layout", i)
2015 m = rx.match(document.body[i])
2019 if val not in list(phone_dict.keys()):
2022 j = find_end_of_layout(document.body, i)
2024 document.warning("Malformed LyX document: Can't find end of Mobile layout")
2028 document.body[i : i + 1] = ["\\begin_layout Phone", "\\begin_inset Argument 1", "status open", "",
2029 "\\begin_layout Plain Layout", phone_dict[val], "\\end_layout", "",
# Forward conversion for "moderncv": merge the obsolete FirstName and
# FamilyName (LastName) layouts into the single new Name layout. The first
# name, when present, goes into an Argument 1 inset; the family name becomes
# the layout body. The replacement region spans from the earlier of the two
# old layouts to the later one.
# NOTE(review): the -1 checks after each find and the loop wrapper are not
# visible in this listing; the fne > lne branch comment says "before" for
# both orderings — the second comment presumably means "after".
2033 def convert_moderncv_name(document):
2034 " Convert the FirstName and LastName layout of moderncv to the general Name layout "
2036 if document.textclass != "moderncv":
2039 fnb = 0 # Begin of FirstName inset
2040 fne = 0 # End of FirstName inset
2041 lnb = 0 # Begin of LastName (FamilyName) inset
2042 lne = 0 # End of LastName (FamilyName) inset
2043 nb = 0 # Begin of substituting Name inset
2044 ne = 0 # End of substituting Name inset
2045 FirstName = [] # FirstName content
2046 FamilyName = [] # LastName content
2050 fnb = find_token(document.body, "\\begin_layout FirstName", fnb)
2052 fne = find_end_of_layout(document.body, fnb)
2054 document.warning("Malformed LyX document: Can't find end of FirstName layout")
2056 FirstName = document.body[fnb + 1 : fne]
2058 lnb = find_token(document.body, "\\begin_layout FamilyName", lnb)
2060 lne = find_end_of_layout(document.body, lnb)
2062 document.warning("Malformed LyX document: Can't find end of FamilyName layout")
2064 FamilyName = document.body[lnb + 1 : lne]
2065 # Determine the region for the substituting Name layout
2066 if fnb == -1 and lnb == -1: # Neither FirstName nor FamilyName exists -> Do nothing
2068 elif fnb == -1: # Only FamilyName exists -> New Name insets replaces that
2071 elif lnb == -1: # Only FirstName exists -> New Name insets replaces that
2074 elif fne > lne: # FirstName position before FamilyName -> New Name insets spans
2075 nb = lnb # from FamilyName begin
2076 ne = fne # to FirstName end
2077 else: # FirstName position before FamilyName -> New Name insets spans
2078 nb = fnb # from FirstName begin
2079 ne = lne # to FamilyName end
2081 # Insert the substituting layout now. If FirstName exists, use an otpional argument.
2083 document.body[nb : ne + 1] = ["\\begin_layout Name"] + FamilyName + ["\\end_layout", ""]
2085 document.body[nb : ne + 1] = ["\\begin_layout Name", "\\begin_inset Argument 1", "status open", "",
2086 "\\begin_layout Plain Layout"] + FirstName + ["\\end_layout", "",
2087 "\\end_inset", ""] + FamilyName + ["\\end_layout", ""]
# Reversion for the "achemso" textclass: each Flex Latin inset is rendered to
# LaTeX and replaced by ERT \latin{...}.
# NOTE(review): the scan-loop wrapper and the branch structure around the
# j == -1 case are not visible in this listing.
2090 def revert_achemso(document):
2091 " Reverts the flex inset Latin to TeX code "
2093 if document.textclass != "achemso":
2098 i = find_token(document.body, "\\begin_inset Flex Latin", i)
2100 j = find_end_of_inset(document.body, i)
2104 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2105 endPlain = find_end_of_layout(document.body, beginPlain)
2106 content = lyx2latex(document, document.body[beginPlain : endPlain])
2107 document.body[i:j + 1] = put_cmd_in_ert("\\latin{" + content + "}")
2109 document.warning("Malformed LyX document: Can't find end of flex inset Latin")
# Parallel tables used by convert_fontsettings/revert_fontsettings below:
# for each header key in `fontsettings`, `fontdefaults` holds its default
# value and `fontquotes` whether the value is stored quoted in the header.
2114 fontsettings = ["\\font_roman", "\\font_sans", "\\font_typewriter", "\\font_math", \
2115 "\\font_sf_scale", "\\font_tt_scale"]
2116 fontdefaults = ["default", "default", "default", "auto", "100", "100"]
2117 fontquotes = [True, True, True, True, False, False]
# Forward conversion: each font header setting becomes a pair "<tex value>
# <non-tex value>"; the existing value keeps its slot depending on
# \use_non_tex_fonts, the other slot gets the default. Quoted vs unquoted
# form follows fontquotes.
# NOTE(review): the enumerate/index bookkeeping providing `j` and the branch
# selecting quoted vs plain output (between 2143 and 2145) are not visible in
# this listing.
2119 def convert_fontsettings(document):
2120 " Duplicate font settings "
2122 i = find_token(document.header, "\\use_non_tex_fonts ", 0)
2124 document.warning("Malformed LyX document: No \\use_non_tex_fonts!")
2125 use_non_tex_fonts = "false"
2127 use_non_tex_fonts = get_value(document.header, "\\use_non_tex_fonts", i)
2129 for f in fontsettings:
2130 i = find_token(document.header, f + " ", 0)
2132 document.warning("Malformed LyX document: No " + f + "!")
2134 # note that with i = -1, this will insert at the end
2136 value = fontdefaults[j]
2138 value = document.header[i][len(f):].strip()
2140 if use_non_tex_fonts == "true":
2141 document.header[i:i+1] = [f + ' "' + fontdefaults[j] + '" "' + value + '"']
2143 document.header[i:i+1] = [f + ' "' + value + '" "' + fontdefaults[j] + '"']
2145 if use_non_tex_fonts == "true":
2146 document.header[i:i+1] = [f + ' ' + fontdefaults[j] + ' ' + value]
2148 document.header[i:i+1] = [f + ' ' + value + ' ' + fontdefaults[j]]
# Reversion: collapse each duplicated font setting pair back to the single
# value selected by \use_non_tex_fonts. Quoted values are parsed by locating
# the four quote characters; unquoted values via split().
# NOTE(review): the index bookkeeping for fontquotes selection and the branch
# between the quoted and unquoted paths are not visible in this listing.
2152 def revert_fontsettings(document):
2153 " Merge font settings "
2155 i = find_token(document.header, "\\use_non_tex_fonts ", 0)
2157 document.warning("Malformed LyX document: No \\use_non_tex_fonts!")
2158 use_non_tex_fonts = "false"
2160 use_non_tex_fonts = get_value(document.header, "\\use_non_tex_fonts", i)
2162 for f in fontsettings:
2163 i = find_token(document.header, f + " ", 0)
2165 document.warning("Malformed LyX document: No " + f + "!")
2168 line = get_value(document.header, f, i)
# q1..q4 delimit the two quoted values: "tex" (q1..q2) and "non-tex" (q3..q4).
2171 q2 = line.find('"', q1+1)
2172 q3 = line.find('"', q2+1)
2173 q4 = line.find('"', q3+1)
2174 if q1 == -1 or q2 == -1 or q3 == -1 or q4 == -1:
2175 document.warning("Malformed LyX document: Missing quotes!")
2178 if use_non_tex_fonts == "true":
2179 document.header[i:i+1] = [f + ' ' + line[q3+1:q4]]
2181 document.header[i:i+1] = [f + ' ' + line[q1+1:q2]]
2183 if use_non_tex_fonts == "true":
2184 document.header[i:i+1] = [f + ' ' + line.split()[1]]
2186 document.header[i:i+1] = [f + ' ' + line.split()[0]]
# Reversion: the Solution/Solution* environments provided by the theorems
# modules are rewritten as ERT \begin{sol}.../\end{sol} inside Standard
# layouts, with the corresponding \newtheorem(*) declaration and a
# \solutionname fallback added to the preamble. Consecutive Solution
# paragraphs share one environment.
# NOTE(review): the module-selection result, the LaTeXName assignment, and the
# "consecutive" checks preceding 2231 are not visible in this listing.
2190 def revert_solution(document):
2191 " Reverts the solution environment of the theorem module to TeX code "
2193 # Do we use one of the modules that provides Solution?
2195 mods = document.get_module_list()
2197 if mod == "theorems-std" or mod == "theorems-bytype" \
2198 or mod == "theorems-ams" or mod == "theorems-ams-bytype":
2208 i = find_token(document.body, "\\begin_layout Solution", i)
2212 is_starred = document.body[i].startswith("\\begin_layout Solution*")
2213 if is_starred == True:
2215 LyXName = "Solution*"
2216 theoremName = "newtheorem*"
2219 LyXName = "Solution"
2220 theoremName = "newtheorem"
2222 j = find_end_of_layout(document.body, i)
2224 document.warning("Malformed LyX document: Can't find end of " + LyXName + " layout")
2228 # if this is not a consecutive env, add start command
2231 begcmd = put_cmd_in_ert("\\begin{%s}" % (LaTeXName))
2233 # has this a consecutive theorem of same type?
2234 consecutive = document.body[j + 2] == "\\begin_layout " + LyXName
2236 # if this is not followed by a consecutive env, add end command
2238 document.body[j : j + 1] = put_cmd_in_ert("\\end{%s}" % (LaTeXName)) + ["\\end_layout"]
2240 document.body[i : i + 1] = ["\\begin_layout Standard", ""] + begcmd
2242 add_to_preamble(document, "\\theoremstyle{definition}")
# Starred and by-type variants are unnumbered / independently numbered; the
# plain std/ams variants share the [thm] counter.
2243 if is_starred or mod == "theorems-bytype" or mod == "theorems-ams-bytype":
2244 add_to_preamble(document, "\\%s{%s}{\\protect\\solutionname}" % \
2245 (theoremName, LaTeXName))
2246 else: # mod == "theorems-std" or mod == "theorems-ams" and not is_starred
2247 add_to_preamble(document, "\\%s{%s}[thm]{\\protect\\solutionname}" % \
2248 (theoremName, LaTeXName))
2250 add_to_preamble(document, "\\providecommand{\solutionname}{Solution}")
def revert_verbatim_star(document):
    """ Revert starred verbatim (verbatim*) insets to TeX code.

    The actual work is identical to the 2.1 verbatim reversion, so this
    simply delegates to it with the 'starred' flag set.
    """
    # Imported lazily so lyx_2_1 is only loaded when a reversion is needed.
    from lyx_2_1 import revert_verbatim as _revert_verbatim
    _revert_verbatim(document, True)
def convert_save_props(document):
    """ Add the \\save_transient_properties parameter to the document header.

    The parameter is inserted immediately after \\begin_header with the
    default value "true". If \\begin_header cannot be found the document
    is malformed; warn and leave the header untouched.
    """
    i = find_token(document.header, '\\begin_header', 0)
    if i == -1:
        # Without this guard the warning would be unconditional and the
        # insert below would run with an invalid index (i == -1).
        document.warning("Malformed lyx document: Missing '\\begin_header'.")
        return
    document.header.insert(i + 1, '\\save_transient_properties true')
def revert_save_props(document):
    """ Remove the \\save_transient_properties parameter from the header. """
    i = find_token(document.header, "\\save_transient_properties", 0)
    if i == -1:
        # Parameter not present: nothing to revert. Guard also prevents
        # 'del document.header[-1]' from deleting the wrong (last) line.
        return
    del document.header[i]
def convert_info_tabular_feature(document):
    """ Rename the LFUN 'inset-modify tabular' to 'tabular-feature' in
    shortcut/icon Info insets. """
    def f(arg):
        # Plain textual substitution inside the inset argument.
        return arg.replace("inset-modify tabular", "tabular-feature")
    convert_info_insets(document, "shortcut(s)?|icon", f)
def revert_info_tabular_feature(document):
    """ Rename the LFUN 'tabular-feature' back to 'inset-modify tabular'
    in shortcut/icon Info insets (inverse of convert_info_tabular_feature). """
    def f(arg):
        # Plain textual substitution inside the inset argument.
        return arg.replace("tabular-feature", "inset-modify tabular")
    convert_info_insets(document, "shortcut(s)?|icon", f)
2292 supported_versions = ["2.2.0", "2.2"]
2294 [475, [convert_separator]],
2295 # nothing to do for 476: We consider it a bug that older versions
2296 # did not load amsmath automatically for these commands, and do not
2297 # want to hardcode amsmath off.
2303 [481, [convert_dashes]],
2304 [482, [convert_phrases]],
2305 [483, [convert_specialchar]],
2310 [488, [convert_newgloss]],
2311 [489, [convert_BoxFeatures]],
2312 [490, [convert_origin]],
2314 [492, [convert_colorbox]],
2317 [495, [convert_subref]],
2318 [496, [convert_nounzip]],
2319 [497, [convert_external_bbox]],
2321 [499, [convert_moderncv_phone, convert_moderncv_name]],
2323 [501, [convert_fontsettings]],
2326 [504, [convert_save_props]],
2328 [506, [convert_info_tabular_feature]],
2329 [507, [convert_longtable_label]],
2330 [508, [convert_parbreak]]
2334 [507, [revert_parbreak]],
2335 [506, [revert_longtable_label]],
2336 [505, [revert_info_tabular_feature]],
2338 [503, [revert_save_props]],
2339 [502, [revert_verbatim_star]],
2340 [501, [revert_solution]],
2341 [500, [revert_fontsettings]],
2342 [499, [revert_achemso]],
2343 [498, [revert_moderncv_1, revert_moderncv_2]],
2344 [497, [revert_tcolorbox_1, revert_tcolorbox_2,
2345 revert_tcolorbox_3, revert_tcolorbox_4, revert_tcolorbox_5,
2346 revert_tcolorbox_6, revert_tcolorbox_7, revert_tcolorbox_8]],
2347 [496, [revert_external_bbox]],
2348 [495, []], # nothing to do since the noUnzip parameter was optional
2349 [494, [revert_subref]],
2350 [493, [revert_jss]],
2351 [492, [revert_mathmulticol]],
2352 [491, [revert_colorbox]],
2353 [490, [revert_textcolor]],
2354 [489, [revert_origin]],
2355 [488, [revert_BoxFeatures]],
2356 [487, [revert_newgloss, revert_glossgroup]],
2357 [486, [revert_forest]],
2358 [485, [revert_ex_itemargs]],
2359 [484, [revert_sigplan_doi]],
2360 [483, [revert_georgian]],
2361 [482, [revert_specialchar]],
2362 [481, [revert_phrases]],
2363 [480, [revert_dashes]],
2364 [479, [revert_question_env]],
2365 [478, [revert_beamer_lemma]],
2366 [477, [revert_xarrow]],
2367 [476, [revert_swissgerman]],
2368 [475, [revert_smash]],
2369 [474, [revert_separator]]
2373 if __name__ == "__main__":