# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

""" Convert files to the file format generated by lyx 2.1"""
import re

# Uncomment only what you need to import, please.

from parser_tools import del_token, find_token, find_token_backwards, find_end_of, \
    find_end_of_inset, find_end_of_layout, find_re, get_option_value, get_value, \
    get_quoted_value, set_option_value

#from parser_tools import find_token, find_end_of, find_tokens, \
#find_token_exact, find_end_of_inset, find_end_of_layout, \
#is_in_inset, del_token, check_token

from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert

#from lyx2lyx_tools import insert_to_preamble, \
# lyx2latex, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
####################################################################
# Private helper functions

#def remove_option(lines, m, option):
#    ''' removes option from line m. returns whether we did anything '''
#    l = lines[m].find(option)
#    val = lines[m][l:].split('"')[1]
#    lines[m] = lines[m][:l - 1] + lines[m][l+len(option + '="' + val + '"'):]

###############################################################################
###
### Conversion and reversion routines
###
###############################################################################
def revert_visible_space(document):
    "Revert InsetSpace visible into its ERT counterpart"
    i = 0
    while True:
        # find_token returns -1 once no further visible-space inset exists
        i = find_token(document.body, "\\begin_inset space \\textvisiblespace{}", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        # replace the whole inset (inclusive of its \end_inset line) with ERT
        subst = put_cmd_in_ert("\\textvisiblespace{}")
        document.body[i:end + 1] = subst
def convert_undertilde(document):
    " Load undertilde automatically "
    # Anchor the new \use_undertilde header line after whichever of the
    # older \use_* settings is present.
    i = find_token(document.header, "\\use_mathdots", 0)
    if i == -1:
        i = find_token(document.header, "\\use_mhchem", 0)
    if i == -1:
        i = find_token(document.header, "\\use_esint", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_mathdots.")
        return
    j = find_token(document.preamble, "\\usepackage{undertilde}", 0)
    if j == -1:
        # package not loaded manually: record "do not load" (0)
        document.header.insert(i + 1, "\\use_undertilde 0")
    else:
        # package loaded in the user preamble: record "always load" (2)
        # and drop the now-redundant preamble line
        document.header.insert(i + 1, "\\use_undertilde 2")
        del document.preamble[j]
def revert_undertilde(document):
    " Load undertilde if used in the document "
    undertilde = find_token(document.header, "\\use_undertilde", 0)
    if undertilde == -1:
        document.warning("No \\use_undertilde line. Assuming auto.")
    else:
        val = get_value(document.header, "\\use_undertilde", undertilde)
        del document.header[undertilde]
        try:
            usetilde = int(val)
        except ValueError:
            document.warning("Invalid \\use_undertilde value: " + val + ". Assuming auto.")
            # probably usedots has not been changed, but be safe.
            usetilde = 1
        if usetilde == 0:
            # never load the package
            return
        if usetilde == 2:
            # always load the package
            add_to_preamble(document, ["\\usepackage{undertilde}"])
            return
    # so we are in the auto case. we want to load undertilde if \utilde is used.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        code = "\n".join(document.body[i:j])
        if code.find("\\utilde") != -1:
            # load only if the command is not defined by something else
            add_to_preamble(document, ["\\@ifundefined{utilde}{\\usepackage{undertilde}}"])
            return
        i = j
def revert_negative_space(document):
    "Revert InsetSpace negmedspace and negthickspace into its TeX-code counterpart"
    i = 0
    j = 0
    reverted = False
    while True:
        i = find_token(document.body, "\\begin_inset space \\negmedspace{}", i)
        if i == -1:
            j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
            if j == -1:
                # load amsmath in the preamble if not already loaded if we are at the end of checking
                if reverted:
                    i = find_token(document.header, "\\use_amsmath 2", 0)
                    if i == -1:
                        add_to_preamble(document, ["\\@ifundefined{negthickspace}{\\usepackage{amsmath}}"])
                return
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        subst = put_cmd_in_ert("\\negmedspace{}")
        document.body[i:end + 1] = subst
        j = find_token(document.body, "\\begin_inset space \\negthickspace{}", j)
        if j == -1:
            return
        end = find_end_of_inset(document.body, j)
        subst = put_cmd_in_ert("\\negthickspace{}")
        document.body[j:end + 1] = subst
        reverted = True
def revert_math_spaces(document):
    "Revert formulas with protected custom space and protected hfills to TeX-code"
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            return
        # only formulas containing a protected space need reverting
        j = document.body[i].find("\\hspace*")
        if j != -1:
            end = find_end_of_inset(document.body, i)
            # [21:] strips the "\begin_inset Formula " prefix, leaving the math
            subst = put_cmd_in_ert(document.body[i][21:])
            document.body[i:end + 1] = subst
        i += 1
def convert_japanese_encodings(document):
    " Rename the japanese encodings to names understood by platex "
    jap_enc_dict = {
        "EUC-JP-pLaTeX": "euc",
        "JIS-pLaTeX":    "jis",  # NOTE(review): entry missing in mangled source; restored from the reverse map below
        "SJIS-pLaTeX":   "sjis"
    }
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding", i)
    if val in jap_enc_dict.keys():
        document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
def revert_japanese_encodings(document):
    " Revert the japanese encodings name changes "
    jap_enc_dict = {
        "euc":  "EUC-JP-pLaTeX",
        "jis":  "JIS-pLaTeX",  # NOTE(review): entry missing in mangled source; restored as inverse of the convert map
        "sjis": "SJIS-pLaTeX"
    }
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding", i)
    if val in jap_enc_dict.keys():
        document.header[i] = "\\inputencoding %s" % jap_enc_dict[val]
def revert_justification(document):
    " Revert the \\justification buffer param"
    # del_token removes the line and returns False when it was absent
    if not del_token(document.header, '\\justification', 0):
        document.warning("Malformed LyX document: Missing \\justification.")
def revert_australian(document):
    "Set English language variants Australian and Newzealand to English"

    if document.language == "australian" or document.language == "newzealand":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language english"
    j = 0
    while True:
        # replace in-text language switches for both variants
        j = find_token(document.body, "\\lang australian", j)
        if j == -1:
            j = find_token(document.body, "\\lang newzealand", 0)
            if j == -1:
                return
            document.body[j] = document.body[j].replace("\\lang newzealand", "\\lang english")
        else:
            document.body[j] = document.body[j].replace("\\lang australian", "\\lang english")
        j += 1
def convert_biblio_style(document):
    "Add a sensible default for \\biblio_style based on the citation engine."
    i = find_token(document.header, "\\cite_engine", 0)
    if i != -1:
        # "natbib_authoryear" etc. -> "natbib"
        engine = get_value(document.header, "\\cite_engine", i).split("_")[0]
        style = {"basic": "plain", "natbib": "plainnat", "jurabib": "jurabib"}
        document.header.insert(i + 1, "\\biblio_style " + style[engine])
def revert_biblio_style(document):
    "BibTeX insets with default option use the style defined by \\biblio_style."
    i = find_token(document.header, "\\biblio_style", 0)
    if i == -1:
        document.warning("No \\biblio_style line. Nothing to do.")
        return

    default_style = get_value(document.header, "\\biblio_style", i)
    del document.header[i]

    # We are looking for bibtex insets having the default option
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of bibtex inset at line " + str(i))
            i += 1
            continue
        k = find_token(document.body, "options", i, j)
        if k != -1:
            options = get_quoted_value(document.body, "options", k)
            if "default" in options.split(","):
                document.body[k] = 'options "%s"' \
                    % options.replace("default", default_style)
        i = j
def handle_longtable_captions(document, forward):
    """Add (forward=True) or remove (forward=False) the endfirsthead/endhead/
    endfoot/endlastfoot flags on longtable caption rows."""
    begin_table = 0
    while True:
        begin_table = find_token(document.body, '<lyxtabular version=', begin_table)
        if begin_table == -1:
            break
        end_table = find_end_of(document.body, begin_table, '<lyxtabular', '</lyxtabular>')
        if end_table == -1:
            document.warning("Malformed LyX document: Could not find end of table.")
            begin_table += 1
            continue
        fline = find_token(document.body, "<features", begin_table, end_table)
        if fline == -1:
            document.warning("Can't find features for inset at line " + str(begin_table))
            begin_table += 1
            continue
        p = document.body[fline].find("islongtable")
        if p == -1:
            # not a longtable: nothing to do here
            begin_table += 1
            continue
        numrows = get_option_value(document.body[begin_table], "rows")
        try:
            numrows = int(numrows)
        except ValueError:
            document.warning(document.body[begin_table])
            document.warning("Unable to determine rows!")
            begin_table = end_table
            continue
        begin_row = begin_table
        for row in range(numrows):
            begin_row = find_token(document.body, '<row', begin_row, end_table)
            if begin_row == -1:
                document.warning("Can't find row " + str(row + 1))
                break
            end_row = find_end_of(document.body, begin_row, '<row', '</row>')
            if end_row == -1:
                document.warning("Can't find end of row " + str(row + 1))
                break
            if forward:
                # a plain caption row becomes a caption+endfirsthead row
                if (get_option_value(document.body[begin_row], 'caption') == 'true' and
                    get_option_value(document.body[begin_row], 'endfirsthead') != 'true' and
                    get_option_value(document.body[begin_row], 'endhead') != 'true' and
                    get_option_value(document.body[begin_row], 'endfoot') != 'true' and
                    get_option_value(document.body[begin_row], 'endlastfoot') != 'true'):
                    # smuggle the extra attribute in through the value string
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'caption', 'true", endfirsthead="true')
            elif get_option_value(document.body[begin_row], 'caption') == 'true':
                # backward: drop all head/foot flags from caption rows
                if get_option_value(document.body[begin_row], 'endfirsthead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfirsthead', 'false')
                if get_option_value(document.body[begin_row], 'endhead') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endhead', 'false')
                if get_option_value(document.body[begin_row], 'endfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endfoot', 'false')
                if get_option_value(document.body[begin_row], 'endlastfoot') == 'true':
                    document.body[begin_row] = set_option_value(document.body[begin_row], 'endlastfoot', 'false')
            begin_row = end_row
        # since there could be a tabular inside this one, we
        # cannot jump to end.
        begin_table += 1
def convert_longtable_captions(document):
    "Add a firsthead flag to caption rows"
    handle_longtable_captions(document, True)
def revert_longtable_captions(document):
    "remove head/foot flag from caption rows"
    handle_longtable_captions(document, False)
def convert_use_packages(document):
    "use_xxx yyy => use_package xxx yyy"
    packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
    for p in packages:
        i = find_token(document.header, "\\use_%s" % p, 0)
        if i != -1:
            value = get_value(document.header, "\\use_%s" % p, i)
            document.header[i] = "\\use_package %s %s" % (p, value)
def revert_use_packages(document):
    "use_package xxx yyy => use_xxx yyy"
    packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
    # the order is arbitrary for the use_package version, and not all packages need to be given.
    # Ensure a complete list and correct order (important for older LyX versions and especially lyx2lyx)
    # NOTE(review): insertion anchor reconstructed from mangled source — verify against upstream lyx_2_1.py
    j = find_token(document.header, "\\use_package", 0)
    if j == -1:
        document.warning("Malformed LyX document: No \\use_package lines found.")
        return
    for p in packages:
        regexp = re.compile(r'(\\use_package\s+%s)' % p)
        i = find_re(document.header, regexp, j)
        value = "1" # default is auto
        if i != -1:
            value = get_value(document.header, "\\use_package %s" % p, i).split()[1]
            del document.header[i]
        document.header.insert(j, "\\use_%s %s" % (p, value))
        j += 1
def convert_use_mathtools(document):
    "insert use_package mathtools"
    i = find_token(document.header, "\\use_package", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package.")
        return
    j = find_token(document.preamble, "\\usepackage{mathtools}", 0)
    if j == -1:
        # package not loaded manually: "do not load" (0)
        document.header.insert(i + 1, "\\use_package mathtools 0")
    else:
        # package loaded in the user preamble: "always load" (2)
        document.header.insert(i + 1, "\\use_package mathtools 2")
        del document.preamble[j]
def revert_use_mathtools(document):
    "remove use_package mathtools"
    regexp = re.compile(r'(\\use_package\s+mathtools)')
    i = find_re(document.header, regexp, 0)
    value = "1" # default is auto
    if i != -1:
        value = get_value(document.header, "\\use_package", i).split()[1]
        del document.header[i]
    if value == "2": # on
        add_to_preamble(document, ["\\usepackage{mathtools}"])
    elif value == "1": # auto
        # load the package only if one of its commands is actually used
        commands = ["mathclap", "mathllap", "mathrlap", \
                    "lgathered", "rgathered", "vcentcolon", "dblcolon", \
                    "coloneqq", "Coloneqq", "coloneq", "Coloneq", "eqqcolon", \
                    "Eqqcolon", "eqcolon", "Eqcolon", "colonapprox", \
                    "Colonapprox", "colonsim", "Colonsim"]
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Formula', i)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
                i += 1
                continue
            code = "\n".join(document.body[i:j])
            for c in commands:
                if code.find("\\%s" % c) != -1:
                    add_to_preamble(document, ["\\usepackage{mathtools}"])
                    return
            i = j
def convert_cite_engine_type(document):
    "Determine the \\cite_engine_type from the citation engine."
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        return
    engine = get_value(document.header, "\\cite_engine", i)
    if "_" in engine:
        # e.g. "natbib_authoryear" -> engine "natbib", type "authoryear"
        engine, engine_type = engine.split("_")
    else:
        engine_type = {"basic": "numerical", "jurabib": "authoryear"}[engine]
    document.header[i] = "\\cite_engine " + engine
    document.header.insert(i + 1, "\\cite_engine_type " + engine_type)
def revert_cite_engine_type(document):
    "Natbib had the type appended with an underscore."
    engine_type = "numerical"
    i = find_token(document.header, "\\cite_engine_type", 0)
    if i == -1:
        document.warning("No \\cite_engine_type line. Assuming numerical.")
    else:
        engine_type = get_value(document.header, "\\cite_engine_type", i)
        del document.header[i]

    # We are looking for the natbib citation engine
    i = find_token(document.header, "\\cite_engine natbib", 0)
    if i == -1:
        return
    document.header[i] = "\\cite_engine natbib_" + engine_type
def revert_cancel(document):
    "add cancel to the preamble if necessary"
    commands = ["cancelto", "cancel", "bcancel", "xcancel"]
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Formula', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(i))
            i += 1
            continue
        code = "\n".join(document.body[i:j])
        for c in commands:
            if code.find("\\%s" % c) != -1:
                # one formula using any cancel command is enough
                add_to_preamble(document, ["\\usepackage{cancel}"])
                return
        i = j
# NOTE(review): this function is mangled — indentation is stripped, stray
# line-number prefixes remain, and several lines of the subst_end/subst_begin
# ERT sequences and the loop scaffolding (while loops, -1 checks, `consecutive`
# flag initialisation) are missing. The visible code reverts each
# "\begin_layout Verbatim" paragraph to a raw LaTeX verbatim environment in
# ERT, first rewriting embedded Newline insets into plain-layout breaks, and
# merging consecutive Verbatim layouts into a single verbatim block.
# Restore from upstream lyx2lyx/lyx_2_1.py before use — do not hand-repair.
472 def revert_verbatim(document):
473 " Revert verbatim einvironments completely to TeX-code. "
# subst_end: lines closing the verbatim ERT; several entries missing here.
476 subst_end = ['\end_layout', '', '\\begin_layout Plain Layout',
478 '\\begin_layout Plain Layout', '', '',
481 '\\end_layout', '', '\\end_inset',
482 '', '', '\\end_layout']
# subst_begin: lines opening the verbatim ERT; entries missing here too.
483 subst_begin = ['\\begin_layout Standard', '\\noindent',
484 '\\begin_inset ERT', 'status collapsed', '',
485 '\\begin_layout Plain Layout', '', '', '\\backslash',
487 '\\end_layout', '', '\\begin_layout Plain Layout', '']
489 i = find_token(document.body, "\\begin_layout Verbatim", i)
492 j = find_end_of_layout(document.body, i)
494 document.warning("Malformed lyx document: Can't find end of Verbatim layout")
497 # delete all line breaks insets (there are no other insets)
500 n = find_token(document.body, "\\begin_inset Newline newline", l)
502 n = find_token(document.body, "\\begin_inset Newline linebreak", l)
505 m = find_end_of_inset(document.body, n)
506 del(document.body[m:m+1])
507 document.body[n:n+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
510 # consecutive verbatim environments need to be connected
511 k = find_token(document.body, "\\begin_layout Verbatim", j)
512 if k == j + 2 and consecutive == False:
514 document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
515 document.body[i:i+1] = subst_begin
517 if k == j + 2 and consecutive == True:
518 document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
519 del(document.body[i:i+1])
521 if k != j + 2 and consecutive == True:
522 document.body[j:j+1] = subst_end
523 # the next paragraph must not be indented
524 document.body[j+19:j+19] = ['\\noindent']
525 del(document.body[i:i+1])
529 document.body[j:j+1] = subst_end
530 # the next paragraph must not be indented
531 document.body[j+19:j+19] = ['\\noindent']
532 document.body[i:i+1] = subst_begin
def revert_tipa(document):
    " Revert native TIPA insets to mathed or ERT. "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset IPA", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of IPA inset")
            i += 1
            continue
        n = find_token(document.body, "\\begin_layout", i, j)
        if n == -1:
            document.warning("Malformed lyx document: IPA inset has no embedded layout")
            i += 1
            continue
        m = find_end_of_layout(document.body, n)
        if m == -1:
            document.warning("Malformed lyx document: Can't find end of embedded layout")
            i += 1
            continue
        content = document.body[n+1:m]
        # a second layout (or multi-line content) means multiple paragraphs
        p = find_token(document.body, "\\begin_layout", m, j)
        if p != -1 or len(content) > 1:
            content = document.body[i+1:j]
            # IPA insets with multiple pars need to be wrapped by \begin{IPA}...\end{IPA}
            document.body[i:j+1] = ['\\end_layout', '', '\\begin_layout Standard'] + put_cmd_in_ert("\\begin{IPA}") + ['\\end_layout'] + content + ['\\begin_layout Standard'] + put_cmd_in_ert("\\end{IPA}")
            add_to_preamble(document, ["\\usepackage{tipa,tipx}"])
        else:
            # single-par IPA insets can be reverted to mathed
            document.body[i:j+1] = ["\\begin_inset Formula $\\text{\\textipa{" + content[0] + "}}$", "\\end_inset"]
        i += 1
573 def revert_cell_rotation(document):
574 "Revert cell rotations to TeX-code"
576 load_rotating = False
580 # first, let's find out if we need to do anything
581 i = find_token(document.body, '<cell ', i)
584 j = document.body[i].find('rotate="')
586 k = document.body[i].find('"', j + 8)
587 value = document.body[i][j + 8 : k]
589 rgx = re.compile(r' rotate="[^"]+?"')
590 # remove rotate option
591 document.body[i] = rgx.sub('', document.body[i])
593 rgx = re.compile(r' rotate="[^"]+?"')
594 document.body[i] = rgx.sub('rotate="true"', document.body[i])
596 rgx = re.compile(r' rotate="[^"]+?"')
598 # remove rotate option
599 document.body[i] = rgx.sub('', document.body[i])
601 document.body[i + 5 : i + 5] = \
602 put_cmd_in_ert("\\end{turn}")
603 document.body[i + 4 : i + 4] = \
604 put_cmd_in_ert("\\begin{turn}{" + value + "}")
610 add_to_preamble(document, ["\\@ifundefined{turnbox}{\usepackage{rotating}}{}"])
def convert_cell_rotation(document):
    'Convert cell rotation statements from "true" to "90"'
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i)
        if i == -1:
            return
        j = document.body[i].find('rotate="true"')
        if j != -1:
            rgx = re.compile(r'rotate="[^"]+?"')
            # convert "true" to "90"
            document.body[i] = rgx.sub('rotate="90"', document.body[i])
        i += 1
631 def revert_table_rotation(document):
632 "Revert table rotations to TeX-code"
634 load_rotating = False
638 # first, let's find out if we need to do anything
639 i = find_token(document.body, '<features ', i)
642 j = document.body[i].find('rotate="')
644 end_table = find_token(document.body, '</lyxtabular>', j)
645 k = document.body[i].find('"', j + 8)
646 value = document.body[i][j + 8 : k]
648 rgx = re.compile(r' rotate="[^"]+?"')
649 # remove rotate option
650 document.body[i] = rgx.sub('', document.body[i])
652 rgx = re.compile(r'rotate="[^"]+?"')
653 document.body[i] = rgx.sub('rotate="true"', document.body[i])
655 rgx = re.compile(r' rotate="[^"]+?"')
657 # remove rotate option
658 document.body[i] = rgx.sub('', document.body[i])
660 document.body[end_table + 3 : end_table + 3] = \
661 put_cmd_in_ert("\\end{turn}")
662 document.body[i - 2 : i - 2] = \
663 put_cmd_in_ert("\\begin{turn}{" + value + "}")
669 add_to_preamble(document, ["\\@ifundefined{turnbox}{\usepackage{rotating}}{}"])
def convert_table_rotation(document):
    'Convert table rotation statements from "true" to "90"'
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<features ', i)
        if i == -1:
            return
        j = document.body[i].find('rotate="true"')
        if j != -1:
            rgx = re.compile(r'rotate="[^"]+?"')
            # convert "true" to "90"
            document.body[i] = rgx.sub('rotate="90"', document.body[i])
        i += 1
def convert_listoflistings(document):
    'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
    # We can support roundtrip because the command is so simple
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset ERT", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of ERT inset")
            i += 1
            continue
        ert = get_ert(document.body, i)
        if ert == "\\lstlistoflistings{}":
            # replace ERT contents, keeping the existing \end_inset line
            document.body[i:j] = ["\\begin_inset CommandInset toc", "LatexCommand lstlistoflistings", ""]
        i += 1
def revert_listoflistings(document):
    'Convert TOC lstlistoflistings inset to ERT lstlistoflistings'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset toc", i)
        if i == -1:
            return
        if document.body[i+1] == "LatexCommand lstlistoflistings":
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed lyx document: Can't find end of TOC inset")
                i += 1
                continue
            subst = put_cmd_in_ert("\\lstlistoflistings{}")
            document.body[i:j+1] = subst
            add_to_preamble(document, ["\\usepackage{listings}"])
        i += 1
def convert_use_amssymb(document):
    "insert use_package amssymb"
    regexp = re.compile(r'(\\use_package\s+amsmath)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
        return
    value = get_value(document.header, "\\use_package", i).split()[1]
    useamsmath = 0
    try:
        useamsmath = int(value)
    except ValueError:
        document.warning("Invalid \\use_package amsmath: " + value + ". Assuming auto.")
        useamsmath = 1
    j = find_token(document.preamble, "\\usepackage{amssymb}", 0)
    if j == -1:
        # default amssymb to the same policy as amsmath
        document.header.insert(i + 1, "\\use_package amssymb %d" % useamsmath)
    else:
        # manually loaded: force load (2) and drop the preamble line
        document.header.insert(i + 1, "\\use_package amssymb 2")
        del document.preamble[j]
def revert_use_amssymb(document):
    "remove use_package amssymb"
    regexp1 = re.compile(r'(\\use_package\s+amsmath)')
    regexp2 = re.compile(r'(\\use_package\s+amssymb)')
    i = find_re(document.header, regexp1, 0)
    j = find_re(document.header, regexp2, 0)
    value1 = "1" # default is auto
    value2 = "1" # default is auto
    if i != -1:
        value1 = get_value(document.header, "\\use_package", i).split()[1]
    if j != -1:
        value2 = get_value(document.header, "\\use_package", j).split()[1]
        del document.header[j]
    # amssymb only needs explicit loading when forced on independently of amsmath
    if value1 != value2 and value2 == "2": # on
        add_to_preamble(document, ["\\usepackage{amssymb}"])
def revert_ancientgreek(document):
    "Set the document language for ancientgreek to greek"

    if document.language == "ancientgreek":
        document.language = "greek"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language greek"
    j = 0
    while True:
        # downgrade in-text language switches as well
        j = find_token(document.body, "\\lang ancientgreek", j)
        if j == -1:
            return
        document.body[j] = document.body[j].replace("\\lang ancientgreek", "\\lang greek")
        j += 1
def revert_languages(document):
    "Set the document language for new supported languages to English"

    languages = [
        "coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
        "syriac", "tamil", "telugu", "urdu"
    ]
    for n in range(len(languages)):
        if document.language == languages[n]:
            document.language = "english"
            i = find_token(document.header, "\\language", 0)
            if i != -1:
                document.header[i] = "\\language english"
        j = 0
        while j < len(document.body):
            j = find_token(document.body, "\\lang " + languages[n], j)
            if j != -1:
                document.body[j] = document.body[j].replace("\\lang " + languages[n], "\\lang english")
                j += 1
            else:
                # no more occurrences: terminate the inner scan
                j = len(document.body)
def convert_armenian(document):
    "Use polyglossia and thus non-TeX fonts for Armenian"

    if document.language == "armenian":
        i = find_token(document.header, "\\use_non_tex_fonts", 0)
        if i != -1:
            document.header[i] = "\\use_non_tex_fonts true"
def revert_armenian(document):
    "Use ArmTeX and thus TeX fonts for Armenian"

    if document.language == "armenian":
        i = find_token(document.header, "\\use_non_tex_fonts", 0)
        if i != -1:
            document.header[i] = "\\use_non_tex_fonts false"
def revert_libertine(document):
    " Revert native libertine font definition to LaTeX "

    # only relevant when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman libertine", 0)
        if i != -1:
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
            else:
                # old-style figures are the package default; request lining
                preamble += "[lining]"
            preamble += "{libertine-type1}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
def revert_txtt(document):
    " Revert native txtt font definition to LaTeX "

    # only relevant when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_typewriter txtt", 0)
        if i != -1:
            preamble = "\\renewcommand{\\ttdefault}{txtt}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_typewriter default"
def revert_mathdesign(document):
    " Revert native mathdesign font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # NOTE(review): dict entries missing in mangled source; restored from
        # the mdbch/mdput/mdugm mapping used by convert_mdnomath below.
        mathdesign_dict = {
            "mdbch": "charter",
            "mdput": "utopia",
            "mdugm": "garamond"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            preamble = "\\usepackage[%s" % mathdesign_dict[val]
            expert = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                expert = True
                document.header[j] = "\\font_osf false"
            l = find_token(document.header, "\\font_sc true", 0)
            if l != -1:
                expert = True
                document.header[l] = "\\font_sc false"
            if expert:
                # osf/sc require the package's expert option
                preamble += ",expert"
            preamble += "]{mathdesign}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
def revert_texgyre(document):
    " Revert native TeXGyre font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor", \
                         "tgheros", "tgpagella", "tgschola", "tgtermes"]
        # the same package name works for roman, sans and typewriter slots
        i = find_token(document.header, "\\font_roman", 0)
        if i != -1:
            val = get_value(document.header, "\\font_roman", i)
            if val in texgyre_fonts:
                preamble = "\\usepackage{%s}" % val
                add_to_preamble(document, [preamble])
                document.header[i] = "\\font_roman default"
        i = find_token(document.header, "\\font_sans", 0)
        if i != -1:
            val = get_value(document.header, "\\font_sans", i)
            if val in texgyre_fonts:
                preamble = "\\usepackage{%s}" % val
                add_to_preamble(document, [preamble])
                document.header[i] = "\\font_sans default"
        i = find_token(document.header, "\\font_typewriter", 0)
        if i != -1:
            val = get_value(document.header, "\\font_typewriter", i)
            if val in texgyre_fonts:
                preamble = "\\usepackage{%s}" % val
                add_to_preamble(document, [preamble])
                document.header[i] = "\\font_typewriter default"
def revert_ipadeco(document):
    " Revert IPA decorations to ERT "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset IPADeco", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        line = document.body[i]
        rx = re.compile(r'\\begin_inset IPADeco (.*)$')
        m = rx.match(line)
        decotype = m.group(1)
        if decotype != "toptiebar" and decotype != "bottomtiebar":
            document.warning("Invalid IPADeco type: " + decotype)
            i = end
            continue
        blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
        if blay == -1:
            document.warning("Can't find layout for inset at line " + str(i))
            i = end
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1:
            document.warning("Malformed LyX document: Could not find end of IPADeco inset's layout.")
            i = end
            continue
        substi = ["\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "", "\\backslash",
                  decotype + "{", "\\end_layout", "", "\\end_inset"]
        substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
        # do the later one first so as not to mess up the numbering
        document.body[bend:end + 1] = substj
        document.body[i:blay + 1] = substi
        i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2
        add_to_preamble(document, "\\usepackage{tipa}")
def revert_ipachar(document):
    ' Revert \\IPAChar to ERT '
    i = 0
    found = False
    while i < len(document.body):
        m = re.match(r'(.*)\\IPAChar \\(\w+\{\w+\})(.*)', document.body[i])
        if m:
            found = True
            # split the line around the IPAChar and wrap the command in ERT
            before = m.group(1)
            ipachar = m.group(2)
            after = m.group(3)
            subst = [before,
                     '\\begin_inset ERT',
                     'status collapsed', '',
                     '\\begin_layout Standard',
                     '', '', '\\backslash',
                     ipachar,
                     '\\end_layout', '',
                     '\\end_inset', '',
                     after]
            document.body[i: i+1] = subst
            i = i + len(subst)
        else:
            i += 1
    if found:
        add_to_preamble(document, "\\usepackage{tone}")
def revert_minionpro(document):
    " Revert native MinionPro font definition to LaTeX "

    # only relevant when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman minionpro", 0)
        if i != -1:
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
            else:
                # old-style figures are the package default; request lining
                preamble += "[lf]"
            preamble += "{MinionPro}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
def revert_mathfonts(document):
    " Revert native math font definitions to LaTeX "

    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        if val == "eulervm":
            add_to_preamble(document, "\\usepackage{eulervm}")
        elif val == "default":
            mathfont_dict = {
                "lmodern": "\\renewcommand{\\rmdefault}{lmr}",
                "minionpro": "\\usepackage[onlytext,lf]{MinionPro}",
                "minionpro-osf": "\\usepackage[onlytext]{MinionPro}",
                "palatino": "\\renewcommand{\\rmdefault}{ppl}",
                "palatino-osf": "\\renewcommand{\\rmdefault}{pplj}",
                "times": "\\renewcommand{\\rmdefault}{ptm}",
                "utopia": "\\renewcommand{\\rmdefault}{futs}",
                "utopia-osf": "\\renewcommand{\\rmdefault}{futj}",
            }
            j = find_token(document.header, "\\font_roman", 0)
            if j != -1:
                rm = get_value(document.header, "\\font_roman", j)
                k = find_token(document.header, "\\font_osf true", 0)
                # NOTE(review): the "-osf" key suffixing is reconstructed from
                # the dict keys above — verify against upstream lyx_2_1.py
                if k != -1:
                    rm += "-osf"
                if rm in mathfont_dict.keys():
                    add_to_preamble(document, mathfont_dict[rm])
                    document.header[j] = "\\font_roman default"
                    if k != -1:
                        document.header[k] = "\\font_osf false"
    del document.header[i]
def revert_mdnomath(document):
    " Revert mathdesign and fourier without math "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        mathdesign_dict = {
            "md-charter": "mdbch",
            "md-utopia": "mdput",
            "md-garamond": "mdugm"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            j = find_token(document.header, "\\font_math", 0)
            if j == -1:
                # no math font setting: just rename to the old font name
                document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
            else:
                mval = get_value(document.header, "\\font_math", j)
                if mval == "default":
                    # text-only mathdesign: emulate with \rmdefault in preamble
                    document.header[i] = "\\font_roman default"
                    add_to_preamble(document, "\\renewcommand{\\rmdefault}{%s}" % mathdesign_dict[val])
                else:
                    document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def convert_mdnomath(document):
    " Change mathdesign font name "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        mathdesign_dict = {
            "mdbch": "md-charter",
            "mdput": "md-utopia",
            "mdugm": "md-garamond"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
def revert_newtxmath(document):
    " Revert native newtxmath definitions to LaTeX "

    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        mathfont_dict = {
            "libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
            "minion-ntxm": "\\usepackage[minion]{newtxmath}",
            "newtxmath": "\\usepackage{newtxmath}",
        }
        if val in mathfont_dict.keys():
            add_to_preamble(document, mathfont_dict[val])
            document.header[i] = "\\font_math auto"
# Revert the native biolinum sans-serif font to a preamble
# \usepackage{biolinum-type1} (the osf handling around line 1111 appears
# to choose package options — lines missing in this extract).
1104 def revert_biolinum(document):
1105 " Revert native biolinum font definition to LaTeX "
1107 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
1108 i = find_token(document.header, "\\font_sans biolinum", 0)
# j flags old-style-figures; presumably selects the [osf] option — TODO confirm.
1111 j = find_token(document.header, "\\font_osf true", 0)
1114 preamble = "\\usepackage"
1117 preamble += "{biolinum-type1}"
1118 add_to_preamble(document, [preamble])
1119 document.header[i] = "\\font_sans default"
# Revert the native URW Classico (Optima) sans font: emit
# \renewcommand{\sfdefault}{uop} into the preamble and reset \font_sans.
1122 def revert_uop(document):
1123 " Revert native URW Classico (Optima) font definition to LaTeX "
1125 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
1126 i = find_token(document.header, "\\font_sans uop", 0)
1128 preamble = "\\renewcommand{\\sfdefault}{uop}"
1129 add_to_preamble(document, [preamble])
1130 document.header[i] = "\\font_sans default"
# Convert old-style "\begin_inset Argument" insets to the new numbered
# syntax ("\begin_inset Argument <n>").  Numbering can only be done
# reliably for layouts/modules lyx2lyx knows about; unknown ones fall back
# to the magic number 999, which InsetArgument::updateBuffer() resolves.
# NOTE(review): extract embeds original line numbers and omits loop/guard
# lines; code kept byte-identical.
1133 def convert_latexargs(document):
1134 " Convert InsetArgument to new syntax "
# Nothing to do if the document has no Argument insets at all.
1136 if find_token(document.body, "\\begin_inset Argument", 0) == -1:
1140 # A list of layouts (document classes) with only optional or no arguments.
1141 # These can be safely converted to the new syntax
1142 # (I took the liberty to add some of my personal layouts/modules here; JSP)
1143 safe_layouts = ["aa", "aapaper", "aastex", "achemso", "acmsiggraph", "AEA",
1144 "agu-dtd", "agums", "agutex", "amsart", "amsbook", "apa",
1145 "arab-article", "armenian-article", "article-beamer", "article",
1146 "beamer", "book", "broadway", "chess", "cl2emult", "ctex-article",
1147 "ctex-book", "ctex-report", "dinbrief", "docbook-book", "docbook-chapter",
1148 "docbook", "docbook-section", "doublecol-new", "dtk", "ectaart", "egs",
1149 "elsarticle", "elsart", "entcs", "europecv", "extarticle", "extbook",
1150 "extletter", "extreport", "foils", "frletter", "g-brief2", "g-brief",
1151 "heb-article", "heb-letter", "hollywood", "IEEEtran", "ijmpc", "ijmpd",
1152 "iopart", "isprs", "jarticle", "jasatex", "jbook", "jgrga", "jreport",
1153 "jsarticle", "jsbeamer", "jsbook", "jss", "kluwer", "latex8", "letter", "lettre",
1154 "literate-article", "literate-book", "literate-report", "llncs", "ltugboat",
1155 "memoir", "moderncv", "mwart", "mwbk", "mwrep", "paper", "powerdot",
1156 "recipebook", "report", "revtex4", "revtex", "scrartcl", "scrarticle-beamer",
1157 "scrbook", "scrlettr", "scrlttr2", "scrreprt", "seminar", "siamltex",
1158 "sigplanconf", "simplecv", "singlecol", "singlecol-new", "slides", "spie",
1159 "svglobal3", "svglobal", "svjog", "svmono", "svmult", "svprobth", "tarticle",
1160 "tbook", "treport", "tufte-book", "tufte-handout"]
1161 # A list of "safe" modules, same as above
1162 safe_modules = ["biblatex", "beameraddons", "beamersession", "braille", "customHeadersFooters",
1163 "endnotes", "enumitem", "eqs-within-sections", "figs-within-sections", "fix-cm",
1164 "fixltx2e", "foottoend", "hanging", "jscharstyles", "knitr", "lilypond",
1165 "linguistics", "linguisticx", "logicalmkup", "minimalistic", "nomindex", "noweb",
1166 "pdfcomment", "sweave", "tabs-within-sections", "theorems-ams-bytype",
1167 "theorems-ams-extended-bytype", "theorems-ams-extended", "theorems-ams", "theorems-bytype",
1168 "theorems-chap-bytype", "theorems-chap", "theorems-named", "theorems-sec-bytype",
1169 "theorems-sec", "theorems-starred", "theorems-std", "todonotes"]
1170 # Modules we need to take care of
1171 caveat_modules = ["initials"]
1172 # information about the relevant styles in caveat_modules (number of opt and req args)
1173 # use this if we get more caveat_modules. For now, use hard coding (see below).
1174 # initials = [{'Layout' : 'Initial', 'opt' : 1, 'req' : 1}]
1176 # Is this a known safe layout?
1177 safe_layout = document.textclass in safe_layouts
1179 document.warning("Lyx2lyx knows nothing about textclass '%s'. "
1180 "Please check if short title insets have been converted correctly."
1181 % document.textclass)
1182 # Do we use unsafe or unknown modules
1183 mods = document.get_module_list()
1184 unknown_modules = False
1185 used_caveat_modules = list()
# Classify each used module: safe / caveat / unknown (warn on unknown).
1187 if mod in safe_modules:
1189 if mod in caveat_modules:
1190 used_caveat_modules.append(mod)
1192 unknown_modules = True
1193 document.warning("Lyx2lyx knows nothing about module '%s'. "
1194 "Please check if short title insets have been converted correctly."
# Main loop over all Argument insets in the body.
1199 i = find_token(document.body, "\\begin_inset Argument", i)
1203 if not safe_layout or unknown_modules:
1204 # We cannot do more here since we have no access to this layout.
1205 # InsetArgument itself will do the real work
1206 # (see InsetArgument::updateBuffer())
1207 document.body[i] = "\\begin_inset Argument 999"
1211 # Find beginning and end of the containing paragraph
1212 parbeg = find_token_backwards(document.body, "\\begin_layout", i)
1214 document.warning("Malformed lyx document: Can't find parent paragraph layout")
1216 parend = find_end_of_layout(document.body, parbeg)
1218 document.warning("Malformed lyx document: Can't find end of parent paragraph layout")
1222 if len(used_caveat_modules) > 0:
1223 # We know for now that this must be the initials module with the Initial layout
1224 # If we get more such modules, we need some automating.
1225 layoutname = get_value(document.body, "\\begin_layout", parbeg)
1226 if layoutname == "Initial":
1227 # Layout has 1 opt and 1 req arg.
1228 # Count the actual arguments
1230 for p in range(parbeg, parend):
1231 if document.body[p] == "\\begin_inset Argument":
1236 # Collect all arguments in this paragraph
1238 for p in range(parbeg, parend):
1239 if document.body[p] == "\\begin_inset Argument":
1241 if allowed_opts != -1:
1242 # We have less arguments than opt + required.
1243 # required must take precedence.
1244 if argnr > allowed_opts and argnr < first_req:
# Stamp the computed argument number onto the inset.
1246 document.body[p] = "\\begin_inset Argument %d" % argnr
# Revert numbered Argument insets ("\begin_inset Argument <n>") to the old
# unnumbered syntax.  Because the old format relied on document order, all
# argument insets of a paragraph are extracted, sorted by their number and
# re-inserted at the paragraph beginning.
# NOTE(review): extract embeds original line numbers; loop headers and
# some bookkeeping lines are missing — code kept byte-identical.
1250 def revert_latexargs(document):
1251 " Revert InsetArgument to old syntax "
# Matches only numbered Argument insets; unnumbered ones are already reverted.
1254 rx = re.compile(r'^\\begin_inset Argument (\d+)$')
1257 # Search for Argument insets
1258 i = find_token(document.body, "\\begin_inset Argument", i)
1261 m = rx.match(document.body[i])
1263 # No ID: inset already reverted
1266 # Find beginning and end of the containing paragraph
1267 parbeg = find_token_backwards(document.body, "\\begin_layout", i)
1269 document.warning("Malformed lyx document: Can't find parent paragraph layout")
1271 parend = find_end_of_layout(document.body, parbeg)
1273 document.warning("Malformed lyx document: Can't find end of parent paragraph layout")
1275 # Collect all arguments in this paragraph
1277 for p in range(parbeg, parend):
1278 m = rx.match(document.body[p])
1280 val = int(m.group(1))
1281 j = find_end_of_inset(document.body, p)
1282 # Revert to old syntax
1283 document.body[p] = "\\begin_inset Argument"
1285 document.warning("Malformed lyx document: Can't find end of Argument inset")
# Stash the whole inset keyed by its argument number for later sorting.
1288 args[val] = document.body[p : j + 1]
# Keep the cached paragraph end in sync after deleting the inset lines.
1290 realparend = realparend - len(document.body[p : j + 1])
1291 # Remove arg inset at this position
1292 del document.body[p : j + 1]
1295 # Now sort the arg insets
1297 for f in sorted(args):
1300 # Insert the sorted arg insets at paragraph begin
1301 document.body[parbeg + 1:parbeg + 1] = subst
# Continue the search after the re-inserted material.
1303 i = parbeg + 1 + len(subst)
# Helper: turn InsetArgument insets number n..nmax (starting at `line`)
# into ERT brace code — "}{" separators for command arguments, "{...}"
# for environment arguments.
# NOTE(review): extract embeds original line numbers; the loop-advance and
# counter lines are missing — code kept byte-identical.
1306 def revert_Argument_to_TeX_brace(document, line, n, nmax, environment):
1308 Reverts an InsetArgument to TeX-code
1310 revert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
1311 LineOfBeginLayout is the line of the \begin_layout statement
1312 StartArgument is the number of the first argument that needs to be converted
1313 EndArgument is the number of the last argument that needs to be converted or the last defined one
1314 isEnvironment must be true, if the layout id for a LaTeX environment
1317 while lineArg != -1 and n < nmax + 1:
1318 lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
1320 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
1321 # we have to assure that no other inset is in the Argument
1322 beginInset = find_token(document.body, "\\begin_inset", beginPlain)
1323 endInset = find_token(document.body, "\\end_inset", beginPlain)
# Skip over any nested insets until the Argument's own \end_inset is found.
1326 while beginInset < endInset and beginInset != -1:
1327 beginInset = find_token(document.body, "\\begin_inset", k)
1328 endInset = find_token(document.body, "\\end_inset", l)
1331 if environment == False:
# Command argument: replace inset tail with ERT "}{" and drop the head.
1332 document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
1333 del(document.body[lineArg : beginPlain + 1])
# Environment argument: wrap content in ERT "{" ... "}".
1335 document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
1336 document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
# Revert IEEEtran-specific Argument insets (Page headings, Biography,
# Biography without photo) to TeX braces via revert_Argument_to_TeX_brace.
# NOTE(review): extract embeds original line numbers; loop scaffolding
# lines are missing — code kept byte-identical.
1340 def revert_IEEEtran(document):
1342 Reverts InsetArgument of
1345 Biography without photo
1348 if document.textclass == "IEEEtran":
1354 i = find_token(document.body, "\\begin_layout Page headings", i)
1356 revert_Argument_to_TeX_brace(document, i, 1, 1, False)
1359 j = find_token(document.body, "\\begin_layout Biography without photo", j)
1361 revert_Argument_to_TeX_brace(document, j, 1, 1, True)
1364 k = find_token(document.body, "\\begin_layout Biography", k)
# "Biography" is a prefix of "Biography without photo": only handle the
# plain Biography layout when the two finds coincide.
1365 kA = find_token(document.body, "\\begin_layout Biography without photo", k)
1366 if k == kA and k != -1:
1370 # start with the second argument, therefore 2
1371 revert_Argument_to_TeX_brace(document, k, 2, 2, True)
1373 if i == -1 and j == -1 and k == -1:
# Helper: turn ERT brace code back into InsetArgument insets n..nmax.
# "}{" in an ERT marks a command-argument separator; "{" / "}" in two
# ERTs delimit an environment argument.
# NOTE(review): extract embeds original line numbers; loop-advance and
# counter lines are missing — code kept byte-identical.
1377 def convert_Argument_to_TeX_brace(document, line, n, nmax, environment):
1379 Converts TeX code to an InsetArgument
1380 !!! Be careful if the braces are different in your case as expected here:
1381 - }{ separates mandatory arguments of commands
1382 - { and } surround a mandatory argument of an environment
1384 convert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
1385 LineOfBeginLayout is the line of the \begin_layout statement
1386 StartArgument is the number of the first ERT that needs to be converted
1387 EndArgument is the number of the last ERT that needs to be converted
1388 isEnvironment must be true, if the layout id for a LaTeX environment
1391 - this routine will fail if the user has additional TeX-braces (there is nothing we can do)
1392 - this routine can currently handle only one mandatory argument of environments
1394 - support the case that }{ is in the file in 2 separate ERTs
1397 while lineArg != -1 and n < nmax + 1:
1398 lineArg = find_token(document.body, "\\begin_inset ERT", lineArg)
1399 if environment == False and lineArg != -1:
1400 bracePair = find_token(document.body, "}{", lineArg)
1401 # assure that the "}{" is in this ERT (5 is or files saved with LyX 2.0, 4 for files exported by LyX 2.1)
1402 if bracePair == lineArg + 5 or bracePair == lineArg + 4:
1403 end = find_token(document.body, "\\end_inset", bracePair)
1404 document.body[lineArg : end + 1] = ["\\end_layout", "", "\\end_inset"]
# First argument of a command is opened right after the layout line;
# subsequent ones after the previous argument's end (endn).
1406 document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
1408 document.body[endn + 1 : endn + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
1412 lineArg = lineArg + 1
1413 if environment == True and lineArg != -1:
1414 opening = find_token(document.body, "{", lineArg)
1415 if opening == lineArg + 5 or opening == lineArg + 4: # assure that the "{" is in this ERT
1416 end = find_token(document.body, "\\end_inset", opening)
1417 document.body[lineArg : end + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
# The matching closing "}" lives in the next ERT inset.
1419 lineArg2 = find_token(document.body, "\\begin_inset ERT", lineArg)
1420 closing = find_token(document.body, "}", lineArg2)
1421 if closing == lineArg2 + 5 or closing == lineArg2 + 4: # assure that the "}" is in this ERT
1422 end2 = find_token(document.body, "\\end_inset", closing)
1423 document.body[lineArg2 : end2 + 1] = ["\\end_layout", "", "\\end_inset"]
1425 lineArg = lineArg + 1
# Forward counterpart of revert_IEEEtran: convert ERT braces of the
# IEEEtran layouts (Page headings, Biography, Biography without photo)
# into Argument insets.
# NOTE(review): extract embeds original line numbers; loop scaffolding
# lines are missing — code kept byte-identical.
1428 def convert_IEEEtran(document):
1433 Biography without photo
1436 if document.textclass == "IEEEtran":
1442 i = find_token(document.body, "\\begin_layout Page headings", i)
1444 convert_Argument_to_TeX_brace(document, i, 1, 1, False)
1447 j = find_token(document.body, "\\begin_layout Biography without photo", j)
1449 convert_Argument_to_TeX_brace(document, j, 1, 1, True)
1452 # assure that we don't handle Biography Biography without photo
1453 k = find_token(document.body, "\\begin_layout Biography", k)
# Search from k - 1 so an exact-position match identifies the
# "without photo" variant, which must be skipped here.
1454 kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
1455 if k == kA and k != -1:
1459 # the argument we want to convert is the second one
1460 convert_Argument_to_TeX_brace(document, k, 2, 2, True)
1462 if i == -1 and j == -1 and k == -1:
# Revert the aastex "Altaffilation" Argument inset to TeX braces.
1466 def revert_AASTeX(document):
1467 " Reverts InsetArgument of Altaffilation to TeX-code "
1468 if document.textclass == "aastex":
1472 i = find_token(document.body, "\\begin_layout Altaffilation", i)
1474 revert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Convert aastex "Altaffilation" ERT braces into an Argument inset.
1480 def convert_AASTeX(document):
1481 " Converts ERT of Altaffilation to InsetArgument "
1482 if document.textclass == "aastex":
1486 i = find_token(document.body, "\\begin_layout Altaffilation", i)
1488 convert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Revert the agutex "Author affiliation" Argument inset to TeX braces.
1494 def revert_AGUTeX(document):
1495 " Reverts InsetArgument of Author affiliation to TeX-code "
1496 if document.textclass == "agutex":
1500 i = find_token(document.body, "\\begin_layout Author affiliation", i)
1502 revert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Convert agutex "Author affiliation" ERT braces into an Argument inset.
1508 def convert_AGUTeX(document):
1509 " Converts ERT of Author affiliation to InsetArgument "
1510 if document.textclass == "agutex":
1514 i = find_token(document.body, "\\begin_layout Author affiliation", i)
1516 convert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Revert the ijmpc/ijmpd "MarkBoth" Argument inset to TeX braces.
1522 def revert_IJMP(document):
1523 " Reverts InsetArgument of MarkBoth to TeX-code "
1524 if document.textclass == "ijmpc" or document.textclass == "ijmpd":
1528 i = find_token(document.body, "\\begin_layout MarkBoth", i)
1530 revert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Convert ijmpc/ijmpd "MarkBoth" ERT braces into an Argument inset.
1536 def convert_IJMP(document):
1537 " Converts ERT of MarkBoth to InsetArgument "
1538 if document.textclass == "ijmpc" or document.textclass == "ijmpd":
1542 i = find_token(document.body, "\\begin_layout MarkBoth", i)
1544 convert_Argument_to_TeX_brace(document, i, 1, 1, False)
# Revert sigplanconf Argument insets: Conference (one argument) and
# Author (two arguments) to TeX braces.
1550 def revert_SIGPLAN(document):
1551 " Reverts InsetArgument of MarkBoth to TeX-code "
1552 if document.textclass == "sigplanconf":
1557 i = find_token(document.body, "\\begin_layout Conference", i)
1559 revert_Argument_to_TeX_brace(document, i, 1, 1, False)
1562 j = find_token(document.body, "\\begin_layout Author", j)
1564 revert_Argument_to_TeX_brace(document, j, 1, 2, False)
1566 if i == -1 and j == -1:
# Convert sigplanconf Conference/Author ERT braces into Argument insets
# (inverse of revert_SIGPLAN).
1570 def convert_SIGPLAN(document):
1571 " Converts ERT of MarkBoth to InsetArgument "
1572 if document.textclass == "sigplanconf":
1577 i = find_token(document.body, "\\begin_layout Conference", i)
1579 convert_Argument_to_TeX_brace(document, i, 1, 1, False)
1582 j = find_token(document.body, "\\begin_layout Author", j)
1584 convert_Argument_to_TeX_brace(document, j, 1, 2, False)
1586 if i == -1 and j == -1:
# Revert noweb-module documents to the old "literate-*" textclasses and
# rename Chunk layouts back to Scrap.
1590 def revert_literate(document):
1591 " Revert Literate document to old format "
# del_token removes the "noweb" module line and reports whether it existed.
1592 if del_token(document.header, "noweb", 0):
1593 document.textclass = "literate-" + document.textclass
1596 i = find_token(document.body, "\\begin_layout Chunk", i)
1599 document.body[i] = "\\begin_layout Scrap"
# Convert old "literate-*" textclasses to the plain class plus the noweb
# module, and rename Scrap layouts to Chunk.
# NOTE(review): extract embeds original line numbers; branch guards are
# missing — code kept byte-identical.
1603 def convert_literate(document):
1604 " Convert Literate document to new format"
1605 i = find_token(document.header, "\\textclass", 0)
1606 if (i != -1) and "literate-" in document.header[i]:
1607 document.textclass = document.header[i].replace("\\textclass literate-", "")
1608 j = find_token(document.header, "\\begin_modules", 0)
# If a modules block exists, append noweb to it ...
1610 document.header.insert(j + 1, "noweb")
# ... otherwise create a fresh \begin_modules/noweb/\end_modules block
# (inserted in reverse so the final order is begin, noweb, end).
1612 document.header.insert(i + 1, "\\end_modules")
1613 document.header.insert(i + 1, "noweb")
1614 document.header.insert(i + 1, "\\begin_modules")
1617 i = find_token(document.body, "\\begin_layout Scrap", i)
1620 document.body[i] = "\\begin_layout Chunk"
# Revert "\item" arguments ("Argument item:*" insets) to ERT-wrapped
# bracket code "[...]" placed right after the previous layout line.
# NOTE(review): extract embeds original line numbers; the surrounding
# loop lines are missing — code kept byte-identical.
1624 def revert_itemargs(document):
1625 " Reverts \\item arguments to TeX-code "
1627 i = find_token(document.body, "\\begin_inset Argument item:", 0)
1628 j = find_end_of_inset(document.body, i)
# Anchor point: the layout that precedes the argument inset.
1631 lastlay = find_token_backwards(document.body, "\\begin_layout", i)
1632 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1633 endLayout = find_token(document.body, "\\end_layout", beginPlain)
1634 endInset = find_token(document.body, "\\end_inset", endLayout)
# Keep the inset's content, drop its wrapper lines.
1635 content = document.body[beginPlain + 1 : endLayout]
1636 del document.body[i:j+1]
1637 subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
1638 document.body[lastlay + 1:lastlay + 1] = subst
# Registration tables for the lyx2lyx driver: supported target versions
# and the ordered convert/revert step lists.  Each entry pairs a file
# format number with the routine(s) that produce it; the revert table
# mirrors the convert table in descending format order.
# NOTE(review): extract embeds original line numbers and omits the list
# headers/closers and several entries — entries kept byte-identical.
1646 supported_versions = ["2.1.0","2.1"]
# Forward conversion steps (format 414 upwards).
1649 [415, [convert_undertilde]],
1651 [417, [convert_japanese_encodings]],
1654 [420, [convert_biblio_style]],
1655 [421, [convert_longtable_captions]],
1656 [422, [convert_use_packages]],
1657 [423, [convert_use_mathtools]],
1658 [424, [convert_cite_engine_type]],
1662 [428, [convert_cell_rotation]],
1663 [429, [convert_table_rotation]],
1664 [430, [convert_listoflistings]],
1665 [431, [convert_use_amssymb]],
1667 [433, [convert_armenian]],
1675 [441, [convert_mdnomath]],
1680 [446, [convert_latexargs]],
1681 [447, [convert_IEEEtran, convert_AASTeX, convert_AGUTeX, convert_IJMP, convert_SIGPLAN]],
1682 [448, [convert_literate]],
# Reversion steps (back down from format 448).
1687 [448, [revert_itemargs]],
1688 [447, [revert_literate]],
1689 [446, [revert_IEEEtran, revert_AASTeX, revert_AGUTeX, revert_IJMP, revert_SIGPLAN]],
1690 [445, [revert_latexargs]],
1691 [444, [revert_uop]],
1692 [443, [revert_biolinum]],
1694 [441, [revert_newtxmath]],
1695 [440, [revert_mdnomath]],
1696 [439, [revert_mathfonts]],
1697 [438, [revert_minionpro]],
1698 [437, [revert_ipadeco, revert_ipachar]],
1699 [436, [revert_texgyre]],
1700 [435, [revert_mathdesign]],
1701 [434, [revert_txtt]],
1702 [433, [revert_libertine]],
1703 [432, [revert_armenian]],
1704 [431, [revert_languages, revert_ancientgreek]],
1705 [430, [revert_use_amssymb]],
1706 [429, [revert_listoflistings]],
1707 [428, [revert_table_rotation]],
1708 [427, [revert_cell_rotation]],
1709 [426, [revert_tipa]],
1710 [425, [revert_verbatim]],
1711 [424, [revert_cancel]],
1712 [423, [revert_cite_engine_type]],
1713 [422, [revert_use_mathtools]],
1714 [421, [revert_use_packages]],
1715 [420, [revert_longtable_captions]],
1716 [419, [revert_biblio_style]],
1717 [418, [revert_australian]],
1718 [417, [revert_justification]],
1719 [416, [revert_japanese_encodings]],
1720 [415, [revert_negative_space, revert_math_spaces]],
1721 [414, [revert_undertilde]],
1722 [413, [revert_visible_space]]
1726 if __name__ == "__main__":