1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2016 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.3"""
# Uncomment only what you need to import, please.

from parser_tools import (del_token, del_value, del_complete_lines,
    find_complete_lines, find_end_of, find_end_of_layout, find_end_of_inset,
    find_re, find_token, find_token_backwards,
    get_containing_layout, get_bool_value, get_value, get_quoted_value)
#  find_tokens, find_token_exact, is_in_inset,
#  check_token, get_option_value

from lyx2lyx_tools import (add_to_preamble, put_cmd_in_ert, revert_font_attrs,
    insert_to_preamble, latex_length)
#  get_ert, lyx2latex, \
#  lyx2verbatim, length_in_bp, convert_info_insets
#  latex_length, revert_flex_inset, hex2ratio, str2bool
40 ####################################################################
41 # Private helper functions
45 ###############################################################################
47 ### Conversion and reversion routines
49 ###############################################################################
def convert_microtype(document):
    " Add microtype settings. "
    # Insert \use_microtype right after \font_tt_scale (or at the header end
    # if that anchor is missing).
    i = find_token(document.header, "\\font_tt_scale" , 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        i = len(document.header) - 1

    # If the preamble already loads microtype, absorb that into the new
    # header setting and drop the preamble line.
    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if j == -1:
        document.header.insert(i + 1, "\\use_microtype false")
    else:
        document.header.insert(i + 1, "\\use_microtype true")
        del document.preamble[j]
def revert_microtype(document):
    " Remove microtype settings. "
    i = find_token(document.header, "\\use_microtype", 0)
    if i == -1:
        return
    use_microtype = get_bool_value(document.header, "\\use_microtype" , i)
    del document.header[i]
    if use_microtype:
        # re-express the setting as an explicit preamble load
        add_to_preamble(document, ["\\usepackage{microtype}"])
def convert_dateinset(document):
    ' Convert date external inset to ERT '
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            i += 1
            continue
        # only the "Date" template is converted; other external insets stay
        if get_value(document.body, 'template', i, j) == "Date":
            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
        i += 1
def convert_inputenc(document):
    " Replace no longer supported input encoding settings. "
    i = find_token(document.header, "\\inputenc", 0)
    if i == -1:
        return
    # pt254 is no longer supported; fall back to pt154
    if get_value(document.header, "\\inputencoding", i) == "pt254":
        document.header[i] = "\\inputencoding pt154"
def convert_ibranches(document):
    ' Add "inverted 0" to branch insets'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        # new format: every branch inset carries an "inverted" flag
        document.body.insert(i + 1, "inverted 0")
        i += 1
def revert_ibranches(document):
    ' Convert inverted branches to explicit anti-branches'
    # Get list of branches and their selection state (0 or 1)
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1

        # the value tells us whether the branch is selected
        ourbranches[document.header[i][8:].strip()] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if not branch in ibranches:
                # pick a fresh anti-branch name, avoiding collisions
                antibranch = "Anti-" + branch
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch

        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # 1 - ourbranches[old] inverts the selection status of the old branch
        # (the mangled original computed ourbranches[old] - 1, which yields -1
        # for an unselected branch)
        lines = ["\\branch " + new,
                 "\\selected " + str(1 - ourbranches[old])]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
def revert_beamer_article_styles(document):
    " Include (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    # ensure a local layout block exists to receive the insertion
    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        # NOTE(review): the DefaultFont lines below were dropped by the
        # extraction; reconstructed from the standard layout syntax — verify
        "DefaultFont",
        "     Family                Roman",
        "     Series                Medium",
        "     Shape                 Up",
        "     Size                  Normal",
        "     Misc                  Normal",
        "     Color                 None",
        "EndFont",
        "Preamble",
        "    \\usepackage{beamerarticle,pgf}",
        "    % this default might be overridden by plain title style",
        "    \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
        "    \\AtBeginDocument{",
        "        \\let\\origtableofcontents=\\tableofcontents",
        "        \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        "        \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        "    }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]
def convert_beamer_article_styles(document):
    " Remove included (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    # remove only the block that lyx2lyx itself inserted
    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"

    if document.language == "bosnian":
        document.language = "english"
        i = find_token(document.header, "\\language bosnian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # bosnian needs babel, passed the language as class option
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options bosnian")
def revert_friulan(document):
    "Set the document language to English but assure Friulan output"

    if document.language == "friulan":
        document.language = "english"
        i = find_token(document.header, "\\language friulan", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # friulan needs babel, passed the language as class option
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options friulan")
def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"

    if document.language == "macedonian":
        document.language = "english"
        i = find_token(document.header, "\\language macedonian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # macedonian needs babel, passed the language as class option
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options macedonian")
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language == "piedmontese":
        document.language = "english"
        i = find_token(document.header, "\\language piedmontese", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # piedmontese needs babel, passed the language as class option
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options piedmontese,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options piedmontese")
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language == "romansh":
        document.language = "english"
        i = find_token(document.header, "\\language romansh", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # romansh needs babel, passed the language as class option
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options romansh,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options romansh")
def revert_amharic(document):
    "Set the document language to English but assure Amharic output"

    if document.language == "amharic":
        document.language = "english"
        i = find_token(document.header, "\\language amharic", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # amharic is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{amharic}}"])
        # switch back to amharic via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{amharic}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_asturian(document):
    "Set the document language to English but assure Asturian output"

    if document.language == "asturian":
        document.language = "english"
        i = find_token(document.header, "\\language asturian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # asturian is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{asturian}}"])
        # switch back to asturian via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{asturian}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_kannada(document):
    "Set the document language to English but assure Kannada output"

    if document.language == "kannada":
        document.language = "english"
        i = find_token(document.header, "\\language kannada", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # kannada is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{kannada}}"])
        # switch back to kannada via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{kannada}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_khmer(document):
    "Set the document language to English but assure Khmer output"

    if document.language == "khmer":
        document.language = "english"
        i = find_token(document.header, "\\language khmer", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # khmer is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{khmer}}"])
        # switch back to khmer via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{khmer}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_urdu(document):
    "Set the document language to English but assure Urdu output"

    if document.language == "urdu":
        document.language = "english"
        i = find_token(document.header, "\\language urdu", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # urdu is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{urdu}}"])
        # switch back to urdu via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{urdu}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_syriac(document):
    "Set the document language to English but assure Syriac output"

    if document.language == "syriac":
        document.language = "english"
        i = find_token(document.header, "\\language syriac", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # syriac is handled by polyglossia (the default package)
            document.header[j] = "\\language_package default"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{syriac}}"])
        # switch back to syriac via ERT at the very start of the body
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{syriac}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_quotes(document):
    " Revert Quote Insets in verbatim or Hebrew context to plain quotes "

    # First handle verbatim insets
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            # replace all quote insets inside this inset with plain quotes
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    # single quote
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    # single quote
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew: skip the pass entirely if no Hebrew text is present
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # determine the language in effect at the inset
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            # no \lang switch inside this paragraph: document language rules
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                # single quote
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l
def revert_iopart(document):
    " Input new styles via local layout "
    if document.textclass != "iopart":
        return

    # ensure a local layout block exists to receive the insertion
    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # remove only the block that lyx2lyx itself inserted
    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def convert_quotestyle(document):
    " Convert \\quotes_language to \\quotes_style "
    i = find_token(document.header, "\\quotes_language", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    val = get_value(document.header, "\\quotes_language", i)
    document.header[i] = "\\quotes_style " + val
def revert_quotestyle(document):
    " Revert \\quotes_style to \\quotes_language "
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    val = get_value(document.header, "\\quotes_style", i)
    document.header[i] = "\\quotes_language " + val
def revert_plainquote(document):
    " Revert plain quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets ("q" is the plain-style code)
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        replace = "\""
        if document.body[k].endswith("s"):
            # single quote
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l
def convert_frenchquotes(document):
    " Convert french quote insets to swiss "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style french", 0)
    if i != -1:
        document.header[i] = "\\quotes_style swiss"

    # now the insets: rewrite the style code "f" -> "c" (swiss)
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # get_value splits on the first blank, so [7:] strips "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("f", "c", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swissquotes(document):
    " Revert swiss quote insets to french "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swiss", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets: rewrite the style code "c" (swiss) -> "f" (french)
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        # get_value splits on the first blank, so [7:] strips "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("c", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_britishquotes(document):
    " Revert british quote insets to english "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style british", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets: style code "b" -> "e", and swap the quote level,
    # since british primary quotes are english secondary ones and vice versa
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("b", "e", 1)
        if val[2] == "d":
            # double becomes single
            newval = newval.replace("d", "s")
        else:
            # single becomes double
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swedishgquotes(document):
    " Revert swedish quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swedishg", 0)
    if i != -1:
        document.header[i] = "\\quotes_style danish"

    # now the insets (style code "w" = swedishg)
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "d":
            # double quotes: danish style, mirroring the side
            newval = val.replace("w", "a", 1).replace("r", "l")
        else:
            # single quotes: swedish style
            newval = val.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchquotes(document):
    " Revert french inner quote insets "

    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # single quotes: revert to english double quotes
            newval = val.replace("f", "e", 1).replace("s", "d")
            document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style frenchin", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets: style code "i" (frenchin) -> "f" (french)
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("i", "f", 1)
        if val[2] == "s":
            # single quotes become double
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_russianquotes(document):
    " Revert russian quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style russian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets (style code "r" = russian)
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # single quotes: revert to german double quotes
            newval = val.replace("r", "g", 1).replace("s", "d")
        else:
            # double quotes: revert to french quotes
            newval = val.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_dynamicquotes(document):
    " Revert dynamic quote insets "

    # First, revert header
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get the global quote style; dynamic insets are resolved against it
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # map the style name to its one-letter inset code (matches the codes
    # used by the other quote conversion routines in this file)
    s = "e"
    if style == "english":
        s = "e"
    elif style == "swedish":
        s = "s"
    elif style == "german":
        s = "g"
    elif style == "polish":
        s = "p"
    elif style == "swiss":
        s = "c"
    elif style == "danish":
        s = "a"
    elif style == "plain":
        s = "q"
    elif style == "british":
        s = "b"
    elif style == "swedishg":
        s = "w"
    elif style == "french":
        s = "f"
    elif style == "frenchin":
        s = "i"
    elif style == "russian":
        s = "r"

    # now transform the insets: dynamic code "x" -> concrete style code
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
def revert_cjkquotes(document):
    " Revert cjk quote insets "

    # Get the global quote style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the concrete cjk code
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # corner-bracket quotes (style code "j")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # determine whether the inset sits in CJK text: in CJK context we can
        # output the real glyphs, otherwise we approximate with math symbols
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            # secondary (white corner bracket) quotes
            if val[1] == "l":
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # primary (corner bracket) quotes
            if val[1] == "l":
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l

    # angle-bracket quotes (style code "k")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "

    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            # We need to use oldstyle figures as package option
            j = find_token(document.header, "\\font_osf true", 0)
            preamble = "\\usepackage"
            if j != -1:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            document.header[i] = document.header[i].replace("cochineal", "default")
def revert_cochinealmath(document):
    " Revert cochineal newtxmath definitions to LaTeX "

    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # not a labelonly reference: nothing to do
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        # output the raw label text instead of the inset
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
def revert_plural_refs(document):
    " Revert plural and capitalized references "
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # find_token returns -1 when the token is absent; the mangled original
    # tested (i != 0), which was effectively always true
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        k = find_token(document.body, "LaTeXCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "s"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        noprefix = False
        if k != -1:
            noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", i, j)
            i = j - 1
            continue

        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue

        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue

        # emit only the part after the prefix
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
# Revert LyX 2.3 biblatex support to the 2.2 representation:
#   1. switch the cite engine back to natbib and emulate biblatex via the
#      preamble (\usepackage[natbib=true,...]{biblatex} + \addbibresource),
#   2. wrap bibtex insets in a Note and emit \printbibliography as ERT,
#   3. turn biblatex-only citation commands into ERT (known-old commands are
#      kept; unknown ones are reset to plain 'cite'),
#   4. add "Provides natbib 1" to the local layout so natbib styles work.
# NOTE(review): interleaved lines appear missing from this listing (embedded
# numbering jumps); conditionals/returns between the visible lines are not shown.
1176 def revert_biblatex(document):
1177 " Revert biblatex support "
1183 # 1. Get cite engine
1185 i = find_token(document.header, "\\cite_engine", 0)
1187 document.warning("Malformed document! Missing \\cite_engine")
1189 engine = get_value(document.header, "\\cite_engine", i)
1191 # 2. Store biblatex state and revert to natbib
1193 if engine in ["biblatex", "biblatex-natbib"]:
1195 document.header[i] = "\\cite_engine natbib"
1197 # 3. Store and remove new document headers
1199 i = find_token(document.header, "\\biblatex_bibstyle", 0)
1201 bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
1202 del document.header[i]
1205 i = find_token(document.header, "\\biblatex_citestyle", 0)
1207 citestyle = get_value(document.header, "\\biblatex_citestyle", i)
1208 del document.header[i]
1211 i = find_token(document.header, "\\biblio_options", 0)
1213 biblio_options = get_value(document.header, "\\biblio_options", i)
1214 del document.header[i]
# assemble the biblatex package options; natbib=true keeps natbib citation
# command aliases available
1217 bbxopts = "[natbib=true"
1219 bbxopts += ",bibstyle=" + bibstyle
1221 bbxopts += ",citestyle=" + citestyle
1222 if biblio_options != "":
1223 bbxopts += "," + biblio_options
1225 add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")
1235 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1238 j = find_end_of_inset(document.body, i)
1240 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1243 bibs = get_quoted_value(document.body, "bibfiles", i, j)
1244 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1247 bibresources += bibs.split(",")
1249 document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
1250 # remove biblatexopts line
1251 k = find_token(document.body, "biblatexopts", i, j)
1253 del document.body[k]
1254 # Re-find inset end line
1255 j = find_end_of_inset(document.body, i)
1256 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1258 pcmd = "printbibliography"
1260 pcmd += "[" + opts + "]"
1261 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1262 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1263 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1264 "status open", "", "\\begin_layout Plain Layout" ]
1265 repl += document.body[i:j+1]
1266 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1267 document.body[i:j+1] = repl
1273 for b in bibresources:
1274 add_to_preamble(document, "\\addbibresource{" + b + ".bib}")
1276 # 2. Citation insets
1278 # Specific citation insets used in biblatex that need to be reverted to ERT
# mapping: LyX 2.3 citation command -> LaTeX (biblatex) command for ERT
1281 "citebyear" : "citeyear",
1282 "citeyear" : "cite*",
1283 "Footcite" : "Smartcite",
1284 "footcite" : "smartcite",
1285 "Autocite" : "Autocite",
1286 "autocite" : "autocite",
1287 "citetitle" : "citetitle",
1288 "citetitle*" : "citetitle*",
1289 "fullcite" : "fullcite",
1290 "footfullcite" : "footfullcite",
1291 "supercite" : "supercite",
1292 "citeauthor" : "citeauthor",
1293 "citeauthor*" : "citeauthor*",
1294 "Citeauthor" : "Citeauthor",
1295 "Citeauthor*" : "Citeauthor*"
1298 # All commands accepted by LyX < 2.3. Everything else throws an error.
1299 old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
1300 "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
1301 "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
1302 "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
1303 "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
1304 "footcitet", "footcitep", "footcitealt", "footcitealp",\
1305 "footciteauthor", "footciteyear", "footciteyearpar",\
1306 "citefield", "citetitle", "cite*" ]
1310 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1313 j = find_end_of_inset(document.body, i)
1315 document.warning("Can't find end of citation inset at line %d!!" %(i))
1318 k = find_token(document.body, "LatexCommand", i, j)
1320 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1323 cmd = get_value(document.body, "LatexCommand", k)
1324 if biblatex and cmd in list(new_citations.keys()):
1325 pre = get_quoted_value(document.body, "before", i, j)
1326 post = get_quoted_value(document.body, "after", i, j)
1327 key = get_quoted_value(document.body, "key", i, j)
1329 document.warning("Citation inset at line %d does not have a key!" %(i))
1331 # Replace known new commands with ERT
1332 res = "\\" + new_citations[cmd]
1334 res += "[" + pre + "]"
1336 res += "[" + post + "]"
1339 res += "{" + key + "}"
1340 document.body[i:j+1] = put_cmd_in_ert([res])
1341 elif cmd not in old_citations:
1342 # Reset unknown commands to cite. This is what LyX does as well
1343 # (but LyX 2.2 would break on unknown commands)
1344 document.body[k] = "LatexCommand cite"
1345 document.warning("Reset unknown cite command '%s' with cite" % cmd)
1348 # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
1350 i = find_token(document.header, "\\begin_local_layout", 0)
1352 k = find_token(document.header, "\\language", 0)
1354 # this should not happen
1355 document.warning("Malformed LyX document! No \\language header found!")
# no local layout yet: create an empty one just before \language
1357 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1360 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1362 # this should not happen
1363 document.warning("Malformed LyX document! Can't find end of local layout!")
1366 document.header[i+1 : i+1] = [
1367 "### Inserted by lyx2lyx (biblatex emulation) ###",
1368 "Provides natbib 1",
1369 "### End of insertion by lyx2lyx (biblatex emulation) ###"
# Revert the LyX 2.3 'keyonly' citation command: the whole citation inset is
# replaced by ERT containing just the bare key text.
# NOTE(review): interleaved lines appear missing from this listing.
1373 def revert_citekeyonly(document):
1374 " Revert keyonly cite command to ERT "
1378 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1381 j = find_end_of_inset(document.body, i)
1383 document.warning("Can't find end of citation inset at line %d!!" %(i))
1386 k = find_token(document.body, "LatexCommand", i, j)
1388 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1391 cmd = get_value(document.body, "LatexCommand", k)
# only 'keyonly' insets are reverted; everything else is left untouched
1392 if cmd != "keyonly":
1396 key = get_quoted_value(document.body, "key", i, j)
1398 document.warning("Citation inset at line %d does not have a key!" %(i))
1399 # Replace known new commands with ERT
1400 document.body[i:j+1] = put_cmd_in_ert([key])
# Revert the 2.3 \biblio_options header for natbib/jurabib: remove the header
# line and emulate it via a "PackageOptions <engine> <opts>" local layout entry.
# NOTE(review): interleaved lines appear missing from this listing.
1405 def revert_bibpackopts(document):
1406 " Revert support for natbib/jurabib package options "
1409 i = find_token(document.header, "\\cite_engine", 0)
1411 document.warning("Malformed document! Missing \\cite_engine")
1413 engine = get_value(document.header, "\\cite_engine", i)
# package options only apply to the natbib and jurabib engines
1416 if engine not in ["natbib", "jurabib"]:
1419 i = find_token(document.header, "\\biblio_options", 0)
1421 # Nothing to do if we have no options
1424 biblio_options = get_value(document.header, "\\biblio_options", i)
1425 del document.header[i]
1427 if not biblio_options:
1428 # Nothing to do for empty options
1431 i = find_token(document.header, "\\begin_local_layout", 0)
1433 k = find_token(document.header, "\\language", 0)
1435 # this should not happen
1436 document.warning("Malformed LyX document! No \\language header found!")
# no local layout yet: create an empty one just before \language
1438 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1441 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1443 # this should not happen
1444 document.warning("Malformed LyX document! Can't find end of local layout!")
1447 document.header[i+1 : i+1] = [
1448 "### Inserted by lyx2lyx (bibliography package options) ###",
1449 "PackageOptions " + engine + " " + biblio_options,
1450 "### End of insertion by lyx2lyx (bibliography package options) ###"
# Revert qualified citation lists (per-key pre/post texts, biblatex multicite
# commands such as \textcites) to ERT; with a non-biblatex engine only the
# pretextlist/posttextlist parameter lines are removed.
# NOTE(review): interleaved lines appear missing from this listing.
1454 def revert_qualicites(document):
1455 " Revert qualified citation list commands to ERT "
1457 # Citation insets that support qualified lists, with their LaTeX code
# mapping: LyX citation command -> biblatex multicite command
1461 "citet" : "textcites",
1462 "Citet" : "Textcites",
1463 "citep" : "parencites",
1464 "Citep" : "Parencites",
1465 "Footcite" : "Smartcites",
1466 "footcite" : "smartcites",
1467 "Autocite" : "Autocites",
1468 "autocite" : "autocites",
1473 i = find_token(document.header, "\\cite_engine", 0)
1475 document.warning("Malformed document! Missing \\cite_engine")
1477 engine = get_value(document.header, "\\cite_engine", i)
1479 biblatex = engine in ["biblatex", "biblatex-natbib"]
1483 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1486 j = find_end_of_inset(document.body, i)
1488 document.warning("Can't find end of citation inset at line %d!!" %(i))
1491 pres = find_token(document.body, "pretextlist", i, j)
1492 posts = find_token(document.body, "posttextlist", i, j)
# inset has no qualified-list parameters: nothing to revert here
1493 if pres == -1 and posts == -1:
1497 pretexts = get_quoted_value(document.body, "pretextlist", pres)
1498 posttexts = get_quoted_value(document.body, "posttextlist", posts)
1499 k = find_token(document.body, "LatexCommand", i, j)
1501 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1504 cmd = get_value(document.body, "LatexCommand", k)
1505 if biblatex and cmd in list(ql_citations.keys()):
1506 pre = get_quoted_value(document.body, "before", i, j)
1507 post = get_quoted_value(document.body, "after", i, j)
1508 key = get_quoted_value(document.body, "key", i, j)
1510 document.warning("Citation inset at line %d does not have a key!" %(i))
# pre/post text lists are tab-separated "<key> <text>" entries
1512 keys = key.split(",")
1513 prelist = pretexts.split("\t")
1516 ppp = pp.split(" ", 1)
1517 premap[ppp[0]] = ppp[1]
1518 postlist = posttexts.split("\t")
1521 ppp = pp.split(" ", 1)
1522 postmap[ppp[0]] = ppp[1]
1523 # Replace known new commands with ERT
# parentheses in global pre/post notes must be braced to protect the
# multicite () argument syntax
1524 if "(" in pre or ")" in pre:
1525 pre = "{" + pre + "}"
1526 if "(" in post or ")" in post:
1527 post = "{" + post + "}"
1528 res = "\\" + ql_citations[cmd]
1530 res += "(" + pre + ")"
1532 res += "(" + post + ")"
1536 if premap.get(kk, "") != "":
1537 res += "[" + premap[kk] + "]"
1538 if postmap.get(kk, "") != "":
1539 res += "[" + postmap[kk] + "]"
1540 elif premap.get(kk, "") != "":
1542 res += "{" + kk + "}"
1543 document.body[i:j+1] = put_cmd_in_ert([res])
1545 # just remove the params
# delete posttexts first so the pretexts index stays valid
1546 del document.body[posttexts]
1547 del document.body[pretexts]
# Command insets that gained a "literal" parameter in LyX 2.3.
1551 command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
# Add the new 'literal' parameter to each such inset: "false" for href
# (already fully latexified), "true" for all others.
# NOTE(review): interleaved lines appear missing from this listing.
1552 def convert_literalparam(document):
1553 " Add param literal "
1555 for inset in command_insets:
1558 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1561 j = find_end_of_inset(document.body, i)
1563 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
# skip over the existing parameter lines up to the first blank line
1566 while i < j and document.body[i].strip() != '':
1568 # href is already fully latexified. Here we can switch off literal.
1570 document.body.insert(i, "literal \"false\"")
1572 document.body.insert(i, "literal \"true\"")
# Remove the 2.3 'literal' parameter line again from all command insets.
# NOTE(review): interleaved lines appear missing from this listing.
1576 def revert_literalparam(document):
1577 " Remove param literal "
1579 for inset in command_insets:
1582 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1585 j = find_end_of_inset(document.body, i)
1587 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
1590 k = find_token(document.body, 'literal', i, j)
1594 del document.body[k]
# Revert the 2.3 \multibib header. With biblatex this becomes the
# refsection=<unit> package option plus ERT \bibbysection around bibtex
# insets; with bibtex the document is switched to bibtopic and the body is
# wrapped in \begin{btUnit}/\end{btUnit} ERT at each sectioning unit.
# NOTE(review): interleaved lines appear missing from this listing.
1598 def revert_multibib(document):
1599 " Revert multibib support "
1601 # 1. Get cite engine
1603 i = find_token(document.header, "\\cite_engine", 0)
1605 document.warning("Malformed document! Missing \\cite_engine")
1607 engine = get_value(document.header, "\\cite_engine", i)
1609 # 2. Do we use biblatex?
1611 if engine in ["biblatex", "biblatex-natbib"]:
1614 # 3. Store and remove multibib document header
1616 i = find_token(document.header, "\\multibib", 0)
1618 multibib = get_value(document.header, "\\multibib", i)
1619 del document.header[i]
1624 # 4. The easy part: Biblatex
1626 i = find_token(document.header, "\\biblio_options", 0)
1628 k = find_token(document.header, "\\use_bibtopic", 0)
1630 # this should not happen
1631 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
# no \biblio_options header yet: insert one carrying refsection=<unit>
1633 document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
1635 biblio_options = get_value(document.header, "\\biblio_options", i)
1637 biblio_options += ","
1638 biblio_options += "refsection=" + multibib
1639 document.header[i] = "\\biblio_options " + biblio_options
1644 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1647 j = find_end_of_inset(document.body, i)
1649 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1652 btprint = get_quoted_value(document.body, "btprint", i, j)
1653 if btprint != "bibbysection":
1656 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1657 # change btprint line
1658 k = find_token(document.body, "btprint", i, j)
1660 document.body[k] = "btprint \"btPrintCited\""
1661 # Insert ERT \\bibbysection and wrap bibtex inset to a Note
1662 pcmd = "bibbysection"
1664 pcmd += "[" + opts + "]"
1665 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1666 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1667 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1668 "status open", "", "\\begin_layout Plain Layout" ]
1669 repl += document.body[i:j+1]
1670 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1671 document.body[i:j+1] = repl
1677 # 5. More tricky: Bibtex/Bibtopic
1678 k = find_token(document.header, "\\use_bibtopic", 0)
1680 # this should not happen
1681 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1683 document.header[k] = "\\use_bibtopic true"
1685 # Possible units. This assumes that the LyX name follows the std,
1686 # which might not always be the case. But it's as good as we can get.
# mapping: \multibib value -> LyX sectioning layout name
1689 "chapter" : "Chapter",
1690 "section" : "Section",
1691 "subsection" : "Subsection",
1694 if multibib not in units.keys():
# BUG(review): 'nultibib' is an undefined name (typo for 'multibib');
# reaching this warning raises NameError instead of warning.
1695 document.warning("Unknown multibib value `%s'!" % nultibib)
1697 unit = units[multibib]
1701 i = find_token(document.body, "\\begin_layout " + unit, i)
# close the previous btUnit and open a new one before each unit heading
1705 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1706 "\\begin_inset ERT", "status open", "",
1707 "\\begin_layout Plain Layout", "", "",
1709 "end{btUnit}", "\\end_layout",
1710 "\\begin_layout Plain Layout", "",
1713 "\\end_layout", "", "\\end_inset", "", "",
1717 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1718 "\\begin_inset ERT", "status open", "",
1719 "\\begin_layout Plain Layout", "", "",
1722 "\\end_layout", "", "\\end_inset", "", "",
# close the last btUnit just before \end_body
1729 i = find_token(document.body, "\\end_body", i)
1730 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1731 "\\begin_inset ERT", "status open", "",
1732 "\\begin_layout Plain Layout", "", "",
1735 "\\end_layout", "", "\\end_inset", "", "",
# Revert \multibib child: with biblatex, insert ERT \newrefsection before each
# include inset; with bibtopic, wrap each include in \begin{btUnit}/\end{btUnit}
# ERT; otherwise load the chapterbib package in the preamble.
# NOTE(review): interleaved lines appear missing from this listing.
1739 def revert_chapterbib(document):
1740 " Revert chapterbib support "
1742 # 1. Get cite engine
1744 i = find_token(document.header, "\\cite_engine", 0)
1746 document.warning("Malformed document! Missing \\cite_engine")
1748 engine = get_value(document.header, "\\cite_engine", i)
1750 # 2. Do we use biblatex?
1752 if engine in ["biblatex", "biblatex-natbib"]:
1755 # 3. Store multibib document header value
1757 i = find_token(document.header, "\\multibib", 0)
1759 multibib = get_value(document.header, "\\multibib", i)
# only the "child" setting is handled by this routine
1761 if not multibib or multibib != "child":
1765 # 4. remove multibib header
1766 del document.header[i]
1770 # find include insets
1773 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1776 j = find_end_of_inset(document.body, i)
1778 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1781 parent = get_containing_layout(document.body, i)
1784 # Insert ERT \\newrefsection before inset
1785 beg = ["\\begin_layout Standard",
1786 "\\begin_inset ERT", "status open", "",
1787 "\\begin_layout Plain Layout", "", "",
1790 "\\end_layout", "", "\\end_inset", "", "",
1792 document.body[parbeg-1:parbeg-1] = beg
1797 # 6. Bibtex/Bibtopic
1798 i = find_token(document.header, "\\use_bibtopic", 0)
1800 # this should not happen
1801 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1803 if get_value(document.header, "\\use_bibtopic", i) == "true":
1804 # find include insets
1807 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1810 j = find_end_of_inset(document.body, i)
1812 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1815 parent = get_containing_layout(document.body, i)
1819 # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
1820 beg = ["\\begin_layout Standard",
1821 "\\begin_inset ERT", "status open", "",
1822 "\\begin_layout Plain Layout", "", "",
1825 "\\end_layout", "", "\\end_inset", "", "",
1827 end = ["\\begin_layout Standard",
1828 "\\begin_inset ERT", "status open", "",
1829 "\\begin_layout Plain Layout", "", "",
1832 "\\end_layout", "", "\\end_inset", "", "",
# insert the end wrapper first so parbeg stays valid
1834 document.body[parend+1:parend+1] = end
1835 document.body[parbeg-1:parbeg-1] = beg
1836 j += len(beg) + len(end)
1840 # 7. Chapterbib proper
1841 add_to_preamble(document, ["\\usepackage{chapterbib}"])
1844 def convert_dashligatures(document):
1845 "Set 'use_dash_ligatures' according to content."
1846 # Look for and remove dashligatures workaround from 2.3->2.2 reversion,
1847 # set use_dash_ligatures to True if found, to None else.
1848 use_dash_ligatures = del_complete_lines(document.preamble,
1849 ['% Added by lyx2lyx',
1850 r'\renewcommand{\textendash}{--}',
1851 r'\renewcommand{\textemdash}{---}']) or None
1853 if use_dash_ligatures is None:
1854 # Look for dashes (Documents by LyX 2.1 or older have "\twohyphens\n"
1855 # or "\threehyphens\n" as interim representation for -- an ---.)
1856 has_literal_dashes = False
1857 has_ligature_dashes = False
1859 for i, line in enumerate(document.body):
1860 # Skip some document parts where dashes are not converted
1861 if (i < j) or line.startswith("\\labelwidthstring"):
1863 if line.startswith("\\begin_inset"):
1865 it = line.split()[1]
# verbatim-like insets: dashes inside them are left alone
1868 if (it in ["CommandInset", "ERT", "External", "Formula",
1869 "FormulaMacro", "Graphics", "IPA", "listings"]
1870 or line.endswith("Flex Code")):
1871 j = find_end_of_inset(document.body, i)
1873 document.warning("Malformed LyX document: Can't "
# BUG(review): 'itype' is undefined here ('it' is the assigned name
# above); this warning path raises NameError.
1874 "find end of %s inset at line %d." % (itype, i))
1876 if line == "\\begin_layout LyX-Code":
1877 j = find_end_of_layout(document.body, i)
1879 document.warning("Malformed LyX document: "
# BUG(review): 'words' is undefined in this function; this warning
# path raises NameError.
1880 "Can't find end of %s layout at line %d" % (words[1],i))
1882 # literal dash followed by a word or no-break space:
1883 if re.search(u"[\u2013\u2014]([\w\u00A0]|$)", line,
1885 has_literal_dashes = True
1886 # ligature dash followed by word or no-break space on next line:
1887 if (re.search(r"(\\twohyphens|\\threehyphens)", line) and
1888 re.match(u"[\w\u00A0]", document.body[i+1], flags=re.UNICODE)):
1889 has_ligature_dashes = True
1890 if has_literal_dashes and has_ligature_dashes:
1891 # TODO: insert a warning note in the document?
1892 document.warning('This document contained both literal and '
1893 '"ligature" dashes.\n Line breaks may have changed. '
1894 'See UserGuide chapter 3.9.1 for details.')
1895 elif has_literal_dashes:
1896 use_dash_ligatures = False
1897 elif has_ligature_dashes:
1898 use_dash_ligatures = True
1899 # insert the setting if there is a preferred value
1900 if use_dash_ligatures is not None:
# insert just before the \graphics header line
1901 i = find_token(document.header, "\\graphics")
1902 document.header.insert(i, "\\use_dash_ligatures %s"
1903 % str(use_dash_ligatures).lower())
1906 def revert_dashligatures(document):
# NOTE(review): this docstring is not a raw string, so "\twodashes" contains
# a literal tab escape (\t); it should be an r-string or use escaped
# backslashes.
1907 """Remove font ligature settings for en- and em-dashes.
1908 Revert conversion of \twodashes or \threedashes to literal dashes."""
1909 use_dash_ligatures = del_value(document.header, "\\use_dash_ligatures")
# only LaTeX output with the setting enabled needs reverting
1910 if use_dash_ligatures != "true" or document.backend != "latex":
1914 for i, line in enumerate(document.body):
1915 # Skip some document parts where dashes are not converted
1916 if (i < j) or line.startswith("\\labelwidthstring"):
1917 new_body.append(line)
1919 if (line.startswith("\\begin_inset ") and
1920 line[13:].split()[0] in ["CommandInset", "ERT", "External",
1921 "Formula", "FormulaMacro", "Graphics", "IPA", "listings"]
1922 or line == "\\begin_inset Flex Code"):
1923 j = find_end_of_inset(document.body, i)
1925 document.warning("Malformed LyX document: Can't find end of "
# BUG(review): 'words' is undefined in this function; this warning
# path raises NameError.
1926 + words[1] + " inset at line " + str(i))
1927 new_body.append(line)
1929 if line == "\\begin_layout LyX-Code":
1930 j = find_end_of_layout(document.body, i)
1932 document.warning("Malformed LyX document: "
# BUG(review): 'words' is undefined here as well.
1933 "Can't find end of %s layout at line %d" % (words[1],i))
1934 new_body.append(line)
1936 # TODO: skip replacement in typewriter fonts
1937 line = line.replace(u'\u2013', '\\twohyphens\n')
1938 line = line.replace(u'\u2014', '\\threehyphens\n')
# NOTE(review): 'lines' is assigned but never used — the next statement
# recomputes line.split('\n'); dead local.
1939 lines = line.split('\n')
1940 new_body.extend(line.split('\n'))
1941 document.body = new_body
1942 # redefine the dash LICRs to use ligature dashes:
1943 add_to_preamble(document, [r'\renewcommand{\textendash}{--}',
1944 r'\renewcommand{\textemdash}{---}'])
# Revert the 2.3 Noto TeX-font selections: reset the header values to
# "default" and emulate the font via \renewcommand in the preamble.
# NOTE(review): interleaved lines appear missing from this listing.
1947 def revert_noto(document):
1948 " Revert Noto font definitions to LaTeX "
# only applies when TeX fonts are in use (non-TeX fonts keep their names)
1950 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
1952 i = find_token(document.header, "\\font_roman \"NotoSerif-TLF\"", 0)
1954 add_to_preamble(document, ["\\renewcommand{\\rmdefault}{NotoSerif-TLF}"])
1955 document.header[i] = document.header[i].replace("NotoSerif-TLF", "default")
1956 i = find_token(document.header, "\\font_sans \"NotoSans-TLF\"", 0)
1958 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{NotoSans-TLF}"])
1959 document.header[i] = document.header[i].replace("NotoSans-TLF", "default")
1960 i = find_token(document.header, "\\font_typewriter \"NotoMono-TLF\"", 0)
1962 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{NotoMono-TLF}"])
1963 document.header[i] = document.header[i].replace("NotoMono-TLF", "default")
# Revert the \xout (cross-out) font attribute to ERT and load ulem.
# NOTE(review): 'insert_to_preamble' is presumably imported on the truncated
# continuation of the lyx2lyx_tools import — confirm against the full file.
1966 def revert_xout(document):
1967 " Reverts \\xout font attribute "
1968 changed = revert_font_attrs(document.body, "\\xout", "\\xout")
1970 insert_to_preamble(document, \
1971 ['% for proper cross-out',
1972 '\\PassOptionsToPackage{normalem}{ulem}',
1973 '\\usepackage{ulem}'])
# NOTE(review): interleaved lines appear missing from this listing.
1976 def convert_mathindent(document):
# Add \is_math_indent: 1 when the class option "fleqn" is present (the
# option itself is removed), 0 otherwise.
1977 """Add the \\is_math_indent tag.
1979 k = find_token(document.header, "\\quotes_style") # where to insert
1980 # check if the document uses the class option "fleqn"
1981 options = get_value(document.header, "\\options")
1982 if 'fleqn' in options:
1983 document.header.insert(k, "\\is_math_indent 1")
1984 # delete the fleqn option
1985 i = find_token(document.header, "\\options")
1986 options = [option for option in options.split(",")
1987 if option.strip() != "fleqn"]
1989 document.header[i] = "\\options " + ",".join(options)
# fleqn was the only option: drop the whole \options line
1991 del document.header[i]
1993 document.header.insert(k, "\\is_math_indent 0")
# Revert math indentation: emulate \math_indentation via \setlength in the
# preamble and \is_math_indent via the "fleqn" class option.
# NOTE(review): interleaved lines appear missing from this listing.
1995 def revert_mathindent(document):
1996 " Define mathindent if set in the document "
1997 # emulate and delete \math_indentation
1998 value = get_value(document.header, "\\math_indentation",
1999 default="default", delete=True)
2000 if value != "default":
2001 add_to_preamble(document, [r"\setlength{\mathindent}{%s}"%value])
2002 # delete \is_math_indent and emulate via document class option
2003 if not get_bool_value(document.header, "\\is_math_indent", delete=True):
2005 i = find_token(document.header, "\\options")
2007 document.header[i] = document.header[i].replace("\\options ",
# no \options header yet: insert one before \use_default_options
2010 l = find_token(document.header, "\\use_default_options")
2011 document.header.insert(l, "\\options fleqn")
# Revert VSpace/HSpace insets whose length is given in percent of
# \baselineskip: replace the inset by ERT \vspace{...}/\hspace{...}.
# NOTE(review): interleaved lines appear missing from this listing.
2014 def revert_baselineskip(document):
2015 " Revert baselineskips to TeX code "
2020 regexp = re.compile(r'^.*baselineskip%.*$')
2021 i = find_re(document.body, regexp, i)
2024 vspaceLine = find_token(document.body, "\\begin_inset VSpace", i)
2026 # output VSpace inset as TeX code
2027 # first read out the values
2028 beg = document.body[i].rfind("VSpace ");
2029 end = document.body[i].rfind("baselineskip%");
2030 baselineskip = float(document.body[i][beg + 7:end]);
2031 # we store the value in percent, thus divide by 100
2032 baselineskip = baselineskip/100;
2033 baselineskip = str(baselineskip);
2034 # check if it is the starred version
2035 if document.body[i].find('*') != -1:
2039 # now output TeX code
2040 endInset = find_end_of_inset(document.body, i)
2042 document.warning("Malformed LyX document: Missing '\\end_inset' of VSpace inset.")
2045 document.body[vspaceLine: endInset + 1] = put_cmd_in_ert("\\vspace" + star + '{' + baselineskip + "\\baselineskip}")
2046 hspaceLine = find_token(document.body, "\\begin_inset space \\hspace", i - 1)
# NOTE(review): debug warnings left in — presumably leftover development
# output; consider removing.
2047 document.warning("hspaceLine: " + str(hspaceLine))
2048 document.warning("i: " + str(i))
2049 if hspaceLine == i - 1:
2050 # output space inset as TeX code
2051 # first read out the values
2052 beg = document.body[i].rfind("\\length ");
2053 end = document.body[i].rfind("baselineskip%");
2054 baselineskip = float(document.body[i][beg + 7:end]);
2055 document.warning("baselineskip: " + str(baselineskip))
2056 # we store the value in percent, thus divide by 100
2057 baselineskip = baselineskip/100;
2058 baselineskip = str(baselineskip);
2059 # check if it is the starred version
2060 if document.body[i-1].find('*') != -1:
2064 # now output TeX code
2065 endInset = find_end_of_inset(document.body, i)
2067 document.warning("Malformed LyX document: Missing '\\end_inset' of space inset.")
2070 document.body[hspaceLine: endInset + 1] = put_cmd_in_ert("\\hspace" + star + '{' + baselineskip + "\\baselineskip}")
# Revert placement options of rotated (sideways) floats: replace the float
# inset delimiters by ERT \begin{sideways<type>}[<placement>] ... \end{...}
# and load the rotfloat package.
# NOTE(review): interleaved lines appear missing from this listing.
2075 def revert_rotfloat(document):
2076 " Revert placement options for rotated floats "
2081 i = find_token(document.body, "sideways true", i)
2083 regexp = re.compile(r'^.*placement.*$')
2084 j = find_re(document.body, regexp, i-2)
2092 # we found a sideways float with placement options
2093 # at first store the placement
2094 beg = document.body[i-2].rfind(" ");
2095 placement = document.body[i-2][beg+1:]
2096 # check if the option'H' is used
2097 if placement.find("H") != -1:
# 'H' placement needs the float package
2098 add_to_preamble(document, ["\\usepackage{float}"])
2099 # now check if it is a starred type
2100 if document.body[i-1].find("wide true") != -1:
2104 # store the float type
2105 beg = document.body[i-3].rfind(" ");
2106 fType = document.body[i-3][beg+1:]
2107 # now output TeX code
2108 endInset = find_end_of_inset(document.body, i-3)
2110 document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
# replace the end first so the begin index stays valid
2113 document.body[endInset-2: endInset+1] = put_cmd_in_ert("\\end{sideways" + fType + star + '}')
2114 document.body[i-3: i+2] = put_cmd_in_ert("\\begin{sideways" + fType + star + "}[" + placement + ']')
2115 add_to_preamble(document, ["\\usepackage{rotfloat}"])
# Body lines of the zero-width space inset that LyX 2.2 used to emulate
# \SpecialChar allowbreak (continuation lines are missing from this listing).
2120 allowbreak_emulation = [r"\begin_inset space \hspace{}",
# Replace each such emulation inset by \SpecialChar allowbreak appended to
# the preceding line.
2125 def convert_allowbreak(document):
2126 " Zero widths Space-inset -> \SpecialChar allowbreak. "
2127 lines = document.body
2128 i = find_complete_lines(lines, allowbreak_emulation, 2)
2130 lines[i-1:i+4] = [lines[i-1] + r"\SpecialChar allowbreak"]
2131 i = find_complete_lines(lines, allowbreak_emulation, i)
# Inverse of convert_allowbreak: split \SpecialChar allowbreak back into the
# zero-width space emulation inset.
2134 def revert_allowbreak(document):
2135 " \SpecialChar allowbreak -> Zero widths Space-inset. "
2137 lines = document.body
2138 while i < len(lines):
2139 if lines[i].endswith(r"\SpecialChar allowbreak"):
2140 lines[i:i+1] = [lines[i].replace(r"\SpecialChar allowbreak", "")
2141 ] + allowbreak_emulation
# Add \math_number_before: 1 when the class option "leqno" is present (the
# option is removed from \options), 0 otherwise.
# NOTE(review): interleaved lines appear missing from this listing.
2147 def convert_mathnumberpos(document):
2148 " add the \\math_number_before tag "
2149 # check if the document uses the class option "leqno"
2150 k = find_token(document.header, "\\quotes_style", 0)
2151 m = find_token(document.header, "\\options", 0)
2152 regexp = re.compile(r'^.*leqno.*')
2153 i = find_re(document.header, regexp, 0)
# only act when "leqno" occurs on the \options line itself
2154 if i != -1 and i == m:
2155 document.header.insert(k, "\\math_number_before 1")
2156 # delete the found option
2157 document.header[i] = document.header[i].replace(",leqno", "")
2158 document.header[i] = document.header[i].replace(", leqno", "")
2159 document.header[i] = document.header[i].replace("leqno,", "")
2160 j = find_re(document.header, regexp, 0)
2162 # then we have leqno as the only option
2163 del document.header[i]
2165 document.header.insert(k, "\\math_number_before 0")
# Revert \math_number_before: when it is 1, re-add the "leqno" class option;
# the tag line itself is removed in either case.
# NOTE(review): interleaved lines appear missing from this listing.
2168 def revert_mathnumberpos(document):
2169 " add the document class option leqno"
2170 regexp = re.compile(r'(\\math_number_before 1)')
2171 i = find_re(document.header, regexp, 0)
2173 regexp = re.compile(r'(\\math_number_before)')
2174 j = find_re(document.header, regexp, 0)
2175 del document.header[j]
2177 k = find_token(document.header, "\\options", 0)
2179 document.header[k] = document.header[k].replace("\\options", "\\options leqno,")
2180 del document.header[i]
# no \options header yet: insert one before \use_default_options
2182 l = find_token(document.header, "\\use_default_options", 0)
2183 document.header.insert(l, "\\options leqno")
2184 del document.header[i + 1]
# Rename \math_number_before (1/0) to \math_numbering_side (left/default) and
# additionally map the "reqno" class option to \math_numbering_side right,
# removing the option from \options.
# NOTE(review): interleaved lines appear missing from this listing.
2187 def convert_mathnumberingname(document):
2188 " rename the \\math_number_before tag to \\math_numbering_side "
2189 regexp = re.compile(r'(\\math_number_before 1)')
2190 i = find_re(document.header, regexp, 0)
2192 document.header[i] = "\\math_numbering_side left"
2193 regexp = re.compile(r'(\\math_number_before 0)')
2194 i = find_re(document.header, regexp, 0)
2196 document.header[i] = "\\math_numbering_side default"
2197 # check if the document uses the class option "reqno"
2198 k = find_token(document.header, "\\math_numbering_side", 0)
2199 m = find_token(document.header, "\\options", 0)
2200 regexp = re.compile(r'^.*reqno.*')
2201 i = find_re(document.header, regexp, 0)
# only act when "reqno" occurs on the \options line itself
2202 if i != -1 and i == m:
2203 document.header[k] = "\\math_numbering_side right"
2204 # delete the found option
2205 document.header[i] = document.header[i].replace(",reqno", "")
2206 document.header[i] = document.header[i].replace(", reqno", "")
2207 document.header[i] = document.header[i].replace("reqno,", "")
2208 j = find_re(document.header, regexp, 0)
2210 # then we have reqno as the only option
2211 del document.header[i]
# Rename \math_numbering_side back to \math_number_before: left -> 1,
# right -> 0 plus the "reqno" class option, default -> 0.
# NOTE(review): interleaved lines appear missing from this listing.
2214 def revert_mathnumberingname(document):
2215 " rename the \\math_numbering_side tag back to \\math_number_before "
2217 regexp = re.compile(r'(\\math_numbering_side left)')
2218 i = find_re(document.header, regexp, 0)
2220 document.header[i] = "\\math_number_before 1"
2221 # add the option reqno and delete the tag
2222 regexp = re.compile(r'(\\math_numbering_side right)')
2223 i = find_re(document.header, regexp, 0)
2225 document.header[i] = "\\math_number_before 0"
2226 k = find_token(document.header, "\\options", 0)
2228 document.header[k] = document.header[k].replace("\\options", "\\options reqno,")
# no \options header yet: insert one before \use_default_options
2230 l = find_token(document.header, "\\use_default_options", 0)
2231 document.header.insert(l, "\\options reqno")
2232 # add the math_number_before tag
2233 regexp = re.compile(r'(\\math_numbering_side default)')
2234 i = find_re(document.header, regexp, 0)
2236 document.header[i] = "\\math_number_before 0"
# Add the \use_minted header tag (default 0), inserted before "\index ".
2239 def convert_minted(document):
2240 " add the \\use_minted tag "
2241 i = find_token(document.header, "\\index ")
2242 document.header.insert(i, "\\use_minted 0")
# Remove the \use_minted header tag again.
2245 def revert_minted(document):
2246 " remove the \\use_minted tag "
2247 i = find_token(document.header, "\\use_minted", 0)
2249 document.header.pop(i)
2256 supported_versions = ["2.3.0", "2.3"]
2258 [509, [convert_microtype]],
2259 [510, [convert_dateinset]],
2260 [511, [convert_ibranches]],
2261 [512, [convert_beamer_article_styles]],
2265 [516, [convert_inputenc]],
2267 [518, [convert_iopart]],
2268 [519, [convert_quotestyle]],
2270 [521, [convert_frenchquotes]],
2281 [532, [convert_literalparam]],
2284 [535, [convert_dashligatures]],
2287 [538, [convert_mathindent]],
2290 [541, [convert_allowbreak]],
2291 [542, [convert_mathnumberpos]],
2292 [543, [convert_mathnumberingname]],
2293 [544, [convert_minted]]
2297 [543, [revert_minted]],
2298 [542, [revert_mathnumberingname]],
2299 [541, [revert_mathnumberpos]],
2300 [540, [revert_allowbreak]],
2301 [539, [revert_rotfloat]],
2302 [538, [revert_baselineskip]],
2303 [537, [revert_mathindent]],
2304 [536, [revert_xout]],
2305 [535, [revert_noto]],
2306 [534, [revert_dashligatures]],
2307 [533, [revert_chapterbib]],
2308 [532, [revert_multibib]],
2309 [531, [revert_literalparam]],
2310 [530, [revert_qualicites]],
2311 [529, [revert_bibpackopts]],
2312 [528, [revert_citekeyonly]],
2313 [527, [revert_biblatex]],
2314 [526, [revert_noprefix]],
2315 [525, [revert_plural_refs]],
2316 [524, [revert_labelonly]],
2317 [523, [revert_crimson, revert_cochinealmath]],
2318 [522, [revert_cjkquotes]],
2319 [521, [revert_dynamicquotes]],
2320 [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
2321 [519, [revert_plainquote]],
2322 [518, [revert_quotestyle]],
2323 [517, [revert_iopart]],
2324 [516, [revert_quotes]],
2326 [514, [revert_urdu, revert_syriac]],
2327 [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
2328 [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
2329 [511, [revert_beamer_article_styles]],
2330 [510, [revert_ibranches]],
2332 [508, [revert_microtype]]
2336 if __name__ == "__main__":