1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2016 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.3"""
25 # Uncomment only what you need to import, please.
from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
    find_token, find_end_of_inset, get_value, get_bool_value, \
    get_containing_layout, get_quoted_value, del_token, find_re
#  find_tokens, find_token_exact, is_in_inset, \
#  check_token, get_option_value

from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, revert_font_attrs, \
    insert_to_preamble
#  get_ert, lyx2latex, \
#  lyx2verbatim, length_in_bp, convert_info_insets
#  latex_length, revert_flex_inset, hex2ratio, str2bool
39 ####################################################################
40 # Private helper functions
44 ###############################################################################
46 ### Conversion and reversion routines
48 ###############################################################################
def convert_microtype(document):
    " Add microtype settings. "
    i = find_token(document.header, "\\font_tt_scale", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        # fall back to appending the new header line at the end
        i = len(document.header) - 1

    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if j == -1:
        # microtype was not loaded manually: keep it off
        document.header.insert(i + 1, "\\use_microtype false")
    else:
        # microtype was loaded via the preamble: enable the new
        # header setting and drop the now-redundant preamble line
        document.header.insert(i + 1, "\\use_microtype true")
        del document.preamble[j]
def revert_microtype(document):
    " Remove microtype settings. "
    i = find_token(document.header, "\\use_microtype", 0)
    if i == -1:
        return
    use_microtype = get_bool_value(document.header, "\\use_microtype", i)
    del document.header[i]
    if use_microtype:
        # re-create the old manual preamble loading
        add_to_preamble(document, ["\\usepackage{microtype}"])
def convert_dateinset(document):
    ' Convert date external inset to ERT '
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            i += 1
            continue
        if get_value(document.body, 'template', i, j) == "Date":
            # replace the whole external inset by ERT \today
            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
        i += 1
def convert_inputenc(document):
    " Replace no longer supported input encoding settings. "
    i = find_token(document.header, "\\inputenc", 0)
    if i == -1:
        return
    # "pt254" was a typoed encoding name; the correct one is "pt154"
    if get_value(document.header, "\\inputencoding", i) == "pt254":
        document.header[i] = "\\inputencoding pt154"
def convert_ibranches(document):
    ' Add "inverted 0" to branch insets'
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        # new format: every branch inset carries an "inverted" flag
        document.body.insert(i + 1, "inverted 0")
        i += 1
def revert_ibranches(document):
    ' Convert inverted branches to explicit anti-branches'
    # Get list of branches and their selection status
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1
        # the value tells us whether the branch is selected
        ourbranches[branch] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if branch not in ibranches:
                # invent a unique anti-branch name
                antibranch = "Anti-" + branch
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch
        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # 1 - ourbranches[old] inverts the selection status of the old
        # branch (0 <-> 1); plain "- 1" would yield -1 for unselected ones
        lines = ["\\branch " + new,
                 "\\selected " + str(1 - ourbranches[old])]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
def revert_beamer_article_styles(document):
    " Include (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # no local layout yet: create an empty one before \language
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        "     Family                Roman",
        "     Series                Medium",
        "     Shape                 Up",
        "     Size                  Normal",
        "     Color                 None",
        "EndFont",
        "Preamble",
        " \\usepackage{beamerarticle,pgf}",
        " % this default might be overridden by plain title style",
        " \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
        " \\AtBeginDocument{",
        " \\let\\origtableofcontents=\\tableofcontents",
        " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        " }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]
def convert_beamer_article_styles(document):
    " Remove included (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"

    if document.language == "bosnian":
        document.language = "english"
        i = find_token(document.header, "\\language bosnian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # bosnian needs babel
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend the language to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options bosnian")
def revert_friulan(document):
    "Set the document language to English but assure Friulan output"

    if document.language == "friulan":
        document.language = "english"
        i = find_token(document.header, "\\language friulan", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # friulan needs babel
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend the language to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options friulan")
def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"

    if document.language == "macedonian":
        document.language = "english"
        i = find_token(document.header, "\\language macedonian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # macedonian needs babel
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend the language to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options macedonian")
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language == "piedmontese":
        document.language = "english"
        i = find_token(document.header, "\\language piedmontese", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # piedmontese needs babel
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend the language to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options piedmontese,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options piedmontese")
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language == "romansh":
        document.language = "english"
        i = find_token(document.header, "\\language romansh", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # romansh needs babel
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend the language to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options romansh,")
        else:
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options romansh")
def revert_amharic(document):
    "Set the document language to English but assure Amharic output"

    if document.language == "amharic":
        document.language = "english"
        i = find_token(document.header, "\\language amharic", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # amharic is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{amharic}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{amharic}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_asturian(document):
    "Set the document language to English but assure Asturian output"

    if document.language == "asturian":
        document.language = "english"
        i = find_token(document.header, "\\language asturian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # asturian is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{asturian}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{asturian}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_kannada(document):
    "Set the document language to English but assure Kannada output"

    if document.language == "kannada":
        document.language = "english"
        i = find_token(document.header, "\\language kannada", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # kannada is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{kannada}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{kannada}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_khmer(document):
    "Set the document language to English but assure Khmer output"

    if document.language == "khmer":
        document.language = "english"
        i = find_token(document.header, "\\language khmer", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # khmer is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{khmer}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{khmer}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_urdu(document):
    "Set the document language to English but assure Urdu output"

    if document.language == "urdu":
        document.language = "english"
        i = find_token(document.header, "\\language urdu", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # urdu is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{urdu}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{urdu}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_syriac(document):
    "Set the document language to English but assure Syriac output"

    if document.language == "syriac":
        document.language = "english"
        i = find_token(document.header, "\\language syriac", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # syriac is output via polyglossia: load the language and
        # switch to it at the very start of the body (in ERT)
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{syriac}}"])
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{syriac}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_quotes(document):
    " Revert Quote Insets in verbatim or Hebrew context to plain quotes "

    # First handle verbatim insets
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                # closing quotes (code letter ends in "s") become ',
                # all others become "
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew context: quote insets are not supported there
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # determine the language in effect at the inset
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            # no explicit \lang in this paragraph: document language rules
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l
def revert_iopart(document):
    " Input new styles via local layout "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # no local layout yet: create an empty one before \language
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def convert_quotestyle(document):
    " Convert \\quotes_language to \\quotes_style "
    i = find_token(document.header, "\\quotes_language", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    val = get_value(document.header, "\\quotes_language", i)
    document.header[i] = "\\quotes_style " + val
def revert_quotestyle(document):
    " Revert \\quotes_style to \\quotes_language "
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    val = get_value(document.header, "\\quotes_style", i)
    document.header[i] = "\\quotes_language " + val
def revert_plainquote(document):
    " Revert plain quote insets "

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets: replace plain quote insets by literal characters
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # closing marks ("...s") become ', all others become "
        replace = "\""
        if document.body[k].endswith("s"):
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l
def convert_frenchquotes(document):
    " Convert french quote insets to swiss "

    # First, convert the style setting
    i = find_token(document.header, "\\quotes_style french", 0)
    if i != -1:
        document.header[i] = "\\quotes_style swiss"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        # style letter f (french) -> c (swiss)
        newval = val.replace("f", "c", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swissquotes(document):
    " Revert swiss quote insets to french "

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style swiss", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        # style letter c (swiss) -> f (french)
        newval = val.replace("c", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_britishquotes(document):
    " Revert british quote insets to english "

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style british", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("b", "e", 1)
        # british swaps single and double marks relative to english
        if val[2] == "d":
            newval = newval.replace("d", "s")
        else:
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swedishgquotes(document):
    " Revert swedish quote insets "

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style swedishg", 0)
    if i != -1:
        document.header[i] = "\\quotes_style danish"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        # NOTE(review): branch condition reconstructed — doubles map to
        # danish guillemets (right marks flipped), singles to swedish style
        if val[2] == "d":
            newval = val.replace("w", "a", 1).replace("r", "l")
        else:
            newval = val.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchquotes(document):
    " Revert french inner quote insets "

    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        # only the inner (single) french marks are reverted: they become
        # english double marks
        if val[2] == "s":
            newval = val.replace("f", "e", 1).replace("s", "d")
            document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style frenchin", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("i", "f", 1)
        # frenchin inner marks are single; french uses doubles
        if val[2] == "s":
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_russianquotes(document):
    " Revert russian quote insets "

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style russian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        # russian style: inner (single) marks are german-style doubles,
        # outer (double) marks are french guillemets
        if val[2] == "s":
            newval = val.replace("r", "g", 1).replace("s", "d")
        else:
            newval = val.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_dynamicquotes(document):
    " Revert dynamic quote insets "

    # First, revert header
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get the quote style, to hard-code it into the insets
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # map the style name to its one-letter inset code
    s = "e"
    if style == "english":
        s = "e"
    elif style == "swedish":
        s = "s"
    elif style == "german":
        s = "g"
    elif style == "polish":
        s = "p"
    elif style == "swiss":
        s = "c"
    elif style == "danish":
        s = "a"
    elif style == "plain":
        s = "q"
    elif style == "british":
        s = "b"
    elif style == "swedishg":
        s = "w"
    elif style == "french":
        s = "f"
    elif style == "frenchin":
        s = "i"
    elif style == "russian":
        s = "r"

    # now transform the insets: "x" (dynamic) becomes the concrete style
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
def revert_cjkquotes(document):
    " Revert cjk quote insets "

    # Get the quote style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the respective CJK letter code
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # CJK corner bracket quotes ("j")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # in CJK context we can use the real characters, otherwise math
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # get_value returns "Quotes xyz"; [7:] strips the leading "Quotes "
        # NOTE(review): search starts at i, as in the original — verify it
        # always resolves to the inset at k
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "d":
            # double (white) corner brackets
            if val[1] == "l":
                # opening mark
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # closing mark
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # single corner brackets
            if val[1] == "l":
                # opening mark
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # closing mark
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l

    # CJK angle bracket quotes ("k")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "

    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            # We need to use a LaTeX substitute
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            document.header[i] = document.header[i].replace("cochineal", "default")
def revert_cochinealmath(document):
    " Revert cochineal newtxmath definitions to LaTeX "

    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # not a labelonly reference: leave it alone
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        # output the raw label text instead of the inset
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
def revert_plural_refs(document):
    " Revert plural and capitalized references "
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # find_token returns -1 on a miss; comparing with 0 misclassified
    # both the found-at-line-0 and the not-found cases
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # token spelling fixed: LyX files write "LatexCommand" (see
        # revert_labelonly / revert_noprefix); "LaTeXCommand" never matched
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "s"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        noprefix = False
        if k != -1:
            noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        # output only the suffix (label without its prefix)
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
# Reverts LyX 2.3 biblatex support: switches \cite_engine back to natbib,
# moves biblatex-only header tags into preamble code, rewrites bibtex and
# citation insets to ERT, and emulates natbib via a local layout snippet.
# NOTE(review): numbered excerpt — guard/return/else lines between the
# numbered statements are elided; do not assume linear control flow.
1175 def revert_biblatex(document):
1176 " Revert biblatex support "
1182 # 1. Get cite engine
1184 i = find_token(document.header, "\\cite_engine", 0)
1186 document.warning("Malformed document! Missing \\cite_engine")
1188 engine = get_value(document.header, "\\cite_engine", i)
1190 # 2. Store biblatex state and revert to natbib
1192 if engine in ["biblatex", "biblatex-natbib"]:
1194 document.header[i] = "\\cite_engine natbib"
1196 # 3. Store and remove new document headers
1198 i = find_token(document.header, "\\biblatex_bibstyle", 0)
1200 bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
1201 del document.header[i]
1204 i = find_token(document.header, "\\biblatex_citestyle", 0)
1206 citestyle = get_value(document.header, "\\biblatex_citestyle", i)
1207 del document.header[i]
1210 i = find_token(document.header, "\\biblio_options", 0)
1212 biblio_options = get_value(document.header, "\\biblio_options", i)
1213 del document.header[i]
# Build the biblatex package option string from the harvested header values.
1216 bbxopts = "[natbib=true"
1218 bbxopts += ",bibstyle=" + bibstyle
1220 bbxopts += ",citestyle=" + citestyle
1221 if biblio_options != "":
1222 bbxopts += "," + biblio_options
1224 add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")
# Bibtex insets: harvest bib files, drop biblatexopts, and replace the
# inset with an ERT \printbibliography wrapped in a Note inset.
1234 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1237 j = find_end_of_inset(document.body, i)
1239 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1242 bibs = get_quoted_value(document.body, "bibfiles", i, j)
1243 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1246 bibresources += bibs.split(",")
1248 document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
1249 # remove biblatexopts line
1250 k = find_token(document.body, "biblatexopts", i, j)
1252 del document.body[k]
1253 # Re-find inset end line
1254 j = find_end_of_inset(document.body, i)
1255 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1257 pcmd = "printbibliography"
1259 pcmd += "[" + opts + "]"
1260 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1261 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1262 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1263 "status open", "", "\\begin_layout Plain Layout" ]
1264 repl += document.body[i:j+1]
1265 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1266 document.body[i:j+1] = repl
# Every collected bib file becomes an \addbibresource preamble line.
1272 for b in bibresources:
1273 add_to_preamble(document, "\\addbibresource{" + b + ".bib}")
1275 # 2. Citation insets
1277 # Specific citation insets used in biblatex that need to be reverted to ERT
# Map: LyX inset command -> biblatex LaTeX command emitted in ERT.
1280 "citebyear" : "citeyear",
1281 "citeyear" : "cite*",
1282 "Footcite" : "Smartcite",
1283 "footcite" : "smartcite",
1284 "Autocite" : "Autocite",
1285 "autocite" : "autocite",
1286 "citetitle" : "citetitle",
1287 "citetitle*" : "citetitle*",
1288 "fullcite" : "fullcite",
1289 "footfullcite" : "footfullcite",
1290 "supercite" : "supercite",
1291 "citeauthor" : "citeauthor",
1292 "citeauthor*" : "citeauthor*",
1293 "Citeauthor" : "Citeauthor",
1294 "Citeauthor*" : "Citeauthor*"
1297 # All commands accepted by LyX < 2.3. Everything else throws an error.
1298 old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
1299 "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
1300 "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
1301 "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
1302 "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
1303 "footcitet", "footcitep", "footcitealt", "footcitealp",\
1304 "footciteauthor", "footciteyear", "footciteyearpar",\
1305 "citefield", "citetitle", "cite*" ]
1309 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1312 j = find_end_of_inset(document.body, i)
1314 document.warning("Can't find end of citation inset at line %d!!" %(i))
1317 k = find_token(document.body, "LatexCommand", i, j)
1319 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1322 cmd = get_value(document.body, "LatexCommand", k)
1323 if biblatex and cmd in list(new_citations.keys()):
1324 pre = get_quoted_value(document.body, "before", i, j)
1325 post = get_quoted_value(document.body, "after", i, j)
1326 key = get_quoted_value(document.body, "key", i, j)
1328 document.warning("Citation inset at line %d does not have a key!" %(i))
1330 # Replace known new commands with ERT
1331 res = "\\" + new_citations[cmd]
1333 res += "[" + pre + "]"
1335 res += "[" + post + "]"
1338 res += "{" + key + "}"
1339 document.body[i:j+1] = put_cmd_in_ert([res])
1340 elif cmd not in old_citations:
1341 # Reset unknown commands to cite. This is what LyX does as well
1342 # (but LyX 2.2 would break on unknown commands)
1343 document.body[k] = "LatexCommand cite"
1344 document.warning("Reset unknown cite command '%s' with cite" % cmd)
1347 # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
# Create the local layout block if absent, anchored just before \language.
1349 i = find_token(document.header, "\\begin_local_layout", 0)
1351 k = find_token(document.header, "\\language", 0)
1353 # this should not happen
1354 document.warning("Malformed LyX document! No \\language header found!")
1356 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1359 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1361 # this should not happen
1362 document.warning("Malformed LyX document! Can't find end of local layout!")
1365 document.header[i+1 : i+1] = [
1366 "### Inserted by lyx2lyx (biblatex emulation) ###",
1367 "Provides natbib 1",
1368 "### End of insertion by lyx2lyx (biblatex emulation) ###"
# Replaces every citation inset using "LatexCommand keyonly" with ERT that
# contains just the raw citation key(s).
# NOTE(review): numbered excerpt — guard/return lines are elided.
1372 def revert_citekeyonly(document):
1373 " Revert keyonly cite command to ERT "
1377 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1380 j = find_end_of_inset(document.body, i)
1382 document.warning("Can't find end of citation inset at line %d!!" %(i))
1385 k = find_token(document.body, "LatexCommand", i, j)
1387 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1390 cmd = get_value(document.body, "LatexCommand", k)
1391 if cmd != "keyonly":
1395 key = get_quoted_value(document.body, "key", i, j)
1397 document.warning("Citation inset at line %d does not have a key!" %(i))
1398 # Replace known new commands with ERT
# The whole inset (lines i..j) is replaced by the bare key text.
1399 document.body[i:j+1] = put_cmd_in_ert([key])
# Moves \biblio_options (for the natbib/jurabib engines) out of the header
# and into a local-layout "PackageOptions" line.
# NOTE(review): numbered excerpt — guard/return lines are elided.
1404 def revert_bibpackopts(document):
1405 " Revert support for natbib/jurabib package options "
1408 i = find_token(document.header, "\\cite_engine", 0)
1410 document.warning("Malformed document! Missing \\cite_engine")
1412 engine = get_value(document.header, "\\cite_engine", i)
1415 if engine not in ["natbib", "jurabib"]:
1418 i = find_token(document.header, "\\biblio_options", 0)
1420 # Nothing to do if we have no options
1423 biblio_options = get_value(document.header, "\\biblio_options", i)
1424 del document.header[i]
1426 if not biblio_options:
1427 # Nothing to do for empty options
# Ensure a local layout block exists, anchored just before \language.
1430 i = find_token(document.header, "\\begin_local_layout", 0)
1432 k = find_token(document.header, "\\language", 0)
1434 # this should not happen
1435 document.warning("Malformed LyX document! No \\language header found!")
1437 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1440 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1442 # this should not happen
1443 document.warning("Malformed LyX document! Can't find end of local layout!")
1446 document.header[i+1 : i+1] = [
1447 "### Inserted by lyx2lyx (bibliography package options) ###",
1448 "PackageOptions " + engine + " " + biblio_options,
1449 "### End of insertion by lyx2lyx (bibliography package options) ###"
# Reverts qualified citation lists (per-key pre/post texts stored in the
# tab-separated "pretextlist"/"posttextlist" params) to biblatex ERT of the
# form \textcites(pre)(post)[pre][post]{key}... ; for non-biblatex engines
# only the new params are removed.
# NOTE(review): numbered excerpt — guard/return/else lines are elided.
1453 def revert_qualicites(document):
1454 " Revert qualified citation list commands to ERT "
1456 # Citation insets that support qualified lists, with their LaTeX code
# Map: LyX inset command -> biblatex multi-cite LaTeX command.
1460 "citet" : "textcites",
1461 "Citet" : "Textcites",
1462 "citep" : "parencites",
1463 "Citep" : "Parencites",
1464 "Footcite" : "Smartcites",
1465 "footcite" : "smartcites",
1466 "Autocite" : "Autocites",
1467 "autocite" : "autocites",
1472 i = find_token(document.header, "\\cite_engine", 0)
1474 document.warning("Malformed document! Missing \\cite_engine")
1476 engine = get_value(document.header, "\\cite_engine", i)
1478 biblatex = engine in ["biblatex", "biblatex-natbib"]
1482 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1485 j = find_end_of_inset(document.body, i)
1487 document.warning("Can't find end of citation inset at line %d!!" %(i))
1490 pres = find_token(document.body, "pretextlist", i, j)
1491 posts = find_token(document.body, "posttextlist", i, j)
1492 if pres == -1 and posts == -1:
1496 pretexts = get_quoted_value(document.body, "pretextlist", pres)
1497 posttexts = get_quoted_value(document.body, "posttextlist", posts)
1498 k = find_token(document.body, "LatexCommand", i, j)
1500 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1503 cmd = get_value(document.body, "LatexCommand", k)
1504 if biblatex and cmd in list(ql_citations.keys()):
1505 pre = get_quoted_value(document.body, "before", i, j)
1506 post = get_quoted_value(document.body, "after", i, j)
1507 key = get_quoted_value(document.body, "key", i, j)
1509 document.warning("Citation inset at line %d does not have a key!" %(i))
1511 keys = key.split(",")
# Each list entry is "<key> <text>"; split once to build key -> text maps.
1512 prelist = pretexts.split("\t")
1515 ppp = pp.split(" ", 1)
1516 premap[ppp[0]] = ppp[1]
1517 postlist = posttexts.split("\t")
1520 ppp = pp.split(" ", 1)
1521 postmap[ppp[0]] = ppp[1]
1522 # Replace known new commands with ERT
# Global pre/post notes containing parens must be brace-protected, since
# they are emitted inside the (...)(...) multi-cite arguments.
1523 if "(" in pre or ")" in pre:
1524 pre = "{" + pre + "}"
1525 if "(" in post or ")" in post:
1526 post = "{" + post + "}"
1527 res = "\\" + ql_citations[cmd]
1529 res += "(" + pre + ")"
1531 res += "(" + post + ")"
1535 if premap.get(kk, "") != "":
1536 res += "[" + premap[kk] + "]"
1537 if postmap.get(kk, "") != "":
1538 res += "[" + postmap[kk] + "]"
1539 elif premap.get(kk, "") != "":
1541 res += "{" + kk + "}"
1542 document.body[i:j+1] = put_cmd_in_ert([res])
1544 # just remove the params
# Delete posttexts first: it is the later line, so deleting it does not
# shift the index of the earlier pretexts line.
1545 del document.body[posttexts]
1546 del document.body[pretexts]
# Inset types whose "literal" parameter is handled by the two functions
# convert_literalparam / revert_literalparam in this file.
1550 command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
# Inserts a 'literal "true"/"false"' parameter line into every command inset
# listed in command_insets (false for href, true for the others).
# NOTE(review): numbered excerpt — loop/guard lines are elided.
1551 def convert_literalparam(document):
1552 " Add param literal "
1554 for inset in command_insets:
1557 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1560 j = find_end_of_inset(document.body, i)
1562 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
# Skip past the existing parameter lines to the first blank line, so the
# new "literal" line is appended after the last parameter.
1565 while i < j and document.body[i].strip() != '':
1567 # href is already fully latexified. Here we can switch off literal.
1569 document.body.insert(i, "literal \"false\"")
1571 document.body.insert(i, "literal \"true\"")
# Removes the 'literal' parameter line from every command inset listed in
# command_insets (inverse of convert_literalparam).
# NOTE(review): numbered excerpt — loop/guard lines are elided.
1575 def revert_literalparam(document):
1576 " Remove param literal "
1578 for inset in command_insets:
1581 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1584 j = find_end_of_inset(document.body, i)
1586 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
1589 k = find_token(document.body, 'literal', i, j)
1593 del document.body[k]
1597 def revert_multibib(document):
1598 " Revert multibib support "
1600 # 1. Get cite engine
1602 i = find_token(document.header, "\\cite_engine", 0)
1604 document.warning("Malformed document! Missing \\cite_engine")
1606 engine = get_value(document.header, "\\cite_engine", i)
1608 # 2. Do we use biblatex?
1610 if engine in ["biblatex", "biblatex-natbib"]:
1613 # 3. Store and remove multibib document header
1615 i = find_token(document.header, "\\multibib", 0)
1617 multibib = get_value(document.header, "\\multibib", i)
1618 del document.header[i]
1623 # 4. The easy part: Biblatex
1625 i = find_token(document.header, "\\biblio_options", 0)
1627 k = find_token(document.header, "\\use_bibtopic", 0)
1629 # this should not happen
1630 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1632 document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
1634 biblio_options = get_value(document.header, "\\biblio_options", i)
1636 biblio_options += ","
1637 biblio_options += "refsection=" + multibib
1638 document.header[i] = "\\biblio_options " + biblio_options
1643 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1646 j = find_end_of_inset(document.body, i)
1648 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1651 btprint = get_quoted_value(document.body, "btprint", i, j)
1652 if btprint != "bibbysection":
1655 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1656 # change btprint line
1657 k = find_token(document.body, "btprint", i, j)
1659 document.body[k] = "btprint \"btPrintCited\""
1660 # Insert ERT \\bibbysection and wrap bibtex inset to a Note
1661 pcmd = "bibbysection"
1663 pcmd += "[" + opts + "]"
1664 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1665 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1666 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1667 "status open", "", "\\begin_layout Plain Layout" ]
1668 repl += document.body[i:j+1]
1669 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1670 document.body[i:j+1] = repl
1676 # 5. More tricky: Bibtex/Bibtopic
1677 k = find_token(document.header, "\\use_bibtopic", 0)
1679 # this should not happen
1680 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1682 document.header[k] = "\\use_bibtopic true"
1684 # Possible units. This assumes that the LyX name follows the std,
1685 # which might not always be the case. But it's as good as we can get.
1688 "chapter" : "Chapter",
1689 "section" : "Section",
1690 "subsection" : "Subsection",
1693 if multibib not in units.keys():
1694 document.warning("Unknown multibib value `%s'!" % nultibib)
1696 unit = units[multibib]
1700 i = find_token(document.body, "\\begin_layout " + unit, i)
1704 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1705 "\\begin_inset ERT", "status open", "",
1706 "\\begin_layout Plain Layout", "", "",
1708 "end{btUnit}", "\\end_layout",
1709 "\\begin_layout Plain Layout", "",
1712 "\\end_layout", "", "\\end_inset", "", "",
1716 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1717 "\\begin_inset ERT", "status open", "",
1718 "\\begin_layout Plain Layout", "", "",
1721 "\\end_layout", "", "\\end_inset", "", "",
1728 i = find_token(document.body, "\\end_body", i)
1729 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1730 "\\begin_inset ERT", "status open", "",
1731 "\\begin_layout Plain Layout", "", "",
1734 "\\end_layout", "", "\\end_inset", "", "",
1738 def revert_chapterbib(document):
1739 " Revert chapterbib support "
1741 # 1. Get cite engine
1743 i = find_token(document.header, "\\cite_engine", 0)
1745 document.warning("Malformed document! Missing \\cite_engine")
1747 engine = get_value(document.header, "\\cite_engine", i)
1749 # 2. Do we use biblatex?
1751 if engine in ["biblatex", "biblatex-natbib"]:
1754 # 3. Store multibib document header value
1756 i = find_token(document.header, "\\multibib", 0)
1758 multibib = get_value(document.header, "\\multibib", i)
1760 if not multibib or multibib != "child":
1764 # 4. remove multibib header
1765 del document.header[i]
1769 # find include insets
1772 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1775 j = find_end_of_inset(document.body, i)
1777 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1780 parent = get_containing_layout(document.body, i)
1783 # Insert ERT \\newrefsection before inset
1784 beg = ["\\begin_layout Standard",
1785 "\\begin_inset ERT", "status open", "",
1786 "\\begin_layout Plain Layout", "", "",
1789 "\\end_layout", "", "\\end_inset", "", "",
1791 document.body[parbeg-1:parbeg-1] = beg
1796 # 6. Bibtex/Bibtopic
1797 i = find_token(document.header, "\\use_bibtopic", 0)
1799 # this should not happen
1800 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1802 if get_value(document.header, "\\use_bibtopic", i) == "true":
1803 # find include insets
1806 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1809 j = find_end_of_inset(document.body, i)
1811 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1814 parent = get_containing_layout(document.body, i)
1818 # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
1819 beg = ["\\begin_layout Standard",
1820 "\\begin_inset ERT", "status open", "",
1821 "\\begin_layout Plain Layout", "", "",
1824 "\\end_layout", "", "\\end_inset", "", "",
1826 end = ["\\begin_layout Standard",
1827 "\\begin_inset ERT", "status open", "",
1828 "\\begin_layout Plain Layout", "", "",
1831 "\\end_layout", "", "\\end_inset", "", "",
1833 document.body[parend+1:parend+1] = end
1834 document.body[parbeg-1:parbeg-1] = beg
1835 j += len(beg) + len(end)
1839 # 7. Chapterbib proper
1840 add_to_preamble(document, ["\\usepackage{chapterbib}"])
# Adds the \use_dash_ligatures header tag (true unless the file came from
# LyX 2.2, format 475..508) and strips the zero-width space U+200B that
# older formats stored after en-/em-dashes in body text.
# NOTE(review): numbered excerpt — guard/continue/else lines are elided.
1843 def convert_dashligatures(document):
1844 " Remove a zero-length space (U+200B) after en- and em-dashes. "
# The new tag is inserted right after \use_microtype in the header.
1846 i = find_token(document.header, "\\use_microtype", 0)
1848 if document.initial_format > 474 and document.initial_format < 509:
1849 # This was created by LyX 2.2
1850 document.header[i+1:i+1] = ["\\use_dash_ligatures false"]
1852 # This was created by LyX 2.1 or earlier
1853 document.header[i+1:i+1] = ["\\use_dash_ligatures true"]
1856 while i < len(document.body):
1857 words = document.body[i].split()
1858 # Skip some document parts where dashes are not converted
1859 if len(words) > 1 and words[0] == "\\begin_inset" and \
1860 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1861 "FormulaMacro", "Graphics", "IPA", "listings"]:
1862 j = find_end_of_inset(document.body, i)
1864 document.warning("Malformed LyX document: Can't find end of " \
1865 + words[1] + " inset at line " + str(i))
1870 if len(words) > 0 and words[0] in ["\\leftindent", \
1871 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
1877 j = document.body[i].find(u"\u2013", start) # en-dash
1878 k = document.body[i].find(u"\u2014", start) # em-dash
1879 if j == -1 and k == -1:
1881 if j == -1 or (k != -1 and k < j):
1883 after = document.body[i][j+1:]
1884 if after.startswith(u"\u200B"):
1885 document.body[i] = document.body[i][:j+1] + after[1:]
# A dash at line end may have its U+200B at the start of the next line.
1887 if len(after) == 0 and document.body[i+1].startswith(u"\u200B"):
1888 document.body[i+1] = document.body[i+1][1:]
# Removes the \use_dash_ligatures header tag; when ligatures were enabled
# with TeX fonts, re-inserts the zero-width space U+200B after every
# en-/em-dash in body text (inverse of convert_dashligatures).
# NOTE(review): numbered excerpt — guard/continue/else lines are elided.
1894 def revert_dashligatures(document):
1895 " Remove font ligature settings for en- and em-dashes. "
1896 i = find_token(document.header, "\\use_dash_ligatures", 0)
1899 use_dash_ligatures = get_bool_value(document.header, "\\use_dash_ligatures", i)
1900 del document.header[i]
1901 use_non_tex_fonts = False
1902 i = find_token(document.header, "\\use_non_tex_fonts", 0)
1904 use_non_tex_fonts = get_bool_value(document.header, "\\use_non_tex_fonts", i)
1905 if not use_dash_ligatures or use_non_tex_fonts:
1908 # Add a zero-length space (U+200B) after en- and em-dashes
1910 while i < len(document.body):
1911 words = document.body[i].split()
1912 # Skip some document parts where dashes are not converted
1913 if len(words) > 1 and words[0] == "\\begin_inset" and \
1914 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1915 "FormulaMacro", "Graphics", "IPA", "listings"]:
1916 j = find_end_of_inset(document.body, i)
1918 document.warning("Malformed LyX document: Can't find end of " \
1919 + words[1] + " inset at line " + str(i))
1924 if len(words) > 0 and words[0] in ["\\leftindent", \
1925 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
1931 j = document.body[i].find(u"\u2013", start) # en-dash
1932 k = document.body[i].find(u"\u2014", start) # em-dash
1933 if j == -1 and k == -1:
1935 if j == -1 or (k != -1 and k < j):
1937 after = document.body[i][j+1:]
1938 document.body[i] = document.body[i][:j+1] + u"\u200B" + after
# Reverts the Noto TeX-font header settings to "default" plus explicit
# \renewcommand preamble lines (only for documents using TeX fonts).
# NOTE(review): numbered excerpt — guard lines (e.g. "if i != -1:") elided.
1943 def revert_noto(document):
1944 " Revert Noto font definitions to LaTeX "
1946 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
1948 i = find_token(document.header, "\\font_roman \"NotoSerif-TLF\"", 0)
1950 add_to_preamble(document, ["\\renewcommand{\\rmdefault}{NotoSerif-TLF}"])
1951 document.header[i] = document.header[i].replace("NotoSerif-TLF", "default")
1952 i = find_token(document.header, "\\font_sans \"NotoSans-TLF\"", 0)
1954 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{NotoSans-TLF}"])
1955 document.header[i] = document.header[i].replace("NotoSans-TLF", "default")
1956 i = find_token(document.header, "\\font_typewriter \"NotoMono-TLF\"", 0)
1958 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{NotoMono-TLF}"])
1959 document.header[i] = document.header[i].replace("NotoMono-TLF", "default")
# Reverts the \xout (cross-out) font attribute to ulem's \xout macro and,
# when any occurrence was found, loads ulem in the preamble.
# NOTE(review): insert_to_preamble is presumably imported from
# lyx2lyx_tools on an elided import line — confirm against the full file.
1962 def revert_xout(document):
1963 " Reverts \\xout font attribute "
1964 changed = revert_font_attrs(document.body, "\\xout", "\\xout")
1966 insert_to_preamble(document, \
1967 ['% for proper cross-out',
1968 '\\PassOptionsToPackage{normalem}{ulem}',
1969 '\\usepackage{ulem}'])
# Adds the \is_math_indent header tag: 1 if the class options contain
# "fleqn" (the option is then removed from \options), else 0.
# NOTE(review): numbered excerpt — guard/else lines are elided.
1972 def convert_mathindent(document):
1973 " add the \\is_math_indent tag "
1974 # check if the document uses the class option "fleqn"
# k anchors the insertion point: the new tag goes before \quotes_style.
1975 k = find_token(document.header, "\\quotes_style", 0)
1976 regexp = re.compile(r'^.*fleqn.*')
1977 i = find_re(document.header, regexp, 0)
1979 document.header.insert(k, "\\is_math_indent 1")
1980 # delete the found option
# Strip "fleqn" from the options list in all comma placements.
1981 document.header[i] = document.header[i].replace(",fleqn", "")
1982 document.header[i] = document.header[i].replace(", fleqn", "")
1983 document.header[i] = document.header[i].replace("fleqn,", "")
1984 j = find_re(document.header, regexp, 0)
1986 # then we have fleqn as the only option
1987 del document.header[i]
1989 document.header.insert(k, "\\is_math_indent 0")
# Removes \math_indentation (emitting \setlength{\mathindent}{...} to the
# preamble for non-default values) and turns \is_math_indent 1 back into
# the class option "fleqn".
# NOTE(review): numbered excerpt — guard/else lines are elided.
1992 def revert_mathindent(document):
1993 " Define mathindent if set in the document "
1994 # first output the length
1995 regexp = re.compile(r'(\\math_indentation)')
1996 i = find_re(document.header, regexp, 0)
# Only the first word of the header value is used as the length.
1998 value = get_value(document.header, "\\math_indentation" , i).split()[0]
1999 if value != "default":
2000 add_to_preamble(document, ["\\setlength{\\mathindent}{" + value + '}'])
2001 del document.header[i]
2002 # now set the document class option
2003 regexp = re.compile(r'(\\is_math_indent 1)')
2004 i = find_re(document.header, regexp, 0)
2006 regexp = re.compile(r'(\\is_math_indent)')
2007 j = find_re(document.header, regexp, 0)
2008 del document.header[j]
2010 k = find_token(document.header, "\\options", 0)
2012 document.header[k] = document.header[k].replace("\\options", "\\options fleqn,")
2013 del document.header[i]
# No \options line yet: create one before \use_default_options, then the
# \is_math_indent line has shifted by one, hence i + 1.
2015 l = find_token(document.header, "\\use_default_options", 0)
2016 document.header.insert(l, "\\options fleqn")
2017 del document.header[i + 1]
# Converts VSpace insets and horizontal space insets whose length is given
# in "baselineskip%" units into ERT \vspace{...\baselineskip} /
# \hspace{...\baselineskip} (with the starred variants preserved).
# NOTE(review): numbered excerpt — loop/guard/else lines (and the lines
# assigning 'star') are elided; do not assume linear control flow.
2020 def revert_baselineskip(document):
2021 " Revert baselineskips to TeX code "
2026 regexp = re.compile(r'^.*baselineskip%.*$')
2027 i = find_re(document.body, regexp, i)
2030 vspaceLine = find_token(document.body, "\\begin_inset VSpace", i)
2032 # output VSpace inset as TeX code
2033 # first read out the values
# Extract the numeric percentage between "VSpace " and "baselineskip%".
2034 beg = document.body[i].rfind("VSpace ");
2035 end = document.body[i].rfind("baselineskip%");
2036 baselineskip = float(document.body[i][beg + 7:end]);
2037 # we store the value in percent, thus divide by 100
2038 baselineskip = baselineskip/100;
2039 baselineskip = str(baselineskip);
2040 # check if it is the starred version
2041 if document.body[i].find('*') != -1:
2045 # now output TeX code
2046 endInset = find_end_of_inset(document.body, i)
2048 document.warning("Malformed LyX document: Missing '\\end_inset' of VSpace inset.")
2051 document.body[vspaceLine: endInset + 1] = put_cmd_in_ert("\\vspace" + star + '{' + baselineskip + "\\baselineskip}")
2052 hspaceLine = find_token(document.body, "\\begin_inset space \\hspace", i - 1)
2053 document.warning("hspaceLine: " + str(hspaceLine))
2054 document.warning("i: " + str(i))
2055 if hspaceLine == i - 1:
2056 # output space inset as TeX code
2057 # first read out the values
2058 beg = document.body[i].rfind("\\length ");
2059 end = document.body[i].rfind("baselineskip%");
2060 baselineskip = float(document.body[i][beg + 7:end]);
2061 document.warning("baselineskip: " + str(baselineskip))
2062 # we store the value in percent, thus divide by 100
2063 baselineskip = baselineskip/100;
2064 baselineskip = str(baselineskip);
2065 # check if it is the starred version
2066 if document.body[i-1].find('*') != -1:
2070 # now output TeX code
2071 endInset = find_end_of_inset(document.body, i)
2073 document.warning("Malformed LyX document: Missing '\\end_inset' of space inset.")
2076 document.body[hspaceLine: endInset + 1] = put_cmd_in_ert("\\hspace" + star + '{' + baselineskip + "\\baselineskip}")
# Reverts placement options on rotated (sideways) floats by rewriting the
# float begin/end to ERT \begin{sideways<type>}[placement] ... and loading
# the rotfloat package (plus float for the 'H' placement).
# NOTE(review): numbered excerpt — loop/guard lines (and the lines
# assigning 'star') are elided; index offsets i-1..i-3 address the
# placement/wide/type lines preceding "sideways true".
2082 def revert_rotfloat(document):
2083 " Revert placement options for rotated floats "
2087 i = find_token(document.body, "sideways true", i)
2089 regexp = re.compile(r'^.*placement.*$')
2090 j = find_re(document.body, regexp, i-2)
2098 # we found a sideways float with placement options
2099 # at first store the placement
2100 beg = document.body[i-2].rfind(" ");
2101 placement = document.body[i-2][beg+1:]
2102 # check if the option'H' is used
2103 if placement.find("H") != -1:
2104 add_to_preamble(document, ["\\usepackage{float}"])
2105 # now check if it is a starred type
2106 if document.body[i-1].find("wide true") != -1:
2110 # store the float type
2111 beg = document.body[i-3].rfind(" ");
2112 fType = document.body[i-3][beg+1:]
2113 # now output TeX code
2114 endInset = find_end_of_inset(document.body, i-3)
2116 document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
# Replace the end first so the earlier begin indices stay valid.
2119 document.body[endInset-2: endInset+1] = put_cmd_in_ert("\\end{sideways" + fType + star + '}')
2120 document.body[i-3: i+2] = put_cmd_in_ert("\\begin{sideways" + fType + star + "}[" + placement + ']')
2121 add_to_preamble(document, ["\\usepackage{rotfloat}"])
# Rewrites zero-width-space insets into the new \SpecialChar allowbreak via
# a whole-body string replace.
# NOTE(review): the replace() call's middle argument lines (original 2130-
# 2131, the rest of the inset text) are elided in this excerpt.
2126 def convert_allowbreak(document):
2127 " Zero widths Space-inset -> \SpecialChar allowbreak. "
2128 body = "\n".join(document.body)
2129 body = body.replace("\\begin_inset space \hspace{}\n"
2132 "\\SpecialChar allowbreak\n")
2133 document.body = body.split("\n")
# Inverse of convert_allowbreak: turns \SpecialChar allowbreak back into a
# zero-width-space inset via a whole-body string replace.
# NOTE(review): the replacement string's trailing lines (original 2141-
# 2142) are elided in this excerpt.
2136 def revert_allowbreak(document):
2137 " \SpecialChar allowbreak -> Zero widths Space-inset. "
2138 body = "\n".join(document.body)
2139 body = body.replace("\\SpecialChar allowbreak\n",
2140 "\\begin_inset space \hspace{}\n"
2143 document.body = body.split("\n")
# Adds the \math_number_before header tag: 1 if the \options line contains
# "leqno" (the option is then removed), else 0.
# NOTE(review): numbered excerpt — guard/else lines are elided.
2146 def convert_mathnumberpos(document):
2147 " add the \\math_number_before tag "
2148 # check if the document uses the class option "leqno"
# k anchors the insertion point; m ensures the "leqno" match really is on
# the \options header line and not elsewhere.
2149 k = find_token(document.header, "\\quotes_style", 0)
2150 m = find_token(document.header, "\\options", 0)
2151 regexp = re.compile(r'^.*leqno.*')
2152 i = find_re(document.header, regexp, 0)
2153 if i != -1 and i == m:
2154 document.header.insert(k, "\\math_number_before 1")
2155 # delete the found option
2156 document.header[i] = document.header[i].replace(",leqno", "")
2157 document.header[i] = document.header[i].replace(", leqno", "")
2158 document.header[i] = document.header[i].replace("leqno,", "")
2159 j = find_re(document.header, regexp, 0)
2161 # then we have leqno as the only option
2162 del document.header[i]
2164 document.header.insert(k, "\\math_number_before 0")
# Removes the \math_number_before tag; when it was 1, re-adds the class
# option "leqno" (appending to \options or creating the line).
# NOTE(review): numbered excerpt — guard/else lines are elided.
2167 def revert_mathnumberpos(document):
2168 " add the document class option leqno"
2169 regexp = re.compile(r'(\\math_number_before 1)')
2170 i = find_re(document.header, regexp, 0)
2172 regexp = re.compile(r'(\\math_number_before)')
2173 j = find_re(document.header, regexp, 0)
2174 del document.header[j]
2176 k = find_token(document.header, "\\options", 0)
2178 document.header[k] = document.header[k].replace("\\options", "\\options leqno,")
2179 del document.header[i]
# No \options line yet: create one before \use_default_options; the tag
# line has shifted down by one, hence i + 1.
2181 l = find_token(document.header, "\\use_default_options", 0)
2182 document.header.insert(l, "\\options leqno")
2183 del document.header[i + 1]
# Renames \math_number_before 1/0 to \math_numbering_side left/default and
# additionally maps the class option "reqno" to "right" (removing it).
# NOTE(review): numbered excerpt — guard lines are elided.
2186 def convert_mathnumberingname(document):
2187 " rename the \\math_number_before tag to \\math_numbering_side "
2188 regexp = re.compile(r'(\\math_number_before 1)')
2189 i = find_re(document.header, regexp, 0)
2191 document.header[i] = "\\math_numbering_side left"
2192 regexp = re.compile(r'(\\math_number_before 0)')
2193 i = find_re(document.header, regexp, 0)
2195 document.header[i] = "\\math_numbering_side default"
2196 # check if the document uses the class option "reqno"
# m ensures the "reqno" match is really on the \options header line.
2197 k = find_token(document.header, "\\math_numbering_side", 0)
2198 m = find_token(document.header, "\\options", 0)
2199 regexp = re.compile(r'^.*reqno.*')
2200 i = find_re(document.header, regexp, 0)
2201 if i != -1 and i == m:
2202 document.header[k] = "\\math_numbering_side right"
2203 # delete the found option
2204 document.header[i] = document.header[i].replace(",reqno", "")
2205 document.header[i] = document.header[i].replace(", reqno", "")
2206 document.header[i] = document.header[i].replace("reqno,", "")
2207 j = find_re(document.header, regexp, 0)
2209 # then we have reqno as the only option
2210 del document.header[i]
# Renames \math_numbering_side back to \math_number_before: left -> 1,
# default -> 0, and right -> 0 plus the class option "reqno".
# NOTE(review): numbered excerpt — guard/else lines are elided.
2213 def revert_mathnumberingname(document):
2214 " rename the \\math_numbering_side tag back to \\math_number_before "
2216 regexp = re.compile(r'(\\math_numbering_side left)')
2217 i = find_re(document.header, regexp, 0)
2219 document.header[i] = "\\math_number_before 1"
2220 # add the option reqno and delete the tag
2221 regexp = re.compile(r'(\\math_numbering_side right)')
2222 i = find_re(document.header, regexp, 0)
2224 document.header[i] = "\\math_number_before 0"
2225 k = find_token(document.header, "\\options", 0)
2227 document.header[k] = document.header[k].replace("\\options", "\\options reqno,")
# No \options line yet: create one before \use_default_options.
2229 l = find_token(document.header, "\\use_default_options", 0)
2230 document.header.insert(l, "\\options reqno")
2231 # add the math_number_before tag
2232 regexp = re.compile(r'(\\math_numbering_side default)')
2233 i = find_re(document.header, regexp, 0)
2235 document.header[i] = "\\math_number_before 0"
def convert_minted(document):
    """Add the \\use_minted tag (default off) to the document header."""
    # Splice the tag in front of the last header line, keeping it inside
    # the header block.
    document.header[-1:-1] = ["\\use_minted 0"]
# Removes the \use_minted header tag again (inverse of convert_minted).
# NOTE(review): the "if i != -1:" guard (original 2246) is elided here.
2243 def revert_minted(document):
2244 " remove the \\use_minted tag "
2245 i = find_token(document.header, "\\use_minted", 0)
2247 document.header.pop(i)
# LyX release versions whose file format this module targets.
2254 supported_versions = ["2.3.0", "2.3"]
# Forward-conversion table: (target format number, [conversion routines])
# pairs, one step per file-format increment.
# NOTE(review): the opening "convert = [" line (original 2255) and several
# entries are elided in this excerpt.
2256 [509, [convert_microtype]],
2257 [510, [convert_dateinset]],
2258 [511, [convert_ibranches]],
2259 [512, [convert_beamer_article_styles]],
2263 [516, [convert_inputenc]],
2265 [518, [convert_iopart]],
2266 [519, [convert_quotestyle]],
2268 [521, [convert_frenchquotes]],
2279 [532, [convert_literalparam]],
2282 [535, [convert_dashligatures]],
2285 [538, [convert_mathindent]],
2288 [541, [convert_allowbreak]],
2289 [542, [convert_mathnumberpos]],
2290 [543, [convert_mathnumberingname]],
2291 [544, [convert_minted]]
# Reverse-conversion table: (target format number, [reversion routines])
# pairs, walking the format numbers back down.
# NOTE(review): the opening "revert = [" line and a few entries are elided
# in this excerpt.
2295 [543, [revert_minted]],
2296 [542, [revert_mathnumberingname]],
2297 [541, [revert_mathnumberpos]],
2298 [540, [revert_allowbreak]],
2299 [539, [revert_rotfloat]],
2300 [538, [revert_baselineskip]],
2301 [537, [revert_mathindent]],
2302 [536, [revert_xout]],
2303 [535, [revert_noto]],
2304 [534, [revert_dashligatures]],
2305 [533, [revert_chapterbib]],
2306 [532, [revert_multibib]],
2307 [531, [revert_literalparam]],
2308 [530, [revert_qualicites]],
2309 [529, [revert_bibpackopts]],
2310 [528, [revert_citekeyonly]],
2311 [527, [revert_biblatex]],
2312 [526, [revert_noprefix]],
2313 [525, [revert_plural_refs]],
2314 [524, [revert_labelonly]],
2315 [523, [revert_crimson, revert_cochinealmath]],
2316 [522, [revert_cjkquotes]],
2317 [521, [revert_dynamicquotes]],
2318 [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
2319 [519, [revert_plainquote]],
2320 [518, [revert_quotestyle]],
2321 [517, [revert_iopart]],
2322 [516, [revert_quotes]],
2324 [514, [revert_urdu, revert_syriac]],
2325 [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
2326 [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
2327 [511, [revert_beamer_article_styles]],
2328 [510, [revert_ibranches]],
2330 [508, [revert_microtype]]
2334 if __name__ == "__main__":