1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2016 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.3"""
25 # Uncomment only what you need to import, please.
27 from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
28 find_token, find_end_of_inset, get_value, get_bool_value, \
29 get_containing_layout, get_quoted_value, del_token
30 # find_tokens, find_token_exact, is_in_inset, \
31 # check_token, get_option_value
33 from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
34 # get_ert, lyx2latex, \
35 # lyx2verbatim, length_in_bp, convert_info_insets
36 # insert_to_preamble, latex_length, revert_flex_inset, \
37 # revert_font_attrs, hex2ratio, str2bool
39 ####################################################################
40 # Private helper functions
44 ###############################################################################
46 ### Conversion and reversion routines
48 ###############################################################################
def convert_microtype(document):
    """Add the \\use_microtype header setting.

    If the preamble already loads microtype, enable the setting and drop
    the now-redundant preamble line; otherwise default to false.
    """
    i = find_token(document.header, "\\font_tt_scale" , 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        # fall back to appending at the end of the header
        i = len(document.header) - 1

    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if j == -1:
        document.header.insert(i + 1, "\\use_microtype false")
    else:
        document.header.insert(i + 1, "\\use_microtype true")
        del document.preamble[j]
def revert_microtype(document):
    """Remove the \\use_microtype header setting.

    If microtype was enabled, re-add the package to the preamble so the
    output is unchanged.
    """
    i = find_token(document.header, "\\use_microtype", 0)
    if i == -1:
        return
    use_microtype = get_bool_value(document.header, "\\use_microtype" , i)
    del document.header[i]
    if use_microtype:
        add_to_preamble(document, ["\\usepackage{microtype}"])
def convert_dateinset(document):
    """Convert date external insets to ERT (\\today)."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            i += 1
            continue
        if get_value(document.body, 'template', i, j) == "Date":
            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
        i += 1  # skip inset
def convert_inputenc(document):
    """Replace the no longer supported pt254 input encoding with pt154."""
    i = find_token(document.header, "\\inputenc", 0)
    if i == -1:
        return
    if get_value(document.header, "\\inputencoding", i) == "pt254":
        document.header[i] = "\\inputencoding pt154"
def convert_ibranches(document):
    """Add an "inverted 0" line to every branch inset in the body."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        document.body.insert(i + 1, "inverted 0")
        # skip the inset line and the line we just inserted
        i += 2
def revert_ibranches(document):
    """Convert inverted branches to explicit anti-branches.

    For each branch inset used with "inverted 1", an "Anti-<name>" branch
    with the opposite selection status is created in the header, and the
    inset is pointed at it.  Fixes Python 3 incompatibility: the original
    used dict.iteritems(), which no longer exists.
    """
    # Map branch name -> selection status (0 or 1) from the header.
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1
        # the value tells us whether the branch is selected
        ourbranches[document.header[i][8:].strip()] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if not branch in ibranches:
                antibranch = "Anti-" + branch
                # make sure the generated name is unique
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch
        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # ourbranches[old] - 1 inverts the selection status of the old branch
        lines = ["\\branch " + new,
                 "\\selected " + str(ourbranches[old] - 1)]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
def revert_beamer_article_styles(document):
    """Include (scr)article styles in beamer article via local layout.

    NOTE(review): several inserted layout lines were reconstructed from
    upstream lyx_2_3.py -- verify the literal layout strings against it.
    """
    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # create an empty local layout block before \language
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        "     Family                Roman",
        "     Series                Medium",
        "     Shape                 Up",
        "     Size                  Normal",
        "     Color                 None",
        "EndFont",
        "Preamble",
        "    \\usepackage{beamerarticle,pgf}",
        "    % this default might be overridden by plain title style",
        "    \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
        "    \\AtBeginDocument{",
        "        \\let\\origtableofcontents=\\tableofcontents",
        "        \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        "        \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        "    }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]
def convert_beamer_article_styles(document):
    """Remove the (scr)article styles lyx2lyx inserted into beamer articles."""
    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"

    if document.language == "bosnian":
        document.language = "english"
        i = find_token(document.header, "\\language bosnian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
        else:
            # no \options line yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options bosnian")
def revert_friulan(document):
    "Set the document language to English but assure Friulan output"

    if document.language == "friulan":
        document.language = "english"
        i = find_token(document.header, "\\language friulan", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
        else:
            # no \options line yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options friulan")
def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"

    if document.language == "macedonian":
        document.language = "english"
        i = find_token(document.header, "\\language macedonian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
        else:
            # no \options line yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options macedonian")
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language == "piedmontese":
        document.language = "english"
        i = find_token(document.header, "\\language piedmontese", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options piedmontese,")
        else:
            # no \options line yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options piedmontese")
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language == "romansh":
        document.language = "english"
        i = find_token(document.header, "\\language romansh", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options romansh,")
        else:
            # no \options line yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options romansh")
def revert_amharic(document):
    "Set the document language to English but assure Amharic output"

    if document.language == "amharic":
        document.language = "english"
        i = find_token(document.header, "\\language amharic", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{amharic}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{amharic}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_asturian(document):
    "Set the document language to English but assure Asturian output"

    if document.language == "asturian":
        document.language = "english"
        i = find_token(document.header, "\\language asturian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{asturian}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{asturian}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_kannada(document):
    "Set the document language to English but assure Kannada output"

    if document.language == "kannada":
        document.language = "english"
        i = find_token(document.header, "\\language kannada", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{kannada}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{kannada}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_khmer(document):
    "Set the document language to English but assure Khmer output"

    if document.language == "khmer":
        document.language = "english"
        i = find_token(document.header, "\\language khmer", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{khmer}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{khmer}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_urdu(document):
    "Set the document language to English but assure Urdu output"

    if document.language == "urdu":
        document.language = "english"
        i = find_token(document.header, "\\language urdu", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{urdu}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{urdu}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_syriac(document):
    "Set the document language to English but assure Syriac output"

    if document.language == "syriac":
        document.language = "english"
        i = find_token(document.header, "\\language syriac", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # "\\s" was an invalid escape sequence; "\\\\s" produces the same string
        add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{syriac}}"])
        # insert ERT switching the default language right at document start
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{syriac}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_quotes(document):
    """Revert Quote Insets in verbatim or Hebrew context to plain quotes.

    NOTE(review): loop-control lines were reconstructed from upstream
    lyx_2_3.py -- verify index advancement against it.
    """
    # First handle verbatim insets
    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                # closing quotes ("...s") become an apostrophe
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1

    # Now verbatim layouts
    i = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                document.body[k:l+1] = [replace]
        else:
            i += 1

    # Now handle Hebrew context: only relevant if Hebrew occurs at all
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        hebrew = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            # no explicit \lang before the inset within its paragraph:
            # the document language decides
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l
def revert_iopart(document):
    """Input standard styles via local layout for the iopart class."""
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # create an empty local layout block before \language
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
def convert_iopart(document):
    """Remove the stdlayouts local layout lyx2lyx added, if present."""
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def convert_quotestyle(document):
    """Convert the \\quotes_language header line to \\quotes_style."""
    i = find_token(document.header, "\\quotes_language", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    val = get_value(document.header, "\\quotes_language", i)
    document.header[i] = "\\quotes_style " + val
def revert_quotestyle(document):
    """Revert the \\quotes_style header line to \\quotes_language."""
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    val = get_value(document.header, "\\quotes_style", i)
    document.header[i] = "\\quotes_language " + val
def revert_plainquote(document):
    """Revert plain quote insets to literal " / ' characters."""
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # single ("...s") quotes become an apostrophe
        replace = "\""
        if document.body[k].endswith("s"):
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l
def convert_frenchquotes(document):
    """Convert french quote insets to swiss (style letter f -> c)."""
    # First, convert style setting
    i = find_token(document.header, "\\quotes_style french", 0)
    if i != -1:
        document.header[i] = "\\quotes_style swiss"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("f", "c", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swissquotes(document):
    """Revert swiss quote insets to french (style letter c -> f)."""
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swiss", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("c", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_britishquotes(document):
    """Revert british quote insets to english, swapping quote levels."""
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style british", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("b", "e", 1)
        # british double corresponds to english single and vice versa
        if val[2] == "d":
            newval = newval.replace("d", "s")
        else:
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swedishgquotes(document):
    """Revert swedishg quote insets.

    NOTE(review): the branch condition was reconstructed -- verify
    against upstream lyx_2_3.py.
    """
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swedishg", 0)
    if i != -1:
        document.header[i] = "\\quotes_style danish"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[1] == "r":
            # closing mark: the danish opening guillemet has the same shape
            newval = val.replace("w", "a", 1).replace("r", "l")
        else:
            # opening mark: the swedish one has the same shape
            newval = val.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchquotes(document):
    """Revert french inner quote insets (single level to double)."""
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # inner marks: revert to english double quotes
            newval = val.replace("f", "e", 1).replace("s", "d")
            document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_frenchinquotes(document):
    """Revert frenchin quote insets to french."""
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style frenchin", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("i", "f", 1)
        if val[2] == "s":
            # inner marks: promote to double
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_russianquotes(document):
    """Revert russian quote insets to french/german equivalents."""
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style russian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # inner marks: german double quotes have the same shape
            newval = val.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks: french guillemets have the same shape
            newval = val.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_dynamicquotes(document):
    """Revert dynamic quote insets to the document's static quote style.

    Also hardens the original against an unbound `style` when the
    \\quotes_style header line is missing.
    """
    # First, revert header
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get the document's quote style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # map style name to the inset's style letter
    s = "e"
    if style == "english":
        s = "e"
    elif style == "swedish":
        s = "s"
    elif style == "german":
        s = "g"
    elif style == "polish":
        s = "p"
    elif style == "swiss":
        s = "c"
    elif style == "danish":
        s = "a"
    elif style == "plain":
        s = "q"
    elif style == "british":
        s = "b"
    elif style == "swedishg":
        s = "w"
    elif style == "french":
        s = "f"
    elif style == "frenchin":
        s = "i"
    elif style == "russian":
        s = "r"

    # now transform the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
def revert_cjkquotes(document):
    """Revert CJK quote insets to literal characters or math brackets.

    NOTE(review): the branch structure around val[1]/val[2] was
    reconstructed -- verify against upstream lyx_2_3.py.
    """
    # Get the global quote style (guard against a missing header line)
    style = ""
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the static CJK letter
        s = "J"
        if style == "cjkangle":
            s = "K"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # CJK corner bracket quotes ("j" insets)
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # in a CJK language context we can use the real glyphs,
        # otherwise we fall back to math symbols
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l

    # CJK angle bracket quotes ("k" insets)
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        replace = []
        if val[2] == "s":
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]
        document.body[k:l+1] = replace
        i = l
def revert_crimson(document):
    """Revert native Cochineal/Crimson font definition to LaTeX preamble code."""
    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            # We need to use this package option, too
            j = find_token(document.header, "\\font_osf true", 0)
            preamble = "\\usepackage"
            if j != -1:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            document.header[i] = document.header[i].replace("cochineal", "default")
def revert_cochinealmath(document):
    """Revert cochineal newtxmath definition to LaTeX preamble code."""
    # only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
def revert_labelonly(document):
    """Revert 'labelonly' reference insets to ERT with the bare label."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # not a labelonly reference: skip the inset
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
def revert_plural_refs(document):
    """Revert plural and capitalized formatted references to ERT.

    Fixes two bugs in the original:
    * `use_refstyle = (i != 0)` tested the wrong sentinel; find_token
      returns -1 when the token is missing.
    * the token "LaTeXCommand formatted" never matches: LyX writes
      "LatexCommand formatted".
    """
    i = find_token(document.header, "\\use_refstyle 1", 0)
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "s"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
def revert_noprefix(document):
    """Revert labelonly reference insets with 'noprefix' set to ERT.

    NOTE(review): the control flow around the two del_token calls was
    reconstructed -- verify against upstream lyx_2_3.py.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # not a labelonly reference: skip the inset
            i = j
            continue
        noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # nothing to revert; drop a stale tag if one is present
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
# Revert biblatex support to the pre-2.3 natbib-based format:
# 1) downgrade the cite engine header, 2) move biblatex headers
# (bibstyle/citestyle/biblio_options) into a \usepackage preamble line,
# 3) turn bibtex insets into ERT \printbibliography plus a Note-wrapped
# copy, 4) convert biblatex-only citation commands to ERT, 5) fake
# "Provides natbib 1" in a local layout so natbib cite styles still work.
# NOTE(review): the listing is decimated -- loop headers, `if ... == -1`
# guards, the `new_citations = {` dict opener and several closers are
# missing; comments describe only visible lines.
1174 def revert_biblatex(document):
1175 " Revert biblatex support "
1181 # 1. Get cite engine
1183 i = find_token(document.header, "\\cite_engine", 0)
1185 document.warning("Malformed document! Missing \\cite_engine")
1187 engine = get_value(document.header, "\\cite_engine", i)
1189 # 2. Store biblatex state and revert to natbib
1191 if engine in ["biblatex", "biblatex-natbib"]:
1193 document.header[i] = "\\cite_engine natbib"
1195 # 3. Store and remove new document headers
1197 i = find_token(document.header, "\\biblatex_bibstyle", 0)
1199 bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
1200 del document.header[i]
1203 i = find_token(document.header, "\\biblatex_citestyle", 0)
1205 citestyle = get_value(document.header, "\\biblatex_citestyle", i)
1206 del document.header[i]
1209 i = find_token(document.header, "\\biblio_options", 0)
1211 biblio_options = get_value(document.header, "\\biblio_options", i)
1212 del document.header[i]
# Rebuild the biblatex option string; natbib=true keeps natbib-style
# cite commands working.  (Closing "]" presumably added on a missing line.)
1215 bbxopts = "[natbib=true"
1217 bbxopts += ",bibstyle=" + bibstyle
1219 bbxopts += ",citestyle=" + citestyle
1220 if biblio_options != "":
1221 bbxopts += "," + biblio_options
1223 add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")
# Scan bibtex insets: collect .bib files and replace the inset with ERT.
1233 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1236 j = find_end_of_inset(document.body, i)
1238 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1241 bibs = get_quoted_value(document.body, "bibfiles", i, j)
1242 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1245 bibresources += bibs.split(",")
1247 document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
1248 # remove biblatexopts line
1249 k = find_token(document.body, "biblatexopts", i, j)
1251 del document.body[k]
1252 # Re-find inset end line
1253 j = find_end_of_inset(document.body, i)
1254 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1256 pcmd = "printbibliography"
1258 pcmd += "[" + opts + "]"
1259 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1260 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1261 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1262 "status open", "", "\\begin_layout Plain Layout" ]
1263 repl += document.body[i:j+1]
1264 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1265 document.body[i:j+1] = repl
# Each collected bib file becomes an \addbibresource preamble line.
1271 for b in bibresources:
1272 add_to_preamble(document, "\\addbibresource{" + b + ".bib}")
1274 # 2. Citation insets
1276 # Specific citation insets used in biblatex that need to be reverted to ERT
# Mapping LyX cite command -> biblatex LaTeX command (dict opener missing
# from this listing; presumably `new_citations = {`).
1279 "citebyear" : "citeyear",
1280 "citeyear" : "cite*",
1281 "Footcite" : "Smartcite",
1282 "footcite" : "smartcite",
1283 "Autocite" : "Autocite",
1284 "autocite" : "autocite",
1285 "citetitle" : "citetitle",
1286 "citetitle*" : "citetitle*",
1287 "fullcite" : "fullcite",
1288 "footfullcite" : "footfullcite",
1289 "supercite" : "supercite",
1290 "citeauthor" : "citeauthor",
1291 "citeauthor*" : "citeauthor*",
1292 "Citeauthor" : "Citeauthor",
1293 "Citeauthor*" : "Citeauthor*"
1296 # All commands accepted by LyX < 2.3. Everything else throws an error.
1297 old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
1298 "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
1299 "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
1300 "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
1301 "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
1302 "footcitet", "footcitep", "footcitealt", "footcitealp",\
1303 "footciteauthor", "footciteyear", "footciteyearpar",\
1304 "citefield", "citetitle", "cite*" ]
# Scan citation insets and rewrite biblatex-only commands as ERT.
1308 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1311 j = find_end_of_inset(document.body, i)
1313 document.warning("Can't find end of citation inset at line %d!!" %(i))
1316 k = find_token(document.body, "LatexCommand", i, j)
1318 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1321 cmd = get_value(document.body, "LatexCommand", k)
1322 if biblatex and cmd in list(new_citations.keys()):
1323 pre = get_quoted_value(document.body, "before", i, j)
1324 post = get_quoted_value(document.body, "after", i, j)
1325 key = get_quoted_value(document.body, "key", i, j)
1327 document.warning("Citation inset at line %d does not have a key!" %(i))
1329 # Replace known new commands with ERT
1330 res = "\\" + new_citations[cmd]
1332 res += "[" + pre + "]"
1334 res += "[" + post + "]"
1337 res += "{" + key + "}"
1338 document.body[i:j+1] = put_cmd_in_ert([res])
1339 elif cmd not in old_citations:
1340 # Reset unknown commands to cite. This is what LyX does as well
1341 # (but LyX 2.2 would break on unknown commands)
1342 document.body[k] = "LatexCommand cite"
1343 document.warning("Reset unknown cite command '%s' with cite" % cmd)
1346 # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
1348 i = find_token(document.header, "\\begin_local_layout", 0)
# If there is no local layout block yet, create an empty one just
# before the \language header.
1350 k = find_token(document.header, "\\language", 0)
1352 # this should not happen
1353 document.warning("Malformed LyX document! No \\language header found!")
1355 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1358 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1360 # this should not happen
1361 document.warning("Malformed LyX document! Can't find end of local layout!")
1364 document.header[i+1 : i+1] = [
1365 "### Inserted by lyx2lyx (biblatex emulation) ###",
1366 "Provides natbib 1",
1367 "### End of insertion by lyx2lyx (biblatex emulation) ###"
# Revert the 2.3 "keyonly" citation command: replace the whole citation
# inset with ERT containing just the raw key text.
# NOTE(review): loop header and -1 guards are missing from this listing.
1371 def revert_citekeyonly(document):
1372 " Revert keyonly cite command to ERT "
1376 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1379 j = find_end_of_inset(document.body, i)
1381 document.warning("Can't find end of citation inset at line %d!!" %(i))
1384 k = find_token(document.body, "LatexCommand", i, j)
1386 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1389 cmd = get_value(document.body, "LatexCommand", k)
# Only "keyonly" insets are touched; all other cite commands are skipped.
1390 if cmd != "keyonly":
1394 key = get_quoted_value(document.body, "key", i, j)
1396 document.warning("Citation inset at line %d does not have a key!" %(i))
1397 # Replace known new commands with ERT
1398 document.body[i:j+1] = put_cmd_in_ert([key])
# Revert the \biblio_options header for natbib/jurabib engines: remove the
# header line and re-express the options as a "PackageOptions" statement
# inside a (possibly newly created) local layout block.
# NOTE(review): listing is decimated; early-return guards are missing.
1403 def revert_bibpackopts(document):
1404 " Revert support for natbib/jurabib package options "
1407 i = find_token(document.header, "\\cite_engine", 0)
1409 document.warning("Malformed document! Missing \\cite_engine")
1411 engine = get_value(document.header, "\\cite_engine", i)
# Only natbib and jurabib support these package options.
1414 if engine not in ["natbib", "jurabib"]:
1417 i = find_token(document.header, "\\biblio_options", 0)
1419 # Nothing to do if we have no options
1422 biblio_options = get_value(document.header, "\\biblio_options", i)
1423 del document.header[i]
1425 if not biblio_options:
1426 # Nothing to do for empty options
# Ensure a local layout block exists; create one before \language if not.
1429 i = find_token(document.header, "\\begin_local_layout", 0)
1431 k = find_token(document.header, "\\language", 0)
1433 # this should not happen
1434 document.warning("Malformed LyX document! No \\language header found!")
1436 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1439 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1441 # this should not happen
1442 document.warning("Malformed LyX document! Can't find end of local layout!")
1445 document.header[i+1 : i+1] = [
1446 "### Inserted by lyx2lyx (bibliography package options) ###",
1447 "PackageOptions " + engine + " " + biblio_options,
1448 "### End of insertion by lyx2lyx (bibliography package options) ###"
# Revert qualified citation lists (per-key pre/post texts, stored in the
# tab-separated "pretextlist"/"posttextlist" params).  With biblatex the
# inset becomes ERT using the multicite commands below; otherwise only the
# two list params are deleted.
# NOTE(review): listing is decimated -- the `ql_citations = {` opener,
# loop headers and several guards are missing; comments cover visible code.
1452 def revert_qualicites(document):
1453 " Revert qualified citation list commands to ERT "
1455 # Citation insets that support qualified lists, with their LaTeX code
1459 "citet" : "textcites",
1460 "Citet" : "Textcites",
1461 "citep" : "parencites",
1462 "Citep" : "Parencites",
1463 "Footcite" : "Smartcites",
1464 "footcite" : "smartcites",
1465 "Autocite" : "Autocites",
1466 "autocite" : "autocites",
1471 i = find_token(document.header, "\\cite_engine", 0)
1473 document.warning("Malformed document! Missing \\cite_engine")
1475 engine = get_value(document.header, "\\cite_engine", i)
1477 biblatex = engine in ["biblatex", "biblatex-natbib"]
1481 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1484 j = find_end_of_inset(document.body, i)
1486 document.warning("Can't find end of citation inset at line %d!!" %(i))
# Skip insets that carry no qualified lists at all.
1489 pres = find_token(document.body, "pretextlist", i, j)
1490 posts = find_token(document.body, "posttextlist", i, j)
1491 if pres == -1 and posts == -1:
1495 pretexts = get_quoted_value(document.body, "pretextlist", pres)
1496 posttexts = get_quoted_value(document.body, "posttextlist", posts)
1497 k = find_token(document.body, "LatexCommand", i, j)
1499 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1502 cmd = get_value(document.body, "LatexCommand", k)
1503 if biblatex and cmd in list(ql_citations.keys()):
1504 pre = get_quoted_value(document.body, "before", i, j)
1505 post = get_quoted_value(document.body, "after", i, j)
1506 key = get_quoted_value(document.body, "key", i, j)
1508 document.warning("Citation inset at line %d does not have a key!" %(i))
# Lists are tab-separated entries of the form "key text"; build
# key -> text maps for pre- and post-notes.
1510 keys = key.split(",")
1511 prelist = pretexts.split("\t")
1514 ppp = pp.split(" ", 1)
1515 premap[ppp[0]] = ppp[1]
1516 postlist = posttexts.split("\t")
1519 ppp = pp.split(" ", 1)
1520 postmap[ppp[0]] = ppp[1]
1521 # Replace known new commands with ERT
# Parens inside the global pre/post notes would be mistaken for
# multicite delimiters, so brace-protect them.
1522 if "(" in pre or ")" in pre:
1523 pre = "{" + pre + "}"
1524 if "(" in post or ")" in post:
1525 post = "{" + post + "}"
1526 res = "\\" + ql_citations[cmd]
1528 res += "(" + pre + ")"
1530 res += "(" + post + ")"
1534 if premap.get(kk, "") != "":
1535 res += "[" + premap[kk] + "]"
1536 if postmap.get(kk, "") != "":
1537 res += "[" + postmap[kk] + "]"
1538 elif premap.get(kk, "") != "":
1540 res += "{" + kk + "}"
1541 document.body[i:j+1] = put_cmd_in_ert([res])
1543 # just remove the params
# Delete in reverse order so the earlier index stays valid.
1544 del document.body[posttexts]
1545 del document.body[pretexts]
# Inset types that gained the "literal" parameter in format 532; shared by
# convert_literalparam and revert_literalparam below.
1549 command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
# Add the new 'literal' parameter to each command inset listed in
# command_insets.  Insets that previously latexified their arguments get
# literal "false"; the rest get literal "true".
# NOTE(review): loop header, -1 guards and index advances are missing from
# this listing.
1550 def convert_literalparam(document):
1551 " Add param literal "
1553 # These already had some sort of latexify method
1554 latexified_insets = ["href", "index_print", "nomenclature"]
1556 for inset in command_insets:
1559 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1562 j = find_end_of_inset(document.body, i)
1564 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
# Skip past the existing parameter lines (they end at the first
# blank line) so the new param is appended after them.
1567 while i < j and document.body[i].strip() != '':
1569 if inset in latexified_insets:
1570 document.body.insert(i, "literal \"false\"")
1572 document.body.insert(i, "literal \"true\"")
# Remove the 'literal' parameter again from every inset in command_insets.
# NOTE(review): loop header and -1 guards are missing from this listing.
1576 def revert_literalparam(document):
1577 " Remove param literal "
1579 for inset in command_insets:
1582 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1585 j = find_end_of_inset(document.body, i)
1587 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
1590 k = find_token(document.body, 'literal', i, j)
1594 del document.body[k]
1598 def revert_multibib(document):
1599 " Revert multibib support "
1601 # 1. Get cite engine
1603 i = find_token(document.header, "\\cite_engine", 0)
1605 document.warning("Malformed document! Missing \\cite_engine")
1607 engine = get_value(document.header, "\\cite_engine", i)
1609 # 2. Do we use biblatex?
1611 if engine in ["biblatex", "biblatex-natbib"]:
1614 # 3. Store and remove multibib document header
1616 i = find_token(document.header, "\\multibib", 0)
1618 multibib = get_value(document.header, "\\multibib", i)
1619 del document.header[i]
1624 # 4. The easy part: Biblatex
1626 i = find_token(document.header, "\\biblio_options", 0)
1628 k = find_token(document.header, "\\use_bibtopic", 0)
1630 # this should not happen
1631 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1633 document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
1635 biblio_options = get_value(document.header, "\\biblio_options", i)
1637 biblio_options += ","
1638 biblio_options += "refsection=" + multibib
1639 document.header[i] = "\\biblio_options " + biblio_options
1644 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1647 j = find_end_of_inset(document.body, i)
1649 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1652 btprint = get_quoted_value(document.body, "btprint", i, j)
1653 if btprint != "bibbysection":
1656 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1657 # change btprint line
1658 k = find_token(document.body, "btprint", i, j)
1660 document.body[k] = "btprint \"btPrintCited\""
1661 # Insert ERT \\bibbysection and wrap bibtex inset to a Note
1662 pcmd = "bibbysection"
1664 pcmd += "[" + opts + "]"
1665 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1666 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1667 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1668 "status open", "", "\\begin_layout Plain Layout" ]
1669 repl += document.body[i:j+1]
1670 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1671 document.body[i:j+1] = repl
1677 # 5. More tricky: Bibtex/Bibtopic
1678 k = find_token(document.header, "\\use_bibtopic", 0)
1680 # this should not happen
1681 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1683 document.header[k] = "\\use_bibtopic true"
1685 # Possible units. This assumes that the LyX name follows the std,
1686 # which might not always be the case. But it's as good as we can get.
1689 "chapter" : "Chapter",
1690 "section" : "Section",
1691 "subsection" : "Subsection",
1694 if multibib not in units.keys():
1695 document.warning("Unknown multibib value `%s'!" % nultibib)
1697 unit = units[multibib]
1701 i = find_token(document.body, "\\begin_layout " + unit, i)
1705 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1706 "\\begin_inset ERT", "status open", "",
1707 "\\begin_layout Plain Layout", "", "",
1709 "end{btUnit}", "\\end_layout",
1710 "\\begin_layout Plain Layout", "",
1713 "\\end_layout", "", "\\end_inset", "", "",
1717 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1718 "\\begin_inset ERT", "status open", "",
1719 "\\begin_layout Plain Layout", "", "",
1722 "\\end_layout", "", "\\end_inset", "", "",
1729 i = find_token(document.body, "\\end_body", i)
1730 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1731 "\\begin_inset ERT", "status open", "",
1732 "\\begin_layout Plain Layout", "", "",
1735 "\\end_layout", "", "\\end_inset", "", "",
# Revert \multibib "child" (per-included-file bibliographies).  With
# biblatex: insert \newrefsection ERT before each include inset; with
# bibtex+bibtopic: wrap each include in \begin{btUnit}/\end{btUnit} ERT;
# plain bibtex: load the chapterbib package from the preamble.
# NOTE(review): listing is decimated (loop headers, guards, parts of the
# ERT string lists are missing); comments cover only visible lines.
1739 def revert_chapterbib(document):
1740 " Revert chapterbib support "
1742 # 1. Get cite engine
1744 i = find_token(document.header, "\\cite_engine", 0)
1746 document.warning("Malformed document! Missing \\cite_engine")
1748 engine = get_value(document.header, "\\cite_engine", i)
1750 # 2. Do we use biblatex?
1752 if engine in ["biblatex", "biblatex-natbib"]:
1755 # 3. Store multibib document header value
1757 i = find_token(document.header, "\\multibib", 0)
1759 multibib = get_value(document.header, "\\multibib", i)
# Only the "child" value is handled here; anything else is left alone.
1761 if not multibib or multibib != "child":
1765 # 4. remove multibib header
1766 del document.header[i]
1770 # find include insets
1773 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1776 j = find_end_of_inset(document.body, i)
1778 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
# Insert the ERT outside the paragraph containing the include.
1781 parent = get_containing_layout(document.body, i)
1784 # Insert ERT \\newrefsection before inset
1785 beg = ["\\begin_layout Standard",
1786 "\\begin_inset ERT", "status open", "",
1787 "\\begin_layout Plain Layout", "", "",
1790 "\\end_layout", "", "\\end_inset", "", "",
1792 document.body[parbeg-1:parbeg-1] = beg
1797 # 6. Bibtex/Bibtopic
1798 i = find_token(document.header, "\\use_bibtopic", 0)
1800 # this should not happen
1801 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1803 if get_value(document.header, "\\use_bibtopic", i) == "true":
1804 # find include insets
1807 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1810 j = find_end_of_inset(document.body, i)
1812 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1815 parent = get_containing_layout(document.body, i)
1819 # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
1820 beg = ["\\begin_layout Standard",
1821 "\\begin_inset ERT", "status open", "",
1822 "\\begin_layout Plain Layout", "", "",
1825 "\\end_layout", "", "\\end_inset", "", "",
1827 end = ["\\begin_layout Standard",
1828 "\\begin_inset ERT", "status open", "",
1829 "\\begin_layout Plain Layout", "", "",
1832 "\\end_layout", "", "\\end_inset", "", "",
# Insert the trailing ERT first so parbeg stays valid for the second
# insertion; then advance j past both insertions.
1834 document.body[parend+1:parend+1] = end
1835 document.body[parbeg-1:parbeg-1] = beg
1836 j += len(beg) + len(end)
1840 # 7. Chapterbib proper
1841 add_to_preamble(document, ["\\usepackage{chapterbib}"])
# Introduce the \use_dash_ligatures header and drop the zero-width space
# (U+200B) that older formats inserted after literal en/em dashes.
# NOTE(review): listing is decimated (else branches, index advances and
# the per-line scan initialization are missing); `document.start` is
# presumably the file's initial format number -- confirm against LyX.py.
1844 def convert_dashligatures(document):
1845 " Remove a zero-length space (U+200B) after en- and em-dashes. "
1847 i = find_token(document.header, "\\use_microtype", 0)
# Format range 475-508 corresponds to files written by LyX 2.2.
1849 if document.start > 474 and document.start < 509:
1850 # This was created by LyX 2.2
1851 document.header[i+1:i+1] = ["\\use_dash_ligatures false"]
1853 # This was created by LyX 2.1 or earlier
1854 document.header[i+1:i+1] = ["\\use_dash_ligatures true"]
1857 while i < len(document.body):
1858 words = document.body[i].split()
1859 # Skip some document parts where dashes are not converted
1860 if len(words) > 1 and words[0] == "\\begin_inset" and \
1861 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1862 "FormulaMacro", "Graphics", "IPA", "listings"]:
1863 j = find_end_of_inset(document.body, i)
1865 document.warning("Malformed LyX document: Can't find end of " \
1866 + words[1] + " inset at line " + str(i))
1871 if len(words) > 0 and words[0] in ["\\leftindent", \
1872 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
# Find the next en- or em-dash on the current body line.
1878 j = document.body[i].find(u"\u2013", start) # en-dash
1879 k = document.body[i].find(u"\u2014", start) # em-dash
1880 if j == -1 and k == -1:
1882 if j == -1 or (k != -1 and k < j):
1884 after = document.body[i][j+1:]
# Drop a zero-width space immediately following the dash, whether it
# is on the same line or at the start of the next one.
1885 if after.startswith(u"\u200B"):
1886 document.body[i] = document.body[i][:j+1] + after[1:]
1888 if len(after) == 0 and document.body[i+1].startswith(u"\u200B"):
1889 document.body[i+1] = document.body[i+1][1:]
# Remove the \use_dash_ligatures header; when ligatures were enabled with
# TeX fonts, re-insert the zero-width space (U+200B) after each literal
# en/em dash so older LyX versions render them the same way.
# NOTE(review): listing is decimated (guards and index advances missing);
# the skip logic mirrors convert_dashligatures above.
1895 def revert_dashligatures(document):
1896 " Remove font ligature settings for en- and em-dashes. "
1897 i = find_token(document.header, "\\use_dash_ligatures", 0)
1900 use_dash_ligatures = get_bool_value(document.header, "\\use_dash_ligatures", i)
1901 del document.header[i]
1902 use_non_tex_fonts = False
1903 i = find_token(document.header, "\\use_non_tex_fonts", 0)
1905 use_non_tex_fonts = get_bool_value(document.header, "\\use_non_tex_fonts", i)
# Non-TeX fonts never used dash ligatures, so nothing to re-insert.
1906 if not use_dash_ligatures or use_non_tex_fonts:
1909 # Add a zero-length space (U+200B) after en- and em-dashes
1911 while i < len(document.body):
1912 words = document.body[i].split()
1913 # Skip some document parts where dashes are not converted
1914 if len(words) > 1 and words[0] == "\\begin_inset" and \
1915 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1916 "FormulaMacro", "Graphics", "IPA", "listings"]:
1917 j = find_end_of_inset(document.body, i)
1919 document.warning("Malformed LyX document: Can't find end of " \
1920 + words[1] + " inset at line " + str(i))
1925 if len(words) > 0 and words[0] in ["\\leftindent", \
1926 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
1932 j = document.body[i].find(u"\u2013", start) # en-dash
1933 k = document.body[i].find(u"\u2014", start) # em-dash
1934 if j == -1 and k == -1:
1936 if j == -1 or (k != -1 and k < j):
1938 after = document.body[i][j+1:]
1939 document.body[i] = document.body[i][:j+1] + u"\u200B" + after
# Conversion machinery tables consumed by the lyx2lyx driver: each entry
# is [target_format, [functions...]].  The convert chain steps formats
# upward to 2.3; the revert chain steps them back down.
# NOTE(review): the `convert = [` / `revert = [` openers, list closers and
# several entries are missing from this decimated listing.
1948 supported_versions = ["2.3.0", "2.3"]
1950 [509, [convert_microtype]],
1951 [510, [convert_dateinset]],
1952 [511, [convert_ibranches]],
1953 [512, [convert_beamer_article_styles]],
1957 [516, [convert_inputenc]],
1959 [518, [convert_iopart]],
1960 [519, [convert_quotestyle]],
1962 [521, [convert_frenchquotes]],
1973 [532, [convert_literalparam]],
1976 [535, [convert_dashligatures]]
1980 [534, [revert_dashligatures]],
1981 [533, [revert_chapterbib]],
1982 [532, [revert_multibib]],
1983 [531, [revert_literalparam]],
1984 [530, [revert_qualicites]],
1985 [529, [revert_bibpackopts]],
1986 [528, [revert_citekeyonly]],
1987 [527, [revert_biblatex]],
1988 [526, [revert_noprefix]],
1989 [525, [revert_plural_refs]],
1990 [524, [revert_labelonly]],
1991 [523, [revert_crimson, revert_cochinealmath]],
1992 [522, [revert_cjkquotes]],
1993 [521, [revert_dynamicquotes]],
1994 [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
1995 [519, [revert_plainquote]],
1996 [518, [revert_quotestyle]],
1997 [517, [revert_iopart]],
1998 [516, [revert_quotes]],
2000 [514, [revert_urdu, revert_syriac]],
2001 [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
2002 [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
2003 [511, [revert_beamer_article_styles]],
2004 [510, [revert_ibranches]],
2006 [508, [revert_microtype]]
2010 if __name__ == "__main__":