1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
4 # Copyright (C) 2016 The LyX team
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 """ Convert files to the file format generated by lyx 2.3"""
26 # Uncomment only what you need to import, please.
28 from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
29 find_token, find_end_of_inset, get_value, get_bool_value, \
30 get_containing_layout, get_quoted_value, del_token
31 # find_tokens, find_token_exact, is_in_inset, \
32 # check_token, get_option_value
34 from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
35 # get_ert, lyx2latex, \
36 # lyx2verbatim, length_in_bp, convert_info_insets
37 # insert_to_preamble, latex_length, revert_flex_inset, \
38 # revert_font_attrs, hex2ratio, str2bool
40 ####################################################################
41 # Private helper functions
45 ###############################################################################
47 ### Conversion and reversion routines
49 ###############################################################################
def convert_microtype(document):
    """ Add the \\use_microtype header setting.

    The setting is inserted right after \\font_tt_scale and is switched on
    iff the user preamble already loads the microtype package (in which case
    the now-redundant preamble line is removed).
    """
    i = find_token(document.header, "\\font_tt_scale", 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
        # fall back to appending at the end of the header
        i = len(document.header) - 1

    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
    if j == -1:
        document.header.insert(i + 1, "\\use_microtype false")
    else:
        document.header.insert(i + 1, "\\use_microtype true")
        # the native setting supersedes the manual preamble load
        del document.preamble[j]
def revert_microtype(document):
    """ Remove the \\use_microtype header setting.

    If microtype was enabled, re-add the package load to the user preamble
    so the output stays unchanged.
    """
    i = find_token(document.header, "\\use_microtype", 0)
    if i == -1:
        return
    use_microtype = get_bool_value(document.header, "\\use_microtype", i)
    del document.header[i]
    if use_microtype:
        add_to_preamble(document, ["\\usepackage{microtype}"])
def convert_dateinset(document):
    """ Convert date external insets to ERT containing \\today. """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset External", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
            i += 1
            continue
        # only the "Date" template is affected
        if get_value(document.body, 'template', i, j) == "Date":
            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
        i += 1
def convert_inputenc(document):
    """ Replace no longer supported input encoding setting.

    The (never valid) encoding name "pt254" is mapped to "pt154".
    """
    i = find_token(document.header, "\\inputenc", 0)
    if i == -1:
        return
    if get_value(document.header, "\\inputencoding", i) == "pt254":
        document.header[i] = "\\inputencoding pt154"
def convert_ibranches(document):
    """ Add an "inverted 0" line to every branch inset.

    Newer formats record the inversion state explicitly; old documents are
    all non-inverted.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            return
        document.body.insert(i + 1, "inverted 0")
        # skip past the line we just inserted
        i += 2
def revert_ibranches(document):
    """ Convert inverted branches to explicit anti-branches.

    For every branch inset carrying "inverted 1", an "Anti-<branch>" branch
    with the opposite selection state is created in the header and the inset
    is retargeted at it.  The "inverted" key is removed from all insets.
    """
    # Collect the branches declared in the header and their selection state.
    ourbranches = {}
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        branch = document.header[i][8:].strip()
        if document.header[i+1].startswith("\\selected "):
            selected = int(document.header[i+1][10])
        else:
            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
            selected = 1
        # the value tells us whether the branch is selected
        ourbranches[branch] = selected
        i += 1

    # Figure out what inverted branches, if any, have been used
    # and convert them to "Anti-OldBranch"
    ibranches = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Branch", i)
        if i == -1:
            break
        if not document.body[i+1].startswith("inverted "):
            document.warning("Malformed LyX document: Missing 'inverted' tag!")
            i += 1
            continue
        inverted = document.body[i+1][9]
        if inverted == "1":
            branch = document.body[i][20:].strip()
            if not branch in ibranches:
                # pick a name that does not clash with an existing anti-branch
                antibranch = "Anti-" + branch
                while antibranch in ibranches:
                    antibranch = "x" + antibranch
                ibranches[branch] = antibranch
            else:
                antibranch = ibranches[branch]
            document.body[i] = "\\begin_inset Branch " + antibranch
        # remove "inverted" key
        del document.body[i+1]
        i += 1

    # now we need to add the new branches to the header
    # (FIX: dict.iteritems() is Python 2 only; use items())
    for old, new in ibranches.items():
        i = find_token(document.header, "\\branch " + old, 0)
        if i == -1:
            document.warning("Can't find branch %s even though we found it before!" % (old))
            continue
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document! Can't find end of branch " + old)
            continue
        # ourbranches[old] - 1 inverts the selection status of the old branch
        # NOTE(review): for selected == 0 this yields "-1", not "1" -- kept
        # as in the original; confirm against the header parser before changing.
        lines = ["\\branch " + new,
                 "\\selected " + str(ourbranches[old] - 1)]
        # these are the old lines telling us color, etc.
        lines += document.header[i+2 : j+1]
        document.header[i:i] = lines
# NOTE(review): numbered listing with gaps -- the jumps in the embedded line
# numbers (189, 192-193, 199, 201, 204, 206-207, 209, 212-213, 220-227,
# 235-236, 238-240) show that blank lines, the `if`/`return` guards, and part
# of the inserted local-layout string block were dropped.  The code below is
# left byte-identical; restore the missing lines from the upstream lyx2lyx
# sources (lyx_2_3.py) rather than inferring them -- the missing 220-227 span
# contains literal layout strings that cannot be reconstructed from context.
187 def revert_beamer_article_styles(document):
188 " Include (scr)article styles in beamer article "
190 beamer_articles = ["article-beamer", "scrarticle-beamer"]
191 if document.textclass not in beamer_articles:
194 inclusion = "article.layout"
195 if document.textclass == "scrarticle-beamer":
196 inclusion = "scrartcl.layout"
198 i = find_token(document.header, "\\begin_local_layout", 0)
200 k = find_token(document.header, "\\language", 0)
202 # this should not happen
203 document.warning("Malformed LyX document! No \\language header found!")
205 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
208 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
210 # this should not happen
211 document.warning("Malformed LyX document: Can't find end of local layout!")
214 document.header[i+1 : i+1] = [
215 "### Inserted by lyx2lyx (more [scr]article styles) ###",
216 "Input " + inclusion,
217 "Input beamer.layout",
218 "Provides geometry 0",
219 "Provides hyperref 0",
228 " \\usepackage{beamerarticle,pgf}",
229 " % this default might be overridden by plain title style",
230 " \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
231 " \\AtBeginDocument{",
232 " \\let\\origtableofcontents=\\tableofcontents",
233 " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
234 " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
237 "### End of insertion by lyx2lyx (more [scr]article styles) ###"
def convert_beamer_article_styles(document):
    """ Remove the (scr)article styles lyx2lyx included in beamer articles.

    Inverse of revert_beamer_article_styles: deletes the marked block from
    the local layout (or the whole local layout if nothing else is in it).
    """
    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"

    if document.language == "bosnian":
        document.language = "english"
        i = find_token(document.header, "\\language bosnian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # request babel explicitly so the option below takes effect
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
        else:
            # no options header yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options bosnian")
def revert_friulan(document):
    "Set the document language to English but assure Friulan output"

    if document.language == "friulan":
        document.language = "english"
        i = find_token(document.header, "\\language friulan", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # request babel explicitly so the option below takes effect
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
        else:
            # no options header yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options friulan")
def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"

    if document.language == "macedonian":
        document.language = "english"
        i = find_token(document.header, "\\language macedonian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # request babel explicitly so the option below takes effect
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
        else:
            # no options header yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options macedonian")
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language == "piedmontese":
        document.language = "english"
        i = find_token(document.header, "\\language piedmontese", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # request babel explicitly so the option below takes effect
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options piedmontese,")
        else:
            # no options header yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options piedmontese")
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language == "romansh":
        document.language = "english"
        i = find_token(document.header, "\\language romansh", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            # request babel explicitly so the option below takes effect
            document.header[j] = "\\language_package babel"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options romansh,")
        else:
            # no options header yet: add one after \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l + 1, "\\options romansh")
def revert_amharic(document):
    "Set the document language to English but assure Amharic output"

    if document.language == "amharic":
        document.language = "english"
        i = find_token(document.header, "\\language amharic", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare amharic as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{amharic}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{amharic}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_asturian(document):
    "Set the document language to English but assure Asturian output"

    if document.language == "asturian":
        document.language = "english"
        i = find_token(document.header, "\\language asturian", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare asturian as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{asturian}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{asturian}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_kannada(document):
    "Set the document language to English but assure Kannada output"

    if document.language == "kannada":
        document.language = "english"
        i = find_token(document.header, "\\language kannada", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare kannada as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{kannada}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{kannada}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_khmer(document):
    "Set the document language to English but assure Khmer output"

    if document.language == "khmer":
        document.language = "english"
        i = find_token(document.header, "\\language khmer", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare khmer as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{khmer}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{khmer}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_urdu(document):
    "Set the document language to English but assure Urdu output"

    if document.language == "urdu":
        document.language = "english"
        i = find_token(document.header, "\\language urdu", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare urdu as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{urdu}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{urdu}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
def revert_syriac(document):
    "Set the document language to English but assure Syriac output"

    if document.language == "syriac":
        document.language = "english"
        i = find_token(document.header, "\\language syriac", 0)
        if i != -1:
            document.header[i] = "\\language english"
        j = find_token(document.header, "\\language_package default", 0)
        if j != -1:
            document.header[j] = "\\language_package default"
        # declare syriac as a polyglossia "other language"
        add_to_preamble(document, ["\\AtBeginDocument{\setotherlanguage{syriac}}"])
        # open the body with ERT switching the default language back
        # NOTE(review): the "\\backslash" line and list closing were restored
        # from upstream; confirm against the lyx2lyx sources.
        document.body[2 : 2] = ["\\begin_layout Standard",
                                "\\begin_inset ERT", "status open", "",
                                "\\begin_layout Plain Layout", "", "",
                                "\\backslash",
                                "resetdefaultlanguage{syriac}",
                                "\\end_layout", "", "\\end_inset", "", "",
                                "\\end_layout", ""]
# NOTE(review): numbered listing with gaps -- the jumps in the embedded line
# numbers (496, 498-499, 505, 507-509, 511-513, 515, 517-519, 521, 523-526,
# 528-529, ...) show that blank lines, the loop/guard lines (`if ... == -1:`,
# `else:`, index bookkeeping) and the `replace = ...` assignments were
# dropped.  Code left byte-identical; restore the missing lines from the
# upstream lyx2lyx sources (lyx_2_3.py) rather than inferring them.
494 def revert_quotes(document):
495 " Revert Quote Insets in verbatim or Hebrew context to plain quotes "
497 # First handle verbatim insets
500 while i < len(document.body):
501 words = document.body[i].split()
502 if len(words) > 1 and words[0] == "\\begin_inset" and \
503 ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
504 j = find_end_of_inset(document.body, i)
506 document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
510 k = find_token(document.body, '\\begin_inset Quotes', i, j)
514 l = find_end_of_inset(document.body, k)
516 document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
520 if document.body[k].endswith("s"):
522 document.body[k:l+1] = [replace]
527 # Now verbatim layouts
530 while i < len(document.body):
531 words = document.body[i].split()
532 if len(words) > 1 and words[0] == "\\begin_layout" and \
533 words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
534 j = find_end_of_layout(document.body, i)
536 document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
540 k = find_token(document.body, '\\begin_inset Quotes', i, j)
544 l = find_end_of_inset(document.body, k)
546 document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
550 if document.body[k].endswith("s"):
552 document.body[k:l+1] = [replace]
558 if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
564 k = find_token(document.body, '\\begin_inset Quotes', i)
567 l = find_end_of_inset(document.body, k)
569 document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
573 parent = get_containing_layout(document.body, k)
574 ql = find_token_backwards(document.body, "\\lang", k)
575 if ql == -1 or ql < parent[1]:
576 hebrew = document.language == "hebrew"
577 elif document.body[ql] == "\\lang hebrew":
581 if document.body[k].endswith("s"):
583 document.body[k:l+1] = [replace]
def revert_iopart(document):
    """ Input the standard layouts via local layout for iopart.

    Creates an (empty) local layout section if the document has none yet.
    """
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # no local layout yet: create one just before \language
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
def convert_iopart(document):
    """ Remove the local layout lyx2lyx added for iopart, if present. """
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was
            document.header[i : j + 1] = []
        else:
            document.header[k : l + 1] = []
def convert_quotestyle(document):
    """ Convert \\quotes_language to \\quotes_style (value unchanged). """
    i = find_token(document.header, "\\quotes_language", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    val = get_value(document.header, "\\quotes_language", i)
    document.header[i] = "\\quotes_style " + val
def revert_quotestyle(document):
    """ Revert \\quotes_style to \\quotes_language (value unchanged). """
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    val = get_value(document.header, "\\quotes_style", i)
    document.header[i] = "\\quotes_language " + val
def revert_plainquote(document):
    """ Revert plain quote insets to literal quote characters. """

    # First, revert style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            # advance past the malformed inset to guarantee progress
            i = k + 1
            continue
        # a trailing "s" in the inset spec means single quotes
        replace = '"'
        if document.body[k].endswith("s"):
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l
def convert_frenchquotes(document):
    """ Convert french quote insets to swiss. """

    # First, convert the style setting
    i = find_token(document.header, "\\quotes_style french", 0)
    if i != -1:
        document.header[i] = "\\quotes_style swiss"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; strip the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("f", "c", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
def revert_swissquotes(document):
    """ Revert swiss quote insets to french. """

    # First, revert the style setting
    i = find_token(document.header, "\\quotes_style swiss", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes c', i)
        if i == -1:
            return
        # get_value returns "Quotes xyz"; strip the leading "Quotes "
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("c", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
# NOTE(review): numbered listing with gaps -- the missing lines (733, 736,
# 738-741, 743-744, 747-748, 750-751, 754-756) contained blank lines, the
# `if`/`return` guards, the loop construct and, critically, the condition
# that selects between the two `newval` replacements below.  Do not infer
# the condition; restore it from the upstream lyx2lyx sources, since a wrong
# guess would silently swap opening/closing or single/double quote marks.
731 def revert_britishquotes(document):
732 " Revert british quote insets to english "
734 # First, revert style setting
735 i = find_token(document.header, "\\quotes_style british", 0)
737 document.header[i] = "\\quotes_style english"
742 i = find_token(document.body, '\\begin_inset Quotes b', i)
745 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
746 newval = val.replace("b", "e", 1)
749 newval = newval.replace("d", "s")
752 newval = newval.replace("s", "d")
753 document.body[i] = document.body[i].replace(val, newval)
# NOTE(review): numbered listing with gaps -- the missing lines (759, 762,
# 764-767, 769-770, 772-773, 775-776, 779-781) contained blank lines, guards,
# the loop construct and the condition choosing between the two `newval`
# branches.  Restore from the upstream lyx2lyx sources; do not infer.
757 def revert_swedishgquotes(document):
758 " Revert swedish quote insets "
760 # First, revert style setting
761 i = find_token(document.header, "\\quotes_style swedishg", 0)
763 document.header[i] = "\\quotes_style danish"
768 i = find_token(document.body, '\\begin_inset Quotes w', i)
771 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
774 newval = val.replace("w", "a", 1).replace("r", "l")
777 newval = val.replace("w", "s", 1)
778 document.body[i] = document.body[i].replace(val, newval)
# NOTE(review): numbered listing with gaps -- the missing lines (784-786,
# 788-789, 791-792, 795-797) contained blank lines, the loop construct, the
# `if i == -1: return` guard and the condition restricting the replacement
# to inner quote marks.  Restore from the upstream lyx2lyx sources.
782 def revert_frenchquotes(document):
783 " Revert french inner quote insets "
787 i = find_token(document.body, '\\begin_inset Quotes f', i)
790 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
793 newval = val.replace("f", "e", 1).replace("s", "d")
794 document.body[i] = document.body[i].replace(val, newval)
# NOTE(review): numbered listing with gaps -- the missing lines (800, 803,
# 805-808, 810-811, 814-815, 818-820) contained blank lines, guards, the
# loop construct and the condition under which the second replacement runs.
# Restore from the upstream lyx2lyx sources; do not infer.
798 def revert_frenchinquotes(document):
799 " Revert inner frenchin quote insets "
801 # First, revert style setting
802 i = find_token(document.header, "\\quotes_style frenchin", 0)
804 document.header[i] = "\\quotes_style french"
809 i = find_token(document.body, '\\begin_inset Quotes i', i)
812 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
813 newval = val.replace("i", "f", 1)
816 newval = newval.replace("s", "d")
817 document.body[i] = document.body[i].replace(val, newval)
# NOTE(review): numbered listing with gaps -- the missing lines (823, 826,
# 828-831, 833-834, 836-838, 840-841, 844-846) contained blank lines, guards,
# the loop construct and the conditions selecting between the two `newval`
# replacements.  Restore from the upstream lyx2lyx sources; do not infer.
821 def revert_russianquotes(document):
822 " Revert russian quote insets "
824 # First, revert style setting
825 i = find_token(document.header, "\\quotes_style russian", 0)
827 document.header[i] = "\\quotes_style french"
832 i = find_token(document.body, '\\begin_inset Quotes r', i)
835 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
839 newval = val.replace("r", "g", 1).replace("s", "d")
842 newval = val.replace("r", "f", 1)
843 document.body[i] = document.body[i].replace(val, newval)
# NOTE(review): numbered listing with gaps -- every `s = "..."` assignment
# belonging to the if/elif chain below (lines 863, 865, 867, ..., 887) is
# missing, as are blank lines, guards and the loop construct.  The mapping
# from \quotes_style to the per-style quote letter cannot be reconstructed
# from context; restore from the upstream lyx2lyx sources.
847 def revert_dynamicquotes(document):
848 " Revert dynamic quote insets "
850 # First, revert header
851 i = find_token(document.header, "\\dynamic_quotes", 0)
853 del document.header[i]
857 i = find_token(document.header, "\\quotes_style", 0)
859 document.warning("Malformed document! Missing \\quotes_style")
861 style = get_value(document.header, "\\quotes_style", i)
864 if style == "english":
866 elif style == "swedish":
868 elif style == "german":
870 elif style == "polish":
872 elif style == "swiss":
874 elif style == "danish":
876 elif style == "plain":
878 elif style == "british":
880 elif style == "swedishg":
882 elif style == "french":
884 elif style == "frenchin":
886 elif style == "russian":
889 # now transform the insets
892 i = find_token(document.body, '\\begin_inset Quotes x', i)
895 document.body[i] = document.body[i].replace("x", s)
# NOTE(review): numbered listing with gaps -- the missing lines include the
# guards, loop constructs, the dynamic-inset letter assignments (915, 917),
# and every condition (947-952, 956-958, 962-966, 970-972, 999-1002, ...)
# selecting which `replace` value applies to which quote-inset variant.
# These conditions cannot be reconstructed from context; restore the whole
# function from the upstream lyx2lyx sources before use.
899 def revert_cjkquotes(document):
900 " Revert cjk quote insets "
904 i = find_token(document.header, "\\quotes_style", 0)
906 document.warning("Malformed document! Missing \\quotes_style")
908 style = get_value(document.header, "\\quotes_style", i)
910 global_cjk = style.find("cjk") != -1
913 document.header[i] = "\\quotes_style english"
914 # transform dynamic insets
916 if style == "cjkangle":
920 i = find_token(document.body, '\\begin_inset Quotes x', i)
923 document.body[i] = document.body[i].replace("x", s)
926 cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]
931 k = find_token(document.body, '\\begin_inset Quotes j', i)
934 l = find_end_of_inset(document.body, k)
936 document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
940 parent = get_containing_layout(document.body, k)
941 ql = find_token_backwards(document.body, "\\lang", k)
942 if ql == -1 or ql < parent[1]:
943 cjk = document.language in cjk_langs
944 elif document.body[ql].split()[1] in cjk_langs:
946 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
953 replace = [u"\u300E"]
955 replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
959 replace = [u"\u300F"]
961 replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
967 replace = [u"\u300C"]
969 replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
973 replace = [u"\u300D"]
975 replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]
977 document.body[k:l+1] = replace
983 k = find_token(document.body, '\\begin_inset Quotes k', i)
986 l = find_end_of_inset(document.body, k)
988 document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
992 parent = get_containing_layout(document.body, k)
993 ql = find_token_backwards(document.body, "\\lang", k)
994 if ql == -1 or ql < parent[1]:
995 cjk = document.language in cjk_langs
996 elif document.body[ql].split()[1] in cjk_langs:
998 val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
1003 # inner opening mark
1005 replace = [u"\u3008"]
1007 replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
1009 # inner closing mark
1011 replace = [u"\u3009"]
1013 replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
1017 # outer opening mark
1019 replace = [u"\u300A"]
1021 replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
1023 # outer closing mark
1025 replace = [u"\u300B"]
1027 replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]
1029 document.body[k:l+1] = replace
def revert_crimson(document):
    """ Revert native Cochineal/Crimson font definition to LaTeX. """

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            # old-style figures need the osf package option
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            document.header[i] = document.header[i].replace("cochineal", "default")
def revert_cochinealmath(document):
    """ Revert cochineal newtxmath definitions to LaTeX. """

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            add_to_preamble(document, "\\usepackage[cochineal]{newtxmath}")
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
def revert_labelonly(document):
    """ Revert labelonly tag for InsetRef: replace the inset with ERT
    containing the bare label. """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            # not a labelonly reference: skip past this inset
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" % (i))
            i = j + 1
            continue
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
def revert_plural_refs(document):
    """ Revert plural and capitalized formatted references.

    With refstyle enabled, a formatted reference with plural/caps set is
    replaced by ERT calling the appropriate \\...ref[s] command; otherwise
    the now-unknown plural/caps keys are simply dropped from the inset.
    """
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # FIX: find_token returns -1 when absent; the original compared with 0,
    # which both mis-enabled refstyle on a missing header and disabled it
    # when the header happened to sit on line 0.
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" % (i))
            i += 1
            continue

        plural = caps = suffix = False
        # FIX: LyX writes "LatexCommand", not "LaTeXCommand"; the original
        # token never matched, so formatted references were never reverted.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except ValueError:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            # \Prefixref for capitalized variants
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "s"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
def revert_noprefix(document):
    """ Revert labelonly references with 'noprefix' set.

    Such references are replaced by ERT containing only the part of the
    label after the "prefix:" separator; in all other cases the unknown
    noprefix key is just deleted from the inset.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        noprefix = False
        if k != -1:
            noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # either not labelonly, or noprefix not set: just drop the key
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" % (i))
            i = j + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except ValueError:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
# Revert biblatex support introduced in LyX 2.3: switch the cite engine back
# to natbib, move biblatex-specific headers (\biblatex_bibstyle,
# \biblatex_citestyle, \biblio_options) into an explicit \usepackage{biblatex}
# preamble line, turn bibtex insets into \printbibliography ERT (keeping the
# original inset inside a Note), convert biblatex-only citation commands to
# ERT, and emulate "Provides natbib" via a local layout insertion.
# NOTE(review): many guard/loop lines are missing from this extraction;
# compare with upstream lyx_2_3.py before trusting the control flow.
1175 def revert_biblatex(document):
1176     " Revert biblatex support "
1182 # 1. Get cite engine
1184 i = find_token(document.header, "\\cite_engine", 0)
1186 document.warning("Malformed document! Missing \\cite_engine")
1188 engine = get_value(document.header, "\\cite_engine", i)
1190 # 2. Store biblatex state and revert to natbib
1192 if engine in ["biblatex", "biblatex-natbib"]:
1194 document.header[i] = "\\cite_engine natbib"
1196 # 3. Store and remove new document headers
1198 i = find_token(document.header, "\\biblatex_bibstyle", 0)
1200 bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
1201 del document.header[i]
1204 i = find_token(document.header, "\\biblatex_citestyle", 0)
1206 citestyle = get_value(document.header, "\\biblatex_citestyle", i)
1207 del document.header[i]
1210 i = find_token(document.header, "\\biblio_options", 0)
1212 biblio_options = get_value(document.header, "\\biblio_options", i)
1213 del document.header[i]
# Build the biblatex package options string; natbib=true keeps natbib-style
# cite commands working after the revert.
1216 bbxopts = "[natbib=true"
1218 bbxopts += ",bibstyle=" + bibstyle
1220 bbxopts += ",citestyle=" + citestyle
1221 if biblio_options != "":
1222 bbxopts += "," + biblio_options
1224 add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")
# Bibtex insets: collect the bib files and wrap each inset into ERT + Note.
1234 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1237 j = find_end_of_inset(document.body, i)
1239 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1242 bibs = get_quoted_value(document.body, "bibfiles", i, j)
1243 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1246 bibresources += bibs.split(",")
1248 document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
1249 # remove biblatexopts line
1250 k = find_token(document.body, "biblatexopts", i, j)
1252 del document.body[k]
1253 # Re-find inset end line
1254 j = find_end_of_inset(document.body, i)
1255 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1257 pcmd = "printbibliography"
1259 pcmd += "[" + opts + "]"
1260 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1261 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1262 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1263 "status open", "", "\\begin_layout Plain Layout" ]
1264 repl += document.body[i:j+1]
1265 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1266 document.body[i:j+1] = repl
# Every bib file becomes an explicit \addbibresource preamble line.
1272 for b in bibresources:
1273 add_to_preamble(document, "\\addbibresource{" + b + ".bib}")
1275 # 2. Citation insets
1277 # Specific citation insets used in biblatex that need to be reverted to ERT
1280 "citebyear" : "citeyear",
1281 "citeyear" : "cite*",
1282 "Footcite" : "Smartcite",
1283 "footcite" : "smartcite",
1284 "Autocite" : "Autocite",
1285 "autocite" : "autocite",
1286 "citetitle" : "citetitle",
1287 "citetitle*" : "citetitle*",
1288 "fullcite" : "fullcite",
1289 "footfullcite" : "footfullcite",
1290 "supercite" : "supercite",
1291 "citeauthor" : "citeauthor",
1292 "citeauthor*" : "citeauthor*",
1293 "Citeauthor" : "Citeauthor",
1294 "Citeauthor*" : "Citeauthor*"
1297 # All commands accepted by LyX < 2.3. Everything else throws an error.
1298 old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
1299 "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
1300 "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
1301 "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
1302 "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
1303 "footcitet", "footcitep", "footcitealt", "footcitealp",\
1304 "footciteauthor", "footciteyear", "footciteyearpar",\
1305 "citefield", "citetitle", "cite*" ]
# Walk citation insets: known-new commands become ERT ("\cmd[pre][post]{key}"),
# unknown commands are reset to plain "cite".
1309 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1312 j = find_end_of_inset(document.body, i)
1314 document.warning("Can't find end of citation inset at line %d!!" %(i))
1317 k = find_token(document.body, "LatexCommand", i, j)
1319 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1322 cmd = get_value(document.body, "LatexCommand", k)
1323 if biblatex and cmd in list(new_citations.keys()):
1324 pre = get_quoted_value(document.body, "before", i, j)
1325 post = get_quoted_value(document.body, "after", i, j)
1326 key = get_quoted_value(document.body, "key", i, j)
1328 document.warning("Citation inset at line %d does not have a key!" %(i))
1330 # Replace known new commands with ERT
1331 res = "\\" + new_citations[cmd]
1333 res += "[" + pre + "]"
1335 res += "[" + post + "]"
1338 res += "{" + key + "}"
1339 document.body[i:j+1] = put_cmd_in_ert([res])
1340 elif cmd not in old_citations:
1341 # Reset unknown commands to cite. This is what LyX does as well
1342 # (but LyX 2.2 would break on unknown commands)
1343 document.body[k] = "LatexCommand cite"
1344 document.warning("Reset unknown cite command '%s' with cite" % cmd)
1347 # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
# A local layout block is created before \language if none exists yet.
1349 i = find_token(document.header, "\\begin_local_layout", 0)
1351 k = find_token(document.header, "\\language", 0)
1353 # this should not happen
1354 document.warning("Malformed LyX document! No \\language header found!")
1356 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1359 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1361 # this should not happen
1362 document.warning("Malformed LyX document! Can't find end of local layout!")
1365 document.header[i+1 : i+1] = [
1366 "### Inserted by lyx2lyx (biblatex emulation) ###",
1367 "Provides natbib 1",
1368 "### End of insertion by lyx2lyx (biblatex emulation) ###"
# Revert the LyX 2.3 "keyonly" citation variant: the whole citation inset is
# replaced by ERT containing just the raw key text.
# NOTE(review): loop/guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1372 def revert_citekeyonly(document):
1373     " Revert keyonly cite command to ERT "
1377 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1380 j = find_end_of_inset(document.body, i)
1382 document.warning("Can't find end of citation inset at line %d!!" %(i))
1385 k = find_token(document.body, "LatexCommand", i, j)
1387 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1390 cmd = get_value(document.body, "LatexCommand", k)
# Only insets whose LatexCommand is exactly "keyonly" are converted.
1391 if cmd != "keyonly":
1395 key = get_quoted_value(document.body, "key", i, j)
1397 document.warning("Citation inset at line %d does not have a key!" %(i))
1398 # Replace known new commands with ERT
1399 document.body[i:j+1] = put_cmd_in_ert([key])
# Revert per-document natbib/jurabib package options: the \biblio_options
# header is removed and re-expressed as a "PackageOptions <engine> <opts>"
# line inside a local layout block (created before \language if absent).
# NOTE(review): guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1404 def revert_bibpackopts(document):
1405     " Revert support for natbib/jurabib package options "
1408 i = find_token(document.header, "\\cite_engine", 0)
1410 document.warning("Malformed document! Missing \\cite_engine")
1412 engine = get_value(document.header, "\\cite_engine", i)
# Only natbib and jurabib engines carry these options.
1415 if engine not in ["natbib", "jurabib"]:
1418 i = find_token(document.header, "\\biblio_options", 0)
1420 # Nothing to do if we have no options
1423 biblio_options = get_value(document.header, "\\biblio_options", i)
1424 del document.header[i]
1426 if not biblio_options:
1427 # Nothing to do for empty options
# Ensure a local layout block exists, then insert the PackageOptions line.
1430 i = find_token(document.header, "\\begin_local_layout", 0)
1432 k = find_token(document.header, "\\language", 0)
1434 # this should not happen
1435 document.warning("Malformed LyX document! No \\language header found!")
1437 document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
1440 j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
1442 # this should not happen
1443 document.warning("Malformed LyX document! Can't find end of local layout!")
1446 document.header[i+1 : i+1] = [
1447 "### Inserted by lyx2lyx (bibliography package options) ###",
1448 "PackageOptions " + engine + " " + biblio_options,
1449 "### End of insertion by lyx2lyx (bibliography package options) ###"
# Revert qualified citation lists (per-key pre/post text, stored in the
# tab-separated "pretextlist"/"posttextlist" params). With biblatex the inset
# becomes ERT of the multi-cite form "\cmd(pre)(post)[pre_k][post_k]{key}...";
# otherwise only the qualified-list params are stripped.
# NOTE(review): loop/guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1453 def revert_qualicites(document):
1454     " Revert qualified citation list commands to ERT "
1456 # Citation insets that support qualified lists, with their LaTeX code
1460 "citet" : "textcites",
1461 "Citet" : "Textcites",
1462 "citep" : "parencites",
1463 "Citep" : "Parencites",
1464 "Footcite" : "Smartcites",
1465 "footcite" : "smartcites",
1466 "Autocite" : "Autocites",
1467 "autocite" : "autocites",
1472 i = find_token(document.header, "\\cite_engine", 0)
1474 document.warning("Malformed document! Missing \\cite_engine")
1476 engine = get_value(document.header, "\\cite_engine", i)
1478 biblatex = engine in ["biblatex", "biblatex-natbib"]
1482 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
1485 j = find_end_of_inset(document.body, i)
1487 document.warning("Can't find end of citation inset at line %d!!" %(i))
# Skip insets that have no qualified-list params at all.
1490 pres = find_token(document.body, "pretextlist", i, j)
1491 posts = find_token(document.body, "posttextlist", i, j)
1492 if pres == -1 and posts == -1:
1496 pretexts = get_quoted_value(document.body, "pretextlist", pres)
1497 posttexts = get_quoted_value(document.body, "posttextlist", posts)
1498 k = find_token(document.body, "LatexCommand", i, j)
1500 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
1503 cmd = get_value(document.body, "LatexCommand", k)
1504 if biblatex and cmd in list(ql_citations.keys()):
1505 pre = get_quoted_value(document.body, "before", i, j)
1506 post = get_quoted_value(document.body, "after", i, j)
1507 key = get_quoted_value(document.body, "key", i, j)
1509 document.warning("Citation inset at line %d does not have a key!" %(i))
# pretextlist/posttextlist entries are "\t"-separated "key text" pairs;
# build key -> text maps for both.
1511 keys = key.split(",")
1512 prelist = pretexts.split("\t")
1515 ppp = pp.split(" ", 1)
1516 premap[ppp[0]] = ppp[1]
1517 postlist = posttexts.split("\t")
1520 ppp = pp.split(" ", 1)
1521 postmap[ppp[0]] = ppp[1]
1522 # Replace known new commands with ERT
# Parentheses inside global pre/post must be braced to survive the
# biblatex multicite "(...)(...)" delimiter syntax.
1523 if "(" in pre or ")" in pre:
1524 pre = "{" + pre + "}"
1525 if "(" in post or ")" in post:
1526 post = "{" + post + "}"
1527 res = "\\" + ql_citations[cmd]
1529 res += "(" + pre + ")"
1531 res += "(" + post + ")"
1535 if premap.get(kk, "") != "":
1536 res += "[" + premap[kk] + "]"
1537 if postmap.get(kk, "") != "":
1538 res += "[" + postmap[kk] + "]"
1539 elif premap.get(kk, "") != "":
1541 res += "{" + kk + "}"
1542 document.body[i:j+1] = put_cmd_in_ert([res])
1544 # just remove the params
1545 del document.body[posttexts]
1546 del document.body[pretexts]
# Inset types whose command parameters gained a "literal" flag in LyX 2.3.
# Shared by convert_literalparam and revert_literalparam.
1550 command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
# Add the new 'literal' parameter to each command inset: "false" for insets
# that already latexified their params (so old content keeps rendering the
# same), "true" for the rest.
# NOTE(review): loop/guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1551 def convert_literalparam(document):
1552     " Add param literal "
1554 # These already had some sort of latexify method
1555 latexified_insets = ["href", "index_print", "nomenclature"]
1557 for inset in command_insets:
1560 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1563 j = find_end_of_inset(document.body, i)
1565 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
# Skip past the parameter lines to the first blank line of the inset,
# then insert the literal flag there.
1568 while i < j and document.body[i].strip() != '':
1570 if inset in latexified_insets:
1571 document.body.insert(i, "literal \"false\"")
1573 document.body.insert(i, "literal \"true\"")
# Remove the 'literal' parameter from every command inset listed in
# command_insets (inverse of convert_literalparam).
# NOTE(review): loop/guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1577 def revert_literalparam(document):
1578     " Remove param literal "
1580 for inset in command_insets:
1583 i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
1586 j = find_end_of_inset(document.body, i)
1588 document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
1591 k = find_token(document.body, 'literal', i, j)
1595 del document.body[k]
1599 def revert_multibib(document):
1600 " Revert multibib support "
1602 # 1. Get cite engine
1604 i = find_token(document.header, "\\cite_engine", 0)
1606 document.warning("Malformed document! Missing \\cite_engine")
1608 engine = get_value(document.header, "\\cite_engine", i)
1610 # 2. Do we use biblatex?
1612 if engine in ["biblatex", "biblatex-natbib"]:
1615 # 3. Store and remove multibib document header
1617 i = find_token(document.header, "\\multibib", 0)
1619 multibib = get_value(document.header, "\\multibib", i)
1620 del document.header[i]
1625 # 4. The easy part: Biblatex
1627 i = find_token(document.header, "\\biblio_options", 0)
1629 k = find_token(document.header, "\\use_bibtopic", 0)
1631 # this should not happen
1632 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1634 document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
1636 biblio_options = get_value(document.header, "\\biblio_options", i)
1638 biblio_options += ","
1639 biblio_options += "refsection=" + multibib
1640 document.header[i] = "\\biblio_options " + biblio_options
1645 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
1648 j = find_end_of_inset(document.body, i)
1650 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1653 btprint = get_quoted_value(document.body, "btprint", i, j)
1654 if btprint != "bibbysection":
1657 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1658 # change btprint line
1659 k = find_token(document.body, "btprint", i, j)
1661 document.body[k] = "btprint \"btPrintCited\""
1662 # Insert ERT \\bibbysection and wrap bibtex inset to a Note
1663 pcmd = "bibbysection"
1665 pcmd += "[" + opts + "]"
1666 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1667 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1668 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1669 "status open", "", "\\begin_layout Plain Layout" ]
1670 repl += document.body[i:j+1]
1671 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1672 document.body[i:j+1] = repl
1678 # 5. More tricky: Bibtex/Bibtopic
1679 k = find_token(document.header, "\\use_bibtopic", 0)
1681 # this should not happen
1682 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1684 document.header[k] = "\\use_bibtopic true"
1686 # Possible units. This assumes that the LyX name follows the std,
1687 # which might not always be the case. But it's as good as we can get.
1690 "chapter" : "Chapter",
1691 "section" : "Section",
1692 "subsection" : "Subsection",
1695 if multibib not in units.keys():
1696 document.warning("Unknown multibib value `%s'!" % nultibib)
1698 unit = units[multibib]
1702 i = find_token(document.body, "\\begin_layout " + unit, i)
1706 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1707 "\\begin_inset ERT", "status open", "",
1708 "\\begin_layout Plain Layout", "", "",
1710 "end{btUnit}", "\\end_layout",
1711 "\\begin_layout Plain Layout", "",
1714 "\\end_layout", "", "\\end_inset", "", "",
1718 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1719 "\\begin_inset ERT", "status open", "",
1720 "\\begin_layout Plain Layout", "", "",
1723 "\\end_layout", "", "\\end_inset", "", "",
1730 i = find_token(document.body, "\\end_body", i)
1731 document.body[i-1 : i-1] = ["\\begin_layout Standard",
1732 "\\begin_inset ERT", "status open", "",
1733 "\\begin_layout Plain Layout", "", "",
1736 "\\end_layout", "", "\\end_inset", "", "",
# Revert \multibib "child" (chapterbib-style bibliographies in child docs):
# for biblatex, insert \newrefsection ERT before each include inset; for
# bibtopic, wrap each include in \begin{btUnit}/\end{btUnit} ERT; for plain
# BibTeX, add \usepackage{chapterbib} to the preamble.
# NOTE(review): loop/guard lines are missing from this extraction; confirm
# control flow against upstream lyx_2_3.py.
1740 def revert_chapterbib(document):
1741     " Revert chapterbib support "
1743 # 1. Get cite engine
1745 i = find_token(document.header, "\\cite_engine", 0)
1747 document.warning("Malformed document! Missing \\cite_engine")
1749 engine = get_value(document.header, "\\cite_engine", i)
1751 # 2. Do we use biblatex?
1753 if engine in ["biblatex", "biblatex-natbib"]:
1756 # 3. Store multibib document header value
1758 i = find_token(document.header, "\\multibib", 0)
1760 multibib = get_value(document.header, "\\multibib", i)
# Only the "child" variant is handled here (the rest is revert_multibib's job).
1762 if not multibib or multibib != "child":
1766 # 4. remove multibib header
1767 del document.header[i]
1771 # find include insets
1774 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1777 j = find_end_of_inset(document.body, i)
1779 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1782 parent = get_containing_layout(document.body, i)
1785 # Insert ERT \\newrefsection before inset
1786 beg = ["\\begin_layout Standard",
1787 "\\begin_inset ERT", "status open", "",
1788 "\\begin_layout Plain Layout", "", "",
1791 "\\end_layout", "", "\\end_inset", "", "",
1793 document.body[parbeg-1:parbeg-1] = beg
1798 # 6. Bibtex/Bibtopic
1799 i = find_token(document.header, "\\use_bibtopic", 0)
1801 # this should not happen
1802 document.warning("Malformed LyX document! No \\use_bibtopic header found!")
1804 if get_value(document.header, "\\use_bibtopic", i) == "true":
1805 # find include insets
1808 i = find_token(document.body, "\\begin_inset CommandInset include", i)
1811 j = find_end_of_inset(document.body, i)
1813 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1816 parent = get_containing_layout(document.body, i)
1820 # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
1821 beg = ["\\begin_layout Standard",
1822 "\\begin_inset ERT", "status open", "",
1823 "\\begin_layout Plain Layout", "", "",
1826 "\\end_layout", "", "\\end_inset", "", "",
1828 end = ["\\begin_layout Standard",
1829 "\\begin_inset ERT", "status open", "",
1830 "\\begin_layout Plain Layout", "", "",
1833 "\\end_layout", "", "\\end_inset", "", "",
# Insert the end wrapper first so parbeg stays valid, then fix up j.
1835 document.body[parend+1:parend+1] = end
1836 document.body[parbeg-1:parbeg-1] = beg
1837 j += len(beg) + len(end)
1841 # 7. Chapterbib proper
1842 add_to_preamble(document, ["\\usepackage{chapterbib}"])
# Add the \use_dash_ligatures header (guessed from the source format the
# document came from) and drop the zero-width space (U+200B) that LyX <= 2.2
# emitted after en-/em-dashes to suppress ligature breaking.
# NOTE(review): loop scaffolding lines are missing from this extraction;
# confirm control flow against upstream lyx_2_3.py.
1845 def convert_dashligatures(document):
1846     " Remove a zero-length space (U+200B) after en- and em-dashes. "
1848 i = find_token(document.header, "\\use_microtype", 0)
# document.start is the source format number: 475-508 means LyX 2.2 origin.
1850 if document.start > 474 and document.start < 509:
1851 # This was created by LyX 2.2
1852 document.header[i+1:i+1] = ["\\use_dash_ligatures false"]
1854 # This was created by LyX 2.1 or earlier
1855 document.header[i+1:i+1] = ["\\use_dash_ligatures true"]
1858 while i < len(document.body):
1859 words = document.body[i].split()
1860 # Skip some document parts where dashes are not converted
1861 if len(words) > 1 and words[0] == "\\begin_inset" and \
1862 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1863 "FormulaMacro", "Graphics", "IPA", "listings"]:
1864 j = find_end_of_inset(document.body, i)
1866 document.warning("Malformed LyX document: Can't find end of " \
1867 + words[1] + " inset at line " + str(i))
1872 if len(words) > 0 and words[0] in ["\\leftindent", \
1873 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
# Scan each body line for en-/em-dashes and strip a following U+200B,
# also across a line break (the space may start the next line).
1879 j = document.body[i].find(u"\u2013", start) # en-dash
1880 k = document.body[i].find(u"\u2014", start) # em-dash
1881 if j == -1 and k == -1:
1883 if j == -1 or (k != -1 and k < j):
1885 after = document.body[i][j+1:]
1886 if after.startswith(u"\u200B"):
1887 document.body[i] = document.body[i][:j+1] + after[1:]
1889 if len(after) == 0 and document.body[i+1].startswith(u"\u200B"):
1890 document.body[i+1] = document.body[i+1][1:]
# Inverse of convert_dashligatures: remove the \use_dash_ligatures header
# and, when it was true (and TeX fonts are used), re-insert the zero-width
# space (U+200B) after every en-/em-dash outside the skipped inset types.
# NOTE(review): loop scaffolding lines are missing from this extraction;
# confirm control flow against upstream lyx_2_3.py.
1896 def revert_dashligatures(document):
1897     " Remove font ligature settings for en- and em-dashes. "
1898 i = find_token(document.header, "\\use_dash_ligatures", 0)
1901 use_dash_ligatures = get_bool_value(document.header, "\\use_dash_ligatures", i)
1902 del document.header[i]
1903 use_non_tex_fonts = False
1904 i = find_token(document.header, "\\use_non_tex_fonts", 0)
1906 use_non_tex_fonts = get_bool_value(document.header, "\\use_non_tex_fonts", i)
# With non-TeX fonts there are no dash ligatures to protect against.
1907 if not use_dash_ligatures or use_non_tex_fonts:
1910 # Add a zero-length space (U+200B) after en- and em-dashes
1912 while i < len(document.body):
1913 words = document.body[i].split()
1914 # Skip some document parts where dashes are not converted
1915 if len(words) > 1 and words[0] == "\\begin_inset" and \
1916 words[1] in ["CommandInset", "ERT", "External", "Formula", \
1917 "FormulaMacro", "Graphics", "IPA", "listings"]:
1918 j = find_end_of_inset(document.body, i)
1920 document.warning("Malformed LyX document: Can't find end of " \
1921 + words[1] + " inset at line " + str(i))
1926 if len(words) > 0 and words[0] in ["\\leftindent", \
1927 "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
1933 j = document.body[i].find(u"\u2013", start) # en-dash
1934 k = document.body[i].find(u"\u2014", start) # em-dash
1935 if j == -1 and k == -1:
1937 if j == -1 or (k != -1 and k < j):
1939 after = document.body[i][j+1:]
1940 document.body[i] = document.body[i][:j+1] + u"\u200B" + after
# Module-level conversion tables used by the lyx2lyx driver: each entry maps
# a target file-format number to the list of functions performing that step.
# The revert list runs the steps in reverse order.
# NOTE(review): the list headers/closers (e.g. "convert = [", "revert = [",
# closing brackets) appear to be missing from this extraction.
1949 supported_versions = ["2.3.0", "2.3"]
1951 [509, [convert_microtype]],
1952 [510, [convert_dateinset]],
1953 [511, [convert_ibranches]],
1954 [512, [convert_beamer_article_styles]],
1958 [516, [convert_inputenc]],
1960 [518, [convert_iopart]],
1961 [519, [convert_quotestyle]],
1963 [521, [convert_frenchquotes]],
1974 [532, [convert_literalparam]],
1977 [535, [convert_dashligatures]]
1981 [534, [revert_dashligatures]],
1982 [533, [revert_chapterbib]],
1983 [532, [revert_multibib]],
1984 [531, [revert_literalparam]],
1985 [530, [revert_qualicites]],
1986 [529, [revert_bibpackopts]],
1987 [528, [revert_citekeyonly]],
1988 [527, [revert_biblatex]],
1989 [526, [revert_noprefix]],
1990 [525, [revert_plural_refs]],
1991 [524, [revert_labelonly]],
1992 [523, [revert_crimson, revert_cochinealmath]],
1993 [522, [revert_cjkquotes]],
1994 [521, [revert_dynamicquotes]],
1995 [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
1996 [519, [revert_plainquote]],
1997 [518, [revert_quotestyle]],
1998 [517, [revert_iopart]],
1999 [516, [revert_quotes]],
2001 [514, [revert_urdu, revert_syriac]],
2002 [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
2003 [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
2004 [511, [revert_beamer_article_styles]],
2005 [510, [revert_ibranches]],
2007 [508, [revert_microtype]]
2011 if __name__ == "__main__":