1 # This file is part of lyx2lyx
2 # -*- coding: utf-8 -*-
3 # Copyright (C) 2006 José Matos <jamatos@lyx.org>
4 # Copyright (C) 2004-2006 Georg Baum <Georg.Baum@post.rwth-aachen.de>
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 """ Convert files to the file format generated by lyx 1.5"""
25 from parser_tools import find_re, find_token, find_token_backwards, find_token_exact, find_tokens, find_end_of, get_value
26 from LyX import get_encoding
29 ####################################################################
30 # Private helper functions
def find_end_of_inset(lines, i):
    """Return the index of the line that closes the inset opened at lines[i].

    lines[i] itself belongs to the inset; this simply delegates to the
    generic find_end_of() with the inset begin/end tokens.
    """
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
def find_end_of_layout(lines, i):
    """Return the index of the line that closes the layout opened at lines[i].

    lines[i] itself belongs to the layout; this simply delegates to the
    generic find_end_of() with the layout begin/end tokens.
    """
    return find_end_of(lines, i, "\\begin_layout", "\\end_layout")
40 # End of helper functions
41 ####################################################################
45 # Notes: Framed/Shaded
# Reverts Framed/Shaded note insets back to plain Note insets by rewriting
# the \begin_inset header line in place.
# NOTE(review): this listing is elided (original line numbers skip); the
# loop initialisation and the `i == -1` termination guard around the
# find_tokens() call are not visible here — confirm against full source.
48 def revert_framed(document):
49 "Revert framed notes. "
52 i = find_tokens(document.body, ["\\begin_inset Note Framed", "\\begin_inset Note Shaded"], i)
56 document.body[i] = "\\begin_inset Note"
# Per-\fontscheme font tables used by convert_font_settings() and
# revert_font_settings(): each maps an old single \fontscheme name to the
# font used for one family (roman / sans / typewriter) in the new format.
# NOTE(review): listing elided — the closing entries/braces of roman_fonts
# (original line 68) and sans_fonts (original line 73) are not visible here.
64 roman_fonts = {'default' : 'default', 'ae' : 'ae',
65 'times' : 'times', 'palatino' : 'palatino',
66 'helvet' : 'default', 'avant' : 'default',
67 'newcent' : 'newcent', 'bookman' : 'bookman',
69 sans_fonts = {'default' : 'default', 'ae' : 'default',
70 'times' : 'default', 'palatino' : 'default',
71 'helvet' : 'helvet', 'avant' : 'avant',
72 'newcent' : 'default', 'bookman' : 'default',
74 typewriter_fonts = {'default' : 'default', 'ae' : 'default',
75 'times' : 'default', 'palatino' : 'default',
76 'helvet' : 'default', 'avant' : 'default',
77 'newcent' : 'default', 'bookman' : 'default',
78 'pslatex' : 'courier'}
# Replaces the single `\fontscheme` header line with explicit per-family
# `\font_*` settings looked up in the tables above. Empty or unknown scheme
# names fall back to 'default' with a warning.
# NOTE(review): listing elided (original line numbers skip) — the `i == -1`
# guards after the find/get calls and two of the inserted settings lines
# (original lines 98-99, presumably `\font_sc` / `\font_osf`) are not
# visible here — confirm against full source.
80 def convert_font_settings(document):
81 " Convert font settings. "
83 i = find_token_exact(document.header, "\\fontscheme", i)
85 document.warning("Malformed LyX document: Missing `\\fontscheme'.")
87 font_scheme = get_value(document.header, "\\fontscheme", i, i + 1)
89 document.warning("Malformed LyX document: Empty `\\fontscheme'.")
90 font_scheme = 'default'
91 if not font_scheme in roman_fonts.keys():
92 document.warning("Malformed LyX document: Unknown `\\fontscheme' `%s'." % font_scheme)
93 font_scheme = 'default'
# Replace the \fontscheme line by the expanded \font_* lines in place.
94 document.header[i:i+1] = ['\\font_roman %s' % roman_fonts[font_scheme],
95 '\\font_sans %s' % sans_fonts[font_scheme],
96 '\\font_typewriter %s' % typewriter_fonts[font_scheme],
97 '\\font_default_family default',
100 '\\font_sf_scale 100',
101 '\\font_tt_scale 100']
# Collapses the per-family `\font_*` header settings back into a single
# `\fontscheme` line, deleting each `\font_*` line as it is read.  Settings
# that have no old-format equivalent (non-default family, scales, osf, sc)
# are either emulated via preamble code or dropped with a warning.
# NOTE(review): listing elided (original line numbers skip) — loop guards,
# `i == -1` branches, `insert_line` bookkeeping and several `else`/`break`
# lines are not visible here; lines shown are verbatim.
104 def revert_font_settings(document):
105 " Revert font settings. "
# Collect the current roman/sans/typewriter fonts, removing the header lines.
108 fonts = {'roman' : 'default', 'sans' : 'default', 'typewriter' : 'default'}
109 for family in 'roman', 'sans', 'typewriter':
110 name = '\\font_%s' % family
111 i = find_token_exact(document.header, name, i)
113 document.warning("Malformed LyX document: Missing `%s'." % name)
116 if (insert_line < 0):
118 fonts[family] = get_value(document.header, name, i, i + 1)
119 del document.header[i]
120 i = find_token_exact(document.header, '\\font_default_family', i)
122 document.warning("Malformed LyX document: Missing `\\font_default_family'.")
123 font_default_family = 'default'
125 font_default_family = get_value(document.header, "\\font_default_family", i, i + 1)
126 del document.header[i]
127 i = find_token_exact(document.header, '\\font_sc', i)
129 document.warning("Malformed LyX document: Missing `\\font_sc'.")
132 font_sc = get_value(document.header, '\\font_sc', i, i + 1)
133 del document.header[i]
134 if font_sc != 'false':
135 document.warning("Conversion of '\\font_sc' not yet implemented.")
136 i = find_token_exact(document.header, '\\font_osf', i)
138 document.warning("Malformed LyX document: Missing `\\font_osf'.")
141 font_osf = get_value(document.header, '\\font_osf', i, i + 1)
142 del document.header[i]
143 i = find_token_exact(document.header, '\\font_sf_scale', i)
145 document.warning("Malformed LyX document: Missing `\\font_sf_scale'.")
146 font_sf_scale = '100'
148 font_sf_scale = get_value(document.header, '\\font_sf_scale', i, i + 1)
149 del document.header[i]
150 if font_sf_scale != '100':
151 document.warning("Conversion of '\\font_sf_scale' not yet implemented.")
152 i = find_token_exact(document.header, '\\font_tt_scale', i)
154 document.warning("Malformed LyX document: Missing `\\font_tt_scale'.")
155 font_tt_scale = '100'
157 font_tt_scale = get_value(document.header, '\\font_tt_scale', i, i + 1)
158 del document.header[i]
159 if font_tt_scale != '100':
160 document.warning("Conversion of '\\font_tt_scale' not yet implemented.")
# Try to find a \fontscheme whose table entries match all three families.
161 for font_scheme in roman_fonts.keys():
162 if (roman_fonts[font_scheme] == fonts['roman'] and
163 sans_fonts[font_scheme] == fonts['sans'] and
164 typewriter_fonts[font_scheme] == fonts['typewriter']):
165 document.header.insert(insert_line, '\\fontscheme %s' % font_scheme)
166 if font_default_family != 'default':
167 document.preamble.append('\\renewcommand{\\familydefault}{\\%s}' % font_default_family)
168 if font_osf == 'true':
169 document.warning("Ignoring `\\font_osf = true'")
# No matching scheme: fall back to 'default' and emulate the fonts in the
# LaTeX preamble instead.
171 font_scheme = 'default'
172 document.header.insert(insert_line, '\\fontscheme %s' % font_scheme)
173 if fonts['roman'] == 'cmr':
174 document.preamble.append('\\renewcommand{\\rmdefault}{cmr}')
175 if font_osf == 'true':
176 document.preamble.append('\\usepackage{eco}')
178 for font in 'lmodern', 'charter', 'utopia', 'beraserif', 'ccfonts', 'chancery':
179 if fonts['roman'] == font:
180 document.preamble.append('\\usepackage{%s}' % font)
181 for font in 'cmss', 'lmss', 'cmbr':
182 if fonts['sans'] == font:
183 document.preamble.append('\\renewcommand{\\sfdefault}{%s}' % font)
184 for font in 'berasans':
185 if fonts['sans'] == font:
186 document.preamble.append('\\usepackage{%s}' % font)
187 for font in 'cmtt', 'lmtt', 'cmtl':
188 if fonts['typewriter'] == font:
189 document.preamble.append('\\renewcommand{\\ttdefault}{%s}' % font)
190 for font in 'courier', 'beramono', 'luximono':
191 if fonts['typewriter'] == font:
192 document.preamble.append('\\usepackage{%s}' % font)
193 if font_default_family != 'default':
194 document.preamble.append('\\renewcommand{\\familydefault}{\\%s}' % font_default_family)
195 if font_osf == 'true':
196 document.warning("Ignoring `\\font_osf = true'")
# Strips the booktabs attribute and the extra row-space attributes from
# every Tabular inset, since the old format cannot represent them.
# NOTE(review): listing elided (original line numbers skip) — the scan loop
# header, the `i == -1`/`j == -1` guards and the loop-advance lines are not
# visible here; lines shown are verbatim.
199 def revert_booktabs(document):
200 " We remove the booktabs flag or everything else will become a mess. "
201 re_row = re.compile(r'^<row.*space="[^"]+".*>$')
202 re_tspace = re.compile(r'\s+topspace="[^"]+"')
203 re_bspace = re.compile(r'\s+bottomspace="[^"]+"')
204 re_ispace = re.compile(r'\s+interlinespace="[^"]+"')
207 i = find_token(document.body, "\\begin_inset Tabular", i)
210 j = find_end_of_inset(document.body, i + 1)
212 document.warning("Malformed LyX document: Could not find end of tabular.")
# Scan every line of the tabular inset for the attributes to remove.
214 for k in range(i, j):
215 if re.search('^<features.* booktabs="true".*>$', document.body[k]):
216 document.warning("Converting 'booktabs' table to normal table.")
217 document.body[k] = document.body[k].replace(' booktabs="true"', '')
218 if re.search(re_row, document.body[k]):
219 document.warning("Removing extra row space.")
220 document.body[k] = re_tspace.sub('', document.body[k])
221 document.body[k] = re_bspace.sub('', document.body[k])
222 document.body[k] = re_ispace.sub('', document.body[k])
# Core of the UTF-8 conversion: re-decodes each body line according to the
# encoding implied by the innermost \lang command, tracked with a stack that
# is pushed/popped on \begin_layout / \end_layout.
# NOTE(review): listing elided (original line numbers skip) — parts of the
# docstring, the `if result:` guard, the `else` branches and the
# forward/backward dispatch around the encode/decode pairs are not visible
# here; lines shown are verbatim.
226 def convert_multiencoding(document, forward):
227 """ Fix files with multiple encodings.
228 Files with an inputencoding of "auto" or "default" and multiple languages
229 where at least two languages have different default encodings are encoded
230 in multiple encodings for file formats < 249. These files are incorrectly
231 read and written (as if the whole file was in the encoding of the main
233 This is not true for files written by CJK-LyX, they are always in the locale
237 - converts from fake unicode values to true unicode if forward is true, and
238 - converts from true unicode values to fake unicode if forward is false.
239 document.encoding must be set to the old value (format 248) in both cases.
241 We do this here and not in LyX.py because it is far easier to do the
242 necessary parsing in modern formats than in ancient ones.
# CJK-LyX files are uniformly encoded; nothing to fix for them.
244 if document.cjk_encoding != '':
246 encoding_stack = [document.encoding]
247 lang_re = re.compile(r"^\\lang\s(\S+)")
248 if document.inputencoding == "auto" or document.inputencoding == "default":
249 for i in range(len(document.body)):
250 result = lang_re.match(document.body[i])
252 language = result.group(1)
253 if language == "default":
254 document.warning("Resetting encoding from %s to %s." % (encoding_stack[-1], document.encoding), 3)
255 encoding_stack[-1] = document.encoding
# lang[language][3] is the default encoding of that language.
257 from lyx2lyx_lang import lang
258 document.warning("Setting encoding from %s to %s." % (encoding_stack[-1], lang[language][3]), 3)
259 encoding_stack[-1] = lang[language][3]
260 elif find_token(document.body, "\\begin_layout", i, i + 1) == i:
261 document.warning("Adding nested encoding %s." % encoding_stack[-1], 3)
262 encoding_stack.append(encoding_stack[-1])
263 elif find_token(document.body, "\\end_layout", i, i + 1) == i:
264 document.warning("Removing nested encoding %s." % encoding_stack[-1], 3)
265 if len(encoding_stack) == 1:
266 # Don't remove the document encoding from the stack
267 document.warning("Malformed LyX document: Unexpected `\\end_layout'.")
269 del encoding_stack[-1]
270 if encoding_stack[-1] != document.encoding:
272 # This line has been incorrectly interpreted as if it was
273 # encoded in 'encoding'.
274 # Convert back to the 8bit string that was in the file.
275 orig = document.body[i].encode(document.encoding)
276 # Convert the 8bit string that was in the file to unicode
277 # with the correct encoding.
278 document.body[i] = orig.decode(encoding_stack[-1])
280 # Convert unicode to the 8bit string that will be written
281 # to the file with the correct encoding.
282 orig = document.body[i].encode(encoding_stack[-1])
283 # Convert the 8bit string that will be written to the
284 # file to fake unicode with the encoding that will later
285 # be used when writing to the file.
286 document.body[i] = orig.decode(document.encoding)
def convert_utf8(document):
    """Switch the document encoding to UTF-8.

    First re-decodes the body from the old per-language multi-encoding
    scheme (forward direction of convert_multiencoding), then records
    "utf8" as the document encoding.
    """
    convert_multiencoding(document, True)
    document.encoding = "utf8"
# Reverts the encoding to whatever `\inputencoding` implies: a missing
# setting is appended as "auto", an explicit "utf8" is rewritten to "auto",
# then the body is re-encoded via convert_multiencoding(backward).
# NOTE(review): listing elided — the `if i == -1:` line (original 298)
# preceding the append is not visible here; lines shown are verbatim.
295 def revert_utf8(document):
296 " Set document encoding to the value corresponding to inputencoding. "
297 i = find_token(document.header, "\\inputencoding", 0)
299 document.header.append("\\inputencoding auto")
300 elif get_value(document.header, "\\inputencoding", i) == "utf8":
301 document.header[i] = "\\inputencoding auto"
302 document.inputencoding = get_value(document.header, "\\inputencoding", 0)
# 248 is the old file format whose encoding rules we are reverting to.
303 document.encoding = get_encoding(document.language, document.inputencoding, 248, document.cjk_encoding)
304 convert_multiencoding(document, False)
# Deletes the 'show_label' status line from every CharStyle inset, since
# the old format does not know it.
# NOTE(review): listing elided (original line numbers skip) — the outer
# loop, `i == -1` guard, inner scan loop and the actual deletion line are
# not visible here; lines shown are verbatim.
307 def revert_cs_label(document):
308 " Remove status flag of charstyle label. "
311 i = find_token(document.body, "\\begin_inset CharStyle", i)
314 # Seach for a line starting 'show_label'
315 # If it is not there, break with a warning message
318 if (document.body[i][:10] == "show_label"):
321 elif (document.body[i][:13] == "\\begin_layout"):
322 document.warning("Malformed LyX document: Missing 'show_label'.")
# Converts raw `\bibitem [option]{argument}` body lines into the
# `\begin_inset LatexCommand bibitem` inset form (with quoted label/key
# parameters).  Must run after convert_commandparams.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, `i == -1` guard and parts of the docstring/inset skeleton are not
# visible here; lines shown are verbatim.
329 def convert_bibitem(document):
331 \bibitem [option]{argument}
335 \begin_inset LatexCommand bibitem
341 This must be called after convert_commandparams.
345 i = find_token(document.body, "\\bibitem", i)
# Extract the optional [option] part; j == 0 means '[' was not found.
348 j = document.body[i].find('[') + 1
349 k = document.body[i].rfind(']')
350 if j == 0: # No optional argument found
353 option = document.body[i][j:k]
354 j = document.body[i].rfind('{') + 1
355 k = document.body[i].rfind('}')
356 argument = document.body[i][j:k]
357 lines = ['\\begin_inset LatexCommand bibitem']
# Quotes inside parameter values must be escaped in the inset format.
359 lines.append('label "%s"' % option.replace('"', '\\"'))
360 lines.append('key "%s"' % argument.replace('"', '\\"'))
362 lines.append('\\end_inset')
363 document.body[i:i+1] = lines
# Table driving convert_commandparams()/revert_commandparams(): for each
# LatexCommand inset name, the parameter names used to store its first
# optional argument, second optional argument and mandatory argument.
# An empty string means the command has no such parameter and a supplied
# value is reported as invalid.
367 commandparams_info = {
368 # command : [option1, option2, argument]
369 "bibitem" : ["label", "", "key"],
370 "bibtex" : ["options", "btprint", "bibfiles"],
371 "cite" : ["after", "before", "key"],
372 "citet" : ["after", "before", "key"],
373 "citep" : ["after", "before", "key"],
374 "citealt" : ["after", "before", "key"],
375 "citealp" : ["after", "before", "key"],
376 "citeauthor" : ["after", "before", "key"],
377 "citeyear" : ["after", "before", "key"],
378 "citeyearpar" : ["after", "before", "key"],
379 "citet*" : ["after", "before", "key"],
380 "citep*" : ["after", "before", "key"],
381 "citealt*" : ["after", "before", "key"],
382 "citealp*" : ["after", "before", "key"],
383 "citeauthor*" : ["after", "before", "key"],
384 "Citet" : ["after", "before", "key"],
385 "Citep" : ["after", "before", "key"],
386 "Citealt" : ["after", "before", "key"],
387 "Citealp" : ["after", "before", "key"],
388 "Citeauthor" : ["after", "before", "key"],
389 "Citet*" : ["after", "before", "key"],
390 "Citep*" : ["after", "before", "key"],
391 "Citealt*" : ["after", "before", "key"],
392 "Citealp*" : ["after", "before", "key"],
393 "Citeauthor*" : ["after", "before", "key"],
394 "citefield" : ["after", "before", "key"],
395 "citetitle" : ["after", "before", "key"],
396 "cite*" : ["after", "before", "key"],
397 "hfill" : ["", "", ""],
398 "index" : ["", "", "name"],
399 "printindex" : ["", "", "name"],
400 "label" : ["", "", "name"],
401 "eqref" : ["name", "", "reference"],
402 "pageref" : ["name", "", "reference"],
403 "prettyref" : ["name", "", "reference"],
404 "ref" : ["name", "", "reference"],
405 "vpageref" : ["name", "", "reference"],
406 "vref" : ["name", "", "reference"],
407 "tableofcontents" : ["", "", "type"],
408 "htmlurl" : ["name", "", "target"],
409 "url" : ["name", "", "target"]}
# Rewrites `\begin_inset LatexCommand \cmd[opt1][opt2]{arg}` into the
# key/value parameter form, using commandparams_info for the parameter
# names.  Embeds a small state machine ported from the original C++
# InsetCommandParams::scanCommand.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, guard branches, the per-character scan loop, the state-variable
# initialisation and several state transitions are not visible here;
# lines shown are verbatim.
412 def convert_commandparams(document):
415 \begin_inset LatexCommand \cmdname[opt1][opt2]{arg}
420 \begin_inset LatexCommand cmdname
426 name1, name2 and name3 can be different for each command.
428 # \begin_inset LatexCommand bibitem was not the official version (see
429 # convert_bibitem()), but could be read in, so we convert it here, too.
433 i = find_token(document.body, "\\begin_inset LatexCommand", i)
# Everything after the 26-char prefix is the raw \cmd[...]{...} text.
436 command = document.body[i][26:].strip()
438 document.warning("Malformed LyX document: Missing LatexCommand name.")
442 # The following parser is taken from the original InsetCommandParams::scanCommand
448 # Used to handle things like \command[foo[bar]]{foo{bar}}
# Closing a bracket/brace only ends the state at nesting depth 0.
452 if ((state == "CMDNAME" and c == ' ') or
453 (state == "CMDNAME" and c == '[') or
454 (state == "CMDNAME" and c == '{')):
456 if ((state == "OPTION" and c == ']') or
457 (state == "SECOPTION" and c == ']') or
458 (state == "CONTENT" and c == '}')):
462 nestdepth = nestdepth - 1
463 if ((state == "OPTION" and c == '[') or
464 (state == "SECOPTION" and c == '[') or
465 (state == "CONTENT" and c == '{')):
466 nestdepth = nestdepth + 1
467 if state == "CMDNAME":
469 elif state == "OPTION":
471 elif state == "SECOPTION":
473 elif state == "CONTENT":
# `b` holds the previous delimiter: '[' after ']' starts the second option.
478 elif c == '[' and b != ']':
480 nestdepth = 0 # Just to be sure
481 elif c == '[' and b == ']':
483 nestdepth = 0 # Just to be sure
486 nestdepth = 0 # Just to be sure
489 # Now we have parsed the command, output the parameters
490 lines = ["\\begin_inset LatexCommand %s" % name]
492 if commandparams_info[name][0] == "":
493 document.warning("Ignoring invalid option `%s' of command `%s'." % (option1, name))
495 lines.append('%s "%s"' % (commandparams_info[name][0], option1.replace('"', '\\"')))
497 if commandparams_info[name][1] == "":
498 document.warning("Ignoring invalid second option `%s' of command `%s'." % (option2, name))
500 lines.append('%s "%s"' % (commandparams_info[name][1], option2.replace('"', '\\"')))
502 if commandparams_info[name][2] == "":
503 document.warning("Ignoring invalid argument `%s' of command `%s'." % (argument, name))
505 lines.append('%s "%s"' % (commandparams_info[name][2], argument.replace('"', '\\"')))
506 document.body[i:i+1] = lines
# Inverse of convert_commandparams: reads the key/value parameter lines of
# a LatexCommand inset back into option1/option2/argument (unescaping
# quotes) and re-emits the inline \cmd[opt1][opt2]{arg} form; bibitem is
# emitted as a raw \bibitem body line instead of an inset.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, `i == -1`/`j == -1` guards, variable initialisation and the
# conditional chain selecting which bracket form to emit are not visible
# here; lines shown are verbatim.
510 def revert_commandparams(document):
511 regex = re.compile(r'(\S+)\s+(.+)')
514 i = find_token(document.body, "\\begin_inset LatexCommand", i)
517 name = document.body[i].split()[2]
518 j = find_end_of_inset(document.body, i + 1)
# Collect the parameter lines between the inset begin and end markers.
523 for k in range(i + 1, j):
524 match = re.match(regex, document.body[k])
526 pname = match.group(1)
527 pvalue = match.group(2)
528 if pname == "preview":
529 preview_line = document.body[k]
530 elif (commandparams_info[name][0] != "" and
531 pname == commandparams_info[name][0]):
532 option1 = pvalue.strip('"').replace('\\"', '"')
533 elif (commandparams_info[name][1] != "" and
534 pname == commandparams_info[name][1]):
535 option2 = pvalue.strip('"').replace('\\"', '"')
536 elif (commandparams_info[name][2] != "" and
537 pname == commandparams_info[name][2]):
538 argument = pvalue.strip('"').replace('\\"', '"')
539 elif document.body[k].strip() != "":
540 document.warning("Ignoring unknown contents `%s' in command inset %s." % (document.body[k], name))
541 if name == "bibitem":
543 lines = ["\\bibitem {%s}" % argument]
545 lines = ["\\bibitem [%s]{%s}" % (option1, argument)]
549 lines = ["\\begin_inset LatexCommand \\%s{%s}" % (name, argument)]
551 lines = ["\\begin_inset LatexCommand \\%s[][%s]{%s}" % (name, option2, argument)]
554 lines = ["\\begin_inset LatexCommand \\%s[%s]{%s}" % (name, option1, argument)]
556 lines = ["\\begin_inset LatexCommand \\%s[%s][%s]{%s}" % (name, option1, option2, argument)]
557 if name != "bibitem":
558 if preview_line != "":
559 lines.append(preview_line)
561 lines.append('\\end_inset')
562 document.body[i:j+1] = lines
# Replaces each nomenclature inset by an ERT inset containing the raw
# \nomenclature[prefix]{symbol}{description} command, and loads the nomencl
# package in the preamble once any entry was found.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, guards, variable initialisation, the `use_nomencl` flag setting
# and most of the emitted ERT skeleton are not visible here; lines shown
# are verbatim.
566 def revert_nomenclature(document):
567 " Convert nomenclature entry to ERT. "
568 regex = re.compile(r'(\S+)\s+(.+)')
572 i = find_token(document.body, "\\begin_inset LatexCommand nomenclature", i)
576 j = find_end_of_inset(document.body, i + 1)
# Read symbol / description / prefix parameters (quotes unescaped).
581 for k in range(i + 1, j):
582 match = re.match(regex, document.body[k])
584 name = match.group(1)
585 value = match.group(2)
586 if name == "preview":
587 preview_line = document.body[k]
588 elif name == "symbol":
589 symbol = value.strip('"').replace('\\"', '"')
590 elif name == "description":
591 description = value.strip('"').replace('\\"', '"')
592 elif name == "prefix":
593 prefix = value.strip('"').replace('\\"', '"')
594 elif document.body[k].strip() != "":
595 document.warning("Ignoring unknown contents `%s' in nomenclature inset." % document.body[k])
597 command = 'nomenclature{%s}{%s}' % (symbol, description)
599 command = 'nomenclature[%s]{%s}{%s}' % (prefix, symbol, description)
600 document.body[i:j+1] = ['\\begin_inset ERT',
603 '\\begin_layout %s' % document.default_layout,
# Add the preamble support code only once per document.
612 if use_nomencl and find_token(document.preamble, '\\usepackage{nomencl}[2005/09/22]', 0) == -1:
613 document.preamble.append('\\usepackage{nomencl}[2005/09/22]')
614 document.preamble.append('\\makenomenclature')
# Replaces each printnomenclature inset by an ERT inset containing the raw
# \nomenclature command (with optional labelwidth), mirroring
# revert_nomenclature above, and loads nomencl in the preamble once.
# NOTE(review): listing elided (original line numbers skip) — loop header,
# guards, variable initialisation and most of the emitted ERT skeleton are
# not visible here; lines shown are verbatim.
617 def revert_printnomenclature(document):
618 " Convert printnomenclature to ERT. "
619 regex = re.compile(r'(\S+)\s+(.+)')
623 i = find_token(document.body, "\\begin_inset LatexCommand printnomenclature", i)
627 j = find_end_of_inset(document.body, i + 1)
630 for k in range(i + 1, j):
631 match = re.match(regex, document.body[k])
633 name = match.group(1)
634 value = match.group(2)
635 if name == "preview":
636 preview_line = document.body[k]
637 elif name == "labelwidth":
638 labelwidth = value.strip('"').replace('\\"', '"')
639 elif document.body[k].strip() != "":
640 document.warning("Ignoring unknown contents `%s' in printnomenclature inset." % document.body[k])
642 command = 'nomenclature{}'
644 command = 'nomenclature[%s]' % labelwidth
645 document.body[i:j+1] = ['\\begin_inset ERT',
648 '\\begin_layout %s' % document.default_layout,
657 if use_nomencl and find_token(document.preamble, '\\usepackage{nomencl}[2005/09/22]', 0) == -1:
658 document.preamble.append('\\usepackage{nomencl}[2005/09/22]')
659 document.preamble.append('\\makenomenclature')
# Inserts the new `\use_esint 0` header setting, positioned at the
# `\cite_engine` line.
# NOTE(review): listing elided — the `i == -1` guard/return (original
# lines 665, 667) are not visible here; lines shown are verbatim.
662 def convert_esint(document):
663 " Add \\use_esint setting to header. "
664 i = find_token(document.header, "\\cite_engine", 0)
666 document.warning("Malformed LyX document: Missing `\\cite_engine'.")
668 # 0 is off, 1 is auto, 2 is on.
669 document.header.insert(i, '\\use_esint 0')
# Removes the `\use_esint` header setting; when esint was enabled, loads
# the esint package in the preamble instead.
# NOTE(review): listing elided — the `i == -1` return and the
# `if use_esint != '0':` style guard before the preamble append are not
# visible here; lines shown are verbatim.
672 def revert_esint(document):
673 " Remove \\use_esint setting from header. "
674 i = find_token(document.header, "\\use_esint", 0)
676 document.warning("Malformed LyX document: Missing `\\use_esint'.")
678 use_esint = document.header[i].split()[1]
679 del document.header[i]
680 # 0 is off, 1 is auto, 2 is on.
682 document.preamble.append('\\usepackage{esint}')
# Replaces each `\clearpage` body line with an ERT inset containing the raw
# LaTeX command.
# NOTE(review): listing elided (original line numbers skip) — docstring,
# loop header, guard and most of the emitted ERT skeleton are not visible
# here; lines shown are verbatim.
685 def revert_clearpage(document):
689 i = find_token(document.body, "\\clearpage", i)
692 document.body[i:i+1] = ['\\begin_inset ERT',
695 '\\begin_layout %s' % document.default_layout,
# Replaces each `\cleardoublepage` body line with an ERT inset containing
# the raw LaTeX command (parallel to revert_clearpage).
# NOTE(review): listing elided (original line numbers skip) — loop header,
# guard and most of the emitted ERT skeleton are not visible here; lines
# shown are verbatim.
706 def revert_cleardoublepage(document):
707 " cleardoublepage -> ERT "
710 i = find_token(document.body, "\\cleardoublepage", i)
713 document.body[i:i+1] = ['\\begin_inset ERT',
716 '\\begin_layout %s' % document.default_layout,
# Deletes the \size command that immediately precedes a \lyxline, because
# the new \lyxline definition derives line thickness from the font size and
# keeping the old size command would change the output.
# NOTE(review): listing elided (original line numbers skip) — the per-size
# loop initialisation and the loop-advance/else lines are not visible here;
# lines shown are verbatim.
727 def convert_lyxline(document):
728 " remove fontsize commands for \lyxline "
729 # The problematic is: The old \lyxline definition doesn't handle the fontsize
730 # to change the line thickness. The new definiton does this so that imported
731 # \lyxlines would have a different line thickness. The eventual fontsize command
732 # before \lyxline is therefore removed to get the same output.
733 fontsizes = ["tiny", "scriptsize", "footnotesize", "small", "normalsize",
734 "large", "Large", "LARGE", "huge", "Huge"]
735 for n in range(0, len(fontsizes)):
738 while i < len(document.body):
739 i = find_token(document.body, "\\size " + fontsizes[n], i)
740 k = find_token(document.body, "\\lyxline", i)
741 # the corresponding fontsize command is always 2 lines before the \lyxline
742 if (i != -1 and k == i+2):
743 document.body[i:i+1] = []
# Replaces input encodings unknown to the old format with "auto" (appending
# the setting if it is missing entirely) and updates document.inputencoding
# accordingly.
# NOTE(review): listing elided — the `if i == -1:` / `else:` lines
# (original 755, 757) around the append are not visible here; lines shown
# are verbatim.
749 def revert_encodings(document):
750 " Set new encodings to auto. "
751 encodings = ["8859-6", "8859-8", "cp437", "cp437de", "cp850", "cp852",
752 "cp855", "cp858", "cp862", "cp865", "cp866", "cp1250",
753 "cp1252", "cp1256", "cp1257", "latin10", "pt254", "tis620-0"]
754 i = find_token(document.header, "\\inputencoding", 0)
756 document.header.append("\\inputencoding auto")
758 inputenc = get_value(document.header, "\\inputencoding", i)
759 if inputenc in encodings:
760 document.header[i] = "\\inputencoding auto"
761 document.inputencoding = get_value(document.header, "\\inputencoding", 0)
# Converts each `\begin_layout Caption` paragraph into a Caption inset
# wrapped in a default-layout paragraph.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, `i == -1` guard and loop advance are not visible here; lines
# shown are verbatim.
764 def convert_caption(document):
765 " Convert caption layouts to caption insets. "
768 i = find_token(document.body, "\\begin_layout Caption", i)
771 j = find_end_of_layout(document.body, i)
773 document.warning("Malformed LyX document: Missing `\\end_layout'.")
# Close the inset at the old layout end, then rewrite the layout start as
# a default-layout paragraph containing the Caption inset.
776 document.body[j:j] = ["\\end_layout", "", "\\end_inset", "", ""]
777 document.body[i:i+1] = ["\\begin_layout %s" % document.default_layout,
778 "\\begin_inset Caption", "",
779 "\\begin_layout %s" % document.default_layout]
# Converts Caption insets back to `\begin_layout Caption` paragraphs,
# carefully splicing the inset out of its surrounding paragraph: the
# enclosing layout is either deleted (inset at paragraph start/end) or
# ended before / restarted after the inset.
# NOTE(review): listing elided (original line numbers skip) — loop header,
# several guards, `break`s and loop-advance lines are not visible here;
# lines shown are verbatim.
783 def revert_caption(document):
784 " Convert caption insets to caption layouts. "
785 " This assumes that the text class has a caption style. "
788 i = find_token(document.body, "\\begin_inset Caption", i)
792 # We either need to delete the previous \begin_layout line, or we
793 # need to end the previous layout if this inset is not in the first
794 # position of the paragraph.
795 layout_before = find_token_backwards(document.body, "\\begin_layout", i)
796 if layout_before == -1:
797 document.warning("Malformed LyX document: Missing `\\begin_layout'.")
799 layout_line = document.body[layout_before]
800 del_layout_before = True
801 l = layout_before + 1
# Any non-empty line between the layout start and the inset means the
# inset is not at the paragraph start, so the layout must be kept.
803 if document.body[l] != "":
804 del_layout_before = False
807 if del_layout_before:
808 del document.body[layout_before:i]
811 document.body[i:i] = ["\\end_layout", ""]
814 # Find start of layout in the inset and end of inset
815 j = find_token(document.body, "\\begin_layout", i)
817 document.warning("Malformed LyX document: Missing `\\begin_layout'.")
819 k = find_end_of_inset(document.body, i)
821 document.warning("Malformed LyX document: Missing `\\end_inset'.")
824 # We either need to delete the following \end_layout line, or we need
825 # to restart the old layout if this inset is not at the paragraph end.
826 layout_after = find_token(document.body, "\\end_layout", k)
827 if layout_after == -1:
828 document.warning("Malformed LyX document: Missing `\\end_layout'.")
830 del_layout_after = True
832 while l < layout_after:
833 if document.body[l] != "":
834 del_layout_after = False
838 del document.body[k+1:layout_after+1]
840 document.body[k+1:k+1] = [layout_line, ""]
842 # delete \begin_layout and \end_inset and replace \begin_inset with
843 # "\begin_layout Caption". This works because we can only have one
844 # paragraph in the caption inset: The old \end_layout will be recycled.
846 if document.body[k] == "":
849 if document.body[j] == "":
851 document.body[i] = "\\begin_layout Caption"
852 if document.body[i+1] == "":
853 del document.body[i+1]
# Lookup tables used by _convert_accent()/convert_accent()/revert_accent():
# - accent_map: InsetLaTeXAccent accent letter -> combining Unicode char
# - special_accent_map: argument-less special accents -> precomposed char
# - accented_map: special accent arguments (\i, \j) -> precomposed char
# NOTE(review): listing elided — the `accent_map = {` and `accented_map = {`
# assignment lines (original 858 and 890) and the closing braces are not
# visible here; lines shown are verbatim.
857 # Accents of InsetLaTeXAccent
859 "`" : u'\u0300', # grave
860 "'" : u'\u0301', # acute
861 "^" : u'\u0302', # circumflex
862 "~" : u'\u0303', # tilde
863 "=" : u'\u0304', # macron
864 "u" : u'\u0306', # breve
865 "." : u'\u0307', # dot above
866 "\"": u'\u0308', # diaresis
867 "r" : u'\u030a', # ring above
868 "H" : u'\u030b', # double acute
869 "v" : u'\u030c', # caron
870 "b" : u'\u0320', # minus sign below
871 "d" : u'\u0323', # dot below
872 "c" : u'\u0327', # cedilla
873 "k" : u'\u0328', # ogonek
874 "t" : u'\u0361' # tie. This is special: It spans two characters, but
875 # only one is given as argument, so we don't need to
876 # treat it differently.
880 # special accents of InsetLaTeXAccent without argument
881 special_accent_map = {
882 'i' : u'\u0131', # dotless i
883 'j' : u'\u0237', # dotless j
884 'l' : u'\u0142', # l with stroke
885 'L' : u'\u0141' # L with stroke
889 # special accent arguments of InsetLaTeXAccent
891 '\\i' : u'\u0131', # dotless i
892 '\\j' : u'\u0237' # dotless j
# Maps one InsetLaTeXAccent (accent letter + accented char) to its Unicode
# equivalent by composing the base char with the combining accent and
# normalizing; special argument-less accents are looked up directly.
# NOTE(review): listing elided (original line numbers skip) — the
# `type`/`char` variable setup, the missing-char branch and the failure
# return path are not visible here; lines shown are verbatim.
896 def _convert_accent(accent, accented_char):
900 if type in special_accent_map:
901 return special_accent_map[type]
# a missing char is treated as space by LyX
904 elif type == 'q' and char in ['t', 'd', 'l', 'L']:
905 # Special caron, only used with t, d, l and L.
906 # It is not in the map because we convert it to the same unicode
907 # character as the normal caron: \q{} is only defined if babel with
908 # the czech or slovak language is used, and the normal caron
909 # produces the correct output if the T1 font encoding is used.
910 # For the same reason we never convert to \q{} in the other direction.
912 elif char in accented_map:
913 char = accented_map[char]
914 elif (len(char) > 1):
915 # We can only convert accents on a single char
917 a = accent_map.get(type)
# Compose base char + combining accent into the precomposed form.
919 return unicodedata.normalize("NFKC", "%s%s" % (char, a))
# Appends ERT text to body[i], expanding backslashes to `\backslash ` and
# newlines to an end-layout/begin-layout pair; returns the (possibly
# advanced) line index.
# NOTE(review): listing elided (original line numbers skip) — the
# per-character loop, branch conditions and index advancement are not
# visible here; lines shown are verbatim.
923 def convert_ertbackslash(body, i, ert, default_layout):
924 r""" -------------------------------------------------------------------------------------------
925 Convert backslashes and '\n' into valid ERT code, append the converted
926 text to body[i] and return the (maybe incremented) line index i"""
930 body[i] = body[i] + '\\backslash '
934 body[i+1:i+1] = ['\\end_layout', '', '\\begin_layout %s' % default_layout, '']
937 body[i] = body[i] + c
# Replaces InsetLaTeXAccent markup (`\i \"{a}` and tolerated variants)
# with the corresponding Unicode character; insets that cannot be mapped
# are converted to ERT containing the raw LaTeX accent command.
# NOTE(review): listing elided (original line numbers skip) — the loop
# header, `i == -1` guard, the unmatched-contents fallbacks and parts of
# the emitted ERT skeleton are not visible here; lines shown are verbatim.
941 def convert_accent(document):
942 # The following forms are supported by LyX:
943 # '\i \"{a}' (standard form, as written by LyX)
944 # '\i \"{}' (standard form, as written by LyX if the accented char is a space)
945 # '\i \"{ }' (also accepted if the accented char is a space)
946 # '\i \" a' (also accepted)
947 # '\i \"' (also accepted)
948 re_wholeinset = re.compile(r'^(.*)(\\i\s+)(.*)$')
949 re_contents = re.compile(r'^([^\s{]+)(.*)$')
950 re_accentedcontents = re.compile(r'^\s*{?([^{}]*)}?\s*$')
953 i = find_re(document.body, re_wholeinset, i)
956 match = re_wholeinset.match(document.body[i])
957 prefix = match.group(1)
958 contents = match.group(3).strip()
959 match = re_contents.match(contents)
961 # Strip first char (always \)
962 accent = match.group(1)[1:]
963 accented_contents = match.group(2).strip()
964 match = re_accentedcontents.match(accented_contents)
965 accented_char = match.group(1)
966 converted = _convert_accent(accent, accented_char)
968 contents = '%s{%s}' % (accent, accented_char),
971 document.body[i] = '%s%s' % (prefix, converted)
# Unknown accent: keep the raw command as ERT instead of dropping it.
974 document.warning("Converting unknown InsetLaTeXAccent `\\i %s' to ERT." % contents)
975 document.body[i] = prefix
976 document.body[i+1:i+1] = ['\\begin_inset ERT',
979 '\\begin_layout %s' % document.default_layout,
983 i = convert_ertbackslash(document.body, i + 7,
985 document.default_layout)
986 document.body[i+1:i+1] = ['\\end_layout',
# Inverse of convert_accent: re-joins words split across lines, decomposes
# the body to NFKD, replaces accented characters that the target 8-bit
# encoding cannot represent with InsetLaTeXAccent markup, and recomposes
# the rest back to NFKC.  Tracks the per-line encoding with the same
# \lang / \begin_layout / \end_layout stack as convert_multiencoding.
# NOTE(review): listing elided (original line numbers skip) — `continue`s,
# `try:` lines, `if result:` guards and several `else` branches are not
# visible here; lines shown are verbatim.
992 def revert_accent(document):
# Build the reverse lookups of the module-level accent tables.
993 inverse_accent_map = {}
995 inverse_accent_map[accent_map[k]] = k
996 inverse_special_accent_map = {}
997 for k in special_accent_map:
998 inverse_special_accent_map[special_accent_map[k]] = k
999 inverse_accented_map = {}
1000 for k in accented_map:
1001 inverse_accented_map[accented_map[k]] = k
1003 # Since LyX may insert a line break within a word we must combine all
1004 # words before unicode normalization.
1005 # We do this only if the next line starts with an accent, otherwise we
1006 # would create things like '\begin_inset ERTstatus'.
1007 numberoflines = len(document.body)
1008 for i in range(numberoflines-1):
1009 if document.body[i] == '' or document.body[i+1] == '' or document.body[i][-1] == ' ':
1011 if (document.body[i+1][0] in inverse_accent_map):
1012 # the last character of this line and the first of the next line
1013 # form probably a surrogate pair.
1014 while (len(document.body[i+1]) > 0 and document.body[i+1][0] != ' '):
1015 document.body[i] += document.body[i+1][0]
1016 document.body[i+1] = document.body[i+1][1:]
1018 # Normalize to "Normal form D" (NFD, also known as canonical decomposition).
1019 # This is needed to catch all accented characters.
1020 for i in range(numberoflines):
1021 # Unfortunately we have a mixture of unicode strings and plain strings,
1022 # because we never use u'xxx' for string literals, but 'xxx'.
1023 # Therefore we may have to try two times to normalize the data.
1025 document.body[i] = unicodedata.normalize("NFKD", document.body[i])
1027 document.body[i] = unicodedata.normalize("NFKD", unicode(document.body[i], 'utf-8'))
1029 # Replace accented characters with InsetLaTeXAccent
1030 # Do not convert characters that can be represented in the chosen
1032 encoding_stack = [get_encoding(document.language, document.inputencoding, 248, document.cjk_encoding)]
1033 lang_re = re.compile(r"^\\lang\s(\S+)")
1034 for i in range(len(document.body)):
1036 if (document.inputencoding == "auto" or document.inputencoding == "default") and document.cjk_encoding != '':
1037 # Track the encoding of the current line
1038 result = lang_re.match(document.body[i])
1040 language = result.group(1)
1041 if language == "default":
1042 encoding_stack[-1] = document.encoding
1044 from lyx2lyx_lang import lang
1045 encoding_stack[-1] = lang[language][3]
1047 elif find_token(document.body, "\\begin_layout", i, i + 1) == i:
1048 encoding_stack.append(encoding_stack[-1])
1050 elif find_token(document.body, "\\end_layout", i, i + 1) == i:
1051 del encoding_stack[-1]
1054 for j in range(len(document.body[i])):
1055 # dotless i and dotless j are both in special_accent_map and can
1056 # occur as an accented character, so we need to test that the
1057 # following character is no accent
1058 if (document.body[i][j] in inverse_special_accent_map and
1059 (j == len(document.body[i]) - 1 or document.body[i][j+1] not in inverse_accent_map)):
1060 accent = document.body[i][j]
# encode() probes whether the target encoding can represent the char;
# only on UnicodeEncodeError is the inset markup substituted.
1062 dummy = accent.encode(encoding_stack[-1])
1063 except UnicodeEncodeError:
1064 # Insert the rest of the line as new line
1065 if j < len(document.body[i]) - 1:
1066 document.body[i+1:i+1] = document.body[i][j+1:]
1067 # Delete the accented character
1069 document.body[i] = document.body[i][:j-1]
1071 document.body[i] = u''
1072 # Finally add the InsetLaTeXAccent
1073 document.body[i] += "\\i \\%s{}" % inverse_special_accent_map[accent]
1075 elif j > 0 and document.body[i][j] in inverse_accent_map:
1076 accented_char = document.body[i][j-1]
1077 if accented_char == ' ':
1078 # Conform to LyX output
1080 elif accented_char in inverse_accented_map:
1081 accented_char = inverse_accented_map[accented_char]
1082 accent = document.body[i][j]
1084 dummy = unicodedata.normalize("NFKC", accented_char + accent).encode(encoding_stack[-1])
1085 except UnicodeEncodeError:
1086 # Insert the rest of the line as new line
1087 if j < len(document.body[i]) - 1:
1088 document.body[i+1:i+1] = document.body[i][j+1:]
1089 # Delete the accented characters
1091 document.body[i] = document.body[i][:j-2]
1093 document.body[i] = u''
1094 # Finally add the InsetLaTeXAccent
1095 document.body[i] += "\\i \\%s{%s}" % (inverse_accent_map[accent], accented_char)
1097 # Normalize to "Normal form C" (NFC, pre-composed characters) again
1098 for i in range(numberoflines):
1099 document.body[i] = unicodedata.normalize("NFKC", document.body[i])
def normalize_font_whitespace(document):
    """ Before format 259 the font changes were ignored if a
    whitespace was the first or last character in the sequence, this function
    transfers the whitespace outside."""

    # Only the LaTeX backend is affected by the whitespace problem.
    # NOTE(review): the body of this guard (presumably a plain 'return') is
    # not visible in this excerpt of the file — confirm against the full file.
    if document.backend != "latex":

    lines = document.body

    # Default value of each tracked font property; a body line such as
    # "\series default" resets that property, any other value sets it.
    # NOTE(review): additional properties (e.g. \color, \bar) may be elided
    # from this excerpt.
    char_properties = {"\\series": "default",
                       "\\emph": "default",
                       "\\shape": "default",
                       "\\family": "default"}

    # 'changes' maps property name -> currently active non-default value.
    # NOTE(review): the initialisation of 'changes' and of the loop index 'i'
    # is not visible in this excerpt.
    while i < len(lines):
        words = lines[i].split()

        if len(words) > 0 and words[0] == "\\begin_layout":
            # a new paragraph resets all font changes

        elif len(words) > 1 and words[0] in char_properties.keys():
            # we have a font change
            if char_properties[words[0]] == words[1]:
                # property gets reset
                if words[0] in changes.keys():
                    del changes[words[0]]
                defaultproperty = True
                # NOTE(review): an 'else:' branch (property gets set) appears
                # to be elided before the next two lines.
                changes[words[0]] = words[1]
                defaultproperty = False

            # We need to explicitly reset all changed properties if we find
            # a space below, because LyX 1.4 would output the space after
            # closing the previous change and before starting the new one,
            # and closing a font change means to close all properties, not
            # just the changed one.
            if lines[i-1] and lines[i-1][-1] == " ":
                lines[i-1] = lines[i-1][:-1]
                # a space before the font change
                # NOTE(review): initialisation of 'added_lines' and the
                # 'if k != words[0]:' filters appear elided in this excerpt.
                for k in changes.keys():
                    # exclude property k because that is already in lines[i]
                    added_lines[1:1] = ["%s %s" % (k, changes[k])]
                for k in changes.keys():
                    # exclude property k because that must be added below anyway
                    added_lines[0:0] = ["%s %s" % (k, char_properties[k])]
                # Property is reset in lines[i], so add the new stuff afterwards
                lines[i+1:i+1] = added_lines
                # Reset property for the space
                added_lines[0:0] = ["%s %s" % (words[0], char_properties[words[0]])]
                lines[i:i] = added_lines
                i = i + len(added_lines)

            elif lines[i+1] and lines[i+1][0] == " " and (len(changes) > 0 or not defaultproperty):
                # a space after the font change
                if (lines[i+1] == " " and lines[i+2]):
                    next_words = lines[i+2].split()
                    if len(next_words) > 0 and next_words[0] == words[0]:
                        # a single blank with a property different from the
                        # previous and the next line must not be changed
                # strip the leading space; it is re-emitted via added_lines
                lines[i+1] = lines[i+1][1:]
                for k in changes.keys():
                    # exclude property k because that is already in lines[i]
                    added_lines[1:1] = ["%s %s" % (k, changes[k])]
                for k in changes.keys():
                    # exclude property k because that must be added below anyway
                    added_lines[0:0] = ["%s %s" % (k, char_properties[k])]
                # Reset property for the space
                added_lines[0:0] = ["%s %s" % (words[0], char_properties[words[0]])]
                lines[i:i] = added_lines
                i = i + len(added_lines)
def revert_utf8x(document):
    """ Set utf8x encoding to utf8.

    LyX 1.5 knows the "utf8x" flavour of the utf8 input encoding; older
    versions only understand plain "utf8", so the header line is rewritten.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No encoding line at all: add the default so the header is complete.
        document.header.append("\\inputencoding auto")
    else:
        inputenc = get_value(document.header, "\\inputencoding", i)
        if inputenc == "utf8x":
            document.header[i] = "\\inputencoding utf8"
    # Keep the in-memory document attribute in sync with the header.
    document.inputencoding = get_value(document.header, "\\inputencoding", 0)
def revert_utf8plain(document):
    """ Set utf8-plain encoding to utf8.

    "utf8-plain" is not understood by older versions; fall back to "utf8".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No encoding line at all: add the default so the header is complete.
        document.header.append("\\inputencoding auto")
    else:
        inputenc = get_value(document.header, "\\inputencoding", i)
        if inputenc == "utf8-plain":
            document.header[i] = "\\inputencoding utf8"
    # Keep the in-memory document attribute in sync with the header.
    document.inputencoding = get_value(document.header, "\\inputencoding", 0)
def revert_beamer_alert(document):
    " Revert beamer's \\alert inset back to ERT. "
    # NOTE(review): the enclosing search loop (initialisation of 'i', the
    # exit when find_token returns -1, and the index increments) is not
    # visible in this excerpt of the file.
    i = find_token(document.body, "\\begin_inset CharStyle Alert", i)
    # Replace the CharStyle inset header with a plain ERT inset header.
    document.body[i] = "\\begin_inset ERT"
    if (document.body[i][:13] == "\\begin_layout"):
        # Insert the \alert command
        document.body[i + 1] = "\\alert{" + document.body[i + 1] + '}'
def revert_beamer_structure(document):
    " Revert beamer's \\structure inset back to ERT. "
    # NOTE(review): the enclosing search loop (initialisation of 'i', the
    # exit when find_token returns -1, and the index increments) is not
    # visible in this excerpt of the file.
    i = find_token(document.body, "\\begin_inset CharStyle Structure", i)
    # Replace the CharStyle inset header with a plain ERT inset header.
    document.body[i] = "\\begin_inset ERT"
    if (document.body[i][:13] == "\\begin_layout"):
        # Wrap the inset content in the raw \structure command.
        document.body[i + 1] = "\\structure{" + document.body[i + 1] + '}'
def convert_changes(document):
    """ Switch output_changes off if tracking_changes is off.

    LyX 1.5 would otherwise emit change markup for a document that is not
    tracking changes at all. Warns and bails out on malformed headers.
    """
    i = find_token(document.header, '\\tracking_changes', 0)
    if i == -1:
        document.warning("Malformed lyx document: Missing '\\tracking_changes'.")
        return
    j = find_token(document.header, '\\output_changes', 0)
    if j == -1:
        document.warning("Malformed lyx document: Missing '\\output_changes'.")
        return
    tracking_changes = get_value(document.header, "\\tracking_changes", i)
    output_changes = get_value(document.header, "\\output_changes", j)
    # Output of changes without tracking them makes no sense: turn it off.
    if tracking_changes == "false" and output_changes == "true":
        document.header[j] = "\\output_changes false"
def revert_ascii(document):
    """ Set ascii encoding to auto.

    The explicit "ascii" input encoding is new in format 261; older versions
    expect "auto" instead.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No encoding line at all: add the default so the header is complete.
        document.header.append("\\inputencoding auto")
    else:
        inputenc = get_value(document.header, "\\inputencoding", i)
        if inputenc == "ascii":
            document.header[i] = "\\inputencoding auto"
    # Keep the in-memory document attribute in sync with the header.
    document.inputencoding = get_value(document.header, "\\inputencoding", 0)
def normalize_language_name(document):
    " Rename old-style language names ('brazil', 'portuges') to modern ones. "
    renames = {"brazil": "brazilian", "portuges": "portuguese"}
    new_name = renames.get(document.language)
    if new_name is not None:
        document.language = new_name
        pos = find_token(document.header, "\\language", 0)
        document.header[pos] = "\\language %s" % new_name
def revert_language_name(document):
    " Rename modern language names back to their old spellings. "
    renames = {"brazilian": "brazil", "portuguese": "portuges"}
    old_name = renames.get(document.language)
    if old_name is not None:
        document.language = old_name
        pos = find_token(document.header, "\\language", 0)
        document.header[pos] = "\\language %s" % old_name
# \textclass cv -> \textclass simplecv
def convert_cv_textclass(document):
    " Replace the retired 'cv' text class with its successor 'simplecv'. "
    document.textclass = {"cv": "simplecv"}.get(document.textclass,
                                                document.textclass)
def revert_cv_textclass(document):
    " Map the 'simplecv' text class back to its old name 'cv'. "
    document.textclass = {"simplecv": "cv"}.get(document.textclass,
                                                document.textclass)
def convert_tableborder(document):
    # The problematic is: LyX double the table cell border as it ignores the "|" character in
    # the cell arguments. A fix takes care of this and therefore the "|" has to be removed
    i = 0
    while i < len(document.body):
        h = document.body[i].find("leftline=\"true\"", 0, len(document.body[i]))
        k = document.body[i].find("|>{", 0, len(document.body[i]))
        # the two tokens have to be in one line
        if (h != -1 and k != -1):
            # delete the "|" (the slice also drops the line's final
            # character, matching the historical converter behaviour)
            document.body[i] = document.body[i][:k] + document.body[i][k+1:len(document.body[i])-1]
        i = i + 1
def revert_tableborder(document):
    " Re-insert the explicit \"|\" before a bordered table cell's special argument. "
    i = 0
    while i < len(document.body):
        h = document.body[i].find("leftline=\"true\"", 0, len(document.body[i]))
        k = document.body[i].find(">{", 0, len(document.body[i]))
        # the two tokens have to be in one line
        if (h != -1 and k != -1):
            # add the "|"
            document.body[i] = document.body[i][:k] + '|' + document.body[i][k:]
        i = i + 1
def revert_armenian(document):
    # Revert Armenian support: armscii8 encoding -> auto, add armtex to the
    # preamble, and fall back to English as the document language.

    # set inputencoding from armscii8 to auto
    if document.inputencoding == "armscii8":
        i = find_token(document.header, "\\inputencoding", 0)
        # NOTE(review): a guard on 'i != -1' appears to be elided here.
        document.header[i] = "\\inputencoding auto"
    # check if preamble exists, if not k is set to -1
    # NOTE(review): the initialisation of the loop index and 'k', the loop
    # increments, and the break conditions are not visible in this excerpt.
    while i < len(document.preamble):
        k = document.preamble[i].find("\\", 0, len(document.preamble[i]))
        k = document.preamble[i].find("%", 0, len(document.preamble[i]))
    # add the entry \usepackage{armtex} to the document preamble
    if document.language == "armenian":
        # set the armtex entry as the first preamble line
        # NOTE(review): the 'if k != -1:'/'else:' branching around the two
        # insertion styles appears to be elided here.
        document.preamble[0:0] = ["\\usepackage{armtex}"]
        # create the preamble when it doesn't exist
        document.preamble.append('\\usepackage{armtex}')
    # Set document language from armenian to english
    if document.language == "armenian":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        # NOTE(review): a guard on 'i != -1' appears to be elided here.
        document.header[i] = "\\language english"
def revert_CJK(document):
    " Set CJK encodings to default and languages chinese, japanese and korean to english. "
    # Input encodings that only exist for CJK support in format 267+.
    encodings = ["Bg5", "Bg5+", "GB", "GBt", "GBK", "JIS",
                 "KS", "SJIS", "UTF8", "EUC-TW", "EUC-JP"]
    i = find_token(document.header, "\\inputencoding", 0)
    if i == -1:
        # No encoding line at all: add the default so the header is complete.
        document.header.append("\\inputencoding auto")
    else:
        inputenc = get_value(document.header, "\\inputencoding", i)
        if inputenc in encodings:
            document.header[i] = "\\inputencoding default"
    # Keep the in-memory document attribute in sync with the header.
    document.inputencoding = get_value(document.header, "\\inputencoding", 0)

    if document.language == "chinese-simplified" or \
       document.language == "chinese-traditional" or \
       document.language == "japanese" or document.language == "korean":
        document.language = "english"
        i = find_token(document.header, "\\language", 0)
        if i != -1:
            document.header[i] = "\\language english"
def revert_preamble_listings_params(document):
    r""" Revert preamble option \listings_params.

    Moves the header's \listings_params value into the LaTeX preamble as a
    \lstset{...} call (loading the listings package), then drops the header
    line. Does nothing when the header has no \listings_params.
    """
    i = find_token(document.header, "\\listings_params", 0)
    if i != -1:
        document.preamble.append('\\usepackage{listings}')
        # The header stores the options quoted; strip the quotes for \lstset.
        document.preamble.append('\\lstset{%s}' % document.header[i].split()[1].strip('"'))
        document.header.pop(i)
def revert_listings_inset(document):
    r''' Revert a listings inset either to an inline \lstinline command or to
    a \begin{lstlisting}...\end{lstlisting} block, both wrapped in an ERT
    inset.  An inset parameter line such as

        lstparams "language=Delphi"

    is translated into listings package options, so that an inline inset with
    content "var i = 10;" inside its

        \begin_layout Standard

    becomes

        lstinline[language=Delphi]{var i = 10;}

    There can be a caption inset in this inset, of the shape

        \begin_layout Standard
        \begin_inset Caption
        \begin_layout Standard
        \begin_inset LatexCommand label

    whose text and label are folded into caption={...} / label={...} options.
    '''
    # NOTE(review): the enclosing search loop, several 'if ... == -1' guards,
    # 'else:' branches and list tails are not visible in this excerpt; the
    # code below is kept verbatim.
    i = find_token(document.body, '\\begin_inset listings', i)
    # the reverted ERT needs the listings package in the preamble
    if not '\\usepackage{listings}' in document.preamble:
        document.preamble.append('\\usepackage{listings}')
    j = find_end_of_inset(document.body, i + 1)
        # this should not happen
    # scan the first inset lines for the inline/lstparams/status parameters
    for line in range(i + 1, i + 4):
        if document.body[line].startswith('inline'):
            inline = document.body[line].split()[1]
        if document.body[line].startswith('lstparams'):
            params = document.body[line].split()[1].strip('"')
        if document.body[line].startswith('status'):
            status = document.body[line].split()[1].strip()
    # pick up an optional caption inset and its label
    cap = find_token(document.body, '\\begin_inset Caption', i)
        cap_end = find_end_of_inset(document.body, cap + 1)
            # this should not happen
        lbl = find_token(document.body, '\\begin_inset LatexCommand label', cap + 1)
            lbl_end = find_end_of_inset(document.body, lbl + 1)
                # this should not happen
            for line in document.body[lbl : lbl_end + 1]:
                if line.startswith('name '):
                    label = line.split()[1].strip('"')
        # caption text is every non-markup line around the label inset
        for line in document.body[cap : lbl ] + document.body[lbl_end + 1 : cap_end + 1]:
            if not line.startswith('\\'):
                caption += line.strip()
    # looking for the oneline code for lstinline
    inlinecode = document.body[find_end_of_layout(document.body,
        find_token(document.body, '\\begin_layout Standard', i + 1) +1 ) - 1]
    # fold caption and label into the listings parameter string
    if len(caption) > 0:
        if len(params) == 0:
            params = 'caption={%s}' % caption
            params += ',caption={%s}' % caption
        if len(params) == 0:
            params = 'label={%s}' % label
            params += ',label={%s}' % label
        params = '[%s]' % params
    # backslashes must be spelled \backslash inside an ERT inset
    params = params.replace('\\', '\\backslash\n')
    if inline == 'true':
        document.body[i:(j+1)] = [r'\begin_inset ERT',
                                  'status %s' % status,
                                  r'\begin_layout Standard',
                                  'lstinline%s{%s}' % (params, inlinecode),
        document.body[i: j+1] = [r'\begin_inset ERT',
                                 'status %s' % status,
                                 r'\begin_layout Standard',
                                 r'begin{lstlisting}%s' % params,
                                 ] + document.body[k : j - 1] + \
                                 r'\begin_layout Standard',
def revert_include_listings(document):
    r''' Revert the lstinputlisting Include option, translating

        \begin_inset Include \lstinputlisting{file}[opt]

    into an ERT inset whose

        \begin_layout Standard

    contains the raw command

        lstinputlisting{file}[opt]
    '''
    # NOTE(review): the enclosing search loop, the 'i == -1'/'j == -1'
    # guards and the tail of the replacement list are not visible in this
    # excerpt; the code below is kept verbatim.
    i = find_token(document.body, r'\begin_inset Include \lstinputlisting', i)
    # the reverted ERT needs the listings package in the preamble
    if not '\\usepackage{listings}' in document.preamble:
        document.preamble.append('\\usepackage{listings}')
    j = find_end_of_inset(document.body, i + 1)
        # this should not happen
    # find command line lstinputlisting{file}[options]
    cmd, file, option = '', '', ''
    if re.match(r'\\(lstinputlisting){([.\w]*)}(.*)', document.body[i].split()[2]):
        cmd, file, option = re.match(r'\\(lstinputlisting){([.\w]*)}(.*)', document.body[i].split()[2]).groups()
    # backslashes must be spelled \backslash inside an ERT inset
    option = option.replace('\\', '\\backslash\n')
    document.body[i : j + 1] = [r'\begin_inset ERT',
                                r'\begin_layout Standard',
                                '%s%s{%s}' % (cmd, option, file),
# LyX versions whose file format this module produces.
supported_versions = ["1.5.0","1.5"]

# Forward conversion chain: each entry is
#   [target file format number, [functions applied to reach that format]].
# NOTE(review): several intermediate format steps (and the list's closing
# bracket) appear to be elided from this excerpt of the file.
convert = [[246, []],
           [247, [convert_font_settings]],
           [249, [convert_utf8]],
           [252, [convert_commandparams, convert_bibitem]],
           [254, [convert_esint]],
           [257, [convert_caption]],
           [258, [convert_lyxline]],
           [259, [convert_accent, normalize_font_whitespace]],
           [261, [convert_changes]],
           [263, [normalize_language_name]],
           [264, [convert_cv_textclass]],
           [265, [convert_tableborder]],

# Backward conversion chain: each entry is
#   [source file format number, [functions reverting it to format N-1]].
# NOTE(review): a few intermediate steps appear to be elided here as well.
revert = [[269, [revert_beamer_alert, revert_beamer_structure]],
          [268, [revert_preamble_listings_params, revert_listings_inset, revert_include_listings]],
          [267, [revert_CJK]],
          [266, [revert_utf8plain]],
          [265, [revert_armenian]],
          [264, [revert_tableborder]],
          [263, [revert_cv_textclass]],
          [262, [revert_language_name]],
          [261, [revert_ascii]],
          [259, [revert_utf8x]],
          [256, [revert_caption]],
          [255, [revert_encodings]],
          [254, [revert_clearpage, revert_cleardoublepage]],
          [253, [revert_esint]],
          [252, [revert_nomenclature, revert_printnomenclature]],
          [251, [revert_commandparams]],
          [250, [revert_cs_label]],
          [248, [revert_accent, revert_utf8]],
          [247, [revert_booktabs]],
          [246, [revert_font_settings]],
          [245, [revert_framed]]]
1662 if __name__ == "__main__":