1 # This file is part of lyx2lyx
2 # -*- coding: iso-8859-1 -*-
3 # Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
4 # Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
5 # Copyright (C) 2004-2005 Georg Baum <Georg.Baum@post.rwth-aachen.de>
7 # This program is free software; you can redistribute it and/or
8 # modify it under the terms of the GNU General Public License
9 # as published by the Free Software Foundation; either version 2
10 # of the License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, write to the Free Software
19 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 from os import access, F_OK
24 from parser_tools import find_token, find_end_of_inset, get_next_paragraph, \
25 get_paragraph, get_value, del_token, is_nonempty_line,\
26 find_tokens, find_end_of, find_token2, find_re
28 from string import replace, split, find, strip, join
30 from lyx_0_12 import update_latexaccents
33 # Remove \color default
# Strip the no-op "\color default" markup from body lines.
# NOTE(review): gap-sampled numbered listing — the loop header (i = 0 /
# while 1), the i == -1 break, and the replacement's second argument are
# missing from this view.
35 def remove_color_default(file):
38 i = find_token(file.body, "\\color default", i)
41 file.body[i] = replace(file.body[i], "\\color default",
def add_end_header(file):
    """Append the \\end_header marker to the document header.

    Forward conversion counterpart of rm_end_header: newer formats
    terminate the header section with an explicit \\end_header token.
    """
    # Dropped the stray trailing semicolon from the original; it is a
    # C-ism with no effect in Python.
    file.header.append("\\end_header")
# Remove the \end_header marker when converting back to older formats.
# NOTE(review): gap-sampled listing — the early return on i == -1 and the
# `del file.header[i]` that follow in the original are missing here.
52 def rm_end_header(file):
53 i = find_token(file.header, "\\end_header", 0)
# \SpecialChar ~ -> \InsetSpace ~
def convert_spaces(file):
    """Rewrite every protected-space token in the body to the new inset form."""
    for i in range(len(file.body)):
        # str.replace is a builtin method; the original used the
        # Python-2-only `string.replace` function.
        file.body[i] = file.body[i].replace("\\SpecialChar ~", "\\InsetSpace ~")
def revert_spaces(file):
    """Inverse of convert_spaces: \\InsetSpace ~ -> \\SpecialChar ~."""
    for i in range(len(file.body)):
        # str.replace replaces the Python-2-only `string.replace` helper.
        file.body[i] = file.body[i].replace("\\InsetSpace ~", "\\SpecialChar ~")
73 # equivalent to lyx::support::escape()
# Percent-encode bytes >= 128 and the characters '=' and '%' as '='XX
# hex pairs, mirroring the C++ helper.
# NOTE(review): gap-sampled listing — the `enc` accumulator init, the
# per-character loop over `lab` (binding c/o), the '=' separator append,
# and the final return are missing from this view.
75 def lyx_support_escape(lab):
76 hexdigit = ['0', '1', '2', '3', '4', '5', '6', '7',
77 '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
81 if o >= 128 or c == '=' or c == '%':
83 enc = enc + hexdigit[o >> 4]
84 enc = enc + hexdigit[o & 15]
91 # \begin_inset LatexCommand \eqref -> ERT
# Older formats have no \eqref inset, so rewrite it as raw LaTeX in ERT.
# NOTE(review): gap-sampled listing — loop scaffolding, the i == -1
# break, and the tail of the replacement list (eqref payload and
# closing lines) are missing from this view.
93 def revert_eqref(file):
94 regexp = re.compile(r'^\\begin_inset\s+LatexCommand\s+\\eqref')
97 i = find_re(file.body, regexp, i)
100 eqref = lyx_support_escape(regexp.sub("", file.body[i]))
101 file.body[i:i+1] = ["\\begin_inset ERT", "status Collapsed", "",
102 "\\layout Standard", "", "\\backslash ",
def convert_bibtex(file):
    """Rename the BibTeX inset command: \\BibTeX -> \\bibtex (newer spelling)."""
    for i in range(len(file.body)):
        # str.replace is a builtin method; the original relied on the
        # Python-2-only `string.replace` function.
        file.body[i] = file.body[i].replace("\\begin_inset LatexCommand \\BibTeX",
                                            "\\begin_inset LatexCommand \\bibtex")
def revert_bibtex(file):
    """Inverse of convert_bibtex: \\bibtex -> \\BibTeX (older spelling)."""
    for i in range(len(file.body)):
        # str.replace replaces the Python-2-only `string.replace` helper.
        file.body[i] = file.body[i].replace("\\begin_inset LatexCommand \\bibtex",
                                            "\\begin_inset LatexCommand \\BibTeX")
# Remove the obsolete \lyxparent inset from the body.
# NOTE(review): gap-sampled listing — loop setup, the i == -1 break, and
# the deletion of the matched inset lines are missing from this view.
125 def remove_insetparent(file):
128 i = find_token(file.body, "\\begin_inset LatexCommand \\lyxparent", i)
# Convert the old single-line External inset to the new multi-line form;
# RasterImage templates become Graphics insets instead.
# NOTE(review): gap-sampled listing — loop scaffolding, the `args` list
# initialization, and several branch/else lines are missing from this view.
137 def convert_external(file):
138 external_rexp = re.compile(r'\\begin_inset External ([^,]*),"([^"]*)",')
139 external_header = "\\begin_inset External"
142 i = find_token(file.body, external_header, i)
145 look = external_rexp.search(file.body[i])
148 args[0] = look.group(1)
149 args[1] = look.group(2)
150 #FIXME: if the previous search fails then warn
152 if args[0] == "RasterImage":
153 # Convert a RasterImage External Inset to a Graphics Inset.
154 top = "\\begin_inset Graphics"
156 filename = "\tfilename " + args[1]
157 file.body[i:i+1] = [top, filename]
160 # Convert the old External Inset format to the new.
161 top = external_header
162 template = "\ttemplate " + args[0]
164 filename = "\tfilename " + args[1]
165 file.body[i:i+1] = [top, template, filename]
# Fallback when no filename was captured: emit only header + template.
168 file.body[i:i+1] = [top, template]
# Fold the multi-line External inset parameters back onto the header line
# (old single-line format): template, "filename", " params".
# NOTE(review): gap-sampled listing — loop scaffolding and the `del`
# statements consuming the template/filename/params lines are missing.
172 def revert_external_1(file):
173 external_header = "\\begin_inset External"
176 i = find_token(file.body, external_header, i)
180 template = split(file.body[i+1])
184 filename = split(file.body[i+1])
188 params = split(file.body[i+1])
190 if file.body[i+1]: del file.body[i+1]
192 file.body[i] = file.body[i] + " " + template[0]+ ', "' + filename[0] + '", " '+ join(params[1:]) + '"'
# Remove the 'draft' parameter line from External insets (not supported
# by the older format).
# NOTE(review): gap-sampled listing — loop scaffolding, warning on a
# missing \end_inset, and the deletion of the matched draft line are
# missing from this view.
196 def revert_external_2(file):
197 draft_token = '\tdraft'
200 i = find_token(file.body, '\\begin_inset External', i)
203 j = find_end_of_inset(file.body, i + 1)
205 #this should not happen
# Length comparison ensures an exact 'draft' line, not a prefix match.
207 k = find_token(file.body, draft_token, i+1, j-1)
208 if (k != -1 and len(draft_token) == len(file.body[k])):
# Convert the old "\layout Comment" paragraph style into a Comment inset
# wrapped in a Standard paragraph, tracking nesting via \begin_deeper and
# \begin_inset scopes.
# NOTE(review): gap-sampled listing — loop scaffolding and many interior
# branch lines are missing from this view.
216 def convert_comment(file):
218 comment = "\\layout Comment"
220 i = find_token(file.body, comment, i)
224 file.body[i:i+1] = ["\\layout Standard","","",
225 "\\begin_inset Comment",
232 i = find_token(file.body, "\\layout", i)
234 i = len(file.body) - 1
235 file.body[i:i] = ["\\end_inset","",""]
238 j = find_token(file.body, '\\begin_deeper', old_i, i)
239 if j == -1: j = i + 1
240 k = find_token(file.body, '\\begin_inset', old_i, i)
241 if k == -1: k = i + 1
246 i = find_end_of( file.body, i, "\\begin_deeper","\\end_deeper")
248 #This case should not happen
249 #but if this happens deal with it gracefully adding
250 #the missing \end_deeper.
251 i = len(file.body) - 1
# NOTE(review): "\end_deeper" below has a single backslash — Python
# leaves the unknown escape \e intact so it works, but it is presumably
# a typo for "\\end_deeper"; confirm against the original file.
252 file.body[i:i] = ["\end_deeper",""]
260 i = find_end_of( file.body, i, "\\begin_inset","\\end_inset")
262 #This case should not happen
263 #but if this happens deal with it gracefully adding
264 #the missing \end_inset.
265 i = len(file.body) - 1
266 file.body[i:i] = ["\\end_inset","","","\\end_inset","",""]
272 if find(file.body[i], comment) == -1:
273 file.body[i:i] = ["\\end_inset"]
276 file.body[i:i+1] = ["\\layout Standard"]
# Downgrade Comment/Greyedout insets to the plain Note inset of the older
# format.
# NOTE(review): gap-sampled listing — loop scaffolding and the i == -1
# break between these lines are missing from this view.
280 def revert_comment(file):
283 i = find_tokens(file.body, ["\\begin_inset Comment", "\\begin_inset Greyedout"], i)
287 file.body[i] = "\\begin_inset Note"
# Insert explicit \end_layout markers by replaying the body through a
# stack of open structures (\layout, \begin_inset, \begin_deeper).
# NOTE(review): gap-sampled listing — loop scaffolding, i increments,
# `continue`s, and the final \the_end handling are missing from this view.
294 def add_end_layout(file):
295 i = find_token(file.body, '\\layout', 0)
# The stack records which construct each subsequent token closes.
301 struct_stack = ["\\layout"]
304 i = find_tokens(file.body, ["\\begin_inset", "\\end_inset", "\\layout",
305 "\\begin_deeper", "\\end_deeper", "\\the_end"], i)
307 token = split(file.body[i])[0]
309 if token == "\\begin_inset":
310 struct_stack.append(token)
314 if token == "\\end_inset":
315 tail = struct_stack.pop()
# A \layout on top of the stack means the paragraph is still open:
# close it before the \end_inset.
316 if tail == "\\layout":
317 file.body.insert(i,"")
318 file.body.insert(i,"\\end_layout")
320 #Check if it is the correct tag
325 if token == "\\layout":
326 tail = struct_stack.pop()
328 file.body.insert(i,"")
329 file.body.insert(i,"\\end_layout")
332 struct_stack.append(tail)
334 struct_stack.append(token)
337 if token == "\\begin_deeper":
338 file.body.insert(i,"")
339 file.body.insert(i,"\\end_layout")
341 struct_stack.append(token)
344 if token == "\\end_deeper":
345 if struct_stack[-1] == '\\layout':
346 file.body.insert(i, '\\end_layout')
# Close the final open paragraph at end of document.
353 file.body.insert(i, "")
354 file.body.insert(i, "\\end_layout")
# Remove all \end_layout lines (older formats close paragraphs implicitly).
# NOTE(review): gap-sampled listing — loop setup, the i == -1 break, and
# the actual deletion are missing from this view.
358 def rm_end_layout(file):
361 i = find_token(file.body, '\\end_layout', i)
370 # Handle change tracking keywords
# Ensure the header has a \tracking_changes entry; default it to off.
# NOTE(review): gap-sampled listing — the `if i == -1:` guard between
# these lines is missing from this view.
372 def insert_tracking_changes(file):
373 i = find_token(file.header, "\\tracking_changes", 0)
375 file.header.append("\\tracking_changes 0")
# Remove change-tracking keywords (\author, \tracking_changes) from the
# header when converting to a format that predates them.
# NOTE(review): gap-sampled listing — the deletions and i == -1 guards
# between these lines are missing from this view.
378 def rm_tracking_changes(file):
379 i = find_token(file.header, "\\author", 0)
383 i = find_token(file.header, "\\tracking_changes", 0)
# Strip \change_* markers from the body (change tracking not supported
# by the target format).
# NOTE(review): gap-sampled listing — loop setup, break, and deletion
# lines are missing from this view.
389 def rm_body_changes(file):
392 i = find_token(file.body, "\\change_", i)
400 # \layout -> \begin_layout
# NOTE(review): gap-sampled listing — loop setup, the i == -1 break, and
# the i increment are missing from this view.
402 def layout2begin_layout(file):
405 i = find_token(file.body, '\\layout', i)
409 file.body[i] = replace(file.body[i], '\\layout', '\\begin_layout')
# Inverse of layout2begin_layout: \begin_layout -> \layout.
# NOTE(review): gap-sampled listing — loop setup, the i == -1 break, and
# the i increment are missing from this view.
413 def begin_layout2layout(file):
416 i = find_token(file.body, '\\begin_layout', i)
420 file.body[i] = replace(file.body[i], '\\begin_layout', '\\layout')
# valignment="center" -> valignment="middle"
def convert_valignment_middle(body, start, end):
    """Rewrite table vertical alignment in body[start:end].

    Only <column ...> and <cell ...> tag lines are touched; any other
    line containing valignment="center" is left alone.
    """
    # Compile once instead of re-parsing the pattern for every line;
    # raw string keeps the regex free of escape surprises.
    tag_re = re.compile(r'^<(column|cell) .*valignment="center".*>$')
    for i in range(start, end):
        if tag_re.search(body[i]):
            # str.replace is a builtin method; the original used the
            # Python-2-only `string.replace` function.
            body[i] = body[i].replace('valignment="center"', 'valignment="middle"')
# Apply convert_valignment_middle to every Tabular inset in the body.
# NOTE(review): gap-sampled listing — loop scaffolding, the i == -1
# break, warning on missing \end_inset, and i advancement are missing.
433 def convert_table_valignment_middle(file):
434 regexp = re.compile(r'^\\begin_inset\s+Tabular')
437 i = find_re(file.body, regexp, i)
440 j = find_end_of_inset(file.body, i + 1)
442 #this should not happen
# No closing \end_inset found: convert up to end of body as a fallback.
443 convert_valignment_middle(file.body, i + 1, len(file.body))
445 convert_valignment_middle(file.body, i + 1, j)
def revert_table_valignment_middle(body, start, end):
    """Inverse of convert_valignment_middle: "middle" -> "center" in body[start:end].

    Only <column ...> and <cell ...> tag lines are touched.
    """
    # Compile once instead of re-parsing the pattern for every line.
    tag_re = re.compile(r'^<(column|cell) .*valignment="middle".*>$')
    for i in range(start, end):
        if tag_re.search(body[i]):
            # str.replace replaces the Python-2-only `string.replace` helper.
            body[i] = body[i].replace('valignment="middle"', 'valignment="center"')
# Apply revert_table_valignment_middle to every Tabular inset in the body.
# NOTE(review): gap-sampled listing — loop scaffolding, the i == -1
# break, warning on missing \end_inset, and i advancement are missing.
455 def revert_valignment_middle(file):
456 regexp = re.compile(r'^\\begin_inset\s+Tabular')
459 i = find_re(file.body, regexp, i)
462 j = find_end_of_inset(file.body, i + 1)
464 #this should not happen
# No closing \end_inset found: revert up to end of body as a fallback.
465 revert_table_valignment_middle(file.body, i + 1, len(file.body))
467 revert_table_valignment_middle(file.body, i + 1, j)
472 # \the_end -> \end_document
# Rename the document terminator; append one if the file lacks it.
# NOTE(review): gap-sampled listing — the i == -1 guard and return
# between these lines are missing from this view.
474 def convert_end_document(file):
475 i = find_token(file.body, "\\the_end", 0)
477 file.body.append("\\end_document")
479 file.body[i] = "\\end_document"
# Inverse of convert_end_document: \end_document -> \the_end.
# NOTE(review): gap-sampled listing — the i == -1 guard and return
# between these lines are missing from this view.
482 def revert_end_document(file):
483 i = find_token(file.body, "\\end_document", 0)
485 file.body.append("\\the_end")
487 file.body[i] = "\\the_end"
491 # Convert line and page breaks
494 #\line_top \line_bottom \pagebreak_top \pagebreak_bottom \added_space_top xxx \added_space_bottom yyy
498 #\begin_layout Standard
503 #\begin_inset VSpace xxx
507 #\begin_layout Standard
511 #\begin_layout Standard
513 #\begin_inset VSpace xxx
# Convert old per-paragraph break/space parameters (\line_top,
# \pagebreak_bottom, \added_space_top xxx, ...) into separate empty
# Standard paragraphs containing \newpage/\lyxline/VSpace insets placed
# above and below the paragraph.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# the conditional guards around each extend(), and several `if` headers
# are missing from this view.
520 def convert_breaks(file):
521 par_params = ('added_space_bottom', 'added_space_top', 'align',
522 'labelwidthstring', 'line_bottom', 'line_top', 'noindent',
523 'pagebreak_bottom', 'pagebreak_top', 'paragraph_spacing',
527 i = find_token(file.body, "\\begin_layout", i)
532 # Merge all paragraph parameters into a single line
533 # We cannot check for '\\' only because paragraphs may start e.g.
535 while file.body[i + 1][:1] == '\\' and split(file.body[i + 1][1:])[0] in par_params:
536 file.body[i] = file.body[i + 1] + ' ' + file.body[i]
# Record the offset of each break/space parameter on the merged line
# (-1 means absent).
539 line_top = find(file.body[i],"\\line_top")
540 line_bot = find(file.body[i],"\\line_bottom")
541 pb_top = find(file.body[i],"\\pagebreak_top")
542 pb_bot = find(file.body[i],"\\pagebreak_bottom")
543 vspace_top = find(file.body[i],"\\added_space_top")
544 vspace_bot = find(file.body[i],"\\added_space_bottom")
546 if line_top == -1 and line_bot == -1 and pb_bot == -1 and pb_top == -1 and vspace_top == -1 and vspace_bot == -1:
549 for tag in "\\line_top", "\\line_bottom", "\\pagebreak_top", "\\pagebreak_bottom":
550 file.body[i] = replace(file.body[i], tag, "")
553 # the position could be changed because of the removal of other
554 # paragraph properties above
555 vspace_top = find(file.body[i],"\\added_space_top")
556 tmp_list = split(file.body[i][vspace_top:])
557 vspace_top_value = tmp_list[1]
558 file.body[i] = file.body[i][:vspace_top] + join(tmp_list[2:])
561 # the position could be changed because of the removal of other
562 # paragraph properties above
563 vspace_bot = find(file.body[i],"\\added_space_bottom")
564 tmp_list = split(file.body[i][vspace_bot:])
565 vspace_bot_value = tmp_list[1]
566 file.body[i] = file.body[i][:vspace_bot] + join(tmp_list[2:])
568 file.body[i] = strip(file.body[i])
571 # Create an empty paragraph for line and page break that belong
572 # above the paragraph
573 if pb_top !=-1 or line_top != -1 or vspace_top != -1:
575 paragraph_above = ['','\\begin_layout Standard','','']
578 paragraph_above.extend(['\\newpage ',''])
581 paragraph_above.extend(['\\begin_inset VSpace ' + vspace_top_value,'\\end_inset','',''])
584 paragraph_above.extend(['\\lyxline ',''])
586 paragraph_above.extend(['\\end_layout',''])
588 #insert new paragraph above the current paragraph
589 file.body[i-2:i-2] = paragraph_above
590 i = i + len(paragraph_above)
592 # Ensure that nested style are converted later.
593 k = find_end_of(file.body, i, "\\begin_layout", "\\end_layout")
598 if pb_bot !=-1 or line_bot != -1 or vspace_bot != -1:
600 paragraph_below = ['','\\begin_layout Standard','','']
603 paragraph_below.extend(['\\lyxline ',''])
606 paragraph_below.extend(['\\begin_inset VSpace ' + vspace_bot_value,'\\end_inset','',''])
609 paragraph_below.extend(['\\newpage ',''])
611 paragraph_below.extend(['\\end_layout',''])
613 #insert new paragraph below the current paragraph
614 file.body[k + 1: k + 1] = paragraph_below
# Prefix the variant onto the Note inset: "\begin_inset Comment" becomes
# "\begin_inset Note Comment" etc. (char 13 is just past "\begin_inset ").
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# and i advancement are missing from this view.
620 def convert_note(file):
623 i = find_tokens(file.body, ["\\begin_inset Note",
624 "\\begin_inset Comment",
625 "\\begin_inset Greyedout"], i)
629 file.body[i] = file.body[i][0:13] + 'Note ' + file.body[i][13:]
# Inverse of convert_note: drop the "Note " variant prefix, keeping the
# remainder of the line after the header.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# and i advancement are missing from this view.
633 def revert_note(file):
634 note_header = "\\begin_inset Note "
637 i = find_token(file.body, note_header, i)
641 file.body[i] = "\\begin_inset " + file.body[i][len(note_header):]
# Prefix "Box " onto every box-flavored inset header (char 13 is just
# past "\begin_inset "), e.g. "\begin_inset Boxed" -> "\begin_inset Box Boxed".
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# and i advancement are missing from this view.
648 def convert_box(file):
651 i = find_tokens(file.body, ["\\begin_inset Boxed",
652 "\\begin_inset Doublebox",
653 "\\begin_inset Frameless",
654 "\\begin_inset ovalbox",
655 "\\begin_inset Ovalbox",
656 "\\begin_inset Shadowbox"], i)
660 file.body[i] = file.body[i][0:13] + 'Box ' + file.body[i][13:]
# Inverse of convert_box: drop the "Box " prefix from box inset headers.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# and i advancement are missing from this view.
664 def revert_box(file):
665 box_header = "\\begin_inset Box "
668 i = find_token(file.body, box_header, i)
672 file.body[i] = "\\begin_inset " + file.body[i][len(box_header):]
# Rewrite the collapsable-inset state line: "collapsed false/true" ->
# "status open/collapsed" for every collapsable inset type.
# NOTE(review): gap-sampled listing — outer loop scaffolding, the inner
# scan loop header, i increments, and break statements are missing.
679 def convert_collapsable(file):
682 i = find_tokens(file.body, ["\\begin_inset Box",
683 "\\begin_inset Branch",
684 "\\begin_inset CharStyle",
685 "\\begin_inset Float",
686 "\\begin_inset Foot",
687 "\\begin_inset Marginal",
688 "\\begin_inset Note",
689 "\\begin_inset OptArg",
690 "\\begin_inset Wrap"], i)
694 # Search for a line starting 'collapsed'
695 # If, however, we find a line starting '\begin_layout'
696 # (_always_ present) then break with a warning message
699 if (file.body[i] == "collapsed false"):
700 file.body[i] = "status open"
702 elif (file.body[i] == "collapsed true"):
703 file.body[i] = "status collapsed"
705 elif (file.body[i][:13] == "\\begin_layout"):
706 file.warning("Malformed LyX file: Missing 'collapsed'.")
# Inverse of convert_collapsable: "status open" -> "collapsed false",
# "status collapsed"/"status inlined" -> "collapsed true".
# NOTE(review): gap-sampled listing — outer loop scaffolding, the inner
# scan loop header, i increments, and break statements are missing.
713 def revert_collapsable(file):
716 i = find_tokens(file.body, ["\\begin_inset Box",
717 "\\begin_inset Branch",
718 "\\begin_inset CharStyle",
719 "\\begin_inset Float",
720 "\\begin_inset Foot",
721 "\\begin_inset Marginal",
722 "\\begin_inset Note",
723 "\\begin_inset OptArg",
724 "\\begin_inset Wrap"], i)
728 # Search for a line starting 'status'
729 # If, however, we find a line starting '\begin_layout'
730 # (_always_ present) then break with a warning message
733 if (file.body[i] == "status open"):
734 file.body[i] = "collapsed false"
736 elif (file.body[i] == "status collapsed" or
737 file.body[i] == "status inlined"):
738 file.body[i] = "collapsed true"
740 elif (file.body[i][:13] == "\\begin_layout"):
741 file.warning("Malformed LyX file: Missing 'status'.")
# Lowercase the ERT status values: Open/Collapsed/Inlined ->
# open/collapsed/inlined.
# NOTE(review): gap-sampled listing — outer loop scaffolding, the inner
# scan loop header, i increments, and break statements are missing.
751 def convert_ert(file):
754 i = find_token(file.body, "\\begin_inset ERT", i)
758 # Search for a line starting 'status'
759 # If, however, we find a line starting '\begin_layout'
760 # (_always_ present) then break with a warning message
763 if (file.body[i] == "status Open"):
764 file.body[i] = "status open"
766 elif (file.body[i] == "status Collapsed"):
767 file.body[i] = "status collapsed"
769 elif (file.body[i] == "status Inlined"):
770 file.body[i] = "status inlined"
772 elif (file.body[i][:13] == "\\begin_layout"):
773 file.warning("Malformed LyX file: Missing 'status'.")
# Inverse of convert_ert: capitalize the ERT status values again.
# NOTE(review): gap-sampled listing — outer loop scaffolding, the inner
# scan loop header, i increments, and break statements are missing.
780 def revert_ert(file):
783 i = find_token(file.body, "\\begin_inset ERT", i)
787 # Search for a line starting 'status'
788 # If, however, we find a line starting '\begin_layout'
789 # (_always_ present) then break with a warning message
792 if (file.body[i] == "status open"):
793 file.body[i] = "status Open"
795 elif (file.body[i] == "status collapsed"):
796 file.body[i] = "status Collapsed"
798 elif (file.body[i] == "status inlined"):
799 file.body[i] = "status Inlined"
801 elif (file.body[i][:13] == "\\begin_layout"):
802 file.warning("Malformed LyX file : Missing 'status'.")
# Convert Minipage insets to the Box inset, translating position,
# inner_position, height, width and collapsed state into the Box
# parameter order.
# NOTE(review): gap-sampled listing — the `pos` list, loop scaffolding,
# else branches, i increments, and the `status` assignment are missing
# from this view.
812 def convert_minipage(file):
813 """ Convert minipages to the box inset.
814 We try to use the same order of arguments as lyx does.
817 inner_pos = ["c","t","b","s"]
821 i = find_token(file.body, "\\begin_inset Minipage", i)
825 file.body[i] = "\\begin_inset Box Frameless"
828 # convert old to new position using the pos list
829 if file.body[i][:8] == "position":
830 file.body[i] = 'position "%s"' % pos[int(file.body[i][9])]
832 file.body.insert(i, 'position "%s"' % pos[0])
835 file.body.insert(i, 'hor_pos "c"')
837 file.body.insert(i, 'has_inner_box 1')
840 # convert the inner_position
841 if file.body[i][:14] == "inner_position":
842 file.body[i] = 'inner_pos "%s"' % inner_pos[int(file.body[i][15])]
# NOTE(review): the insert below appears to be missing its index
# argument (list.insert takes (index, item)) — presumably `i` was lost
# in sampling; confirm against the original file.
844 file.body.insert('inner_pos "%s"' % inner_pos[0])
847 # We need this since the new file format has a height and width
848 # in a different order.
849 if file.body[i][:6] == "height":
850 height = file.body[i][6:]
851 # test for default value of 221 and convert it accordingly
852 if height == ' "0pt"':
858 if file.body[i][:5] == "width":
859 width = file.body[i][5:]
864 if file.body[i][:9] == "collapsed":
865 if file.body[i][9:] == "true":
# Emit the Box parameters in the order the new format expects.
873 file.body.insert(i, 'use_parbox 0')
875 file.body.insert(i, 'width' + width)
877 file.body.insert(i, 'special "none"')
879 file.body.insert(i, 'height' + height)
881 file.body.insert(i, 'height_special "totalheight"')
883 file.body.insert(i, 'status ' + status)
887 # -------------------------------------------------------------------------------------------
888 # Convert backslashes and '\n' into valid ERT code, append the converted
889 # text to body[i] and return the (maybe incremented) line index i
# NOTE(review): gap-sampled listing — the per-character loop over `ert`
# (binding c), the branch conditions, the i increment after a newline,
# and the final `return i` are missing from this view.
890 def convert_ertbackslash(body, i, ert):
893 body[i] = body[i] + '\\backslash '
897 body[i+1:i+1] = ['\\newline ', '']
900 body[i] = body[i] + c
# Convert VSpace insets: at a paragraph boundary they become
# \added_space_top/\added_space_bottom paragraph parameters; elsewhere
# they become a \vspace{...} (or \vspace*{...}) ERT inset.
# NOTE(review): gap-sampled listing — loop scaffolding, several if/else
# headers, the paragraph_start/paragraph_end assignments, deletions, and
# i advancement are missing from this view.
904 def convert_vspace(file):
906 # Get default spaceamount
907 i = find_token(file.header, '\\defskip', 0)
909 defskipamount = 'medskip'
911 defskipamount = split(file.header[i])[1]
916 i = find_token(file.body, '\\begin_inset VSpace', i)
919 spaceamount = split(file.body[i])[2]
921 # Are we at the beginning or end of a paragraph?
923 start = get_paragraph(file.body, i) + 1
924 for k in range(start, i):
925 if is_nonempty_line(file.body[k]):
929 j = find_end_of_inset(file.body, i)
931 file.warning("Malformed LyX file: Missing '\\end_inset'.")
934 end = get_next_paragraph(file.body, i)
935 for k in range(j + 1, end):
936 if is_nonempty_line(file.body[k]):
940 # Convert to paragraph formatting if we are at the beginning or end
941 # of a paragraph and the resulting paragraph would not be empty
942 if ((paragraph_start and not paragraph_end) or
943 (paragraph_end and not paragraph_start)):
944 # The order is important: del and insert invalidate some indices
948 file.body.insert(start, '\\added_space_top ' + spaceamount + ' ')
950 file.body.insert(start, '\\added_space_bottom ' + spaceamount + ' ')
# Otherwise fall through to an ERT \vspace command.
954 file.body[i:i+1] = ['\\begin_inset ERT', 'status Collapsed', '',
955 '\\layout Standard', '', '\\backslash ']
# A trailing '*' means "keep the space at a page break" -> \vspace*.
957 if spaceamount[-1] == '*':
958 spaceamount = spaceamount[:-1]
963 # Replace defskip by the actual value
964 if spaceamount == 'defskip':
965 spaceamount = defskipamount
967 # LaTeX does not know \\smallskip* etc
969 if spaceamount == 'smallskip':
970 spaceamount = '\\smallskipamount'
971 elif spaceamount == 'medskip':
972 spaceamount = '\\medskipamount'
973 elif spaceamount == 'bigskip':
974 spaceamount = '\\bigskipamount'
975 elif spaceamount == 'vfill':
976 spaceamount = '\\fill'
978 # Finally output the LaTeX code
979 if (spaceamount == 'smallskip' or spaceamount == 'medskip' or
980 spaceamount == 'bigskip' or spaceamount == 'vfill'):
981 file.body.insert(i, spaceamount)
984 file.body.insert(i, 'vspace*{')
986 file.body.insert(i, 'vspace{')
987 i = convert_ertbackslash(file.body, i, spaceamount)
988 file.body[i] = file.body[i] + '}'
992 # Convert a LyX length into a LaTeX length
# Percent-based LyX units (text%, col%, ...) become fractions of the
# corresponding LaTeX length; "special" lengths are rendered as
# value\<special>.
# NOTE(review): gap-sampled listing — the final `return len` (and any
# blank lines) are missing from this view. The parameter shadows the
# builtin `len`, as in the original.
993 def convert_len(len, special):
994 units = {"text%":"\\textwidth", "col%":"\\columnwidth",
995 "page%":"\\pagewidth", "line%":"\\linewidth",
996 "theight%":"\\textheight", "pheight%":"\\pageheight"}
998 # Convert special lengths
999 if special != 'none':
1000 len = '%f\\' % len2value(len) + special
1002 # Convert LyX units to LaTeX units
1003 for unit in units.keys():
1004 if find(len, unit) != -1:
1005 len = '%f' % (len2value(len) / 100) + units[unit]
# Convert a LyX length into valid ERT code and append it to body[i]
# Return the (maybe incremented) line index i
def convert_ertlen(body, i, len, special):
    # Translate the LyX length to its LaTeX form first, then emit the
    # resulting text (backslashes included) as ERT.
    latex_len = convert_len(len, special)
    return convert_ertbackslash(body, i, latex_len)
1018 # Return the value of len without the unit in numerical form
# NOTE(review): gap-sampled listing — the `def len2value(len):` header
# and the fallback `return 1.0` after the comment below are missing from
# this view; these lines are the body of that function.
1020 result = re.search('([+-]?[0-9.]+)', len)
1022 return float(result.group(1))
1023 # No number means 1.0
1027 # Convert text to ERT and insert it at body[i]
1028 # Return the index of the line after the inserted ERT
# NOTE(review): gap-sampled listing — the final `return i` (with its
# adjustment) is missing from this view.
1029 def insert_ert(body, i, status, text):
1030 body[i:i] = ['\\begin_inset ERT', 'status ' + status, '',
1031 '\\layout Standard', '']
1033 i = convert_ertbackslash(body, i, text) + 1
1034 body[i:i] = ['', '\\end_inset', '']
1039 # Add text to the preamble if it is not already there.
1040 # Only the first line is checked!
# NOTE(review): gap-sampled listing — the i == -1 / j == -1 returns and
# the early return when the text is already present are missing from
# this view.
1041 def add_to_preamble(file, text):
1042 i = find_token(file.header, '\\begin_preamble', 0)
1044 file.warning("Malformed LyX file: Missing '\\begin_preamble'.")
1046 j = find_token(file.header, '\\end_preamble', i)
1048 file.warning("Malformed LyX file: Missing '\\end_preamble'.")
# Duplicate check: only text[0] is searched for, per the note above.
1050 if find_token(file.header, text[0], i, j) != -1:
1052 file.header[j:j] = text
# Convert Frameless boxes (format 225+) back to format 224: simple cases
# become Minipage insets; anything with non-default inner position,
# height, special width or parbox use is emulated with ERT plus preamble
# macro redefinitions of the minipage environment.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# several if/else headers, int() conversions of the parsed params,
# old_i assignments, and i/j advancement are missing from this view.
1055 def convert_frameless_box(file):
1056 pos = ['t', 'c', 'b']
1057 inner_pos = ['c', 't', 'b', 's']
1060 i = find_token(file.body, '\\begin_inset Frameless', i)
1063 j = find_end_of_inset(file.body, i)
1065 file.warning("Malformed LyX file: Missing '\\end_inset'.")
# Defaults for every box parameter; actual values are read from the
# inset and the parameter lines deleted below.
1072 params = {'position':'0', 'hor_pos':'c', 'has_inner_box':'1',
1073 'inner_pos':'1', 'use_parbox':'0', 'width':'100col%',
1074 'special':'none', 'height':'1in',
1075 'height_special':'totalheight', 'collapsed':'false'}
1076 for key in params.keys():
1077 value = replace(get_value(file.body, key, i, j), '"', '')
1079 if key == 'position':
1080 # convert new to old position: 'position "t"' -> 0
1081 value = find_token(pos, value, 0)
1084 elif key == 'inner_pos':
1085 # convert inner position
1086 value = find_token(inner_pos, value, 0)
1091 j = del_token(file.body, key, i, j)
1094 # Convert to minipage or ERT?
1095 # Note that the inner_position and height parameters of a minipage
1096 # inset are ignored and not accessible for the user, although they
1097 # are present in the file format and correctly read in and written.
1098 # Therefore we convert to ERT if they do not have their LaTeX
1099 # defaults. These are:
1100 # - the value of "position" for "inner_pos"
1101 # - "\totalheight" for "height"
1102 if (params['use_parbox'] != '0' or
1103 params['has_inner_box'] != '1' or
1104 params['special'] != 'none' or
1105 inner_pos[params['inner_pos']] != pos[params['position']] or
1106 params['height_special'] != 'totalheight' or
1107 len2value(params['height']) != 1.0):
1109 # Here we know that this box is not supported in file format 224.
1110 # Therefore we need to convert it to ERT. We can't simply convert
1111 # the beginning and end of the box to ERT, because the
1112 # box inset may contain layouts that are different from the
1113 # surrounding layout. After the conversion the contents of the
1114 # box inset is on the same level as the surrounding text, and
1115 # paragraph layouts and align parameters can get mixed up.
1117 # A possible solution for this problem:
1118 # Convert the box to a minipage and redefine the minipage
1119 # environment in ERT so that the original box is simulated.
1120 # For minipages we could do this in a way that the width and
1121 # position can still be set from LyX, but this did not work well.
1122 # This is not possible for parboxes either, so we convert the
1123 # original box to ERT, put the minipage inset inside the box
1124 # and redefine the minipage environment to be empty.
1126 # Commands that are independant of a particular box can go to
1128 # We need to define lyxtolyxrealminipage with 3 optional
1129 # arguments although LyX 1.3 uses only the first one.
1130 # Otherwise we will get LaTeX errors if this document is
1131 # converted to format 225 or above again (LyX 1.4 uses all
1132 # optional arguments).
1133 add_to_preamble(file,
1134 ['% Commands inserted by lyx2lyx for frameless boxes',
1135 '% Save the original minipage environment',
1136 '\\let\\lyxtolyxrealminipage\\minipage',
1137 '\\let\\endlyxtolyxrealminipage\\endminipage',
1138 '% Define an empty lyxtolyximinipage environment',
1139 '% with 3 optional arguments',
1140 '\\newenvironment{lyxtolyxiiiminipage}[4]{}{}',
1141 '\\newenvironment{lyxtolyxiiminipage}[2][\\lyxtolyxargi]%',
1142 ' {\\begin{lyxtolyxiiiminipage}{\\lyxtolyxargi}{\\lyxtolyxargii}{#1}{#2}}%',
1143 ' {\\end{lyxtolyxiiiminipage}}',
1144 '\\newenvironment{lyxtolyximinipage}[1][\\totalheight]%',
1145 ' {\\def\\lyxtolyxargii{{#1}}\\begin{lyxtolyxiiminipage}}%',
1146 ' {\\end{lyxtolyxiiminipage}}',
1147 '\\newenvironment{lyxtolyxminipage}[1][c]%',
1148 ' {\\def\\lyxtolyxargi{{#1}}\\begin{lyxtolyximinipage}}',
1149 ' {\\end{lyxtolyximinipage}}'])
1151 if params['use_parbox'] != '0':
1154 ert = '\\begin{lyxtolyxrealminipage}'
1156 # convert optional arguments only if not latex default
1157 if (pos[params['position']] != 'c' or
1158 inner_pos[params['inner_pos']] != pos[params['position']] or
1159 params['height_special'] != 'totalheight' or
1160 len2value(params['height']) != 1.0):
1161 ert = ert + '[' + pos[params['position']] + ']'
1162 if (inner_pos[params['inner_pos']] != pos[params['position']] or
1163 params['height_special'] != 'totalheight' or
1164 len2value(params['height']) != 1.0):
1165 ert = ert + '[' + convert_len(params['height'],
1166 params['height_special']) + ']'
1167 if inner_pos[params['inner_pos']] != pos[params['position']]:
1168 ert = ert + '[' + inner_pos[params['inner_pos']] + ']'
1170 ert = ert + '{' + convert_len(params['width'],
1171 params['special']) + '}'
1173 if params['use_parbox'] != '0':
1175 ert = ert + '\\let\\minipage\\lyxtolyxminipage%\n'
1176 ert = ert + '\\let\\endminipage\\endlyxtolyxminipage%\n'
1179 i = insert_ert(file.body, i, 'Collapsed', ert)
1180 j = j + i - old_i - 1
1182 file.body[i:i] = ['\\begin_inset Minipage',
1183 'position %d' % params['position'],
1186 'width "' + params['width'] + '"',
1187 'collapsed ' + params['collapsed']]
1191 # Restore the original minipage environment since we may have
1192 # minipages inside this box.
1193 # Start a new paragraph because the following may be nonstandard
1194 file.body[i:i] = ['\\layout Standard', '', '']
1197 ert = '\\let\\minipage\\lyxtolyxrealminipage%\n'
# NOTE(review): '\lyxtolyxrealendminipage' below does not match the
# '\endlyxtolyxrealminipage' macro defined in the preamble above —
# presumably a transposition typo in the original; confirm upstream.
1198 ert = ert + '\\let\\endminipage\\lyxtolyxrealendminipage%'
1200 i = insert_ert(file.body, i, 'Collapsed', ert)
1201 j = j + i - old_i - 1
1203 # Redefine the minipage end before the inset end.
1204 # Start a new paragraph because the previous may be nonstandard
1205 file.body[j:j] = ['\\layout Standard', '', '']
1207 ert = '\\let\\endminipage\\endlyxtolyxminipage'
1208 j = insert_ert(file.body, j, 'Collapsed', ert)
1210 file.body.insert(j, '')
1213 # LyX writes '%\n' after each box. Therefore we need to end our
1214 # ERT with '%\n', too, since this may swallow a following space.
1215 if params['use_parbox'] != '0':
1218 ert = '\\end{lyxtolyxrealminipage}%\n'
1219 j = insert_ert(file.body, j, 'Collapsed', ert)
1221 # We don't need to restore the original minipage after the inset
1222 # end because the scope of the redefinition is the original box.
1226 # Convert to minipage
1227 file.body[i:i] = ['\\begin_inset Minipage',
1228 'position %d' % params['position'],
1229 'inner_position %d' % params['inner_pos'],
1230 'height "' + params['height'] + '"',
1231 'width "' + params['width'] + '"',
1232 'collapsed ' + params['collapsed']]
# Add a default \use_jurabib header entry right after
# \use_numerical_citations.
# NOTE(review): gap-sampled listing — the i == -1 return between these
# lines is missing from this view.
1239 def convert_jurabib(file):
1240 i = find_token(file.header, '\\use_numerical_citations', 0)
1242 file.warning("Malformed lyx file: Missing '\\use_numerical_citations'.")
1244 file.header.insert(i + 1, '\\use_jurabib 0')
# Drop the \use_jurabib header entry; warn when it was enabled, since
# that cannot be represented in the older format.
# NOTE(review): gap-sampled listing — the i == -1 return and the final
# `del file.header[i]` are missing from this view.
1247 def revert_jurabib(file):
1248 i = find_token(file.header, '\\use_jurabib', 0)
1250 file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
1252 if get_value(file.header, '\\use_jurabib', 0) != "0":
1253 file.warning("Conversion of '\\use_jurabib = 1' not yet implemented.")
1254 # Don't remove '\\use_jurabib' so that people will get warnings by lyx
# Add a default \use_bibtopic header entry right after \use_jurabib.
# NOTE(review): gap-sampled listing — the i == -1 return between these
# lines is missing from this view.
1262 def convert_bibtopic(file):
1263 i = find_token(file.header, '\\use_jurabib', 0)
1265 file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
1267 file.header.insert(i + 1, '\\use_bibtopic 0')
# Drop the \use_bibtopic header entry; warn when it was enabled, since
# that cannot be represented in the older format.
# NOTE(review): gap-sampled listing — the i == -1 return and the final
# deletion are missing from this view. The trailing comment mentions
# '\use_jurabib' — presumably a copy-paste slip for '\use_bibtopic'.
1270 def revert_bibtopic(file):
1271 i = find_token(file.header, '\\use_bibtopic', 0)
1273 file.warning("Malformed lyx file: Missing '\\use_bibtopic'.")
1275 if get_value(file.header, '\\use_bibtopic', 0) != "0":
1276 file.warning("Conversion of '\\use_bibtopic = 1' not yet implemented.")
1277 # Don't remove '\\use_jurabib' so that people will get warnings by lyx
# Add the 'sideways false' parameter after the 'wide' line of every
# Float inset.
# NOTE(review): gap-sampled listing — loop scaffolding, the inner scan
# loop header, i increments, and break statements are missing from this
# view.
1284 def convert_float(file):
1287 i = find_token(file.body, '\\begin_inset Float', i)
1290 # Search for a line starting 'wide'
1291 # If, however, we find a line starting '\begin_layout'
1292 # (_always_ present) then break with a warning message
1295 if (file.body[i][:4] == "wide"):
1296 file.body.insert(i + 1, 'sideways false')
1298 elif (file.body[i][:13] == "\\begin_layout"):
1299 file.warning("Malformed lyx file: Missing 'wide'.")
# Remove the 'sideways' parameter from Float insets; warn when it was
# true, since the older format cannot represent rotated floats.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# and i advancement are missing from this view.
1305 def revert_float(file):
1308 i = find_token(file.body, '\\begin_inset Float', i)
1311 j = find_end_of_inset(file.body, i)
1313 file.warning("Malformed lyx file: Missing '\\end_inset'.")
1316 if get_value(file.body, 'sideways', i, j) != "false":
1317 file.warning("Conversion of 'sideways true' not yet implemented.")
1318 # Don't remove 'sideways' so that people will get warnings by lyx
1321 del_token(file.body, 'sideways', i, j)
# Append a .ps/.eps extension to Graphics inset filenames when the bare
# name does not exist on disk but the extended one does.
# NOTE(review): gap-sampled listing — loop scaffolding, i == -1 break,
# j == -1 handling, i advancement, and `continue`s are missing from
# this view.
1325 def convert_graphics(file):
1326 """ Add extension to filenames of insetgraphics if necessary.
1330 i = find_token(file.body, "\\begin_inset Graphics", i)
1334 j = find_token2(file.body, "filename", i)
1338 filename = split(file.body[j])[1]
1339 absname = os.path.normpath(os.path.join(file.dir, filename))
1340 if file.input == stdin and not os.path.isabs(filename):
1341 # We don't know the directory and cannot check the file.
1342 # We could use a heuristic and take the current directory,
1343 # and we could try to find out if filename has an extension,
1344 # but that would be just guesses and could be wrong.
1345 file.warning("""Warning: Can not determine whether file
1347 needs an extension when reading from standard input.
1348 You may need to correct the file manually or run
1349 lyx2lyx again with the .lyx file as commandline argument.""" % filename)
1351 # This needs to be the same algorithm as in pre 233 insetgraphics
# File exists as-is: nothing to do.
1352 if access(absname, F_OK):
1354 if access(absname + ".ps", F_OK):
1355 file.body[j] = replace(file.body[j], filename, filename + ".ps")
1357 if access(absname + ".eps", F_OK):
1358 file.body[j] = replace(file.body[j], filename, filename + ".eps")
1362 # Convert firstname and surname from styles -> char styles
# docbook backend only: gather the FirstName/Surname paragraph contents
# nested under an Author paragraph and re-emit them as CharStyle insets.
# NOTE(review): gap-sampled listing — loop scaffolding, i/j/k
# bookkeeping, `continue`s, and the tail of the inserted CharStyle block
# are missing from this view.
1364 def convert_names(file):
1365 """ Convert in the docbook backend from firstname and surname style
1368 if file.backend != "docbook":
1374 i = find_token(file.body, "\\begin_layout Author", i)
1379 while file.body[i] == "":
1382 if file.body[i][:11] != "\\end_layout" or file.body[i+2][:13] != "\\begin_deeper":
1387 i = find_end_of( file.body, i+3, "\\begin_deeper","\\end_deeper")
1389 # something is really wrong, abort
1390 file.warning("Missing \\end_deeper, after style Author.")
1391 file.warning("Aborted attempt to parse FirstName and Surname.")
1393 firstname, surname = "", ""
# `name` is the slice of body lines holding the nested name paragraphs.
1395 name = file.body[k:i]
1397 j = find_token(name, "\\begin_layout FirstName", 0)
1400 while(name[j] != "\\end_layout"):
1401 firstname = firstname + name[j]
1404 j = find_token(name, "\\begin_layout Surname", 0)
1407 while(name[j] != "\\end_layout"):
1408 surname = surname + name[j]
# Remove the old nested paragraphs and splice in the CharStyle insets.
1412 del file.body[k+2:i+1]
1414 file.body[k-1:k-1] = ["", "",
1415 "\\begin_inset CharStyle Firstname",
1418 "\\begin_layout Standard",
1426 "\\begin_inset CharStyle Surname",
1429 "\\begin_layout Standard",
# Reverse of convert_names: turn Firstname/Surname char styles back into
# paragraph styles.  Only the early-exit guard is visible here — the rest of
# the body is elided in this listing.
1438 def revert_names(file):
1439 """ Revert in the docbook backend from firstname and surname char style
1442 if file.backend != "docbook":
1447 # \use_natbib 1 \cite_engine <style>
1448 # \use_numerical_citations 0 -> where <style> is one of
1449 # \use_jurabib 0 "basic", "natbib_authoryear",
1450 # "natbib_numerical" or "jurabib"
# Collapse the three boolean citation flags of the header into a single
# \cite_engine line.  The flags must appear on consecutive header lines.
# NOTE(review): a few lines (the error returns and the natbib/jurabib
# guards) are elided in this listing.
1451 def convert_cite_engine(file):
1452 a = find_token(file.header, "\\use_natbib", 0)
1454 file.warning("Malformed lyx file: Missing '\\use_natbib'.")
1457 b = find_token(file.header, "\\use_numerical_citations", 0)
# Each flag must immediately follow the previous one, else the file is malformed.
1458 if b == -1 or b != a+1:
1459 file.warning("Malformed lyx file: Missing '\\use_numerical_citations'.")
1462 c = find_token(file.header, "\\use_jurabib", 0)
1463 if c == -1 or c != b+1:
1464 file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
1467 use_natbib = int(split(file.header[a])[1])
1468 use_numerical_citations = int(split(file.header[b])[1])
1469 use_jurabib = int(split(file.header[c])[1])
# Pick the engine name; the enclosing guards (presumably on use_natbib and
# use_jurabib) sit on elided lines 1472/1475/1477 — confirm against upstream.
1471 cite_engine = "basic"
1473 if use_numerical_citations:
1474 cite_engine = "natbib_numerical"
1476 cite_engine = "natbib_authoryear"
1478 cite_engine = "jurabib"
# Replace the three flag lines with the single \cite_engine line.
1480 del file.header[a:c+1]
1481 file.header.insert(a, "\\cite_engine " + cite_engine)
# Reverse of convert_cite_engine: expand \cite_engine back into the three
# boolean flag lines.  NOTE(review): the assignments that set use_natbib /
# use_numerical / use_jurabib per engine are elided in this listing.
1484 def revert_cite_engine(file):
1485 i = find_token(file.header, "\\cite_engine", 0)
1487 file.warning("Malformed lyx file: Missing '\\cite_engine'.")
1490 cite_engine = split(file.header[i])[1]
1495 if cite_engine == "natbib_numerical":
1498 elif cite_engine == "natbib_authoryear":
1500 elif cite_engine == "jurabib":
# Insert in reverse so the header reads natbib, numerical_citations, jurabib.
1504 file.header.insert(i, "\\use_jurabib " + use_jurabib)
1505 file.header.insert(i, "\\use_numerical_citations " + use_numerical)
1506 file.header.insert(i, "\\use_natbib " + use_natbib)
# Rename the \paperpackage header value one step down the scale:
# a4 -> none, a4wide -> a4, widemarginsa4 -> a4wide.
# NOTE(review): the missing-token guard lines are elided in this listing.
1512 def convert_paperpackage(file):
1513 i = find_token(file.header, "\\paperpackage", 0)
1515 file.warning("Malformed lyx file: Missing '\\paperpackage'.")
1518 packages = {'a4':'none', 'a4wide':'a4', 'widemarginsa4':'a4wide'}
1519 paperpackage = split(file.header[i])[1]
# Rewrite only the value part of the header line.
1520 file.header[i] = replace(file.header[i], paperpackage, packages[paperpackage])
# Inverse of convert_paperpackage: none -> a4, a4 -> a4wide,
# a4wide -> widemarginsa4 (the dict literal continues on elided line 1530).
1523 def revert_paperpackage(file):
1524 i = find_token(file.header, "\\paperpackage", 0)
1526 file.warning("Malformed lyx file: Missing '\\paperpackage'.")
1529 packages = {'none':'a4', 'a4':'a4wide', 'a4wide':'widemarginsa4',
1531 paperpackage = split(file.header[i])[1]
1532 file.header[i] = replace(file.header[i], paperpackage, packages[paperpackage])
# Collapse each multi-line \bullet / \bulletLaTeX header entry onto a single
# line.  NOTE(review): the loop scaffolding and the lines computing n are
# elided in this listing.
1538 def convert_bullets(file):
1541 i = find_token(file.header, "\\bullet", i)
1544 if file.header[i][:12] == '\\bulletLaTeX':
# LaTeX bullets carry one continuation line ...
1545 file.header[i] = file.header[i] + ' ' + strip(file.header[i+1])
# ... ordinary bullets carry three.
1548 file.header[i] = file.header[i] + ' ' + strip(file.header[i+1]) +\
1549 ' ' + strip(file.header[i+2]) + ' ' + strip(file.header[i+3])
# Drop the now-merged continuation lines.
1551 del file.header[i+1:i + n]
# Split single-line \bullet entries back into their multi-line form.
# NOTE(review): loop scaffolding and parts of the replacement list are
# elided in this listing.
1555 def revert_bullets(file):
1558 i = find_token(file.header, "\\bullet", i)
1561 if file.header[i][:12] == '\\bulletLaTeX':
# For LaTeX bullets the argument starts at the first double quote.
1562 n = find(file.header[i], '"')
1564 file.warning("Malformed header.")
1567 file.header[i:i+1] = [file.header[i][:n-1],'\t' + file.header[i][n:], '\\end_bullet']
# Ordinary bullets: break the line into whitespace-separated fragments.
1570 frag = split(file.header[i])
1572 file.warning("Malformed header.")
# Rebuild the multi-line form (the rest of this list literal is elided).
1575 file.header[i:i+1] = [frag[0] + ' ' + frag[1],
# \begin_header and \begin_document

def add_begin_header(file):
    """Insert the document/header start markers after '\\lyxformat'.

    Both '\\begin_document' and '\\begin_header' are placed directly after
    the '\\lyxformat' line, in that order.
    """
    pos = find_token(file.header, '\\lyxformat', 0)
    # One splice is equivalent to the two successive insert() calls at the
    # same index: '\begin_document' ends up before '\begin_header'.
    file.header[pos + 1:pos + 1] = ['\\begin_document', '\\begin_header']
# Remove the '\begin_document' and '\begin_header' marker lines from the
# header.  NOTE(review): the del statements themselves sit on elided lines
# (1594-1595, 1597+) in this listing.
1592 def remove_begin_header(file):
1593 i = find_token(file.header, "\\begin_document", 0)
1596 i = find_token(file.header, "\\begin_header", 0)
# \begin_file.body and \end_file.body

def add_begin_body(file):
    """Wrap the document body in '\\begin_body' / '\\end_body' markers.

    '\\begin_body' plus a blank line are placed at the very top of the
    body; '\\end_body' goes immediately before '\\end_document'.
    """
    # Front splice matches the original pair of insert(0, ...) / insert(1, ...).
    file.body[0:0] = ['\\begin_body', '']
    # Locate '\end_document' only after the splice, as the original did.
    end = find_token(file.body, "\\end_document", 0)
    file.body.insert(end, '\\end_body')
# Remove the '\begin_body' / '\end_body' markers again.  NOTE(review): the
# del statements are elided in this listing.
1610 def remove_begin_body(file):
1611 i = find_token(file.body, "\\begin_body", 0)
# Presumably this checks for the blank line left next to the marker so it
# can be dropped too — confirm against the elided lines.
1614 if not file.body[i]:
1616 i = find_token(file.body, "\\end_body", 0)
# Lowercase the two special \papersize values: 'Default' -> 'default' and
# 'Custom' -> 'custom'.  NOTE(review): the missing-token guard and the early
# returns are elided in this listing.
1624 def normalize_papersize(file):
1625 i = find_token(file.header, '\\papersize', 0)
1629 tmp = split(file.header[i])
1630 if tmp[1] == "Default":
1631 file.header[i] = '\\papersize default'
1633 if tmp[1] == "Custom":
1634 file.header[i] = '\\papersize custom'
# Reverse of normalize_papersize: 'custom' -> 'Custom'.  NOTE(review): the
# guard lines and any 'default' branch are elided in this listing.
1637 def denormalize_papersize(file):
1638 i = find_token(file.header, '\\papersize', 0)
1642 tmp = split(file.header[i])
1643 if tmp[1] == "custom":
1644 file.header[i] = '\\papersize Custom'
# Strip spaces at end of command line

def strip_end_space(file):
    """Strip surrounding whitespace from every command line in the body.

    A command line is one that starts with a backslash; all other body
    lines are left untouched.
    """
    body = file.body
    for idx, line in enumerate(body):
        # line[:1] == '\\' in the original — same test as startswith.
        if line.startswith('\\'):
            body[idx] = line.strip()
1657 # Use boolean values for \use_geometry, \use_bibtopic and \tracking_changes
# Rewrite the 0/1 value of each of the three flags as false/true.
# NOTE(review): the missing-token guard (lines 1663-1664) is elided in this
# listing — presumably it skips flags that are absent.
1659 def use_x_boolean(file):
1660 bin2bool = {'0': 'false', '1': 'true'}
1661 for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
1662 i = find_token(file.header, use, 0)
1665 decompose = split(file.header[i])
1666 file.header[i] = decompose[0] + ' ' + bin2bool[decompose[1]]
# Inverse of use_x_boolean: rewrite false/true back to 0/1 for the same
# three header flags.  NOTE(review): the missing-token guard lines are
# elided in this listing.
1669 def use_x_binary(file):
1670 bool2bin = {'false': '0', 'true': '1'}
1671 for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
1672 i = find_token(file.header, use, 0)
1675 decompose = split(file.header[i])
1676 file.header[i] = decompose[0] + ' ' + bool2bin[decompose[1]]
1679 # Place all the paragraph parameters in their own line
# After each \begin_layout, split lines that carry more than one paragraph
# parameter so each parameter sits on its own line.  NOTE(review): most of
# the loop scaffolding is elided in this listing; 'body' is presumably a
# local alias for file.body — confirm against the elided lines.
1681 def normalize_paragraph_params(file):
1683 allowed_parameters = '\\paragraph_spacing', '\\noindent', '\\align', '\\labelwidthstring', "\\start_of_appendix"
1687 i = find_token(file.body, '\\begin_layout', i)
# Stop once a non-empty line that is not a parameter is reached.
1693 if strip(body[i]) and split(body[i])[0] not in allowed_parameters:
# A second backslash on the line marks the next parameter; split there.
1696 j = find(body[i],'\\', 1)
1699 body[i:i+1] = [strip(body[i][:j]), body[i][j:]]
1705 # Add/remove output_changes parameter
# Insert an '\output_changes true' header line right after
# '\tracking_changes'.  NOTE(review): the 'if i == -1' guard and its return
# are elided in this listing.
1707 def convert_output_changes (file):
1708 i = find_token(file.header, '\\tracking_changes', 0)
1710 file.warning("Malformed lyx file: Missing '\\tracking_changes'.")
1712 file.header.insert(i+1, '\\output_changes true')
# Remove the '\output_changes' header line again; the deletion itself sits
# on lines elided in this listing.
1715 def revert_output_changes (file):
1716 i = find_token(file.header, '\\output_changes', 0)
1723 # Convert paragraph breaks and sanitize paragraphs
# Sanitize the contents of every ERT inset: force Standard layouts, strip
# paragraph/font settings, separate paragraphs with an empty Standard
# paragraph, and turn \newline into a real paragraph break.
# NOTE(review): loop counters and several statements are elided in this
# listing; the in-place splices depend on that elided bookkeeping.
1725 def convert_ert_paragraphs(file):
1726 forbidden_settings = [
1727 # paragraph parameters
1728 '\\paragraph_spacing', '\\labelwidthstring',
1729 '\\start_of_appendix', '\\noindent',
1730 '\\leftindent', '\\align',
# font settings
1732 '\\family', '\\series', '\\shape', '\\size',
1733 '\\emph', '\\numeric', '\\bar', '\\noun',
1734 '\\color', '\\lang']
1737 i = find_token(file.body, '\\begin_inset ERT', i)
1740 j = find_end_of_inset(file.body, i)
1742 file.warning("Malformed lyx file: Missing '\\end_inset'.")
1746 # convert non-standard paragraphs to standard
1749 k = find_token(file.body, "\\begin_layout", k, j)
1752 file.body[k] = "\\begin_layout Standard"
1755 # remove all paragraph parameters and font settings
1758 if (strip(file.body[k]) and
1759 split(file.body[k])[0] in forbidden_settings):
1765 # insert an empty paragraph before each paragraph but the first
1769 k = find_token(file.body, "\\begin_layout Standard", k, j)
1776 file.body[k:k] = ["\\begin_layout Standard", "",
1781 # convert \\newline to new paragraph
1784 k = find_token(file.body, "\\newline", k, j)
1787 file.body[k:k+1] = ["\\end_layout", "", "\\begin_layout Standard"]
1794 # Remove double paragraph breaks
# Reverse of convert_ert_paragraphs: inside each ERT inset, collapse
# paragraph breaks back into \newline, then fold double \newline runs into
# a single paragraph break.  NOTE(review): loop counters and several lines
# are elided in this listing.
1796 def revert_ert_paragraphs(file):
1799 i = find_token(file.body, '\\begin_inset ERT', i)
1802 j = find_end_of_inset(file.body, i)
1804 file.warning("Malformed lyx file: Missing '\\end_inset'.")
1808 # replace paragraph breaks with \newline
1811 k = find_token(file.body, "\\end_layout", k, j)
1812 l = find_token(file.body, "\\begin_layout", k, j)
1813 if k == -1 or l == -1:
# Splice the whole end/begin pair (and anything between) into one \newline.
1815 file.body[k:l+1] = ["\\newline"]
1819 # replace double \newlines with paragraph breaks
1822 k = find_token(file.body, "\\newline", k, j)
# Skip blank lines between the two \newline candidates.
1826 while file.body[l] == "":
1828 if strip(file.body[l]) and split(file.body[l])[0] == "\\newline":
1829 file.body[k:l+1] = ["\\end_layout", "",
1830 "\\begin_layout Standard"]
# Conversion chain: each entry is [target format number, [functions applied
# to reach that format]].  NOTE(review): some entries/lines of both tables
# are elided in this listing (e.g. 1867-1868, 1871, 1878), so neither list
# literal is complete as shown.
1842 convert = [[223, [insert_tracking_changes, add_end_header, remove_color_default,
1843 convert_spaces, convert_bibtex, remove_insetparent]],
1844 [224, [convert_external, convert_comment]],
1845 [225, [add_end_layout, layout2begin_layout, convert_end_document,
1846 convert_table_valignment_middle, convert_breaks]],
1847 [226, [convert_note]],
1848 [227, [convert_box]],
1849 [228, [convert_collapsable, convert_ert]],
1850 [229, [convert_minipage]],
1851 [230, [convert_jurabib]],
1852 [231, [convert_float]],
1853 [232, [convert_bibtopic]],
1854 [233, [convert_graphics, convert_names]],
1855 [234, [convert_cite_engine]],
1856 [235, [convert_paperpackage]],
1857 [236, [convert_bullets, add_begin_header, add_begin_body,
1858 normalize_papersize, strip_end_space]],
1859 [237, [use_x_boolean]],
1860 [238, [update_latexaccents]],
1861 [239, [normalize_paragraph_params]],
1862 [240, [convert_output_changes]],
1863 [241, [convert_ert_paragraphs]]]
# Reversion chain: mirror of 'convert', walked from the newest format down.
1865 revert = [[240, [revert_ert_paragraphs]],
1866 [239, [revert_output_changes]],
1869 [236, [use_x_binary]],
1870 [235, [denormalize_papersize, remove_begin_body,remove_begin_header,
1872 [234, [revert_paperpackage]],
1873 [233, [revert_cite_engine]],
1874 [232, [revert_names]],
1875 [231, [revert_bibtopic]],
1876 [230, [revert_float]],
1877 [229, [revert_jurabib]],
1879 [227, [revert_collapsable, revert_ert]],
1880 [226, [revert_box, revert_external_2]],
1881 [225, [revert_note]],
1882 [224, [rm_end_layout, begin_layout2layout, revert_end_document,
1883 revert_valignment_middle, convert_vspace, convert_frameless_box]],
1884 [223, [revert_external_2, revert_comment, revert_eqref]],
1885 [221, [rm_end_header, revert_spaces, revert_bibtex,
1886 rm_tracking_changes, rm_body_changes]]]
1889 if __name__ == "__main__":