# This file is part of lyx2lyx
-# -*- coding: iso-8859-1 -*-
+# -*- coding: utf-8 -*-
# Copyright (C) 2002 Dekel Tsur <dekel@lyx.org>
-# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
+# Copyright (C) 2002-2004 José Matos <jamatos@lyx.org>
# Copyright (C) 2004-2005 Georg Baum <Georg.Baum@post.rwth-aachen.de>
#
# This program is free software; you can redistribute it and/or
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+""" Convert files to the file format generated by lyx 1.4"""
import re
from os import access, F_OK
import os.path
-from parser_tools import find_token, find_end_of_inset, get_next_paragraph, \
- get_paragraph, get_value, del_token, is_nonempty_line,\
- find_tokens, find_end_of, find_token2, find_re
+from parser_tools import check_token, find_token, \
+ get_value, is_nonempty_line, \
+ find_tokens, find_end_of, find_beginning_of, find_token_exact, find_tokens_exact, \
+ find_re, find_tokens_backwards
from sys import stdin
-from string import replace, split, find, strip, join
from lyx_0_12 import update_latexaccents
-##
-# Remove \color default
-#
-def remove_color_default(file):
+####################################################################
+# Private helper functions
+
def get_layout(line, default_layout):
    " Extract the layout name from a \\layout line; fall back to the default."
    parts = line.split()
    return parts[1] if len(parts) > 1 else default_layout
+
+
def get_paragraph(lines, i, format):
    " Return the index of the layout line of the paragraph containing line i, or -1."

    # Format 225 renamed \layout to \begin_layout.
    marker = "\\layout" if format < 225 else "\\begin_layout"
    pos = i
    while pos != -1:
        pos = find_tokens_backwards(lines, ["\\end_inset", marker], pos)
        if pos == -1:
            return -1
        if check_token(lines[pos], marker):
            return pos
        # We hit the end of a nested inset: jump to its start and keep searching.
        pos = find_beginning_of_inset(lines, pos)
    return -1
+
+
def find_beginning_of_inset(lines, i):
    " Return the index of the \\begin_inset opening the inset that contains line i."
    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
+
+
def get_next_paragraph(lines, i, format):
    """Find the start of the paragraph after the one containing line i.

    Returns -1 if there is no following paragraph.  Nested insets are
    skipped as a whole, since paragraphs inside them belong to the inset.
    """

    # The paragraph/document delimiters changed with formats 225 and 236.
    if format < 225:
        tokens = ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"]
    elif format < 236:
        tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_document"]
    else:
        tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_body", "\\end_document"]
    while i != -1:
        i = find_tokens(lines, tokens, i)
        if i == -1:
            # Bug fix: previously check_token(lines[-1], ...) was evaluated
            # here, so a truncated document whose last line started with
            # "\begin_inset" was routed into find_end_of_inset(lines, -1).
            return -1
        if not check_token(lines[i], "\\begin_inset"):
            return i
        i = find_end_of_inset(lines, i)
    return -1
+
+
def find_end_of_inset(lines, i):
    " Return the index of the \\end_inset matching the \\begin_inset at line i."
    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
+
def del_token(lines, token, start, end):
    """Delete the first line in lines[start:end] that begins with token.

    Returns the new end index: end - 1 if a line was removed, end otherwise.
    """

    pos = find_token_exact(lines, token, start, end)
    if pos == -1:
        return end
    del lines[pos]
    return end - 1
+
+# End of helper functions
+####################################################################
+
def remove_color_default(document):
    " Replace every \\color default with \\color inherit."
    pos = 0
    while True:
        pos = find_token(document.body, "\\color default", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace(
            "\\color default", "\\color inherit")
-##
-# Add \end_header
-#
-def add_end_header(file):
- file.header.append("\\end_header");
def add_end_header(document):
    " Append the \\end_header marker to the header."
    document.header.append("\\end_header")
-def rm_end_header(file):
- i = find_token(file.header, "\\end_header", 0)
def rm_end_header(document):
    " Remove the \\end_header marker from the header, if present."
    pos = find_token(document.header, "\\end_header", 0)
    if pos != -1:
        del document.header[pos]
-##
-# \SpecialChar ~ -> \InsetSpace ~
-#
-def convert_spaces(file):
- for i in range(len(file.body)):
- file.body[i] = replace(file.body[i],"\\SpecialChar ~","\\InsetSpace ~")
def convert_amsmath(document):
    " Convert \\use_amsmath: old 0/1 (off/on) becomes new 0/1/2 (off/auto/on)."
    pos = find_token(document.header, "\\use_amsmath", 0)
    if pos == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    fields = document.header[pos].split()
    if len(fields) != 2:
        document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[pos])
        value = '0'
    else:
        value = fields[1]
    # Old 'off' really meant automatic detection, so 0 -> auto (1), 1 -> on (2).
    if value == '0':
        document.header[pos] = "\\use_amsmath 1"
    else:
        document.header[pos] = "\\use_amsmath 2"
-def revert_spaces(file):
- for i in range(len(file.body)):
- file.body[i] = replace(file.body[i],"\\InsetSpace ~", "\\SpecialChar ~")
def revert_amsmath(document):
    " Revert \\use_amsmath: new 0/1/2 (off/auto/on) becomes old 0/1 (off/on)."
    pos = find_token(document.header, "\\use_amsmath", 0)
    if pos == -1:
        document.warning("Malformed LyX document: Missing '\\use_amsmath'.")
        return
    fields = document.header[pos].split()
    if len(fields) != 2:
        document.warning("Malformed LyX document: Could not parse line '%s'." % document.header[pos])
        value = '0'
    else:
        value = fields[1]
    # Map on (2) -> 1; auto and off -> 0 (old 'off' meant auto in reality).
    if value == '2':
        document.header[pos] = "\\use_amsmath 1"
    else:
        document.header[pos] = "\\use_amsmath 0"
+
+
def convert_spaces(document):
    r" \SpecialChar ~ -> \InsetSpace ~"
    for idx, line in enumerate(document.body):
        document.body[idx] = line.replace("\\SpecialChar ~", "\\InsetSpace ~")
+
+
def revert_spaces(document):
    r" \InsetSpace ~ -> \SpecialChar ~; any other space inset becomes ERT."
    pattern = re.compile(r'(.*)(\\InsetSpace\s+)(\S+)')
    pos = 0
    while True:
        pos = find_re(document.body, pattern, pos)
        if pos == -1:
            break
        match = pattern.match(document.body[pos])
        space = match.group(3)
        prefix = match.group(1)
        if space == '~':
            document.body[pos] = pattern.sub(prefix + '\\SpecialChar ~',
                                             document.body[pos])
            pos = pos + 1
        else:
            # Strip the inset and re-emit the command as ERT.
            document.body[pos] = pattern.sub(prefix, document.body[pos])
            document.body[pos+1:pos+1] = ''
            if space == "\\space":
                space = "\\ "
            pos = insert_ert(document.body, pos+1, 'Collapsed', space,
                             document.format - 1, document.default_layout)
+
+
def rename_spaces(document):
    r""" \InsetSpace \, -> \InsetSpace \thinspace{}
    \InsetSpace \space -> \InsetSpace \space{}"""
    for idx, line in enumerate(document.body):
        line = line.replace("\\InsetSpace \\space",
                            "\\InsetSpace \\space{}")
        # Note: the thinspace pattern was written "\," — an invalid escape
        # sequence that only worked by accident and warns on modern Python.
        document.body[idx] = line.replace("\\InsetSpace \\,",
                                          "\\InsetSpace \\thinspace{}")
+
+
def revert_space_names(document):
    r""" \InsetSpace \thinspace{} -> \InsetSpace \,
    \InsetSpace \space{} -> \InsetSpace \space"""
    for idx, line in enumerate(document.body):
        line = line.replace("\\InsetSpace \\space{}",
                            "\\InsetSpace \\space")
        document.body[idx] = line.replace("\\InsetSpace \\thinspace{}",
                                          "\\InsetSpace \\,")
-##
-# equivalent to lyx::support::escape()
-#
def lyx_support_escape(lab):
+ " Equivalent to pre-unicode lyx::support::escape()"
hexdigit = ['0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
enc = ""
return enc;
-##
-# \begin_inset LatexCommand \eqref -> ERT
-#
-def revert_eqref(file):
def revert_eqref(document):
    r" \begin_inset LatexCommand \eqref -> ERT"
    pattern = re.compile(r'^\\begin_inset\s+LatexCommand\s+\\eqref')
    pos = 0
    while True:
        pos = find_re(document.body, pattern, pos)
        if pos == -1:
            break
        # Everything after the command is the (escaped) label argument.
        label = lyx_support_escape(pattern.sub("", document.body[pos]))
        document.body[pos:pos+1] = ["\\begin_inset ERT", "status Collapsed", "",
                                    '\\layout %s' % document.default_layout, "",
                                    "\\backslash ", "eqref" + label]
        pos = pos + 7
-##
-# BibTeX changes
-#
-def convert_bibtex(file):
- for i in range(len(file.body)):
- file.body[i] = replace(file.body[i],"\\begin_inset LatexCommand \\BibTeX",
- "\\begin_inset LatexCommand \\bibtex")
def convert_bibtex(document):
    " Rename the BibTeX inset command to its lowercase form."
    for idx, line in enumerate(document.body):
        document.body[idx] = line.replace("\\begin_inset LatexCommand \\BibTeX",
                                          "\\begin_inset LatexCommand \\bibtex")
-def revert_bibtex(file):
- for i in range(len(file.body)):
- file.body[i] = replace(file.body[i], "\\begin_inset LatexCommand \\bibtex",
- "\\begin_inset LatexCommand \\BibTeX")
def revert_bibtex(document):
    " Rename the BibTeX inset command back to its CamelCase form."
    for idx, line in enumerate(document.body):
        document.body[idx] = line.replace("\\begin_inset LatexCommand \\bibtex",
                                          "\\begin_inset LatexCommand \\BibTeX")
-##
-# Remove \lyxparent
-#
-def remove_insetparent(file):
def remove_insetparent(document):
    r" Remove \lyxparent insets."
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset LatexCommand \\lyxparent", pos)
        if pos == -1:
            break
        # Drop the inset line together with the two lines that follow it.
        del document.body[pos:pos+3]
-##
-# Inset External
-#
-def convert_external(file):
+def convert_external(document):
+ " Convert inset External."
external_rexp = re.compile(r'\\begin_inset External ([^,]*),"([^"]*)",')
external_header = "\\begin_inset External"
i = 0
- while 1:
- i = find_token(file.body, external_header, i)
+ while True:
+ i = find_token(document.body, external_header, i)
if i == -1:
break
- look = external_rexp.search(file.body[i])
+ look = external_rexp.search(document.body[i])
args = ['','']
if look:
args[0] = look.group(1)
top = "\\begin_inset Graphics"
if args[1]:
filename = "\tfilename " + args[1]
- file.body[i:i+1] = [top, filename]
+ document.body[i:i+1] = [top, filename]
i = i + 1
else:
# Convert the old External Inset format to the new.
template = "\ttemplate " + args[0]
if args[1]:
filename = "\tfilename " + args[1]
- file.body[i:i+1] = [top, template, filename]
+ document.body[i:i+1] = [top, template, filename]
i = i + 2
else:
- file.body[i:i+1] = [top, template]
+ document.body[i:i+1] = [top, template]
i = i + 1
-def revert_external_1(file):
def revert_external_1(document):
    " Revert inset External to the old single-line format."
    external_header = "\\begin_inset External"
    pos = 0
    while True:
        pos = find_token(document.body, external_header, pos)
        if pos == -1:
            break
        # Collect template, filename and parameters from the following
        # lines, removing each line as it is consumed.
        template = document.body[pos+1].split()
        template.reverse()
        del document.body[pos+1]

        filename = document.body[pos+1].split()
        filename.reverse()
        del document.body[pos+1]

        params = document.body[pos+1].split()
        params.reverse()
        if document.body[pos+1]:
            del document.body[pos+1]

        document.body[pos] = (document.body[pos] + " " + template[0]
                              + ', "' + filename[0] + '", " '
                              + " ".join(params[1:]) + '"')
        pos = pos + 1
-def revert_external_2(file):
def revert_external_2(document):
    " Revert inset External (part II): strip the optional draft flag."
    draft_token = '\tdraft'
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset External', pos)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            # Unterminated inset; this should not happen.
            break
        flag = find_token(document.body, draft_token, pos+1, end-1)
        # Only delete exact '\tdraft' lines, not lines merely starting with it.
        if flag != -1 and len(draft_token) == len(document.body[flag]):
            del document.body[flag]
        pos = end + 1
-##
-# Comment
-#
-def convert_comment(file):
def convert_comment(document):
    " Convert \\layout Comment paragraphs into Comment insets."
    i = 0
    comment = "\\layout Comment"
    while True:
        i = find_token(document.body, comment, i)
        if i == -1:
            return

        # Replace the Comment paragraph header by a default-layout paragraph
        # that opens a Comment inset.
        document.body[i:i+1] = ['\\layout %s' % document.default_layout, "", "",
                                "\\begin_inset Comment",
                                "collapsed true", "",
                                '\\layout %s' % document.default_layout]
        i = i + 7

        # Pull the following content into the inset until a non-Comment
        # layout is reached, unwrapping \begin_deeper blocks and skipping
        # nested insets on the way.
        while True:
            old_i = i
            i = find_token(document.body, "\\layout", i)
            if i == -1:
                # Document ended inside the comment: close the inset.
                i = len(document.body) - 1
                document.body[i:i] = ["\\end_inset", "", ""]
                return

            j = find_token(document.body, '\\begin_deeper', old_i, i)
            if j == -1: j = i + 1
            k = find_token(document.body, '\\begin_inset', old_i, i)
            if k == -1: k = i + 1

            if j < i and j < k:
                # A \begin_deeper comes first: remove the pair.
                i = j
                del document.body[i]
                i = find_end_of(document.body, i, "\\begin_deeper", "\\end_deeper")
                if i == -1:
                    # Missing \end_deeper: recover gracefully by adding it.
                    i = len(document.body) - 1
                    document.body[i:i] = ["\\end_deeper", ""]
                    return
                else:
                    del document.body[i]
                    continue

            if k < i:
                # A nested inset comes first: skip over it untouched.
                i = k
                i = find_end_of(document.body, i, "\\begin_inset", "\\end_inset")
                if i == -1:
                    # Missing \end_inset: recover gracefully by adding it.
                    i = len(document.body) - 1
                    document.body[i:i] = ["\\end_inset", "", "",
                                          "\\end_inset", "", ""]
                    return
                else:
                    i = i + 1
                    continue

            if document.body[i].find(comment) == -1:
                # A different layout ends the comment: close the inset here.
                document.body[i:i] = ["\\end_inset"]
                i = i + 1
                break
            # Another Comment paragraph: fold it into the same inset.
            document.body[i:i+1] = ['\\layout %s' % document.default_layout]
            i = i + 1
-def revert_comment(file):
def revert_comment(document):
    " Revert Comment and Greyedout insets to plain Note insets."
    pos = 0
    while True:
        pos = find_tokens(document.body, ["\\begin_inset Comment",
                                          "\\begin_inset Greyedout"], pos)
        if pos == -1:
            return
        document.body[pos] = "\\begin_inset Note"
        pos = pos + 1
-##
-# Add \end_layout
-#
-def add_end_layout(file):
- i = find_token(file.body, '\\layout', 0)
+def add_end_layout(document):
+ " Add \end_layout"
+ i = find_token(document.body, '\\layout', 0)
if i == -1:
return
i = i + 1
struct_stack = ["\\layout"]
- while 1:
- i = find_tokens(file.body, ["\\begin_inset", "\\end_inset", "\\layout",
+ while True:
+ i = find_tokens(document.body, ["\\begin_inset", "\\end_inset", "\\layout",
"\\begin_deeper", "\\end_deeper", "\\the_end"], i)
if i != -1:
- token = split(file.body[i])[0]
+ token = document.body[i].split()[0]
else:
- file.warning("Truncated file.")
- i = len(file.body)
- file.body.insert(i, '\\the_end')
+ document.warning("Truncated document.")
+ i = len(document.body)
+ document.body.insert(i, '\\the_end')
token = ""
if token == "\\begin_inset":
if token == "\\end_inset":
tail = struct_stack.pop()
if tail == "\\layout":
- file.body.insert(i,"")
- file.body.insert(i,"\\end_layout")
+ document.body.insert(i,"")
+ document.body.insert(i,"\\end_layout")
i = i + 2
#Check if it is the correct tag
struct_stack.pop()
if token == "\\layout":
tail = struct_stack.pop()
if tail == token:
- file.body.insert(i,"")
- file.body.insert(i,"\\end_layout")
+ document.body.insert(i,"")
+ document.body.insert(i,"\\end_layout")
i = i + 3
else:
struct_stack.append(tail)
continue
if token == "\\begin_deeper":
- file.body.insert(i,"")
- file.body.insert(i,"\\end_layout")
+ document.body.insert(i,"")
+ document.body.insert(i,"\\end_layout")
i = i + 3
+ # consecutive begin_deeper only insert one end_layout
+ while document.body[i].startswith('\\begin_deeper'):
+ i += 1
struct_stack.append(token)
continue
if token == "\\end_deeper":
if struct_stack[-1] == '\\layout':
- file.body.insert(i, '\\end_layout')
+ document.body.insert(i, '\\end_layout')
i = i + 1
struct_stack.pop()
i = i + 1
continue
#case \end_document
- file.body.insert(i, "")
- file.body.insert(i, "\\end_layout")
+ document.body.insert(i, "")
+ document.body.insert(i, "\\end_layout")
return
-def rm_end_layout(file):
def rm_end_layout(document):
    r" Remove every \end_layout line from the body."
    pos = 0
    while True:
        pos = find_token(document.body, '\\end_layout', pos)
        if pos == -1:
            return
        del document.body[pos]
-##
-# Handle change tracking keywords
-#
-def insert_tracking_changes(file):
- i = find_token(file.header, "\\tracking_changes", 0)
def insert_tracking_changes(document):
    " Ensure the header carries a \\tracking_changes entry."
    if find_token(document.header, "\\tracking_changes", 0) == -1:
        document.header.append("\\tracking_changes 0")
-def rm_tracking_changes(file):
- i = find_token(file.header, "\\author", 0)
def rm_tracking_changes(document):
    " Remove change tracking keywords from the header."
    pos = find_token(document.header, "\\author", 0)
    if pos != -1:
        del document.header[pos]
    pos = find_token(document.header, "\\tracking_changes", 0)
    if pos != -1:
        del document.header[pos]
-def rm_body_changes(file):
def rm_body_changes(document):
    " Remove all \\change_ markers from the body."
    pos = 0
    while True:
        pos = find_token(document.body, "\\change_", pos)
        if pos == -1:
            return
        del document.body[pos]
-##
-# \layout -> \begin_layout
-#
-def layout2begin_layout(file):
def layout2begin_layout(document):
    r" \layout -> \begin_layout"
    pos = 0
    while True:
        pos = find_token(document.body, '\\layout', pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace('\\layout',
                                                        '\\begin_layout')
        pos = pos + 1
-def begin_layout2layout(file):
def begin_layout2layout(document):
    r" \begin_layout -> \layout"
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_layout', pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace('\\begin_layout',
                                                        '\\layout')
        pos = pos + 1
-##
-# valignment="center" -> valignment="middle"
-#
def convert_valignment_middle(body, start, end):
    'valignment="center" -> valignment="middle" in column/cell tags of body[start:end].'
    # Compile once instead of re-compiling the literal pattern on every line.
    pattern = re.compile('^<(column|cell) .*valignment="center".*>$')
    for i in range(start, end):
        if pattern.search(body[i]):
            body[i] = body[i].replace('valignment="center"', 'valignment="middle"')
-def convert_table_valignment_middle(file):
def convert_table_valignment_middle(document):
    " Convert table valignment, center -> middle."
    tabular = re.compile(r'^\\begin_inset\s+Tabular')
    pos = 0
    while True:
        pos = find_re(document.body, tabular, pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            # Unterminated tabular inset (should not happen):
            # convert up to the end of the body and stop.
            convert_valignment_middle(document.body, pos + 1, len(document.body))
            return
        convert_valignment_middle(document.body, pos + 1, end)
        pos = end + 1
def revert_table_valignment_middle(body, start, end):
    'valignment="middle" -> valignment="center" in column/cell tags of body[start:end].'
    # Compile once instead of re-compiling the literal pattern on every line.
    pattern = re.compile('^<(column|cell) .*valignment="middle".*>$')
    for i in range(start, end):
        if pattern.search(body[i]):
            body[i] = body[i].replace('valignment="middle"', 'valignment="center"')
-def revert_valignment_middle(file):
def revert_valignment_middle(document):
    " Revert table valignment, middle -> center."
    tabular = re.compile(r'^\\begin_inset\s+Tabular')
    pos = 0
    while True:
        pos = find_re(document.body, tabular, pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            # Unterminated tabular inset (should not happen):
            # revert up to the end of the body and stop.
            revert_table_valignment_middle(document.body, pos + 1, len(document.body))
            return
        revert_table_valignment_middle(document.body, pos + 1, end)
        pos = end + 1
-##
-# \the_end -> \end_document
-#
-def convert_end_document(file):
- i = find_token(file.body, "\\the_end", 0)
def convert_end_document(document):
    r" \the_end -> \end_document"
    pos = find_token(document.body, "\\the_end", 0)
    if pos == -1:
        # No terminator found: append one.
        document.body.append("\\end_document")
    else:
        document.body[pos] = "\\end_document"
-def revert_end_document(file):
- i = find_token(file.body, "\\end_document", 0)
def revert_end_document(document):
    r" \end_document -> \the_end"
    pos = find_token(document.body, "\\end_document", 0)
    if pos == -1:
        # No terminator found: append one.
        document.body.append("\\the_end")
    else:
        document.body[pos] = "\\the_end"
-##
-# Convert line and page breaks
-# Old:
-#\layout Standard
-#\line_top \line_bottom \pagebreak_top \pagebreak_bottom \added_space_top xxx \added_space_bottom yyy
-#0
-#
-# New:
-#\begin layout Standard
-#
-#\newpage
-#
-#\lyxline
-#\begin_inset VSpace xxx
-#\end_inset
-#
-#\end_layout
-#\begin_layout Standard
-#
-#0
-#\end_layout
-#\begin_layout Standard
-#
-#\begin_inset VSpace xxx
-#\end_inset
-#\lyxline
-#
-#\newpage
-#
-#\end_layout
-def convert_breaks(file):
+def convert_breaks(document):
+ r"""
+Convert line and page breaks
+ Old:
+\layout Standard
+\line_top \line_bottom \pagebreak_top \pagebreak_bottom \added_space_top xxx \added_space_bottom yyy
+0
+
+ New:
+\begin layout Standard
+
+\newpage
+
+\lyxline
+\begin_inset ERT
+\begin layout Standard
+\backslash
+vspace{-1\backslash
+parskip}
+\end_layout
+\end_inset
+
+\begin_inset VSpace xxx
+\end_inset
+
+0
+
+\begin_inset VSpace xxx
+\end_inset
+\lyxline
+
+\newpage
+
+\end_layout
+ """
par_params = ('added_space_bottom', 'added_space_top', 'align',
'labelwidthstring', 'line_bottom', 'line_top', 'noindent',
'pagebreak_bottom', 'pagebreak_top', 'paragraph_spacing',
'start_of_appendix')
+ font_attributes = ['\\family', '\\series', '\\shape', '\\emph',
+ '\\numeric', '\\bar', '\\noun', '\\color', '\\lang']
+ attribute_values = ['default', 'default', 'default', 'default',
+ 'default', 'default', 'default', 'none', document.language]
i = 0
- while 1:
- i = find_token(file.body, "\\begin_layout", i)
+ while True:
+ i = find_token(document.body, "\\begin_layout", i)
if i == -1:
return
+ layout = get_layout(document.body[i], document.default_layout)
i = i + 1
# Merge all paragraph parameters into a single line
# We cannot check for '\\' only because paragraphs may start e.g.
# with '\\backslash'
- while file.body[i + 1][:1] == '\\' and split(file.body[i + 1][1:])[0] in par_params:
- file.body[i] = file.body[i + 1] + ' ' + file.body[i]
- del file.body[i+1]
+ while document.body[i + 1][:1] == '\\' and document.body[i + 1][1:].split()[0] in par_params:
+ document.body[i] = document.body[i + 1] + ' ' + document.body[i]
+ del document.body[i+1]
- line_top = find(file.body[i],"\\line_top")
- line_bot = find(file.body[i],"\\line_bottom")
- pb_top = find(file.body[i],"\\pagebreak_top")
- pb_bot = find(file.body[i],"\\pagebreak_bottom")
- vspace_top = find(file.body[i],"\\added_space_top")
- vspace_bot = find(file.body[i],"\\added_space_bottom")
+ line_top = document.body[i].find("\\line_top")
+ line_bot = document.body[i].find("\\line_bottom")
+ pb_top = document.body[i].find("\\pagebreak_top")
+ pb_bot = document.body[i].find("\\pagebreak_bottom")
+ vspace_top = document.body[i].find("\\added_space_top")
+ vspace_bot = document.body[i].find("\\added_space_bottom")
if line_top == -1 and line_bot == -1 and pb_bot == -1 and pb_top == -1 and vspace_top == -1 and vspace_bot == -1:
continue
+ # Do we have a nonstandard paragraph? We need to create new paragraphs
+ # if yes to avoid putting lyxline etc. inside of special environments.
+ # This is wrong for itemize and enumerate environments, but it is
+ # impossible to convert these correctly.
+ # We want to avoid new paragraphs if possible becauase we want to
+ # inherit font sizes.
+ nonstandard = 0
+ if (not document.is_default_layout(layout) or
+ document.body[i].find("\\align") != -1 or
+ document.body[i].find("\\labelwidthstring") != -1 or
+ document.body[i].find("\\noindent") != -1):
+ nonstandard = 1
+
+ # get the font size of the beginning of this paragraph, since we need
+ # it for the lyxline inset
+ j = i + 1
+ while not is_nonempty_line(document.body[j]):
+ j = j + 1
+ size_top = ""
+ if document.body[j].find("\\size") != -1:
+ size_top = document.body[j].split()[1]
+
for tag in "\\line_top", "\\line_bottom", "\\pagebreak_top", "\\pagebreak_bottom":
- file.body[i] = replace(file.body[i], tag, "")
+ document.body[i] = document.body[i].replace(tag, "")
if vspace_top != -1:
# the position could be change because of the removal of other
# paragraph properties above
- vspace_top = find(file.body[i],"\\added_space_top")
- tmp_list = split(file.body[i][vspace_top:])
+ vspace_top = document.body[i].find("\\added_space_top")
+ tmp_list = document.body[i][vspace_top:].split()
vspace_top_value = tmp_list[1]
- file.body[i] = file.body[i][:vspace_top] + join(tmp_list[2:])
+ document.body[i] = document.body[i][:vspace_top] + " ".join(tmp_list[2:])
if vspace_bot != -1:
# the position could be change because of the removal of other
# paragraph properties above
- vspace_bot = find(file.body[i],"\\added_space_bottom")
- tmp_list = split(file.body[i][vspace_bot:])
+ vspace_bot = document.body[i].find("\\added_space_bottom")
+ tmp_list = document.body[i][vspace_bot:].split()
vspace_bot_value = tmp_list[1]
- file.body[i] = file.body[i][:vspace_bot] + join(tmp_list[2:])
+ document.body[i] = document.body[i][:vspace_bot] + " ".join(tmp_list[2:])
- file.body[i] = strip(file.body[i])
+ document.body[i] = document.body[i].strip()
i = i + 1
- # Create an empty paragraph for line and page break that belong
- # above the paragraph
+ # Create an empty paragraph or paragraph fragment for line and
+ # page break that belong above the paragraph
if pb_top !=-1 or line_top != -1 or vspace_top != -1:
- paragraph_above = ['','\\begin_layout Standard','','']
+ paragraph_above = list()
+ if nonstandard:
+ # We need to create an extra paragraph for nonstandard environments
+ paragraph_above = ['\\begin_layout %s' % document.default_layout, '']
if pb_top != -1:
paragraph_above.extend(['\\newpage ',''])
paragraph_above.extend(['\\begin_inset VSpace ' + vspace_top_value,'\\end_inset','',''])
if line_top != -1:
- paragraph_above.extend(['\\lyxline ',''])
-
- paragraph_above.extend(['\\end_layout',''])
+ if size_top != '':
+ paragraph_above.extend(['\\size ' + size_top + ' '])
+ # We need an additional vertical space of -\parskip.
+ # We can't use the vspace inset because it does not know \parskip.
+ paragraph_above.extend(['\\lyxline ', '', ''])
+ insert_ert(paragraph_above, len(paragraph_above) - 1, 'Collapsed',
+ '\\vspace{-1\\parskip}\n', document.format + 1, document.default_layout)
+ paragraph_above.extend([''])
+
+ if nonstandard:
+ paragraph_above.extend(['\\end_layout ',''])
+ # insert new paragraph above the current paragraph
+ document.body[i-2:i-2] = paragraph_above
+ else:
+ # insert new lines at the beginning of the current paragraph
+ document.body[i:i] = paragraph_above
- #inset new paragraph above the current paragraph
- file.body[i-2:i-2] = paragraph_above
i = i + len(paragraph_above)
# Ensure that nested style are converted later.
- k = find_end_of(file.body, i, "\\begin_layout", "\\end_layout")
+ k = find_end_of(document.body, i, "\\begin_layout", "\\end_layout")
if k == -1:
return
if pb_bot !=-1 or line_bot != -1 or vspace_bot != -1:
- paragraph_below = ['','\\begin_layout Standard','','']
+ # get the font size of the end of this paragraph
+ size_bot = size_top
+ j = i + 1
+ while j < k:
+ if document.body[j].find("\\size") != -1:
+ size_bot = document.body[j].split()[1]
+ j = j + 1
+ elif document.body[j].find("\\begin_inset") != -1:
+ # skip insets
+ j = find_end_of_inset(document.body, j)
+ else:
+ j = j + 1
+
+ paragraph_below = list()
+ if nonstandard:
+ # We need to create an extra paragraph for nonstandard environments
+ paragraph_below = ['', '\\begin_layout %s' % document.default_layout, '']
+ else:
+ for a in range(len(font_attributes)):
+ if find_token(document.body, font_attributes[a], i, k) != -1:
+ paragraph_below.extend([font_attributes[a] + ' ' + attribute_values[a]])
if line_bot != -1:
+ if nonstandard and size_bot != '':
+ paragraph_below.extend(['\\size ' + size_bot + ' '])
paragraph_below.extend(['\\lyxline ',''])
+ if size_bot != '':
+ paragraph_below.extend(['\\size default '])
if vspace_bot != -1:
paragraph_below.extend(['\\begin_inset VSpace ' + vspace_bot_value,'\\end_inset','',''])
if pb_bot != -1:
paragraph_below.extend(['\\newpage ',''])
- paragraph_below.extend(['\\end_layout',''])
-
- #inset new paragraph above the current paragraph
- file.body[k + 1: k + 1] = paragraph_below
+ if nonstandard:
+ paragraph_below.extend(['\\end_layout '])
+ # insert new paragraph below the current paragraph
+ document.body[k+1:k+1] = paragraph_below
+ else:
+ # insert new lines at the end of the current paragraph
+ document.body[k:k] = paragraph_below
-##
-# Notes
-#
-def convert_note(file):
def convert_note(document):
    " Convert Note/Comment/Greyedout insets to the 'Note <kind>' form."
    pos = 0
    while True:
        pos = find_tokens(document.body, ["\\begin_inset Note",
                                          "\\begin_inset Comment",
                                          "\\begin_inset Greyedout"], pos)
        if pos == -1:
            break
        # Insert 'Note ' right after the 13-character '\begin_inset ' prefix.
        line = document.body[pos]
        document.body[pos] = line[0:13] + 'Note ' + line[13:]
        pos = pos + 1
-def revert_note(file):
def revert_note(document):
    " Revert 'Note <kind>' insets to the plain '\\begin_inset <kind>' form."
    note_header = "\\begin_inset Note "
    pos = 0
    while True:
        pos = find_token(document.body, note_header, pos)
        if pos == -1:
            break
        document.body[pos] = "\\begin_inset " + document.body[pos][len(note_header):]
        pos = pos + 1
-##
-# Box
-#
-def convert_box(file):
+def convert_box(document):
+ " Convert Boxes. "
i = 0
- while 1:
- i = find_tokens(file.body, ["\\begin_inset Boxed",
+ while True:
+ i = find_tokens(document.body, ["\\begin_inset Boxed",
"\\begin_inset Doublebox",
"\\begin_inset Frameless",
"\\begin_inset ovalbox",
if i == -1:
break
- file.body[i] = file.body[i][0:13] + 'Box ' + file.body[i][13:]
+ document.body[i] = document.body[i][0:13] + 'Box ' + document.body[i][13:]
i = i + 1
-def revert_box(file):
def revert_box(document):
    " Revert 'Box <kind>' insets to the plain '\\begin_inset <kind>' form."
    box_header = "\\begin_inset Box "
    pos = 0
    while True:
        pos = find_token(document.body, box_header, pos)
        if pos == -1:
            break
        document.body[pos] = "\\begin_inset " + document.body[pos][len(box_header):]
        pos = pos + 1
-##
-# Collapse
-#
-def convert_collapsable(file):
+def convert_collapsible(document):
+ " Convert collapsed insets. "
i = 0
- while 1:
- i = find_tokens(file.body, ["\\begin_inset Box",
+ while True:
+ i = find_tokens_exact(document.body, ["\\begin_inset Box",
"\\begin_inset Branch",
"\\begin_inset CharStyle",
"\\begin_inset Float",
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
- if (file.body[i] == "collapsed false"):
- file.body[i] = "status open"
+ while True:
+ if (document.body[i] == "collapsed false"):
+ document.body[i] = "status open"
break
- elif (file.body[i] == "collapsed true"):
- file.body[i] = "status collapsed"
+ elif (document.body[i] == "collapsed true"):
+ document.body[i] = "status collapsed"
break
- elif (file.body[i][:13] == "\\begin_layout"):
- file.warning("Malformed LyX file: Missing 'collapsed'.")
+ elif (document.body[i][:13] == "\\begin_layout"):
+ document.warning("Malformed LyX document: Missing 'collapsed'.")
break
i = i + 1
i = i + 1
-def revert_collapsable(file):
+def revert_collapsible(document):
+ " Revert collapsed insets. "
i = 0
- while 1:
- i = find_tokens(file.body, ["\\begin_inset Box",
+ while True:
+ i = find_tokens_exact(document.body, ["\\begin_inset Box",
"\\begin_inset Branch",
"\\begin_inset CharStyle",
"\\begin_inset Float",
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
- if (file.body[i] == "status open"):
- file.body[i] = "collapsed false"
+ while True:
+ if (document.body[i] == "status open"):
+ document.body[i] = "collapsed false"
break
- elif (file.body[i] == "status collapsed" or
- file.body[i] == "status inlined"):
- file.body[i] = "collapsed true"
+ elif (document.body[i] == "status collapsed" or
+ document.body[i] == "status inlined"):
+ document.body[i] = "collapsed true"
break
- elif (file.body[i][:13] == "\\begin_layout"):
- file.warning("Malformed LyX file: Missing 'status'.")
+ elif (document.body[i][:13] == "\\begin_layout"):
+ document.warning("Malformed LyX document: Missing 'status'.")
break
i = i + 1
i = i + 1
-##
-# ERT
-#
-def convert_ert(file):
+def convert_ert(document):
+ " Convert ERT. "
i = 0
- while 1:
- i = find_token(file.body, "\\begin_inset ERT", i)
+ while True:
+ i = find_token(document.body, "\\begin_inset ERT", i)
if i == -1:
break
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
- if (file.body[i] == "status Open"):
- file.body[i] = "status open"
+ while True:
+ if (document.body[i] == "status Open"):
+ document.body[i] = "status open"
break
- elif (file.body[i] == "status Collapsed"):
- file.body[i] = "status collapsed"
+ elif (document.body[i] == "status Collapsed"):
+ document.body[i] = "status collapsed"
break
- elif (file.body[i] == "status Inlined"):
- file.body[i] = "status inlined"
+ elif (document.body[i] == "status Inlined"):
+ document.body[i] = "status inlined"
break
- elif (file.body[i][:13] == "\\begin_layout"):
- file.warning("Malformed LyX file: Missing 'status'.")
+ elif (document.body[i][:13] == "\\begin_layout"):
+ document.warning("Malformed LyX document: Missing 'status'.")
break
i = i + 1
i = i + 1
-def revert_ert(file):
+def revert_ert(document):
+ " Revert ERT. "
i = 0
- while 1:
- i = find_token(file.body, "\\begin_inset ERT", i)
+ while True:
+ i = find_token(document.body, "\\begin_inset ERT", i)
if i == -1:
break
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
- if (file.body[i] == "status open"):
- file.body[i] = "status Open"
+ while True:
+ if (document.body[i] == "status open"):
+ document.body[i] = "status Open"
break
- elif (file.body[i] == "status collapsed"):
- file.body[i] = "status Collapsed"
+ elif (document.body[i] == "status collapsed"):
+ document.body[i] = "status Collapsed"
break
- elif (file.body[i] == "status inlined"):
- file.body[i] = "status Inlined"
+ elif (document.body[i] == "status inlined"):
+ document.body[i] = "status Inlined"
break
- elif (file.body[i][:13] == "\\begin_layout"):
- file.warning("Malformed LyX file : Missing 'status'.")
+ elif (document.body[i][:13] == "\\begin_layout"):
+ document.warning("Malformed LyX document : Missing 'status'.")
break
i = i + 1
i = i + 1
-##
-# Minipages
-#
-def convert_minipage(file):
+def convert_minipage(document):
""" Convert minipages to the box inset.
We try to use the same order of arguments as lyx does.
"""
inner_pos = ["c","t","b","s"]
i = 0
- while 1:
- i = find_token(file.body, "\\begin_inset Minipage", i)
+ while True:
+ i = find_token(document.body, "\\begin_inset Minipage", i)
if i == -1:
return
- file.body[i] = "\\begin_inset Box Frameless"
+ document.body[i] = "\\begin_inset Box Frameless"
i = i + 1
# convert old to new position using the pos list
- if file.body[i][:8] == "position":
- file.body[i] = 'position "%s"' % pos[int(file.body[i][9])]
+ if document.body[i][:8] == "position":
+ document.body[i] = 'position "%s"' % pos[int(document.body[i][9])]
else:
- file.body.insert(i, 'position "%s"' % pos[0])
+ document.body.insert(i, 'position "%s"' % pos[0])
i = i + 1
- file.body.insert(i, 'hor_pos "c"')
+ document.body.insert(i, 'hor_pos "c"')
i = i + 1
- file.body.insert(i, 'has_inner_box 1')
+ document.body.insert(i, 'has_inner_box 1')
i = i + 1
# convert the inner_position
- if file.body[i][:14] == "inner_position":
- file.body[i] = 'inner_pos "%s"' % inner_pos[int(file.body[i][15])]
+ if document.body[i][:14] == "inner_position":
+ innerpos = inner_pos[int(document.body[i][15])]
+ del document.body[i]
else:
- file.body.insert('inner_pos "%s"' % inner_pos[0])
- i = i + 1
+ innerpos = inner_pos[0]
# We need this since the new file format has a height and width
# in a different order.
- if file.body[i][:6] == "height":
- height = file.body[i][6:]
+ if document.body[i][:6] == "height":
+ height = document.body[i][6:]
# test for default value of 221 and convert it accordingly
if height == ' "0pt"' or height == ' "0"':
height = ' "1pt"'
- del file.body[i]
+ del document.body[i]
else:
height = ' "1pt"'
- if file.body[i][:5] == "width":
- width = file.body[i][5:]
- del file.body[i]
+ if document.body[i][:5] == "width":
+ width = document.body[i][5:]
+ del document.body[i]
else:
width = ' "0"'
- if file.body[i][:9] == "collapsed":
- if file.body[i][9:] == "true":
- status = "collapsed"
+ if document.body[i][:9] == "collapsed":
+ if document.body[i][9:] == "true":
+ status = "collapsed"
else:
- status = "open"
- del file.body[i]
+ status = "open"
+ del document.body[i]
else:
- status = "collapsed"
+ status = "collapsed"
+
+ # Handle special default case:
+ if height == ' "1pt"' and innerpos == 'c':
+ innerpos = 't'
- file.body.insert(i, 'use_parbox 0')
+ document.body.insert(i, 'inner_pos "' + innerpos + '"')
i = i + 1
- file.body.insert(i, 'width' + width)
+ document.body.insert(i, 'use_parbox 0')
i = i + 1
- file.body.insert(i, 'special "none"')
+ document.body.insert(i, 'width' + width)
i = i + 1
- file.body.insert(i, 'height' + height)
+ document.body.insert(i, 'special "none"')
i = i + 1
- file.body.insert(i, 'height_special "totalheight"')
+ document.body.insert(i, 'height' + height)
i = i + 1
- file.body.insert(i, 'status ' + status)
+ document.body.insert(i, 'height_special "totalheight"')
i = i + 1
+ document.body.insert(i, 'status ' + status)
+ i = i + 1
+
+def convert_ertbackslash(body, i, ert, format, default_layout):
+ r""" -------------------------------------------------------------------------------------------
+ Convert backslashes and '\n' into valid ERT code, append the converted
+ text to body[i] and return the (maybe incremented) line index i"""
-# -------------------------------------------------------------------------------------------
-# Convert backslashes and '\n' into valid ERT code, append the converted
-# text to body[i] and return the (maybe incremented) line index i
-def convert_ertbackslash(body, i, ert):
for c in ert:
- if c == '\\':
- body[i] = body[i] + '\\backslash '
- i = i + 1
- body.insert(i, '')
- elif c == '\n':
- body[i+1:i+1] = ['\\newline ', '']
- i = i + 2
- else:
- body[i] = body[i] + c
+ if c == '\\':
+ body[i] = body[i] + '\\backslash '
+ i = i + 1
+ body.insert(i, '')
+ elif c == '\n':
+ if format <= 240:
+ body[i+1:i+1] = ['\\newline ', '']
+ i = i + 2
+ else:
+ body[i+1:i+1] = ['\\end_layout', '', '\\begin_layout %s' % default_layout, '']
+ i = i + 4
+ else:
+ body[i] = body[i] + c
return i
-def convert_vspace(file):
+def ert2latex(lines, format):
+ r""" Converts lines in ERT code to LaTeX
+ The surrounding \begin_layout ... \end_layout pair must not be included"""
+
+ backslash = re.compile(r'\\backslash\s*$')
+ newline = re.compile(r'\\newline\s*$')
+ if format <= 224:
+ begin_layout = re.compile(r'\\layout\s*\S+$')
+ else:
+ begin_layout = re.compile(r'\\begin_layout\s*\S+$')
+ end_layout = re.compile(r'\\end_layout\s*$')
+ ert = ''
+ for i in range(len(lines)):
+ line = backslash.sub('\\\\', lines[i])
+ if format <= 240:
+ if begin_layout.match(line):
+ line = '\n\n'
+ else:
+ line = newline.sub('\n', line)
+ else:
+ if begin_layout.match(line):
+ line = '\n'
+ if format > 224 and end_layout.match(line):
+ line = ''
+ ert = ert + line
+ return ert
+
+
+def get_par_params(lines, i):
+ """ get all paragraph parameters. They can be all on one line or on several lines.
+ lines[i] must be the first parameter line"""
+ par_params = ('added_space_bottom', 'added_space_top', 'align',
+ 'labelwidthstring', 'line_bottom', 'line_top', 'noindent',
+ 'pagebreak_bottom', 'pagebreak_top', 'paragraph_spacing',
+ 'start_of_appendix')
+ # We cannot check for '\\' only because paragraphs may start e.g.
+ # with '\\backslash'
+ params = ''
+ while lines[i][:1] == '\\' and lines[i][1:].split()[0] in par_params:
+ params = params + ' ' + lines[i].strip()
+ i = i + 1
+ return params.strip()
+
+
+def lyxsize2latexsize(lyxsize):
+ " Convert LyX font size to LaTeX fontsize. "
+ sizes = {"tiny" : "tiny", "scriptsize" : "scriptsize",
+ "footnotesize" : "footnotesize", "small" : "small",
+ "normal" : "normalsize", "large" : "large", "larger" : "Large",
+ "largest" : "LARGE", "huge" : "huge", "giant" : "Huge"}
+ if lyxsize in sizes:
+ return '\\' + sizes[lyxsize]
+ return ''
+
+
+def revert_breaks(document):
+ """ Change vspace insets, page breaks and lyxlines to paragraph options
+ (if possible) or ERT"""
# Get default spaceamount
- i = find_token(file.header, '\\defskip', 0)
+ i = find_token(document.header, '\\defskip', 0)
if i == -1:
- defskipamount = 'medskip'
+ defskipamount = 'medskip'
else:
- defskipamount = split(file.header[i])[1]
+ defskipamount = document.header[i].split()[1]
+
+ keys = {"\\begin_inset" : "vspace", "\\lyxline" : "lyxline",
+ "\\newpage" : "newpage"}
+ keywords_top = {"vspace" : "\\added_space_top", "lyxline" : "\\line_top",
+ "newpage" : "\\pagebreak_top"}
+ keywords_bot = {"vspace" : "\\added_space_bottom", "lyxline" : "\\line_bottom",
+ "newpage" : "\\pagebreak_bottom"}
+ tokens = ["\\begin_inset VSpace", "\\lyxline", "\\newpage"]
# Convert the insets
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset VSpace', i)
+ while True:
+ i = find_tokens(document.body, tokens, i)
if i == -1:
return
- spaceamount = split(file.body[i])[2]
-
- # Are we at the beginning or end of a paragraph?
- paragraph_start = 1
- start = get_paragraph(file.body, i) + 1
- for k in range(start, i):
- if is_nonempty_line(file.body[k]):
- paragraph_start = 0
- break
- paragraph_end = 1
- j = find_end_of_inset(file.body, i)
- if j == -1:
- file.warning("Malformed LyX file: Missing '\\end_inset'.")
- i = i + 1
- continue
- end = get_next_paragraph(file.body, i)
- for k in range(j + 1, end):
- if is_nonempty_line(file.body[k]):
- paragraph_end = 0
- break
-
- # Convert to paragraph formatting if we are at the beginning or end
- # of a paragraph and the resulting paragraph would not be empty
- if ((paragraph_start and not paragraph_end) or
- (paragraph_end and not paragraph_start)):
- # The order is important: del and insert invalidate some indices
- del file.body[j]
- del file.body[i]
- if paragraph_start:
- file.body.insert(start, '\\added_space_top ' + spaceamount + ' ')
- else:
- file.body.insert(start, '\\added_space_bottom ' + spaceamount + ' ')
- continue
-
- # Convert to ERT
- file.body[i:i+1] = ['\\begin_inset ERT', 'status Collapsed', '',
- '\\layout Standard', '', '\\backslash ']
- i = i + 6
- if spaceamount[-1] == '*':
- spaceamount = spaceamount[:-1]
- keep = 1
- else:
- keep = 0
-
- # Replace defskip by the actual value
- if spaceamount == 'defskip':
- spaceamount = defskipamount
-
- # LaTeX does not know \\smallskip* etc
- if keep:
- if spaceamount == 'smallskip':
- spaceamount = '\\smallskipamount'
- elif spaceamount == 'medskip':
- spaceamount = '\\medskipamount'
- elif spaceamount == 'bigskip':
- spaceamount = '\\bigskipamount'
- elif spaceamount == 'vfill':
- spaceamount = '\\fill'
-
- # Finally output the LaTeX code
- if (spaceamount == 'smallskip' or spaceamount == 'medskip' or
- spaceamount == 'bigskip' or spaceamount == 'vfill'):
- file.body.insert(i, spaceamount)
- else :
- if keep:
- file.body.insert(i, 'vspace*{')
- else:
- file.body.insert(i, 'vspace{')
- i = convert_ertbackslash(file.body, i, spaceamount)
- file.body[i] = file.body[i] + '}'
- i = i + 1
+
+ # Are we at the beginning of a paragraph?
+ paragraph_start = 1
+ this_par = get_paragraph(document.body, i, document.format - 1)
+ start = this_par + 1
+ params = get_par_params(document.body, start)
+ size = "normal"
+ # Paragraph parameters may be on one or more lines.
+ # Find the start of the real paragraph text.
+ while document.body[start][:1] == '\\' and document.body[start].split()[0] in params:
+ start = start + 1
+ for k in range(start, i):
+ if document.body[k].find("\\size") != -1:
+ # store font size
+ size = document.body[k].split()[1]
+ elif is_nonempty_line(document.body[k]):
+ paragraph_start = 0
+ break
+ # Find the end of the real paragraph text.
+ next_par = get_next_paragraph(document.body, i, document.format - 1)
+ if next_par == -1:
+ document.warning("Malformed LyX document: Missing next paragraph.")
+ i = i + 1
+ continue
+
+ # first line of our insets
+ inset_start = i
+ # last line of our insets
+ inset_end = inset_start
+ # Are we at the end of a paragraph?
+ paragraph_end = 1
+ # start and end line numbers to delete if we convert this inset
+ del_lines = list()
+ # is this inset a lyxline above a paragraph?
+ top = list()
+ # raw inset information
+ lines = list()
+ # name of this inset
+ insets = list()
+ # font size of this inset
+ sizes = list()
+
+ # Detect subsequent lyxline, vspace and pagebreak insets created by convert_breaks()
+ n = 0
+ k = inset_start
+ while k < next_par:
+ if find_tokens(document.body, tokens, k) == k:
+ # inset to convert
+ lines.append(document.body[k].split())
+ insets.append(keys[lines[n][0]])
+ del_lines.append([k, k])
+ top.append(0)
+ sizes.append(size)
+ n = n + 1
+ inset_end = k
+ elif document.body[k].find("\\size") != -1:
+ # store font size
+ size = document.body[k].split()[1]
+ elif find_token(document.body, "\begin_inset ERT", k) == k:
+ ert_begin = find_token(document.body, "\layout", k) + 1
+ if ert_begin == 0:
+ document.warning("Malformed LyX document: Missing '\layout'.")
+ # must advance k: a bare 'continue' re-tests the same line forever
+ k = k + 1
+ continue
+ ert_end = find_end_of_inset(document.body, k)
+ if ert_end == -1:
+ document.warning("Malformed LyX document: Missing '\end_inset'.")
+ # must advance k: a bare 'continue' re-tests the same line forever
+ k = k + 1
+ continue
+ ert = ert2latex(document.body[ert_begin:ert_end], document.format - 1)
+ if (n > 0 and insets[n - 1] == "lyxline" and
+ ert == '\\vspace{-1\\parskip}\n'):
+ # vspace ERT created by convert_breaks() for top lyxline
+ top[n - 1] = 1
+ del_lines[n - 1][1] = ert_end
+ inset_end = ert_end
+ k = ert_end
+ else:
+ paragraph_end = 0
+ break
+ elif (n > 0 and insets[n - 1] == "vspace" and
+ find_token(document.body, "\\end_inset", k) == k):
+ # ignore end of vspace inset
+ del_lines[n - 1][1] = k
+ inset_end = k
+ elif is_nonempty_line(document.body[k]):
+ paragraph_end = 0
+ break
+ k = k + 1
+
+ # Determine space amount for vspace insets
+ spaceamount = list()
+ arguments = list()
+ for k in range(n):
+ if insets[k] == "vspace":
+ spaceamount.append(lines[k][2])
+ arguments.append(' ' + spaceamount[k] + ' ')
+ else:
+ spaceamount.append('')
+ arguments.append(' ')
+
+ # Can we convert to top paragraph parameters?
+ before = 0
+ if ((n == 3 and insets[0] == "newpage" and insets[1] == "vspace" and
+ insets[2] == "lyxline" and top[2]) or
+ (n == 2 and
+ ((insets[0] == "newpage" and insets[1] == "vspace") or
+ (insets[0] == "newpage" and insets[1] == "lyxline" and top[1]) or
+ (insets[0] == "vspace" and insets[1] == "lyxline" and top[1]))) or
+ (n == 1 and insets[0] == "lyxline" and top[0])):
+ # These insets have been created before a paragraph by
+ # convert_breaks()
+ before = 1
+
+ # Can we convert to bottom paragraph parameters?
+ after = 0
+ if ((n == 3 and insets[0] == "lyxline" and not top[0] and
+ insets[1] == "vspace" and insets[2] == "newpage") or
+ (n == 2 and
+ ((insets[0] == "lyxline" and not top[0] and insets[1] == "vspace") or
+ (insets[0] == "lyxline" and not top[0] and insets[1] == "newpage") or
+ (insets[0] == "vspace" and insets[1] == "newpage"))) or
+ (n == 1 and insets[0] == "lyxline" and not top[0])):
+ # These insets have been created after a paragraph by
+ # convert_breaks()
+ after = 1
+
+ if paragraph_start and paragraph_end:
+ # We are in a paragraph of our own.
+ # We must not delete this paragraph if it has parameters
+ if params == '':
+ # First try to merge with the previous paragraph.
+ # We try the previous paragraph first because we would
+ # otherwise need ERT for two subsequent vspaces.
+ prev_par = get_paragraph(document.body, this_par - 1, document.format - 1) + 1
+ if prev_par > 0 and not before:
+ prev_params = get_par_params(document.body, prev_par + 1)
+ ert = 0
+ # determine font size
+ prev_size = "normal"
+ k = prev_par + 1
+ while document.body[k][:1] == '\\' and document.body[k].split()[0] in prev_params:
+ k = k + 1
+ while k < this_par:
+ if document.body[k].find("\\size") != -1:
+ prev_size = document.body[k].split()[1]
+ break
+ elif document.body[k].find("\\begin_inset") != -1:
+ # skip insets
+ k = find_end_of_inset(document.body, k)
+ elif is_nonempty_line(document.body[k]):
+ break
+ k = k + 1
+ for k in range(n):
+ if (keywords_bot[insets[k]] in prev_params or
+ (insets[k] == "lyxline" and sizes[k] != prev_size)):
+ ert = 1
+ break
+ if not ert:
+ for k in range(n):
+ document.body.insert(prev_par + 1,
+ keywords_bot[insets[k]] + arguments[k])
+ del document.body[this_par+n:next_par-1+n]
+ i = this_par + n
+ continue
+ # Then try next paragraph
+ if next_par > 0 and not after:
+ next_params = get_par_params(document.body, next_par + 1)
+ ert = 0
+ # determine font size
+ next_size = "normal"
+ k = next_par + 1
+ while document.body[k][:1] == '\\' and document.body[k].split()[0] in next_params:
+ k = k + 1
+ # NOTE(review): k starts at next_par + 1, which is beyond this_par, so this
+ # scan never runs and next_size always stays "normal" — bound looks wrong;
+ # the intended bound is presumably the end of the next paragraph. Verify.
+ while k < this_par:
+ if document.body[k].find("\\size") != -1:
+ next_size = document.body[k].split()[1]
+ break
+ elif is_nonempty_line(document.body[k]):
+ break
+ k = k + 1
+ for k in range(n):
+ if (keywords_top[insets[k]] in next_params or
+ (insets[k] == "lyxline" and sizes[k] != next_size)):
+ ert = 1
+ break
+ if not ert:
+ for k in range(n):
+ document.body.insert(next_par + 1,
+ keywords_top[insets[k]] + arguments[k])
+ del document.body[this_par:next_par-1]
+ i = this_par
+ continue
+ elif paragraph_start or paragraph_end:
+ # Convert to paragraph formatting if we are at the beginning or end
+ # of a paragraph and the resulting paragraph would not be empty
+ # The order is important: del and insert invalidate some indices
+ if paragraph_start:
+ keywords = keywords_top
+ else:
+ keywords = keywords_bot
+ ert = 0
+ for k in range(n):
+ if keywords[insets[k]] in params:
+ ert = 1
+ break
+ if not ert:
+ for k in range(n):
+ document.body.insert(this_par + 1,
+ keywords[insets[k]] + arguments[k])
+ for j in range(k, n):
+ del_lines[j][0] = del_lines[j][0] + 1
+ del_lines[j][1] = del_lines[j][1] + 1
+ del document.body[del_lines[k][0]:del_lines[k][1]+1]
+ deleted = del_lines[k][1] - del_lines[k][0] + 1
+ for j in range(k + 1, n):
+ del_lines[j][0] = del_lines[j][0] - deleted
+ del_lines[j][1] = del_lines[j][1] - deleted
+ i = this_par
+ continue
+
+ # Convert the first inset to ERT.
+ # The others are converted in the next loop runs (if they exist)
+ if insets[0] == "vspace":
+ document.body[i:i+1] = ['\\begin_inset ERT', 'status Collapsed', '',
+ '\\layout %s' % document.default_layout, '', '\\backslash ']
+ i = i + 6
+ if spaceamount[0][-1] == '*':
+ spaceamount[0] = spaceamount[0][:-1]
+ keep = 1
+ else:
+ keep = 0
+
+ # Replace defskip by the actual value
+ if spaceamount[0] == 'defskip':
+ spaceamount[0] = defskipamount
+
+ # LaTeX does not know \\smallskip* etc
+ if keep:
+ if spaceamount[0] == 'smallskip':
+ spaceamount[0] = '\\smallskipamount'
+ elif spaceamount[0] == 'medskip':
+ spaceamount[0] = '\\medskipamount'
+ elif spaceamount[0] == 'bigskip':
+ spaceamount[0] = '\\bigskipamount'
+ elif spaceamount[0] == 'vfill':
+ spaceamount[0] = '\\fill'
+
+ # Finally output the LaTeX code
+ if (spaceamount[0] == 'smallskip' or spaceamount[0] == 'medskip' or
+ spaceamount[0] == 'bigskip' or spaceamount[0] == 'vfill'):
+ document.body.insert(i, spaceamount[0] + '{}')
+ else :
+ if keep:
+ document.body.insert(i, 'vspace*{')
+ else:
+ document.body.insert(i, 'vspace{')
+ i = convert_ertbackslash(document.body, i, spaceamount[0], document.format - 1, document.default_layout)
+ document.body[i] = document.body[i] + '}'
+ i = i + 1
+ elif insets[0] == "lyxline":
+ document.body[i] = ''
+ latexsize = lyxsize2latexsize(size)
+ if latexsize == '':
+ document.warning("Could not convert LyX fontsize '%s' to LaTeX font size." % size)
+ latexsize = '\\normalsize'
+ i = insert_ert(document.body, i, 'Collapsed',
+ '\\lyxline{%s}' % latexsize,
+ document.format - 1, document.default_layout)
+ # We use \providecommand so that we don't get an error if native
+ # lyxlines are used (LyX writes first its own preamble and then
+ # the user specified one)
+ add_to_preamble(document,
+ ['% Commands inserted by lyx2lyx for lyxlines',
+ '\\providecommand{\\lyxline}[1]{',
+ ' {#1 \vspace{1ex} \hrule width \columnwidth \vspace{1ex}}',
+ '}'])
+ elif insets[0] == "newpage":
+ document.body[i] = ''
+ i = insert_ert(document.body, i, 'Collapsed', '\\newpage{}',
+ document.format - 1, document.default_layout)
# Convert a LyX length into a LaTeX length
# Convert special lengths
if special != 'none':
- len = '%f\\' % len2value(len) + special
+ len = '%f\\' % len2value(len) + special
# Convert LyX units to LaTeX units
- for unit in units.keys():
- if find(len, unit) != -1:
- len = '%f' % (len2value(len) / 100) + units[unit]
- break
+ for unit in list(units.keys()):
+ if len.find(unit) != -1:
+ len = '%f' % (len2value(len) / 100) + units[unit]
+ break
return len
-# Convert a LyX length into valid ERT code and append it to body[i]
-# Return the (maybe incremented) line index i
-def convert_ertlen(body, i, len, special):
- # Convert backslashes and insert the converted length into body
- return convert_ertbackslash(body, i, convert_len(len, special))
+def convert_ertlen(body, i, len, special, format, default_layout):
+ """ Convert a LyX length into valid ERT code and append it to body[i]
+ Return the (maybe incremented) line index i
+ Convert backslashes and insert the converted length into body. """
+ return convert_ertbackslash(body, i, convert_len(len, special), format, default_layout)
-# Return the value of len without the unit in numerical form
def len2value(len):
+ " Return the value of len without the unit in numerical form. "
result = re.search('([+-]?[0-9.]+)', len)
if result:
- return float(result.group(1))
+ return float(result.group(1))
# No number means 1.0
return 1.0
-# Convert text to ERT and insert it at body[i]
-# Return the index of the line after the inserted ERT
-def insert_ert(body, i, status, text):
- body[i:i] = ['\\begin_inset ERT', 'status ' + status, '',
- '\\layout Standard', '']
- i = i + 5
- i = convert_ertbackslash(body, i, text) + 1
+def insert_ert(body, i, status, text, format, default_layout):
+ """ Convert text to ERT and insert it at body[i]
+ Return the index of the line after the inserted ERT"""
+
+ body[i:i] = ['\\begin_inset ERT', 'status ' + status, '']
+ i = i + 3
+ if format <= 224:
+ body[i:i] = ['\\layout %s' % default_layout, '']
+ else:
+ body[i:i] = ['\\begin_layout %s' % default_layout, '']
+ i = i + 1 # i points now to the just created empty line
+ i = convert_ertbackslash(body, i, text, format, default_layout) + 1
+ if format > 224:
+ body[i:i] = ['\\end_layout']
+ i = i + 1
body[i:i] = ['', '\\end_inset', '']
i = i + 3
return i
-# Add text to the preamble if it is not already there.
-# Only the first line is checked!
-def add_to_preamble(file, text):
- i = find_token(file.header, '\\begin_preamble', 0)
- if i == -1:
- file.header.extend(['\\begin_preamble'] + text + ['\\end_preamble'])
- return
+def add_to_preamble(document, text):
+ """ Add text to the preamble if it is not already there.
+ Only the first line is checked!"""
- j = find_token(file.header, '\\end_preamble', i)
- if j == -1:
- file.warning("Malformed LyX file: Missing '\\end_preamble'.")
- file.warning("Adding it now and hoping for the best.")
- file.header.append('\\end_preamble')
- j = len(file.header)
-
- if find_token(file.header, text[0], i, j) != -1:
+ if find_token(document.preamble, text[0], 0) != -1:
return
- file.header[j:j] = text
+
+ document.preamble.extend(text)
-def convert_frameless_box(file):
+def convert_frameless_box(document):
+ " Convert frameless box."
pos = ['t', 'c', 'b']
inner_pos = ['c', 't', 'b', 's']
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset Frameless', i)
+ while True:
+ i = find_token(document.body, '\\begin_inset Frameless', i)
if i == -1:
return
- j = find_end_of_inset(file.body, i)
- if j == -1:
- file.warning("Malformed LyX file: Missing '\\end_inset'.")
- i = i + 1
- continue
- del file.body[i]
- j = j - 1
-
- # Gather parameters
- params = {'position':0, 'hor_pos':'c', 'has_inner_box':'1',
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Missing '\\end_inset'.")
+ i = i + 1
+ continue
+ del document.body[i]
+ j = j - 1
+
+ # Gather parameters
+ params = {'position':0, 'hor_pos':'c', 'has_inner_box':'1',
'inner_pos':1, 'use_parbox':'0', 'width':'100col%',
- 'special':'none', 'height':'1in',
- 'height_special':'totalheight', 'collapsed':'false'}
- for key in params.keys():
- value = replace(get_value(file.body, key, i, j), '"', '')
- if value != "":
- if key == 'position':
- # convert new to old position: 'position "t"' -> 0
- value = find_token(pos, value, 0)
- if value != -1:
- params[key] = value
- elif key == 'inner_pos':
- # convert inner position
- value = find_token(inner_pos, value, 0)
- if value != -1:
- params[key] = value
- else:
- params[key] = value
- j = del_token(file.body, key, i, j)
- i = i + 1
-
- # Convert to minipage or ERT?
- # Note that the inner_position and height parameters of a minipage
- # inset are ignored and not accessible for the user, although they
- # are present in the file format and correctly read in and written.
- # Therefore we convert to ERT if they do not have their LaTeX
- # defaults. These are:
- # - the value of "position" for "inner_pos"
- # - "\totalheight" for "height"
- if (params['use_parbox'] != '0' or
- params['has_inner_box'] != '1' or
- params['special'] != 'none' or
- params['height_special'] != 'totalheight' or
- len2value(params['height']) != 1.0):
+ 'special':'none', 'height':'1in',
+ 'height_special':'totalheight', 'collapsed':'false'}
+ for key in list(params.keys()):
+ value = get_value(document.body, key, i, j).replace('"', '')
+ if value != "":
+ if key == 'position':
+ # convert new to old position: 'position "t"' -> 0
+ value = find_token(pos, value, 0)
+ if value != -1:
+ params[key] = value
+ elif key == 'inner_pos':
+ # convert inner position
+ value = find_token(inner_pos, value, 0)
+ if value != -1:
+ params[key] = value
+ else:
+ params[key] = value
+ j = del_token(document.body, key, i, j)
+ i = i + 1
+
+ # Convert to minipage or ERT?
+ # Note that the inner_position and height parameters of a minipage
+ # inset are ignored and not accessible for the user, although they
+ # are present in the file format and correctly read in and written.
+ # Therefore we convert to ERT if they do not have their LaTeX
+ # defaults. These are:
+ # - the value of "position" for "inner_pos"
+ # - "\totalheight" for "height"
+ if (params['use_parbox'] != '0' or
+ params['has_inner_box'] != '1' or
+ params['special'] != 'none' or
+ params['height_special'] != 'totalheight' or
+ len2value(params['height']) != 1.0):
# Here we know that this box is not supported in file format 224.
# Therefore we need to convert it to ERT. We can't simply convert
# Otherwise we will get LaTeX errors if this document is
# converted to format 225 or above again (LyX 1.4 uses all
# optional arguments).
- add_to_preamble(file,
+ add_to_preamble(document,
['% Commands inserted by lyx2lyx for frameless boxes',
'% Save the original minipage environment',
'\\let\\lyxtolyxrealminipage\\minipage',
ert = ert + '\\let\\endminipage\\endlyxtolyxminipage%\n'
old_i = i
- i = insert_ert(file.body, i, 'Collapsed', ert)
+ i = insert_ert(document.body, i, 'Collapsed', ert, document.format - 1, document.default_layout)
j = j + i - old_i - 1
- file.body[i:i] = ['\\begin_inset Minipage',
+ document.body[i:i] = ['\\begin_inset Minipage',
'position %d' % params['position'],
'inner_position 1',
'height "1in"',
# Restore the original minipage environment since we may have
# minipages inside this box.
# Start a new paragraph because the following may be nonstandard
- file.body[i:i] = ['\\layout Standard', '', '']
+ document.body[i:i] = ['\\layout %s' % document.default_layout, '', '']
i = i + 2
j = j + 3
ert = '\\let\\minipage\\lyxtolyxrealminipage%\n'
ert = ert + '\\let\\endminipage\\lyxtolyxrealendminipage%'
old_i = i
- i = insert_ert(file.body, i, 'Collapsed', ert)
+ i = insert_ert(document.body, i, 'Collapsed', ert, document.format - 1, document.default_layout)
j = j + i - old_i - 1
# Redefine the minipage end before the inset end.
# Start a new paragraph because the previous may be nonstandard
- file.body[j:j] = ['\\layout Standard', '', '']
+ document.body[j:j] = ['\\layout %s' % document.default_layout, '', '']
j = j + 2
ert = '\\let\\endminipage\\endlyxtolyxminipage'
- j = insert_ert(file.body, j, 'Collapsed', ert)
- j = j + 1
- file.body.insert(j, '')
- j = j + 1
+ j = insert_ert(document.body, j, 'Collapsed', ert, document.format - 1, document.default_layout)
+ j = j + 1
+ document.body.insert(j, '')
+ j = j + 1
# LyX writes '%\n' after each box. Therefore we need to end our
# ERT with '%\n', too, since this may swallow a following space.
ert = '}%\n'
else:
ert = '\\end{lyxtolyxrealminipage}%\n'
- j = insert_ert(file.body, j, 'Collapsed', ert)
+ j = insert_ert(document.body, j, 'Collapsed', ert, document.format - 1, document.default_layout)
# We don't need to restore the original minipage after the inset
# end because the scope of the redefinition is the original box.
- else:
+ else:
+
+ # Convert to minipage
+ document.body[i:i] = ['\\begin_inset Minipage',
+ 'position %d' % params['position'],
+ 'inner_position %d' % params['inner_pos'],
+ 'height "' + params['height'] + '"',
+ 'width "' + params['width'] + '"',
+ 'collapsed ' + params['collapsed']]
+ i = i + 6
- # Convert to minipage
- file.body[i:i] = ['\\begin_inset Minipage',
- 'position %d' % params['position'],
- 'inner_position %d' % params['inner_pos'],
- 'height "' + params['height'] + '"',
- 'width "' + params['width'] + '"',
- 'collapsed ' + params['collapsed']]
- i = i + 6
-##
-# Convert jurabib
-#
+def remove_branches(document):
+ " Remove branches. "
+ i = 0
+ while True:
+ i = find_token(document.header, "\\branch", i)
+ if i == -1:
+ break
+ document.warning("Removing branch %s." % document.header[i].split()[1])
+ j = find_token(document.header, "\\end_branch", i)
+ if j == -1:
+ document.warning("Malformed LyX document: Missing '\\end_branch'.")
+ break
+ del document.header[i:j+1]
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset Branch", i)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Missing '\\end_inset'.")
+ i = i + 1
+ continue
+ del document.body[i]
+ del document.body[j - 1]
+ # Search for a line starting 'collapsed'
+ # If, however, we find a line starting '\layout'
+ # (_always_ present) then break with a warning message
+ collapsed_found = 0
+ while True:
+ if (document.body[i][:9] == "collapsed"):
+ del document.body[i]
+ collapsed_found = 1
+ continue
+ elif (document.body[i][:7] == "\\layout"):
+ if collapsed_found == 0:
+ document.warning("Malformed LyX document: Missing 'collapsed'.")
+ # Delete this new paragraph, since it would not appear in
+ # .tex output. This also avoids empty paragraphs.
+ del document.body[i]
+ break
+ i = i + 1
-def convert_jurabib(file):
- i = find_token(file.header, '\\use_numerical_citations', 0)
+
+def convert_jurabib(document):
+ " Convert jurabib. "
+ i = find_token(document.header, '\\use_numerical_citations', 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\use_numerical_citations'.")
+ document.warning("Malformed lyx document: Missing '\\use_numerical_citations'.")
return
- file.header.insert(i + 1, '\\use_jurabib 0')
+ document.header.insert(i + 1, '\\use_jurabib 0')
-def revert_jurabib(file):
- i = find_token(file.header, '\\use_jurabib', 0)
+def revert_jurabib(document):
+ " Revert jurabib. "
+ i = find_token(document.header, '\\use_jurabib', 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
+ document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
return
- if get_value(file.header, '\\use_jurabib', 0) != "0":
- file.warning("Conversion of '\\use_jurabib = 1' not yet implemented.")
+ if get_value(document.header, '\\use_jurabib', 0) != "0":
+ document.warning("Conversion of '\\use_jurabib = 1' not yet implemented.")
# Don't remove '\\use_jurabib' so that people will get warnings by lyx
return
- del file.header[i]
+ del document.header[i]
-##
-# Convert bibtopic
-#
-def convert_bibtopic(file):
- i = find_token(file.header, '\\use_jurabib', 0)
+def convert_bibtopic(document):
+ " Convert bibtopic. "
+ i = find_token(document.header, '\\use_jurabib', 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
+ document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
return
- file.header.insert(i + 1, '\\use_bibtopic 0')
+ document.header.insert(i + 1, '\\use_bibtopic 0')
-def revert_bibtopic(file):
- i = find_token(file.header, '\\use_bibtopic', 0)
+def revert_bibtopic(document):
+ " Revert bibtopic. "
+ i = find_token(document.header, '\\use_bibtopic', 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\use_bibtopic'.")
+ document.warning("Malformed lyx document: Missing '\\use_bibtopic'.")
return
- if get_value(file.header, '\\use_bibtopic', 0) != "0":
- file.warning("Conversion of '\\use_bibtopic = 1' not yet implemented.")
+ if get_value(document.header, '\\use_bibtopic', 0) != "0":
+ document.warning("Conversion of '\\use_bibtopic = 1' not yet implemented.")
# Don't remove '\\use_jurabib' so that people will get warnings by lyx
- del file.header[i]
+ del document.header[i]
-##
-# Sideway Floats
-#
-def convert_float(file):
+def convert_float(document):
+ " Convert sideways floats. "
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset Float', i)
+ while True:
+ i = find_token_exact(document.body, '\\begin_inset Float', i)
if i == -1:
return
# Seach for a line starting 'wide'
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
- if (file.body[i][:4] == "wide"):
- file.body.insert(i + 1, 'sideways false')
+ while True:
+ if (document.body[i][:4] == "wide"):
+ document.body.insert(i + 1, 'sideways false')
break
- elif (file.body[i][:13] == "\\begin_layout"):
- file.warning("Malformed lyx file: Missing 'wide'.")
+ elif (document.body[i][:13] == "\\begin_layout"):
+ document.warning("Malformed lyx document: Missing 'wide'.")
break
i = i + 1
i = i + 1
-def revert_float(file):
+def revert_float(document):
+ " Revert sideways floats. "
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset Float', i)
+ while True:
+ i = find_token_exact(document.body, '\\begin_inset Float', i)
if i == -1:
return
- j = find_end_of_inset(file.body, i)
+ line = document.body[i]
+ r = re.compile(r'\\begin_inset Float (.*)$')
+ m = r.match(line)
+ floattype = m.group(1)
+ if floattype != "figure" and floattype != "table":
+ i = i + 1
+ continue
+ j = find_end_of_inset(document.body, i)
if j == -1:
- file.warning("Malformed lyx file: Missing '\\end_inset'.")
+ document.warning("Malformed lyx document: Missing '\\end_inset'.")
i = i + 1
continue
- if get_value(file.body, 'sideways', i, j) != "false":
- file.warning("Conversion of 'sideways true' not yet implemented.")
- # Don't remove 'sideways' so that people will get warnings by lyx
+ if get_value(document.body, 'sideways', i, j) != "false":
+ l = find_token(document.body, "\\begin_layout Standard", i + 1, j)
+ if l == -1:
+ document.warning("Malformed LyX document: Missing `\\begin_layout Standard' in Float inset.")
+ return
+ document.body[j] = '\\layout Standard\n\\begin_inset ERT\nstatus Collapsed\n\n' \
+ '\\layout Standard\n\n\n\\backslash\n' \
+ 'end{sideways' + floattype + '}\n\n\\end_inset\n'
+ del document.body[i+1:l-1]
+ document.body[i] = '\\begin_inset ERT\nstatus Collapsed\n\n' \
+ '\\layout Standard\n\n\n\\backslash\n' \
+ 'begin{sideways' + floattype + '}\n\n\\end_inset\n\n'
+ add_to_preamble(document,
+ ['\\usepackage{rotfloat}\n'])
i = i + 1
continue
- del_token(file.body, 'sideways', i, j)
+ del_token(document.body, 'sideways', i, j)
i = i + 1
-def convert_graphics(file):
- """ Add extension to filenames of insetgraphics if necessary.
+def convert_graphics(document):
+ """ Add extension to filenames of insetgraphics if necessary.
"""
i = 0
- while 1:
- i = find_token(file.body, "\\begin_inset Graphics", i)
+ while True:
+ i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
- j = find_token2(file.body, "filename", i)
+ j = find_token_exact(document.body, "filename", i)
if j == -1:
return
i = i + 1
- filename = split(file.body[j])[1]
- absname = os.path.normpath(os.path.join(file.dir, filename))
- if file.input == stdin and not os.path.isabs(filename):
- # We don't know the directory and cannot check the file.
- # We could use a heuristic and take the current directory,
- # and we could try to find out if filename has an extension,
- # but that would be just guesses and could be wrong.
- file.warning("""Warning: Can not determine whether file
+ filename = document.body[j].split()[1]
+ if document.dir == u'' and not os.path.isabs(filename):
+ # We don't know the directory and cannot check the file.
+ # We could use a heuristic and take the current directory,
+ # and we could try to find out if filename has an extension,
+ # but that would be just guesses and could be wrong.
+ document.warning("""Warning: Cannot determine whether file
%s
needs an extension when reading from standard input.
- You may need to correct the file manually or run
- lyx2lyx again with the .lyx file as commandline argument.""" % filename)
- continue
- # This needs to be the same algorithm as in pre 233 insetgraphics
- if access(absname, F_OK):
- continue
- if access(absname + ".ps", F_OK):
- file.body[j] = replace(file.body[j], filename, filename + ".ps")
- continue
- if access(absname + ".eps", F_OK):
- file.body[j] = replace(file.body[j], filename, filename + ".eps")
+ You may need to correct the file manually or run
+ lyx2lyx again with the .lyx file as commandline argument.""" % filename)
+ continue
+ absname = os.path.normpath(os.path.join(document.dir, filename))
+ # This needs to be the same algorithm as in pre 233 insetgraphics
+ if access(absname, F_OK):
+ continue
+ if access(absname + ".ps", F_OK):
+ document.body[j] = document.body[j].replace(filename, filename + ".ps")
+ continue
+ if access(absname + ".eps", F_OK):
+ document.body[j] = document.body[j].replace(filename, filename + ".eps")
-##
-# Convert firstname and surname from styles -> char styles
-#
-def convert_names(file):
+def convert_names(document):
""" Convert in the docbook backend from firstname and surname style
to charstyles.
"""
- if file.backend != "docbook":
+ if document.backend != "docbook":
return
i = 0
- while 1:
- i = find_token(file.body, "\\begin_layout Author", i)
+ while True:
+ i = find_token(document.body, "\\begin_layout Author", i)
if i == -1:
return
i = i + 1
- while file.body[i] == "":
+ while document.body[i] == "":
i = i + 1
- if file.body[i][:11] != "\\end_layout" or file.body[i+2][:13] != "\\begin_deeper":
+ if document.body[i][:11] != "\\end_layout" or document.body[i+2][:13] != "\\begin_deeper":
i = i + 1
continue
k = i
- i = find_end_of( file.body, i+3, "\\begin_deeper","\\end_deeper")
+ i = find_end_of( document.body, i+3, "\\begin_deeper","\\end_deeper")
if i == -1:
# something is really wrong, abort
- file.warning("Missing \\end_deeper, after style Author.")
- file.warning("Aborted attempt to parse FirstName and Surname.")
+ document.warning("Missing \\end_deeper, after style Author.")
+ document.warning("Aborted attempt to parse FirstName and Surname.")
return
firstname, surname = "", ""
- name = file.body[k:i]
+ name = document.body[k:i]
j = find_token(name, "\\begin_layout FirstName", 0)
if j != -1:
j = j + 1
# delete name
- del file.body[k+2:i+1]
+ del document.body[k+2:i+1]
- file.body[k-1:k-1] = ["", "",
+ document.body[k-1:k-1] = ["", "",
"\\begin_inset CharStyle Firstname",
"status inlined",
"",
- "\\begin_layout Standard",
+ '\\begin_layout %s' % document.default_layout,
"",
"%s" % firstname,
"\end_layout",
"\\begin_inset CharStyle Surname",
"status inlined",
"",
- "\\begin_layout Standard",
+ '\\begin_layout %s' % document.default_layout,
"",
"%s" % surname,
"\\end_layout",
""]
-def revert_names(file):
+def revert_names(document):
""" Revert in the docbook backend from firstname and surname char style
to styles.
"""
- if file.backend != "docbook":
+ if document.backend != "docbook":
return
-##
-# \use_natbib 1 \cite_engine <style>
-# \use_numerical_citations 0 -> where <style> is one of
-# \use_jurabib 0 "basic", "natbib_authoryear",
-# "natbib_numerical" or "jurabib"
-def convert_cite_engine(file):
- a = find_token(file.header, "\\use_natbib", 0)
+def convert_cite_engine(document):
+ r""" \use_natbib 1 \cite_engine <style>
+ \use_numerical_citations 0 -> where <style> is one of
+ \use_jurabib 0 "basic", "natbib_authoryear", "natbib_numerical" or "jurabib"."""
+
+ a = find_token(document.header, "\\use_natbib", 0)
if a == -1:
- file.warning("Malformed lyx file: Missing '\\use_natbib'.")
+ document.warning("Malformed lyx document: Missing '\\use_natbib'.")
return
- b = find_token(file.header, "\\use_numerical_citations", 0)
+ b = find_token(document.header, "\\use_numerical_citations", 0)
if b == -1 or b != a+1:
- file.warning("Malformed lyx file: Missing '\\use_numerical_citations'.")
+ document.warning("Malformed lyx document: Missing '\\use_numerical_citations'.")
return
- c = find_token(file.header, "\\use_jurabib", 0)
+ c = find_token(document.header, "\\use_jurabib", 0)
if c == -1 or c != b+1:
- file.warning("Malformed lyx file: Missing '\\use_jurabib'.")
+ document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
return
- use_natbib = int(split(file.header[a])[1])
- use_numerical_citations = int(split(file.header[b])[1])
- use_jurabib = int(split(file.header[c])[1])
+ use_natbib = int(document.header[a].split()[1])
+ use_numerical_citations = int(document.header[b].split()[1])
+ use_jurabib = int(document.header[c].split()[1])
cite_engine = "basic"
if use_natbib:
elif use_jurabib:
cite_engine = "jurabib"
- del file.header[a:c+1]
- file.header.insert(a, "\\cite_engine " + cite_engine)
+ del document.header[a:c+1]
+ document.header.insert(a, "\\cite_engine " + cite_engine)
-def revert_cite_engine(file):
- i = find_token(file.header, "\\cite_engine", 0)
+def revert_cite_engine(document):
+ " Revert the cite engine. "
+ i = find_token(document.header, "\\cite_engine", 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\cite_engine'.")
+ document.warning("Malformed lyx document: Missing '\\cite_engine'.")
return
- cite_engine = split(file.header[i])[1]
+ cite_engine = document.header[i].split()[1]
use_natbib = '0'
use_numerical = '0'
elif cite_engine == "jurabib":
use_jurabib = '1'
- del file.header[i]
- file.header.insert(i, "\\use_jurabib " + use_jurabib)
- file.header.insert(i, "\\use_numerical_citations " + use_numerical)
- file.header.insert(i, "\\use_natbib " + use_natbib)
+ del document.header[i]
+ document.header.insert(i, "\\use_jurabib " + use_jurabib)
+ document.header.insert(i, "\\use_numerical_citations " + use_numerical)
+ document.header.insert(i, "\\use_natbib " + use_natbib)
-##
-# Paper package
-#
-def convert_paperpackage(file):
- i = find_token(file.header, "\\paperpackage", 0)
+def convert_paperpackage(document):
+ " Convert paper package. "
+ i = find_token(document.header, "\\paperpackage", 0)
if i == -1:
return
packages = {'default':'none','a4':'none', 'a4wide':'a4', 'widemarginsa4':'a4wide'}
- if len(split(file.header[i])) > 1:
- paperpackage = split(file.header[i])[1]
+ if len(document.header[i].split()) > 1:
+ paperpackage = document.header[i].split()[1]
+ document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
else:
- paperpackage = "default"
- file.header[i] = replace(file.header[i], paperpackage, packages[paperpackage])
+ document.header[i] = document.header[i] + ' widemarginsa4'
-def revert_paperpackage(file):
- i = find_token(file.header, "\\paperpackage", 0)
+def revert_paperpackage(document):
+ " Revert paper package. "
+ i = find_token(document.header, "\\paperpackage", 0)
if i == -1:
return
packages = {'none':'a4', 'a4':'a4wide', 'a4wide':'widemarginsa4',
'widemarginsa4':'', 'default': 'default'}
- if len(split(file.header[i])) > 1:
- paperpackage = split(file.header[i])[1]
+ if len(document.header[i].split()) > 1:
+ paperpackage = document.header[i].split()[1]
else:
paperpackage = 'default'
- file.header[i] = replace(file.header[i], paperpackage, packages[paperpackage])
+ document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
-##
-# Bullets
-#
-def convert_bullets(file):
+def convert_bullets(document):
+ " Convert bullets. "
i = 0
- while 1:
- i = find_token(file.header, "\\bullet", i)
+ while True:
+ i = find_token(document.header, "\\bullet", i)
if i == -1:
return
- if file.header[i][:12] == '\\bulletLaTeX':
- file.header[i] = file.header[i] + ' ' + strip(file.header[i+1])
+ if document.header[i][:12] == '\\bulletLaTeX':
+ document.header[i] = document.header[i] + ' ' + document.header[i+1].strip()
n = 3
else:
- file.header[i] = file.header[i] + ' ' + strip(file.header[i+1]) +\
- ' ' + strip(file.header[i+2]) + ' ' + strip(file.header[i+3])
+ document.header[i] = document.header[i] + ' ' + document.header[i+1].strip() +\
+ ' ' + document.header[i+2].strip() + ' ' + document.header[i+3].strip()
n = 5
- del file.header[i+1:i + n]
+ del document.header[i+1:i + n]
i = i + 1
-def revert_bullets(file):
+def revert_bullets(document):
+ " Revert bullets. "
i = 0
- while 1:
- i = find_token(file.header, "\\bullet", i)
+ while True:
+ i = find_token(document.header, "\\bullet", i)
if i == -1:
return
- if file.header[i][:12] == '\\bulletLaTeX':
- n = find(file.header[i], '"')
+ if document.header[i][:12] == '\\bulletLaTeX':
+ n = document.header[i].find('"')
if n == -1:
- file.warning("Malformed header.")
+ document.warning("Malformed header.")
return
else:
- file.header[i:i+1] = [file.header[i][:n-1],'\t' + file.header[i][n:], '\\end_bullet']
+ document.header[i:i+1] = [document.header[i][:n-1],'\t' + document.header[i][n:], '\\end_bullet']
i = i + 3
else:
- frag = split(file.header[i])
+ frag = document.header[i].split()
if len(frag) != 5:
- file.warning("Malformed header.")
+ document.warning("Malformed header.")
return
else:
- file.header[i:i+1] = [frag[0] + ' ' + frag[1],
+ document.header[i:i+1] = [frag[0] + ' ' + frag[1],
'\t' + frag[2],
'\t' + frag[3],
'\t' + frag[4],
i = i + 5
-##
-# \begin_header and \begin_document
-#
-def add_begin_header(file):
- i = find_token(file.header, '\\lyxformat', 0)
- file.header.insert(i+1, '\\begin_header')
- file.header.insert(i+1, '\\begin_document')
+def add_begin_header(document):
+ r" Add \begin_header and \begin_document. "
+ i = find_token(document.header, '\\lyxformat', 0)
+ document.header.insert(i+1, '\\begin_header')
+ document.header.insert(i+1, '\\begin_document')
-def remove_begin_header(file):
- i = find_token(file.header, "\\begin_document", 0)
+def remove_begin_header(document):
+ r" Remove \begin_header and \begin_document. "
+ i = find_token(document.header, "\\begin_document", 0)
if i != -1:
- del file.header[i]
- i = find_token(file.header, "\\begin_header", 0)
+ del document.header[i]
+ i = find_token(document.header, "\\begin_header", 0)
if i != -1:
- del file.header[i]
+ del document.header[i]
-##
-# \begin_file.body and \end_file.body
-#
-def add_begin_body(file):
- file.body.insert(0, '\\begin_body')
- file.body.insert(1, '')
- i = find_token(file.body, "\\end_document", 0)
- file.body.insert(i, '\\end_body')
-
-def remove_begin_body(file):
- i = find_token(file.body, "\\begin_body", 0)
+def add_begin_body(document):
+ r" Add \begin_body and \end_body"
+ document.body.insert(0, '\\begin_body')
+ document.body.insert(1, '')
+ i = find_token(document.body, "\\end_document", 0)
+ document.body.insert(i, '\\end_body')
+
+def remove_begin_body(document):
+ r" Remove \begin_body and \end_body"
+ i = find_token(document.body, "\\begin_body", 0)
if i != -1:
- del file.body[i]
- if not file.body[i]:
- del file.body[i]
- i = find_token(file.body, "\\end_body", 0)
+ del document.body[i]
+ if not document.body[i]:
+ del document.body[i]
+ i = find_token(document.body, "\\end_body", 0)
if i != -1:
- del file.body[i]
+ del document.body[i]
-##
-# \papersize
-#
-def normalize_papersize(file):
- i = find_token(file.header, '\\papersize', 0)
+def normalize_papersize(document):
+ r" Normalize \papersize"
+ i = find_token(document.header, '\\papersize', 0)
if i == -1:
return
- tmp = split(file.header[i])
+ tmp = document.header[i].split()
if tmp[1] == "Default":
- file.header[i] = '\\papersize default'
+ document.header[i] = '\\papersize default'
return
if tmp[1] == "Custom":
- file.header[i] = '\\papersize custom'
+ document.header[i] = '\\papersize custom'
-def denormalize_papersize(file):
- i = find_token(file.header, '\\papersize', 0)
+def denormalize_papersize(document):
+ r" Revert \papersize"
+ i = find_token(document.header, '\\papersize', 0)
if i == -1:
return
- tmp = split(file.header[i])
+ tmp = document.header[i].split()
if tmp[1] == "custom":
- file.header[i] = '\\papersize Custom'
+ document.header[i] = '\\papersize Custom'
-##
-# Strip spaces at end of command line
-#
-def strip_end_space(file):
- for i in range(len(file.body)):
- if file.body[i][:1] == '\\':
- file.body[i] = strip(file.body[i])
+def strip_end_space(document):
+ " Strip spaces at end of command line. "
+ for i in range(len(document.body)):
+ if document.body[i][:1] == '\\':
+ document.body[i] = document.body[i].strip()
-##
-# Use boolean values for \use_geometry, \use_bibtopic and \tracking_changes
-#
-def use_x_boolean(file):
+def use_x_boolean(document):
+ r" Use boolean values for \use_geometry, \use_bibtopic and \tracking_changes"
bin2bool = {'0': 'false', '1': 'true'}
for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
- i = find_token(file.header, use, 0)
+ i = find_token(document.header, use, 0)
if i == -1:
continue
- decompose = split(file.header[i])
- file.header[i] = decompose[0] + ' ' + bin2bool[decompose[1]]
+ decompose = document.header[i].split()
+ document.header[i] = decompose[0] + ' ' + bin2bool[decompose[1]]
-def use_x_binary(file):
+def use_x_binary(document):
+ r" Use digit values for \use_geometry, \use_bibtopic and \tracking_changes"
bool2bin = {'false': '0', 'true': '1'}
for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
- i = find_token(file.header, use, 0)
+ i = find_token(document.header, use, 0)
if i == -1:
continue
- decompose = split(file.header[i])
- file.header[i] = decompose[0] + ' ' + bool2bin[decompose[1]]
+ decompose = document.header[i].split()
+ document.header[i] = decompose[0] + ' ' + bool2bin[decompose[1]]
-##
-# Place all the paragraph parameters in their own line
-#
-def normalize_paragraph_params(file):
- body = file.body
- allowed_parameters = '\\paragraph_spacing', '\\noindent', '\\align', '\\labelwidthstring', "\\start_of_appendix"
+
+def normalize_paragraph_params(document):
+ " Place all the paragraph parameters in their own line. "
+ body = document.body
+
+ allowed_parameters = '\\paragraph_spacing', '\\noindent', \
+ '\\align', '\\labelwidthstring', "\\start_of_appendix", \
+ "\\leftindent"
i = 0
- while 1:
- i = find_token(file.body, '\\begin_layout', i)
+ while True:
+ i = find_token(document.body, '\\begin_layout', i)
if i == -1:
return
i = i + 1
- while 1:
- if strip(body[i]) and split(body[i])[0] not in allowed_parameters:
+ while True:
+ if body[i].strip() and body[i].split()[0] not in allowed_parameters:
break
- j = find(body[i],'\\', 1)
+ j = body[i].find('\\', 1)
if j != -1:
- body[i:i+1] = [strip(body[i][:j]), body[i][j:]]
+ body[i:i+1] = [body[i][:j].strip(), body[i][j:]]
i = i + 1
-##
-# Add/remove output_changes parameter
-#
-def convert_output_changes (file):
- i = find_token(file.header, '\\tracking_changes', 0)
+def convert_output_changes (document):
+ " Add output_changes parameter. "
+ i = find_token(document.header, '\\tracking_changes', 0)
if i == -1:
- file.warning("Malformed lyx file: Missing '\\tracking_changes'.")
+ document.warning("Malformed lyx document: Missing '\\tracking_changes'.")
return
- file.header.insert(i+1, '\\output_changes true')
+ document.header.insert(i+1, '\\output_changes true')
-def revert_output_changes (file):
- i = find_token(file.header, '\\output_changes', 0)
+def revert_output_changes (document):
+ " Remove output_changes parameter. "
+ i = find_token(document.header, '\\output_changes', 0)
if i == -1:
return
- del file.header[i]
+ del document.header[i]
-##
-# Convert paragraph breaks and sanitize paragraphs
-#
-def convert_ert_paragraphs(file):
+def convert_ert_paragraphs(document):
+ " Convert paragraph breaks and sanitize paragraphs. "
forbidden_settings = [
# paragraph parameters
'\\paragraph_spacing', '\\labelwidthstring',
'\\emph', '\\numeric', '\\bar', '\\noun',
'\\color', '\\lang']
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset ERT', i)
+ while True:
+ i = find_token(document.body, '\\begin_inset ERT', i)
if i == -1:
return
- j = find_end_of_inset(file.body, i)
+ j = find_end_of_inset(document.body, i)
if j == -1:
- file.warning("Malformed lyx file: Missing '\\end_inset'.")
+ document.warning("Malformed lyx document: Missing '\\end_inset'.")
i = i + 1
continue
# convert non-standard paragraphs to standard
k = i
- while 1:
- k = find_token(file.body, "\\begin_layout", k, j)
+ while True:
+ k = find_token(document.body, "\\begin_layout", k, j)
if k == -1:
break
- file.body[k] = "\\begin_layout Standard"
+ document.body[k] = '\\begin_layout %s' % document.default_layout
k = k + 1
# remove all paragraph parameters and font settings
k = i
while k < j:
- if (strip(file.body[k]) and
- split(file.body[k])[0] in forbidden_settings):
- del file.body[k]
+ if (document.body[k].strip() and
+ document.body[k].split()[0] in forbidden_settings):
+ del document.body[k]
j = j - 1
else:
k = k + 1
# insert an empty paragraph before each paragraph but the first
k = i
first_pagraph = 1
- while 1:
- k = find_token(file.body, "\\begin_layout Standard", k, j)
+ while True:
+ k = find_token(document.body, "\\begin_layout", k, j)
if k == -1:
break
if first_pagraph:
first_pagraph = 0
k = k + 1
continue
- file.body[k:k] = ["\\begin_layout Standard", "",
+ document.body[k:k] = ['\\begin_layout %s' % document.default_layout, "",
"\\end_layout", ""]
k = k + 5
j = j + 4
# convert \\newline to new paragraph
k = i
- while 1:
- k = find_token(file.body, "\\newline", k, j)
+ while True:
+ k = find_token(document.body, "\\newline", k, j)
if k == -1:
break
- file.body[k:k+1] = ["\\end_layout", "", "\\begin_layout Standard"]
- k = k + 4
- j = j + 3
+ document.body[k:k+1] = ["\\end_layout", "", '\\begin_layout %s' % document.default_layout]
+ k = k + 3
+ j = j + 2
+ # We need an empty line if document.default_layout == ''
+ if document.body[k] != '':
+ document.body.insert(k, '')
+ k = k + 1
+ j = j + 1
i = i + 1
-##
-# Remove double paragraph breaks
-#
-def revert_ert_paragraphs(file):
+def revert_ert_paragraphs(document):
+ " Remove double paragraph breaks. "
i = 0
- while 1:
- i = find_token(file.body, '\\begin_inset ERT', i)
+ while True:
+ i = find_token(document.body, '\\begin_inset ERT', i)
if i == -1:
return
- j = find_end_of_inset(file.body, i)
+ j = find_end_of_inset(document.body, i)
if j == -1:
- file.warning("Malformed lyx file: Missing '\\end_inset'.")
+ document.warning("Malformed lyx document: Missing '\\end_inset'.")
i = i + 1
continue
# replace paragraph breaks with \newline
k = i
- while 1:
- k = find_token(file.body, "\\end_layout", k, j)
- l = find_token(file.body, "\\begin_layout", k, j)
+ while True:
+ k = find_token(document.body, "\\end_layout", k, j)
+ l = find_token(document.body, "\\begin_layout", k, j)
if k == -1 or l == -1:
break
- file.body[k:l+1] = ["\\newline"]
+ document.body[k:l+1] = ["\\newline"]
j = j - l + k
k = k + 1
# replace double \newlines with paragraph breaks
k = i
- while 1:
- k = find_token(file.body, "\\newline", k, j)
+ while True:
+ k = find_token(document.body, "\\newline", k, j)
if k == -1:
break
l = k + 1
- while file.body[l] == "":
+ while document.body[l] == "":
l = l + 1
- if strip(file.body[l]) and split(file.body[l])[0] == "\\newline":
- file.body[k:l+1] = ["\\end_layout", "",
- "\\begin_layout Standard"]
+ if document.body[l].strip() and document.body[l].split()[0] == "\\newline":
+ document.body[k:l+1] = ["\\end_layout", "",
+ '\\begin_layout %s' % document.default_layout]
j = j - l + k + 2
k = k + 3
+ # We need an empty line if document.default_layout == ''
+ if document.body[l+1] != '':
+ document.body.insert(l+1, '')
+ k = k + 1
+ j = j + 1
else:
k = k + 1
i = i + 1
-def convert_french(file):
+def convert_french(document):
+ " Convert frenchb. "
regexp = re.compile(r'^\\language\s+frenchb')
- i = find_re(file.header, regexp, 0)
+ i = find_re(document.header, regexp, 0)
if i != -1:
- file.header[i] = "\\language french"
+ document.header[i] = "\\language french"
# Change language in the document body
regexp = re.compile(r'^\\lang\s+frenchb')
i = 0
- while 1:
- i = find_re(file.body, regexp, i)
+ while True:
+ i = find_re(document.body, regexp, i)
if i == -1:
break
- file.body[i] = "\\lang french"
+ document.body[i] = "\\lang french"
i = i + 1
-def remove_paperpackage(file):
- i = find_token(file.header, '\\paperpackage', 0)
+def remove_paperpackage(document):
+ " Remove paper package. "
+ i = find_token(document.header, '\\paperpackage', 0)
if i == -1:
return
- paperpackage = split(file.header[i])[1]
+ paperpackage = document.header[i].split()[1]
- if paperpackage in ("a4", "a4wide", "widemarginsa4"):
- j = find_token(file.header, '\\begin_preamble', 0)
- conv = {"a4":"\\usepackage{a4}","a4wide": "\\usepackage{a4wide}",
- "widemarginsa4": "\\usepackage[widemargins]{a4}"}
- if j == -1:
- # Add preamble
- j = len(file.header) - 2
- file.header[j:j]=["\\begin_preamble",
- conv[paperpackage],"\\end_preamble"]
- else:
- file.header[j+1:j+1] = conv[paperpackage]
+ del document.header[i]
- del file.header[i]
+ if paperpackage not in ("a4", "a4wide", "widemarginsa4"):
+ return
- i = find_token(file.header, '\\papersize', 0)
+ conv = {"a4":"\\usepackage{a4}","a4wide": "\\usepackage{a4wide}",
+ "widemarginsa4": "\\usepackage[widemargins]{a4}"}
+ # for compatibility we ensure it is the first entry in preamble
+ document.preamble[0:0] = [conv[paperpackage]]
+
+ i = find_token(document.header, '\\papersize', 0)
if i != -1:
- file.header[i] = "\\papersize default"
+ document.header[i] = "\\papersize default"
+
+
+def remove_quotestimes(document):
+ " Remove quotestimes. "
+ i = find_token(document.header, '\\quotes_times', 0)
+ if i == -1:
+ return
+ del document.header[i]
+
+
+def convert_sgml_paragraphs(document):
+ " Convert SGML paragraphs. "
+ if document.backend != "docbook":
+ return
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_layout SGML", i)
+
+ if i == -1:
+ return
+
+ document.body[i] = "\\begin_layout Standard"
+ j = find_token(document.body, "\\end_layout", i)
+
+ document.body[j+1:j+1] = ['','\\end_inset','','','\\end_layout']
+ document.body[i+1:i+1] = ['\\begin_inset ERT','status inlined','','\\begin_layout Standard','']
+ i = i + 10
##
-# Convertion hub
+# Conversion hub
#
-convert = [[223, [insert_tracking_changes, add_end_header, remove_color_default,
- convert_spaces, convert_bibtex, remove_insetparent]],
+supported_versions = ["1.4.%d" % i for i in range(3)] + ["1.4"]
+convert = [[222, [insert_tracking_changes, add_end_header, convert_amsmath]],
+ [223, [remove_color_default, convert_spaces, convert_bibtex, remove_insetparent]],
[224, [convert_external, convert_comment]],
[225, [add_end_layout, layout2begin_layout, convert_end_document,
convert_table_valignment_middle, convert_breaks]],
[226, [convert_note]],
[227, [convert_box]],
- [228, [convert_collapsable, convert_ert]],
+ [228, [convert_collapsible, convert_ert]],
[229, [convert_minipage]],
[230, [convert_jurabib]],
[231, [convert_float]],
[240, [convert_output_changes]],
[241, [convert_ert_paragraphs]],
[242, [convert_french]],
- [243, [remove_paperpackage]]]
+ [243, [remove_paperpackage]],
+ [244, [rename_spaces]],
+ [245, [remove_quotestimes, convert_sgml_paragraphs]]]
-revert = [[242, []],
+revert = [[244, []],
+ [243, [revert_space_names]],
+ [242, []],
[241, []],
[240, [revert_ert_paragraphs]],
[239, [revert_output_changes]],
[230, [revert_float]],
[229, [revert_jurabib]],
[228, []],
- [227, [revert_collapsable, revert_ert]],
+ [227, [revert_collapsible, revert_ert]],
[226, [revert_box, revert_external_2]],
[225, [revert_note]],
[224, [rm_end_layout, begin_layout2layout, revert_end_document,
- revert_valignment_middle, convert_vspace, convert_frameless_box]],
+ revert_valignment_middle, revert_breaks, convert_frameless_box,
+ remove_branches]],
[223, [revert_external_2, revert_comment, revert_eqref]],
- [221, [rm_end_header, revert_spaces, revert_bibtex,
- rm_tracking_changes, rm_body_changes]]]
+ [222, [revert_spaces, revert_bibtex]],
+ [221, [revert_amsmath, rm_end_header, rm_tracking_changes, rm_body_changes]]]
if __name__ == "__main__":