# Map an old-style float name (the word after "\begin_float") to the header
# lines of the new inset that replaces it.  Each value is the opening lines of
# the replacement inset; the float body and "\end_inset" are appended later.
# (Reconstructed: the original span was corrupted by leftover unified-diff
# markers duplicating every changed line.)
floats = {
    "footnote": ["\\begin_inset Foot",
                 "collapsed true"],
    "margin":   ["\\begin_inset Marginal",
                 "collapsed true"],
    "fig":      ["\\begin_inset Float figure",
                 "wide false",
                 "collapsed false"],
    "tab":      ["\\begin_inset Float table",
                 "wide false",
                 "collapsed false"],
    "alg":      ["\\begin_inset Float algorithm",
                 "wide false",
                 "collapsed false"],
    "wide-fig": ["\\begin_inset Float figure",
                 "wide true",
                 "collapsed false"],
    "wide-tab": ["\\begin_inset Float table",
                 "wide true",
                 "collapsed false"]
}
# Font attribute tokens that are reset after a float; the conversion loop
# below emits "<token> default" for each one it finds before the float.
font_tokens = ["\\family", "\\series", "\\shape", "\\size", "\\emph",
               "\\bar", "\\noun", "\\color", "\\lang", "\\latex"]
# Regexps for the old (pre-221) "\pextra_*" paragraph parameters.
# pextra_type3_rexp matches a line carrying "\pextra_type 3" (converted to a
# Wrap figure inset below).  pextra_rexp captures, in order:
#   group 1: type, 3: alignment, 5: hfill, 7: start_minipage,
#   group 9: width token ("\pextra_width" or "\pextra_widthp"), 10: width value
pextra_type3_rexp = re.compile(r".*\\pextra_type\s+3")
pextra_rexp = re.compile(r"\\pextra_type\s+(\S+)"+\
                         r"(\s+\\pextra_alignment\s+(\S+))?"+\
                         r"(\s+\\pextra_hfill\s+(\S+))?"+\
                         r"(\s+\\pextra_start_minipage\s+(\S+))?"+\
                         r"(\s+(\\pextra_widthp?)\s+(\S*))?")
def get_width(mo):
    # Return the width string from a pextra_rexp match object.
    # group(9) is the width token, group(10) its value; a "\pextra_widthp"
    # value is a percentage of the column width, so "col%" is appended.
    if mo.group(10):
        if mo.group(9) == "\\pextra_widthp":
            return mo.group(10)+"col%"
        else:
            return mo.group(10)
    else:
        # No width given: default to the full column width.
        return "100col%"
#
# Convert old "\begin_float"/"\end_float" blocks to the new float insets
# (part of a larger conversion routine — its def line is not in this hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_float", i)
    if i == -1:
        break
    # There are no nested floats, so finding the end of the float is simple
    j = find_token(lines, "\\end_float", i+1)

    floattype = string.split(lines[i])[1]
    if not floats.has_key(floattype):
        file.warning("Error! Unknown float type " + floattype)
        floattype = "fig"

    # skip \end_deeper tokens
    i2 = i+1
    while check_token(lines[i2], "\\end_deeper"):
        i2 = i2+1
    if i2 > i+1:
        j2 = get_next_paragraph(lines, j + 1, file.format + 1)
        lines[j2:j2] = ["\\end_deeper "]*(i2-(i+1))

    new = floats[floattype]+[""]

    # Check if the float is floatingfigure
    k = find_re(lines, pextra_type3_rexp, i, j)
    if k != -1:
        mo = pextra_rexp.search(lines[k])
        width = get_width(mo)
        lines[k] = re.sub(pextra_rexp, "", lines[k])
        new = ["\\begin_inset Wrap figure",
               'width "%s"' % width,
               "collapsed false",
               ""]

    new = new+lines[i2:j]+["\\end_inset ", ""]

    # After a float, all font attributes are reseted.
    # We need to output '\foo default' for every attribute foo
    # whose value is not default before the float.
    # The check here is not accurate, but it doesn't matter
    # as extra '\foo default' commands are ignored.
    # In fact, it might be safer to output '\foo default' for all
    # font attributes.
    k = get_paragraph(lines, i, file.format + 1)
    flag = 0
    for token in font_tokens:
        if find_token(lines, token, k, i) != -1:
            if not flag:
                # This is not necessary, but we want the output to be
                # as similar as posible to the lyx format
                flag = 1
                new.append("")
            if token == "\\lang":
                new.append(token+" "+ file.language)
            else:
                new.append(token+" default ")

    lines[i:j+1] = new
    i = i+1
# Convert paragraphs carrying "\pextra_type 1/2" into Minipage insets
# (part of a larger conversion routine — its def line is not in this hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
pextra_type2_rexp = re.compile(r".*\\pextra_type\s+[12]")
i = 0
flag = 0
while 1:
    i = find_re(lines, pextra_type2_rexp, i)
    if i == -1:
        break
    # Sometimes the \pextra_widthp argument comes in it own
    # line. If that happens insert it back in this line.
    lines[i] = lines[i] + ' ' + lines[i+1]
    del lines[i+1]
    mo = pextra_rexp.search(lines[i])
    width = get_width(mo)
    if mo.group(1) == "1":
        continue
    # handle \pextra_type 2 (minipage)
    position = mo.group(3)
    hfill = mo.group(5)
    lines[i] = re.sub(pextra_rexp, "", lines[i])

    start = ["\\begin_inset Minipage",
             "position " + position,
             "inner_position 0",
             'height "0pt"',
             'width "%s"' % width,
             "collapsed false"
             ]
    if flag:
        flag = 0
        if hfill:
            start = ["","\hfill",""]+start
    else:
        start = ['\\layout %s' % file.default_layout,''] + start

    j0 = find_token_backwards(lines,"\\layout", i-1)
    j = get_next_paragraph(lines, i, file.format + 1)

    count = 0
    while 1:
        # collect more paragraphs to the minipage
        count = count+1
        if j == -1 or not check_token(lines[j], "\\layout"):
            break
        i = find_re(lines, pextra_type2_rexp2, j+1)
        if i == -1:
            break
        mo = pextra_rexp.search(lines[i])
        if not mo:
            break
        if mo.group(7) == "1":
            flag = 1
            break
        lines[i] = re.sub(pextra_rexp, "", lines[i])
        j = find_tokens(lines, ["\\layout", "\\end_float"], i+1)

    mid = lines[j0:j]
    end = ["\\end_inset "]

    lines[j0:j] = start+mid+end
    i = i+1
def is_empty(lines):
# Convert old "\latex latex"/"\layout LaTeX" ERT markup to ERT insets
# (part of a larger conversion routine — its def line is not in this hunk;
# the preceding "def is_empty" header belongs to a different, truncated hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
    if i == -1:
        break
    j = i+1
    while 1:
        # \end_inset is for ert inside a tabular cell. The other tokens
        # are obvious.
        j = find_tokens(lines, ["\\latex default", "\\layout", "\\begin_inset", "\\end_inset", "\\end_float", "\\the_end"],
                        j)
        if check_token(lines[j], "\\begin_inset"):
            j = find_end_of_inset(lines, j)+1
        else:
            break

    if check_token(lines[j], "\\layout"):
        while j-1 >= 0 and check_token(lines[j-1], "\\begin_deeper"):
            j = j-1

    # We need to remove insets, special chars & font commands from ERT text
    new = []
    new2 = []
    if check_token(lines[i], "\\layout LaTeX"):
        new = ['\layout %s' % file.default_layout, "", ""]

    k = i+1
    while 1:
        k2 = find_re(lines, ert_rexp, k, j)
        inset = hfill = specialchar = 0
        if k2 == -1:
            k2 = j
        elif check_token(lines[k2], "\\begin_inset"):
            inset = 1
        elif check_token(lines[k2], "\\hfill"):
            hfill = 1
            del lines[k2]
            j = j-1
        else:
            specialchar = 1
            mo = spchar_rexp.match(lines[k2])
            lines[k2] = mo.group(1)
            specialchar_str = mo.group(2)
            k2 = k2+1

        tmp = []
        for line in lines[k:k2]:
            # Move some lines outside the ERT inset:
            if move_rexp.match(line):
                if new2 == []:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    new2 = [""]
                new2.append(line)
            elif not check_token(line, "\\latex"):
                tmp.append(line)

        if is_empty(tmp):
            if filter(lambda x:x != "", tmp) != []:
                if new == []:
                    # This is not necessary, but we want the output to be
                    # as similar as posible to the lyx format
                    lines[i-1] = lines[i-1]+" "
                else:
                    new = new+[" "]
        else:
            new = new+ert_begin+tmp+["\\end_inset ", ""]

        if inset:
            k3 = find_end_of_inset(lines, k2)
            new = new+[""]+lines[k2:k3+1]+[""] # Put an empty line after \end_inset
            k = k3+1
            # Skip the empty line after \end_inset
            if not is_nonempty_line(lines[k]):
                k = k+1
                new.append("")
        elif hfill:
            new = new + ["\\hfill", ""]
            k = k2
        elif specialchar:
            if new == []:
                # This is not necessary, but we want the output to be
                # as similar as posible to the lyx format
                lines[i-1] = lines[i-1]+specialchar_str
                new = [""]
            else:
                new = new+[specialchar_str, ""]
            k = k2
        else:
            break

    new = new+new2
    if not check_token(lines[j], "\\latex "):
        new = new+[""]+[lines[j]]
    lines[i:j+1] = new
    i = i+1
# Delete remaining "\latex xxx" tokens
i = 0
while 1:
    i = find_token(lines, "\\latex ", i)
    if i == -1:
        break
    del lines[i]
# ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future.
# Unwrap "\begin_inset ERT" insets, dropping the inner layout line when it
# matches the enclosing paragraph's layout.
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_inset ERT", i)
    if i == -1:
        break
    j = find_end_of_inset(lines, i)
    k = find_token(lines, "\\layout", i+1)
    l = get_paragraph(lines, i, file.format + 1)
    if lines[k] == lines[l]: # same layout
        k = k+1
    new = lines[k:j]
    lines[i:j+1] = new
    i = i+1
def is_ert_paragraph(file, i):
    # Test whether the paragraph starting after line i is a pure ERT inset.
    # NOTE(review): this hunk is truncated — the remainder of the function
    # (the final check/return after locating the inset end) lies outside
    # the visible span; confirm against the full file.
    i = find_nonempty_line(lines, i+1)
    if not check_token(lines[i], "\\begin_inset ERT"):
        return 0
    j = find_end_of_inset(lines, i)
    k = find_nonempty_line(lines, j+1)
# Merge runs of consecutive ERT paragraphs into a single ERT inset
# (part of a larger conversion routine — its def line is not in this hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_inset ERT", i)
    if i == -1:
        break
    j = get_paragraph(lines, i, file.format + 1)
    count = 0
    text = []
    while is_ert_paragraph(file, j):

        count = count+1
        i2 = find_token(lines, "\\layout", j+1)
        k = find_token(lines, "\\end_inset", i2+1)
        text = text+lines[i2:k]
        j = find_token(lines, "\\layout", k+1)
        if j == -1:
            break

    if count >= 2:
        j = find_token(lines, "\\layout", i+1)
        lines[j:k] = text

    i = i+1
# Unit names indexed by the old integer unit code stored in the file.
oldunits = ["pt", "cm", "in", "text%", "col%"]

def get_length(lines, name, start, end):
    # Return the length value of token `name` in lines[start:end] as
    # "<value><unit>", or "" when the token is absent.  The old format
    # stores "name <unit-code> <value>".
    i = find_token(lines, name, start, end)
    if i == -1:
        return ""
    x = string.split(lines[i])
    return x[2]+oldunits[int(x[1])]
def write_attribute(x, token, value):
    # Append "\t<token> <value>" to list x, skipping empty values so that
    # unset attributes are simply omitted from the output.
    if value != "":
        x.append("\t"+token+" "+value)
def remove_figinset(file):
    # Convert old "\begin_inset Figure" insets into
    # "\begin_inset Graphics FormatVersion 1" insets, translating the old
    # positional size fields, flags and subcaption lines into named
    # attributes.  Reconstructed from a diff-corrupted span.
    lines = file.body
    i = 0
    while 1:
        i = find_token(lines, "\\begin_inset Figure", i)
        if i == -1:
            break
        j = find_end_of_inset(lines, i)

        # Old inset line may carry "Figure size <w> <h>" (in pt).
        if ( len(string.split(lines[i])) > 2 ):
            lyxwidth = string.split(lines[i])[3]+"pt"
            lyxheight = string.split(lines[i])[4]+"pt"
        else:
            lyxwidth = ""
            lyxheight = ""

        filename = get_value(lines, "file", i+1, j)

        width = get_length(lines, "width", i+1, j)
        # what does width=5 mean ?
        height = get_length(lines, "height", i+1, j)
        rotateAngle = get_value(lines, "angle", i+1, j)
        if width == "" and height == "":
            size_type = "0"
        else:
            size_type = "1"

        # Low two bits of the old flags select the display mode.
        flags = get_value(lines, "flags", i+1, j)
        x = int(flags)%4
        if x == 1:
            display = "monochrome"
        elif x == 2:
            display = "gray"
        else:
            display = "color"

        subcaptionText = ""
        subcaptionLine = find_token(lines, "subcaption", i+1, j)
        if subcaptionLine != -1:
            subcaptionText = lines[subcaptionLine][11:]
            if subcaptionText != "":
                subcaptionText = '"'+subcaptionText+'"'

        k = find_token(lines, "subfigure", i+1,j)
        if k == -1:
            subcaption = 0
        else:
            subcaption = 1

        new = ["\\begin_inset Graphics FormatVersion 1"]
        write_attribute(new, "filename", filename)
        write_attribute(new, "display", display)
        if subcaption:
            new.append("\tsubcaption")
        write_attribute(new, "subcaptionText", subcaptionText)
        write_attribute(new, "size_type", size_type)
        write_attribute(new, "width", width)
        write_attribute(new, "height", height)
        # NOTE(review): nesting of the rotate attributes under the angle
        # check is inferred from the corrupted hunk — confirm against the
        # pristine file.
        if rotateAngle != "":
            new.append("\trotate")
            write_attribute(new, "rotateAngle", rotateAngle)
            write_attribute(new, "rotateOrigin", "leftBaseline")
        write_attribute(new, "lyxsize_type", "1")
        write_attribute(new, "lyxwidth", lyxwidth)
        write_attribute(new, "lyxheight", lyxheight)
        new = new + ["\\end_inset"]
        lines[i:j+1] = new
##
if i == -1:
break
- for k in get_tabular_lines(lines, i):
- if check_token(lines[k], "<lyxtabular"):
- lines[k] = string.replace(lines[k], 'version="2"', 'version="3"')
- elif check_token(lines[k], "<column"):
- lines[k] = string.replace(lines[k], 'width=""', 'width="0pt"')
+ for k in get_tabular_lines(lines, i):
+ if check_token(lines[k], "<lyxtabular"):
+ lines[k] = string.replace(lines[k], 'version="2"', 'version="3"')
+ elif check_token(lines[k], "<column"):
+ lines[k] = string.replace(lines[k], 'width=""', 'width="0pt"')
- if line_re.match(lines[k]):
- lines[k] = re.sub(attr_re, "", lines[k])
+ if line_re.match(lines[k]):
+ lines[k] = re.sub(attr_re, "", lines[k])
- i = i+1
+ i = i+1
##
# simple data structure to deal with long table info
class row:
    def __init__(self):
        # NOTE(review): "false" is a name defined elsewhere in this module
        # (old lyx2lyx convention, presumably 0) — confirm against the file.
        self.endhead = false            # header row
        self.endfirsthead = false       # first header row
        self.endfoot = false            # footer row
        self.endlastfoot = false        # last footer row
def haveLTFoot(row_info):
for j in range(rows):
i = find_token(body, '<row', i)
- self.endfoot = false # footer row
- self.endlastfoot = false # last footer row
+ self.endfoot = false # footer row
+ self.endlastfoot = false # last footer row
if row_info[j].endhead:
insert_attribute(body, i, 'endhead="true"')
# Insert a "wide false" line before "collapsed" in every Float inset
# (part of a larger conversion routine — its def line is not in this hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_inset Float ", i)
    if i == -1:
        break
    j = find_token(lines, "collapsed", i)
    if j != -1:
        lines[j:j] = ["wide false"]
# Convert "\begin_inset LatexCommand \listofXXXs" to "\begin_inset FloatList XXX"
# (part of a larger conversion routine — its def line is not in this hunk).
# Reconstructed from a diff-corrupted span: old/new duplicate lines removed.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_inset LatexCommand \\listof", i)
    if i == -1:
        break
    # Strip the trailing "s" of e.g. "listoffigures" to get the float type.
    type = re.search(r"listof(\w*)", lines[i]).group(1)[:-1]
    lines[i] = "\\begin_inset FloatList "+type
    i = i+1
lines = file.header
i = find_token(lines, "\\use_amsmath", 0)
if i == -1:
- return
+ return
lines[i+1:i+1] = ["\\use_natbib 0",
- "\use_numerical_citations 0"]
+ "\use_numerical_citations 0"]
convert = [[220, [change_header, change_listof, fix_oldfloatinset,
# Strip default-valued attributes from Graphics insets and normalize the
# size/rotate parameter sets (part of a larger conversion routine — its def
# line is not in this hunk).  Reconstructed from a diff-corrupted span.
lines = file.body
i = 0
while 1:
    i = find_token(lines, "\\begin_inset Graphics", i)
    if i == -1:
        break
    j = find_end_of_inset(lines, i)

    # Drop any trailing "FormatVersion ..." from the inset header line.
    lines[i] = "\\begin_inset Graphics"

    if get_value(lines, "display", i, j) == "default":
        j = del_token(lines, "display", i, j)
    if get_value(lines, "rotateOrigin", i, j) == "leftBaseline":
        j = del_token(lines, "rotateOrigin", i, j)

    k = find_token_exact(lines, "rotate", i, j)
    if k != -1:
        del lines[k]
        j = j-1
    else:
        j = del_token(lines, "rotateAngle", i, j)

    k = find_token_exact(lines, "size_type", i, j)
    if k == -1:
        k = find_token_exact(lines, "size_kind", i, j)
    if k != -1:
        size_type = string.split(lines[k])[1]
        del lines[k]
        j = j-1
        if size_type in ["0", "original"]:
            j = del_token(lines, "width", i, j)
            j = del_token(lines, "height", i, j)
            j = del_token(lines, "scale", i, j)
        elif size_type in ["2", "scale"]:
            j = del_token(lines, "width", i, j)
            j = del_token(lines, "height", i, j)
            if get_value(lines, "scale", i, j) == "100":
                j = del_token(lines, "scale", i, j)
        else:
            j = del_token(lines, "scale", i, j)

    k = find_token_exact(lines, "lyxsize_type", i, j)
    if k == -1:
        k = find_token_exact(lines, "lyxsize_kind", i, j)
    if k != -1:
        lyxsize_type = string.split(lines[k])[1]
        del lines[k]
        j = j-1
        j = del_token(lines, "lyxwidth", i, j)
        j = del_token(lines, "lyxheight", i, j)
        if lyxsize_type not in ["2", "scale"] or \
           get_value(lines, "lyxscale", i, j) == "100":
            j = del_token(lines, "lyxscale", i, j)

    i = i+1
def change_tabular(file):
import os.path
from parser_tools import find_token, find_end_of_inset, get_next_paragraph, \
                         get_paragraph, get_value, del_token, is_nonempty_line,\
                         find_tokens, find_end_of, find_token_exact, find_tokens_exact,\
                         find_re, get_layout
from sys import stdin
from string import replace, split, find, strip, join
if i == -1:
break
space = regexp.match(file.body[i]).group(3)
- prepend = regexp.match(file.body[i]).group(1)
+ prepend = regexp.match(file.body[i]).group(1)
if space == '~':
file.body[i] = regexp.sub(prepend + '\\SpecialChar ~', file.body[i])
i = i + 1
else:
file.body[i] = regexp.sub(prepend, file.body[i])
file.body[i+1:i+1] = ''
- if space == "\\space":
- space = "\\ "
+ if space == "\\space":
+ space = "\\ "
i = insert_ert(file.body, i+1, 'Collapsed', space, file.format - 1, file.default_layout)
##
def rename_spaces(file):
    # Append "{}" to the space and thinspace InsetSpace tokens on every
    # body line ("replace" comes from the module's "from string import ...").
    for i in range(len(file.body)):
        file.body[i] = replace(file.body[i],"\\InsetSpace \\space","\\InsetSpace \\space{}")
        file.body[i] = replace(file.body[i],"\\InsetSpace \,","\\InsetSpace \\thinspace{}")
def revert_space_names(file):
    # Inverse of rename_spaces: restore the old space/thinspace token names.
    for i in range(len(file.body)):
        file.body[i] = replace(file.body[i],"\\InsetSpace \\space{}","\\InsetSpace \\space")
        file.body[i] = replace(file.body[i],"\\InsetSpace \\thinspace{}","\\InsetSpace \\,")
##
# Fragment of a loop scanning for "\layout"; on exhaustion it closes the
# current inset at the end of the body.  Reconstructed from a diff-corrupted
# span: old/new duplicate lines removed.
while 1:
    old_i = i
    i = find_token(file.body, "\\layout", i)
    if i == -1:
        i = len(file.body) - 1
        file.body[i:i] = ["\\end_inset","",""]
# Fragment: derive the inset status from an optional "collapsed true/false"
# line, consuming it when present.  Reconstructed from a diff-corrupted span.
if file.body[i][:9] == "collapsed":
    if file.body[i][9:] == "true":
        status = "collapsed"
    else:
        status = "open"
    del file.body[i]
else:
    status = "collapsed"
# Handle special default case:
if height == ' "1pt"' and innerpos == 'c':
# text to body[i] and return the (maybe incremented) line index i
def convert_ertbackslash(body, i, ert, format, default_layout):
    # Append the characters of `ert` to body[i], expanding '\\' to the
    # "\backslash " token (starting a new body line) and '\n' to either a
    # "\newline" (format <= 240) or an end/begin layout pair (newer formats).
    # Returns the index of the last body line written.
    for c in ert:
        if c == '\\':
            body[i] = body[i] + '\\backslash '
            i = i + 1
            body.insert(i, '')
        elif c == '\n':
            if format <= 240:
                body[i+1:i+1] = ['\\newline ', '']
                i = i + 2
            else:
                body[i+1:i+1] = ['\\end_layout', '', '\\begin_layout %s' % default_layout, '']
                i = i + 4
        else:
            body[i] = body[i] + c
    return i
params = ''
while lines[i][:1] == '\\' and split(lines[i][1:])[0] in par_params:
params = params + ' ' + strip(lines[i])
- i = i + 1
+ i = i + 1
return strip(params)
# Convert special lengths
if special != 'none':
- len = '%f\\' % len2value(len) + special
+ len = '%f\\' % len2value(len) + special
# Convert LyX units to LaTeX units
for unit in units.keys():
- if find(len, unit) != -1:
- len = '%f' % (len2value(len) / 100) + units[unit]
- break
+ if find(len, unit) != -1:
+ len = '%f' % (len2value(len) / 100) + units[unit]
+ break
return len
def len2value(len):
    # Return the numeric part of a length string such as "1.5cm" as a float.
    result = re.search('([+-]?[0-9.]+)', len)
    if result:
        return float(result.group(1))
    # No number means 1.0
    return 1.0
i = find_token(file.body, '\\begin_inset Frameless', i)
if i == -1:
return
- j = find_end_of_inset(file.body, i)
- if j == -1:
- file.warning("Malformed LyX file: Missing '\\end_inset'.")
- i = i + 1
- continue
- del file.body[i]
- j = j - 1
-
- # Gather parameters
- params = {'position':0, 'hor_pos':'c', 'has_inner_box':'1',
+ j = find_end_of_inset(file.body, i)
+ if j == -1:
+ file.warning("Malformed LyX file: Missing '\\end_inset'.")
+ i = i + 1
+ continue
+ del file.body[i]
+ j = j - 1
+
+ # Gather parameters
+ params = {'position':0, 'hor_pos':'c', 'has_inner_box':'1',
'inner_pos':1, 'use_parbox':'0', 'width':'100col%',
- 'special':'none', 'height':'1in',
- 'height_special':'totalheight', 'collapsed':'false'}
- for key in params.keys():
- value = replace(get_value(file.body, key, i, j), '"', '')
- if value != "":
- if key == 'position':
- # convert new to old position: 'position "t"' -> 0
- value = find_token(pos, value, 0)
- if value != -1:
- params[key] = value
- elif key == 'inner_pos':
- # convert inner position
- value = find_token(inner_pos, value, 0)
- if value != -1:
- params[key] = value
- else:
- params[key] = value
- j = del_token(file.body, key, i, j)
- i = i + 1
-
- # Convert to minipage or ERT?
- # Note that the inner_position and height parameters of a minipage
- # inset are ignored and not accessible for the user, although they
- # are present in the file format and correctly read in and written.
- # Therefore we convert to ERT if they do not have their LaTeX
- # defaults. These are:
- # - the value of "position" for "inner_pos"
- # - "\totalheight" for "height"
- if (params['use_parbox'] != '0' or
- params['has_inner_box'] != '1' or
- params['special'] != 'none' or
- params['height_special'] != 'totalheight' or
- len2value(params['height']) != 1.0):
+ 'special':'none', 'height':'1in',
+ 'height_special':'totalheight', 'collapsed':'false'}
+ for key in params.keys():
+ value = replace(get_value(file.body, key, i, j), '"', '')
+ if value != "":
+ if key == 'position':
+ # convert new to old position: 'position "t"' -> 0
+ value = find_token(pos, value, 0)
+ if value != -1:
+ params[key] = value
+ elif key == 'inner_pos':
+ # convert inner position
+ value = find_token(inner_pos, value, 0)
+ if value != -1:
+ params[key] = value
+ else:
+ params[key] = value
+ j = del_token(file.body, key, i, j)
+ i = i + 1
+
+ # Convert to minipage or ERT?
+ # Note that the inner_position and height parameters of a minipage
+ # inset are ignored and not accessible for the user, although they
+ # are present in the file format and correctly read in and written.
+ # Therefore we convert to ERT if they do not have their LaTeX
+ # defaults. These are:
+ # - the value of "position" for "inner_pos"
+ # - "\totalheight" for "height"
+ if (params['use_parbox'] != '0' or
+ params['has_inner_box'] != '1' or
+ params['special'] != 'none' or
+ params['height_special'] != 'totalheight' or
+ len2value(params['height']) != 1.0):
# Here we know that this box is not supported in file format 224.
# Therefore we need to convert it to ERT. We can't simply convert
j = j + 2
ert = '\\let\\endminipage\\endlyxtolyxminipage'
j = insert_ert(file.body, j, 'Collapsed', ert, file.format - 1, file.default_layout)
- j = j + 1
+ j = j + 1
file.body.insert(j, '')
- j = j + 1
+ j = j + 1
# LyX writes '%\n' after each box. Therefore we need to end our
# ERT with '%\n', too, since this may swallow a following space.
# We don't need to restore the original minipage after the inset
# end because the scope of the redefinition is the original box.
- else:
+ else:
- # Convert to minipage
- file.body[i:i] = ['\\begin_inset Minipage',
- 'position %d' % params['position'],
- 'inner_position %d' % params['inner_pos'],
- 'height "' + params['height'] + '"',
- 'width "' + params['width'] + '"',
- 'collapsed ' + params['collapsed']]
- i = i + 6
+ # Convert to minipage
+ file.body[i:i] = ['\\begin_inset Minipage',
+ 'position %d' % params['position'],
+ 'inner_position %d' % params['inner_pos'],
+ 'height "' + params['height'] + '"',
+ 'width "' + params['width'] + '"',
+ 'collapsed ' + params['collapsed']]
+ i = i + 6
def remove_branches(file):
if i == -1:
return
- j = find_token_exact(file.body, "filename", i)
+ j = find_token_exact(file.body, "filename", i)
if j == -1:
return
i = i + 1
- filename = split(file.body[j])[1]
- absname = os.path.normpath(os.path.join(file.dir, filename))
- if file.input == stdin and not os.path.isabs(filename):
- # We don't know the directory and cannot check the file.
- # We could use a heuristic and take the current directory,
- # and we could try to find out if filename has an extension,
- # but that would be just guesses and could be wrong.
- file.warning("""Warning: Can not determine whether file
+ filename = split(file.body[j])[1]
+ absname = os.path.normpath(os.path.join(file.dir, filename))
+ if file.input == stdin and not os.path.isabs(filename):
+ # We don't know the directory and cannot check the file.
+ # We could use a heuristic and take the current directory,
+ # and we could try to find out if filename has an extension,
+ # but that would be just guesses and could be wrong.
+ file.warning("""Warning: Can not determine whether file
%s
needs an extension when reading from standard input.
You may need to correct the file manually or run
lyx2lyx again with the .lyx file as commandline argument.""" % filename)
- continue
- # This needs to be the same algorithm as in pre 233 insetgraphics
- if access(absname, F_OK):
- continue
- if access(absname + ".ps", F_OK):
- file.body[j] = replace(file.body[j], filename, filename + ".ps")
- continue
- if access(absname + ".eps", F_OK):
- file.body[j] = replace(file.body[j], filename, filename + ".eps")
+ continue
+ # This needs to be the same algorithm as in pre 233 insetgraphics
+ if access(absname, F_OK):
+ continue
+ if access(absname + ".ps", F_OK):
+ file.body[j] = replace(file.body[j], filename, filename + ".ps")
+ continue
+ if access(absname + ".eps", F_OK):
+ file.body[j] = replace(file.body[j], filename, filename + ".eps")
##
[241, [convert_ert_paragraphs]],
[242, [convert_french]],
[243, [remove_paperpackage]],
- [244, [rename_spaces]],
- [245, [remove_quotestimes, convert_sgml_paragraphs]]]
+ [244, [rename_spaces]],
+ [245, [remove_quotestimes, convert_sgml_paragraphs]]]
revert = [[244, []],
- [243, [revert_space_names]],
- [242, []],
+ [243, [revert_space_names]],
+ [242, []],
[241, []],
[240, [revert_ert_paragraphs]],
[239, [revert_output_changes]],
def check_token(line, token):
if line[:len(token)] == token:
- return 1
+ return 1
return 0
# we can ignore this
def find_token(lines, token, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
m = len(token)
for i in xrange(start, end):
- if lines[i][:m] == token:
- return i
+ if lines[i][:m] == token:
+ return i
return -1
def find_token_exact(lines, token, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
for i in xrange(start, end):
x = string.split(lines[i])
y = string.split(token)
def find_tokens(lines, tokens, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
for i in xrange(start, end):
- for token in tokens:
- if lines[i][:len(token)] == token:
- return i
+ for token in tokens:
+ if lines[i][:len(token)] == token:
+ return i
return -1
def find_tokens_exact(lines, tokens, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
for i in xrange(start, end):
for token in tokens:
x = string.split(lines[i])
def find_re(lines, rexp, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
for i in xrange(start, end):
- if rexp.match(lines[i]):
- return i
+ if rexp.match(lines[i]):
+ return i
return -1
def find_token_backwards(lines, token, start):
m = len(token)
for i in xrange(start, -1, -1):
- line = lines[i]
- if line[:m] == token:
- return i
+ line = lines[i]
+ if line[:m] == token:
+ return i
return -1
def find_tokens_backwards(lines, tokens, start):
for i in xrange(start, -1, -1):
- line = lines[i]
- for token in tokens:
- if line[:len(token)] == token:
- return i
+ line = lines[i]
+ for token in tokens:
+ if line[:len(token)] == token:
+ return i
return -1
def get_value(lines, token, start, end = 0):
i = find_token_exact(lines, token, start, end)
if i == -1:
- return ""
+ return ""
if len(string.split(lines[i])) > 1:
return string.split(lines[i])[1]
else:
def del_token(lines, token, i, j):
k = find_token_exact(lines, token, i, j)
if k == -1:
- return j
+ return j
else:
- del lines[k]
- return j-1
+ del lines[k]
+ return j-1
# Finds the paragraph that contains line i.
else:
begin_layout = "\\begin_layout"
while i != -1:
- i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
+ i = find_tokens_backwards(lines, ["\\end_inset", begin_layout], i)
if i == -1: return -1
- if check_token(lines[i], begin_layout):
- return i
- i = find_beginning_of_inset(lines, i)
+ if check_token(lines[i], begin_layout):
+ return i
+ i = find_beginning_of_inset(lines, i)
return -1
else:
tokens = ["\\begin_inset", "\\begin_layout", "\\end_float", "\\end_body", "\\end_document"]
while i != -1:
- i = find_tokens(lines, tokens, i)
- if not check_token(lines[i], "\\begin_inset"):
- return i
- i = find_end_of_inset(lines, i)
+ i = find_tokens(lines, tokens, i)
+ if not check_token(lines[i], "\\begin_inset"):
+ return i
+ i = find_end_of_inset(lines, i)
return -1
count = 1
n = len(lines)
while i < n:
- i = find_tokens(lines, [end_token, start_token], i+1)
- if check_token(lines[i], start_token):
- count = count+1
- else:
- count = count-1
- if count == 0:
- return i
+ i = find_tokens(lines, [end_token, start_token], i+1)
+ if check_token(lines[i], start_token):
+ count = count+1
+ else:
+ count = count-1
+ if count == 0:
+ return i
return -1
def find_beginning_of(lines, i, start_token, end_token):
count = 1
while i > 0:
- i = find_tokens_backwards(lines, [start_token, end_token], i-1)
- if check_token(lines[i], end_token):
- count = count+1
- else:
- count = count-1
- if count == 0:
- return i
+ i = find_tokens_backwards(lines, [start_token, end_token], i-1)
+ if check_token(lines[i], end_token):
+ count = count+1
+ else:
+ count = count-1
+ if count == 0:
+ return i
return -1
i = i+1
j = find_end_of_tabular(lines, i)
if j == -1:
- return []
+ return []
while i <= j:
- if check_token(lines[i], "\\begin_inset"):
- i = find_end_of_inset(lines, i)+1
- else:
- result.append(i)
- i = i+1
+ if check_token(lines[i], "\\begin_inset"):
+ i = find_end_of_inset(lines, i)+1
+ else:
+ result.append(i)
+ i = i+1
return result
def find_nonempty_line(lines, start, end = 0):
if end == 0:
- end = len(lines)
+ end = len(lines)
for i in xrange(start, end):
- if is_nonempty_line(lines[i]):
- return i
+ if is_nonempty_line(lines[i]):
+ return i
return -1