# This file is part of lyx2lyx
-# -*- coding: iso-8859-1 -*-
-# Copyright (C) 2002-2004 Dekel Tsur <dekel@lyx.org>, José Matos <jamatos@lyx.org>
+# -*- coding: utf-8 -*-
+# Copyright (C) 2002-2011 Dekel Tsur <dekel@lyx.org>,
+# José Matos <jamatos@lyx.org>, Richard Heck <rgheck@comcast.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+'''
+This module offers several free functions to help parse lines.
+More documentation is below, but here is a quick guide to what
+they do. Optional arguments are marked by brackets.
+
+find_token(lines, token, start[, end[, ignorews]]):
+ Returns the first line i, start <= i < end, on which
+ token is found at the beginning. Returns -1 if not
+ found.
+ If ignorews is (given and) True, then differences
+ in whitespace do not count, except that there must be no
+ extra whitespace following token itself.
+
+find_token_exact(lines, token, start[, end]):
+ As find_token, but with ignorews set to True.
+
+find_tokens(lines, tokens, start[, end[, ignorews]]):
+ Returns the first line i, start <= i < end, on which
+ one of the tokens in tokens is found at the beginning.
+ Returns -1 if not found.
+ If ignorews is (given and) True, then differences
+ in whitespace do not count, except that there must be no
+ extra whitespace following token itself.
+
+find_tokens_exact(lines, token, start[, end]):
+ As find_tokens, but with ignorews True.
+
+find_token_backwards(lines, token, start):
+find_tokens_backwards(lines, tokens, start):
+ As before, but look backwards.
+
+find_re(lines, rexp, start[, end]):
+ As find_token, but rexp is a regular expression object,
+ so it has to be passed as e.g.: re.compile(r'...').
+
+get_value(lines, token, start[, end[, default]]):
+ Similar to find_token, but it returns what follows the
+ token on the found line. Example:
+ get_value(document.header, "\\use_xetex", 0)
+ will find a line like:
+ \\use_xetex true
+ and, in that case, return "true". (Note that whitespace
+ is stripped.) The final argument, default, defaults to "",
+ and is what is returned if we do not find anything. So you
+ can use that to set a default.
+
+get_quoted_value(lines, token, start[, end[, default]]):
+ Similar to get_value, but it will strip quotes off the
+ value, if they are present. So use this one for cases
+ where the value is normally quoted.
+
+get_option_value(line, option):
+ This assumes we have a line with something like:
+ option="value"
+ and returns value. Returns "" if not found.
+
+del_token(lines, token, start[, end]):
+ Like find_token, but deletes the line if it finds one.
+ Returns True if a line got deleted, otherwise False.
+
+find_beginning_of(lines, i, start_token, end_token):
+ Here, start_token and end_token are meant to be a matching
+ pair, like "\\begin_layout" and "\\end_layout". We look for
+ the start_token that pairs with the end_token that occurs
+ on or after line i. Returns -1 if not found.
+ So, in the layout case, this would find the \\begin_layout
+ for the layout line i is in.
+ Example:
+ ec = find_token(document.body, "</cell", i)
+ bc = find_beginning_of(document.body, ec, \
+ "<cell", "</cell")
+ Now, assuming no -1s, bc-ec wraps the cell for line i.
+
+find_end_of(lines, i, start_token, end_token):
+ Like find_beginning_of, but looking for the matching
+ end_token. This might look like:
+ bc = find_token_(document.body, "<cell", i)
+ ec = find_end_of(document.body, bc, "<cell", "</cell")
+ Now, assuming no -1s, bc-ec wrap the next cell.
+
+find_end_of_inset(lines, i):
+ Specialization of find_end_of for insets.
+
+find_end_of_layout(lines, i):
+ Specialization of find_end_of for layouts.
+
+find_end_of_sequence(lines, i):
+ Find the end of the sequence of layouts of the same kind.
+ Considers nesting. If the last paragraph in sequence is nested,
+ the position of the last \end_deeper is returned, else
+ the position of the last \end_layout.
+
+is_in_inset(lines, i, inset):
+ Checks if line i is in an inset of the given type.
+ If so, returns starting and ending lines. Otherwise,
+ returns False.
+ Example:
+ is_in_inset(document.body, i, "\\begin_inset Tabular")
+ returns False unless i is within a table. If it is, then
+ it returns the line on which the table begins and the one
+    on which it ends. Note that this pair will evaluate to
+ boolean True, so
+ if is_in_inset(...):
+ will do what you expect.
+
+get_containing_inset(lines, i):
+ Finds out what kind of inset line i is within. Returns a
+ list containing what follows \begin_inset on the line
+ on which the inset begins, plus the starting and ending line.
+ Returns False on any kind of error or if it isn't in an inset.
+ So get_containing_inset(document.body, i) might return:
+ ("CommandInset ref", 300, 306)
+ if i is within an InsetRef beginning on line 300 and ending
+ on line 306.
+
+get_containing_layout(lines, i):
+ As get_containing_inset, but for layout. Additionally returns the
+ position of real paragraph start (after par params) as 4th value.
+
+find_nonempty_line(lines, start[, end]):
+ Finds the next non-empty line.
+
+check_token(line, token):
+ Does line begin with token?
+
+is_nonempty_line(line):
+ Does line contain something besides whitespace?
+
+count_pars_in_inset(lines, i):
+ Counts the paragraphs inside an inset.
+
+'''
-import string
import re
+# Utilities for one line
def check_token(line, token):
- if line[:len(token)] == token:
- return 1
- return 0
+ """ check_token(line, token) -> bool
-# We need to check that the char after the token is space, but I think
-# we can ignore this
-def find_token(lines, token, start, end = 0):
- if end == 0:
- end = len(lines)
+ Return True if token is present in line and is the first element
+ else returns False."""
+
+ return line[:len(token)] == token
+
+
+def is_nonempty_line(line):
+ """ is_nonempty_line(line) -> bool
+
+ Return False if line is either empty or it has only whitespaces,
+ else return True."""
+ return line != " "*len(line)
+
+
+# Utilities for a list of lines
+def find_token(lines, token, start, end = 0, ignorews = False):
+    """ find_token(lines, token, start[, end[, ignorews]]) -> int
+
+ Return the lowest line where token is found, and is the first
+ element, in lines[start, end].
+
+ If ignorews is True (default is False), then differences in
+ whitespace are ignored, except that there must be no extra
+ whitespace following token itself.
+
+ Return -1 on failure."""
+
+ if end == 0 or end > len(lines):
+ end = len(lines)
m = len(token)
- for i in xrange(start, end):
- if lines[i][:m] == token:
- return i
+ for i in range(start, end):
+ if ignorews:
+ x = lines[i].split()
+ y = token.split()
+ if len(x) < len(y):
+ continue
+ if x[:len(y)] == y:
+ return i
+ else:
+ if lines[i][:m] == token:
+ return i
return -1
-def find_token2(lines, token, start, end = 0):
- if end == 0:
- end = len(lines)
- for i in xrange(start, end):
- x = string.split(lines[i])
- if len(x) > 0 and x[0] == token:
- return i
- return -1
-def find_tokens(lines, tokens, start, end = 0):
- if end == 0:
- end = len(lines)
- for i in xrange(start, end):
- line = lines[i]
- for token in tokens:
- if line[:len(token)] == token:
- return i
+def find_token_exact(lines, token, start, end = 0):
+ return find_token(lines, token, start, end, True)
+
+
+def find_tokens(lines, tokens, start, end = 0, ignorews = False):
+    """ find_tokens(lines, tokens, start[, end[, ignorews]]) -> int
+
+ Return the lowest line where one token in tokens is found, and is
+ the first element, in lines[start, end].
+
+ Return -1 on failure."""
+ if end == 0 or end > len(lines):
+ end = len(lines)
+
+ for i in range(start, end):
+ for token in tokens:
+ if ignorews:
+ x = lines[i].split()
+ y = token.split()
+ if len(x) < len(y):
+ continue
+ if x[:len(y)] == y:
+ return i
+ else:
+ if lines[i][:len(token)] == token:
+ return i
return -1
+
+def find_tokens_exact(lines, tokens, start, end = 0):
+ return find_tokens(lines, tokens, start, end, True)
+
+
def find_re(lines, rexp, start, end = 0):
- if end == 0:
- end = len(lines)
- for i in xrange(start, end):
- if rexp.match(lines[i]):
- return i
+    """ find_re(lines, rexp, start[, end]) -> int
+
+ Return the lowest line where rexp, a regular expression, is found
+ in lines[start, end].
+
+ Return -1 on failure."""
+
+ if end == 0 or end > len(lines):
+ end = len(lines)
+ for i in range(start, end):
+ if rexp.match(lines[i]):
+ return i
return -1
+
def find_token_backwards(lines, token, start):
+ """ find_token_backwards(lines, token, start) -> int
+
+ Return the highest line where token is found, and is the first
+ element, in lines[start, end].
+
+ Return -1 on failure."""
m = len(token)
- for i in xrange(start, -1, -1):
- line = lines[i]
- if line[:m] == token:
- return i
+ for i in range(start, -1, -1):
+ line = lines[i]
+ if line[:m] == token:
+ return i
return -1
+
def find_tokens_backwards(lines, tokens, start):
- for i in xrange(start, -1, -1):
- line = lines[i]
- for token in tokens:
- if line[:len(token)] == token:
- return i
+ """ find_tokens_backwards(lines, token, start) -> int
+
+ Return the highest line where token is found, and is the first
+ element, in lines[end, start].
+
+ Return -1 on failure."""
+ for i in range(start, -1, -1):
+ line = lines[i]
+ for token in tokens:
+ if line[:len(token)] == token:
+ return i
return -1
-def get_value(lines, token, start, end = 0):
- i = find_token2(lines, token, start, end)
+
+def get_value(lines, token, start, end = 0, default = ""):
+    """ get_value(lines, token, start[, end[, default]]) -> string
+
+ Find the next line that looks like:
+ token followed by other stuff
+ Returns "followed by other stuff" with leading and trailing
+ whitespace removed.
+ """
+
+ i = find_token_exact(lines, token, start, end)
if i == -1:
- return ""
- if len(string.split(lines[i])) > 1:
- return string.split(lines[i])[1]
- else:
- return ""
-
-def del_token(lines, token, i, j):
- k = find_token2(lines, token, i, j)
+ return default
+ l = lines[i].split(None, 1)
+ if len(l) > 1:
+ return l[1].strip()
+ return default
+
+
+def get_quoted_value(lines, token, start, end = 0, default = ""):
+    """ get_quoted_value(lines, token, start[, end[, default]]) -> string
+
+ Find the next line that looks like:
+ token "followed by other stuff"
+ Returns "followed by other stuff" with leading and trailing
+ whitespace and quotes removed. If there are no quotes, that is OK too.
+ So use get_value to preserve possible quotes, this one to remove them,
+ if they are there.
+ Note that we will NOT strip quotes from default!
+ """
+ val = get_value(lines, token, start, end, "")
+ if not val:
+ return default
+ return val.strip('"')
+
+
+def get_option_value(line, option):
+ rx = option + '\s*=\s*"([^"]+)"'
+ rx = re.compile(rx)
+ m = rx.search(line)
+ if not m:
+ return ""
+ return m.group(1)
+
+
+def set_option_value(line, option, value):
+ rx = '(' + option + '\s*=\s*")[^"]+"'
+ rx = re.compile(rx)
+ m = rx.search(line)
+ if not m:
+ return line
+ return re.sub(rx, '\g<1>' + value + '"', line)
+
+
+def del_token(lines, token, start, end = 0):
+ """ del_token(lines, token, start, end) -> int
+
+ Find the first line in lines where token is the first element
+ and delete that line. Returns True if we deleted a line, False
+ if we did not."""
+
+ k = find_token_exact(lines, token, start, end)
if k == -1:
- return j
- else:
- del lines[k]
- return j-1
-
-# Finds the paragraph that contains line i.
-def get_paragraph(lines, i):
- while i != -1:
- i = find_tokens_backwards(lines, ["\\end_inset", "\\layout"], i)
- if i == -1: return -1
- if check_token(lines[i], "\\layout"):
- return i
- i = find_beginning_of_inset(lines, i)
- return -1
+ return False
+ del lines[k]
+ return True
-# Finds the paragraph after the paragraph that contains line i.
-def get_next_paragraph(lines, i):
- while i != -1:
- i = find_tokens(lines, ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"], i)
- if not check_token(lines[i], "\\begin_inset"):
- return i
- i = find_end_of_inset(lines, i)
- return -1
-
-def find_end_of(lines, i, start_token, end_token):
- count = 1
- n = len(lines)
- while i < n:
- i = find_tokens(lines, [end_token, start_token], i+1)
- if check_token(lines[i], start_token):
- count = count+1
- else:
- count = count-1
- if count == 0:
- return i
- return -1
-# Finds the matching \end_inset
def find_beginning_of(lines, i, start_token, end_token):
count = 1
while i > 0:
- i = find_tokens_backwards(lines, [start_token, end_token], i-1)
- if check_token(lines[i], end_token):
- count = count+1
- else:
- count = count-1
- if count == 0:
- return i
+ i = find_tokens_backwards(lines, [start_token, end_token], i-1)
+ if i == -1:
+ return -1
+ if check_token(lines[i], end_token):
+ count = count+1
+ else:
+ count = count-1
+ if count == 0:
+ return i
return -1
-# Finds the matching \end_inset
-def find_end_of_inset(lines, i):
- return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
-# Finds the matching \end_inset
-def find_beginning_of_inset(lines, i):
- return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
-
-def find_end_of_tabular(lines, i):
- return find_end_of(lines, i, "<lyxtabular", "</lyxtabular")
-
-def get_tabular_lines(lines, i):
- result = []
- i = i+1
- j = find_end_of_tabular(lines, i)
- if j == -1:
- return []
-
- while i <= j:
- if check_token(lines[i], "\\begin_inset"):
- i = find_end_of_inset(lines, i)+1
- else:
- result.append(i)
- i = i+1
- return result
+def find_end_of(lines, i, start_token, end_token):
+ count = 1
+ n = len(lines)
+ while i < n:
+ i = find_tokens(lines, [end_token, start_token], i+1)
+ if i == -1:
+ return -1
+ if check_token(lines[i], start_token):
+ count = count+1
+ else:
+ count = count-1
+ if count == 0:
+ return i
+ return -1
-def is_nonempty_line(line):
- return line != " "*len(line)
def find_nonempty_line(lines, start, end = 0):
if end == 0:
- end = len(lines)
- for i in xrange(start, end):
- if is_nonempty_line(lines[i]):
- return i
+ end = len(lines)
+ for i in range(start, end):
+ if is_nonempty_line(lines[i]):
+ return i
return -1
-##
-# Tools for file reading
-#
-def read_file(header, body, opt):
- """Reads a file into the header and body parts"""
- preamble = 0
-
- while 1:
- line = opt.input.readline()
- if not line:
- opt.error("Invalid LyX file.")
-
- line = line[:-1]
- if check_token(line, '\\begin_preamble'):
- preamble = 1
- if check_token(line, '\\end_preamble'):
- preamble = 0
-
- if not preamble:
- line = string.strip(line)
-
- if not line and not preamble:
- break
-
- header.append(line)
-
- while 1:
- line = opt.input.readline()
- if not line:
- break
- body.append(line[:-1])
-
-def write_file(header, body, opt):
- for line in header:
- opt.output.write(line+"\n")
- opt.output.write("\n")
- for line in body:
- opt.output.write(line+"\n")
-
-##
-# lyx version
-#
-original_version = re.compile(r"\#LyX (\S*)")
-def read_version(header):
- for line in header:
- if line[0] != "#":
- return None
-
- result = original_version.match(line)
- if result:
- return result.group(1)
- return None
-
-def set_version(lines, version):
- lines[0] = "#LyX %s created this file. For more info see http://www.lyx.org/" % version
- if lines[1][0] == '#':
- del lines[1]
+def find_end_of_inset(lines, i):
+ " Find end of inset, where lines[i] is included."
+ return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
-##
-# file format version
-#
-format_re = re.compile(r"(\d)[\.,]?(\d\d)")
-fileformat = re.compile(r"\\lyxformat\s*(\S*)")
-lst_ft = [210, 215, 216, 217, 218, 220, 221, 223, 224, 225, 226, 227, 228, 229,
- 230, 231, 232]
-
-format_relation = [("0_10", [210], ["0.10.7","0.10"]),
- ("0_12", [215], ["0.12","0.12.1","0.12"]),
- ("1_0_0", [215], ["1.0.0","1.0"]),
- ("1_0_1", [215], ["1.0.1","1.0.2","1.0.3","1.0.4", "1.1.2","1.1"]),
- ("1_1_4", [215], ["1.1.4","1.1"]),
- ("1_1_5", [216], ["1.1.5","1.1.5fix1","1.1.5fix2","1.1"]),
- ("1_1_6", [217], ["1.1.6","1.1.6fix1","1.1.6fix2","1.1"]),
- ("1_1_6fix3", [218], ["1.1.6fix3","1.1.6fix4","1.1"]),
- ("1_2", [220], ["1.2.0","1.2.1","1.2.3","1.2.4","1.2"]),
- ("1_3", [221], ["1.3.0","1.3.1","1.3.2","1.3.3","1.3.4","1.3"]),
- ("1_4", [223,224,225,226,227,228,229,230,231,232], ["1.4.0cvs","1.4"])]
-
-def lyxformat(format, opt):
- result = format_re.match(format)
- if result:
- format = int(result.group(1) + result.group(2))
- else:
- opt.error(str(format) + ": " + "Invalid LyX file.")
-
- if format in lst_ft:
- return format
-
- opt.error(str(format) + ": " + "Format no supported.")
- return None
-
-def read_format(header, opt):
- for line in header:
- result = fileformat.match(line)
- if result:
- return lyxformat(result.group(1), opt)
- else:
- opt.error("Invalid LyX File.")
- return None
-
-def set_format(lines, number):
- if int(number) <= 217:
- number = float(number)/100
- i = find_token(lines, "\\lyxformat", 0)
- lines[i] = "\\lyxformat %s" % number
-
-def get_end_format():
- return format_relation[-1:][0][1][-1:][0]
-
-def chain(opt, initial_version):
- """ This is where all the decisions related with the convertion are taken"""
-
- format = opt.format
- if opt.start:
- if opt.start != format:
- opt.warning("%s: %s %s" % ("Proposed file format and input file formats do not match:", opt.start, format))
- else:
- opt.start = format
-
- if not opt.end:
- opt.end = get_end_format()
-
- correct_version = 0
-
- for rel in format_relation:
- if initial_version in rel[2]:
- if format in rel[1]:
- initial_step = rel[0]
- correct_version = 1
- break
-
- if not correct_version:
- if format <= 215:
- opt.warning("Version does not match file format, discarding it.")
- for rel in format_relation:
- if format in rel[1]:
- initial_step = rel[0]
- break
- else:
- # This should not happen, really.
- opt.error("Format not supported.")
-
- # Find the final step
- for rel in format_relation:
- if opt.end in rel[1]:
- final_step = rel[0]
- break
- else:
- opt.error("Format not supported.")
-
- # Convertion mode, back or forth
- steps = []
- if (initial_step, opt.start) < (final_step, opt.end):
- mode = "convert"
- first_step = 1
- for step in format_relation:
- if initial_step <= step[0] <= final_step:
- if first_step and len(step[1]) == 1:
- first_step = 0
- continue
- steps.append(step[0])
- else:
- mode = "revert"
- for step in format_relation:
- if final_step <= step[0] <= initial_step:
- steps.insert(0, step[0])
- if step[1][-1:] == opt.end:
- del steps[0]
+def find_end_of_layout(lines, i):
+ " Find end of layout, where lines[i] is included."
+ return find_end_of(lines, i, "\\begin_layout", "\\end_layout")
+
+
+def is_in_inset(lines, i, inset):
+ '''
+ Checks if line i is in an inset of the given type.
+ If so, returns starting and ending lines.
+ Otherwise, returns False.
+ Example:
+ is_in_inset(document.body, i, "\\begin_inset Tabular")
+ returns False unless i is within a table. If it is, then
+ it returns the line on which the table begins and the one
+    on which it ends. Note that this pair will evaluate to
+ boolean True, so
+ if is_in_inset(...):
+ will do what you expect.
+ '''
+ defval = (-1, -1)
+ stins = find_token_backwards(lines, inset, i)
+ if stins == -1:
+ return defval
+ endins = find_end_of_inset(lines, stins)
+ # note that this includes the notfound case.
+ if endins < i:
+ return defval
+ return (stins, endins)
+
+
+def get_containing_inset(lines, i):
+ '''
+ Finds out what kind of inset line i is within. Returns a
+ list containing (i) what follows \begin_inset on the line
+ on which the inset begins, plus the starting and ending line.
+ Returns False on any kind of error or if it isn't in an inset.
+ '''
+ j = i
+ while True:
+ stins = find_token_backwards(lines, "\\begin_inset", j)
+ if stins == -1:
+ return False
+ endins = find_end_of_inset(lines, stins)
+ if endins > j:
+ break
+ j = stins - 1
+
+ if endins < i:
+ return False
+
+ inset = get_value(lines, "\\begin_inset", stins)
+ if inset == "":
+ # shouldn't happen
+ return False
+ return (inset, stins, endins)
+
+
+def get_containing_layout(lines, i):
+ '''
+ Finds out what kind of layout line i is within. Returns a
+ list containing what follows \begin_layout on the line
+ on which the layout begins, plus the starting and ending line
+ and the start of the paragraph (after all params). I.e, returns:
+ (layoutname, layoutstart, layoutend, startofcontent)
+ Returns False on any kind of error.
+ '''
+ j = i
+ while True:
+ stlay = find_token_backwards(lines, "\\begin_layout", j)
+ if stlay == -1:
+ return False
+ endlay = find_end_of_layout(lines, stlay)
+ if endlay > i:
+ break
+ j = stlay - 1
+
+ if endlay < i:
+ return False
+
+ lay = get_value(lines, "\\begin_layout", stlay)
+ if lay == "":
+ # shouldn't happen
+ return False
+ par_params = ["\\noindent", "\\indent", "\\indent-toggle", "\\leftindent",
+ "\\start_of_appendix", "\\paragraph_spacing", "\\align",
+ "\\labelwidthstring"]
+ stpar = stlay
+ while True:
+ stpar += 1
+ if lines[stpar].split(' ', 1)[0] not in par_params:
+ break
+ return (lay, stlay, endlay, stpar)
+
+
+def count_pars_in_inset(lines, i):
+ '''
+ Counts the paragraphs within this inset
+ '''
+ ins = get_containing_inset(lines, i)
+ if ins == -1:
+ return -1
+ pars = 0
+ for j in range(ins[1], ins[2]):
+ m = re.match(r'\\begin_layout (.*)', lines[j])
+ if m and get_containing_inset(lines, j)[0] == ins[0]:
+ pars += 1
+
+ return pars
+
+
+def find_end_of_sequence(lines, i):
+ '''
+ Returns the end of a sequence of identical layouts.
+ '''
+ lay = get_containing_layout(lines, i)
+ if lay == False:
+ return -1
+ layout = lay[0]
+ endlay = lay[2]
+ i = endlay
+ while True:
+ m = re.match(r'\\begin_layout (.*)', lines[i])
+ if m and m.group(1) != layout:
+ return endlay
+ elif lines[i] == "\\begin_deeper":
+ j = find_end_of(lines, i, "\\begin_deeper", "\\end_deeper")
+ if j != -1:
+ i = j
+ endlay = j
+ continue
+ if m and m.group(1) == layout:
+ endlay = find_end_of_layout(lines, i)
+ i = endlay
+ continue
+ if i == len(lines) - 1:
+ break
+ i = i + 1
+
+ return endlay
- return mode, steps