X-Git-Url: https://git.lyx.org/gitweb/?a=blobdiff_plain;f=lib%2Flyx2lyx%2Fparser_tools.py;h=3b1322974e6878e058be8533b7ca8c73c60cca63;hb=9da74fe2078e24e1e7891784ecbfe33ff77e7f85;hp=4c698903b5cd2388b6c35d1267cad6de115d9a51;hpb=84242142094f0247be9cdc32bbbc534073ac2922;p=lyx.git

diff --git a/lib/lyx2lyx/parser_tools.py b/lib/lyx2lyx/parser_tools.py
index 4c698903b5..3b1322974e 100644
--- a/lib/lyx2lyx/parser_tools.py
+++ b/lib/lyx2lyx/parser_tools.py
@@ -1,6 +1,7 @@
 # This file is part of lyx2lyx
-# -*- coding: iso-8859-1 -*-
-# Copyright (C) 2002-2004 Dekel Tsur , José Matos
+# -*- coding: utf-8 -*-
+# Copyright (C) 2002-2011 Dekel Tsur ,
+# José Matos , Richard Heck
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
@@ -14,115 +15,351 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+'''
+This module offers several free functions to help parse lines.
+More documentation is below, but here is a quick guide to what
+they do. Optional arguments are marked by brackets.
+
+find_token(lines, token, start[, end[, ignorews]]):
+  Returns the first line i, start <= i < end, on which
+  token is found at the beginning. Returns -1 if not
+  found.
+  If ignorews is (given and) True, then differences
+  in whitespace do not count, except that there must be no
+  extra whitespace following token itself.
+
+find_token_exact(lines, token, start[, end]):
+  As find_token, but with ignorews True.
+
+find_tokens(lines, tokens, start[, end[, ignorews]]):
+  Returns the first line i, start <= i < end, on which
+  one of the tokens in tokens is found at the beginning.
+  Returns -1 if not found.
+  If ignorews is (given and) True, then differences
+  in whitespace do not count, except that there must be no
+  extra whitespace following token itself.
+
+find_tokens_exact(lines, tokens, start[, end]):
+  As find_tokens, but with ignorews True.
+
+find_token_backwards(lines, token, start):
+find_tokens_backwards(lines, tokens, start):
+  As before, but look backwards.
+
+find_re(lines, rexp, start[, end]):
+  As find_token, but rexp is a regular expression object,
+  so it has to be passed as e.g.: re.compile(r'...').
+
+get_value(lines, token, start[, end[, default]]):
+  Similar to find_token, but it returns what follows the
+  token on the found line. Example:
+    get_value(document.header, "\use_xetex", 0)
+  will find a line like:
+    \use_xetex true
+  and, in that case, return "true". (Note that whitespace
+  is stripped.) The final argument, default, defaults to "",
+  and is what is returned if we do not find anything. So you
+  can use that to set a default.
+
+get_quoted_value(lines, token, start[, end[, default]]):
+  Similar to get_value, but it will strip quotes off the
+  value, if they are present. So use this one for cases
+  where the value is normally quoted.
+
+get_option_value(line, option):
+  This assumes we have a line with something like:
+      option="value"
+  and returns value. Returns "" if not found.
+
+del_token(lines, token, start[, end]):
+  Like find_token, but deletes the line if it finds one.
+  Returns True if a line got deleted, otherwise False.
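+  For example (a usage sketch, reusing the \use_xetex header line from
+  the get_value example above):
+    del_token(document.header, "\use_xetex", 0)
+  removes that line from the header if it is present and returns True;
+  otherwise it leaves the header alone and returns False.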
+
+find_beginning_of(lines, i, start_token, end_token):
+  Here, start_token and end_token are meant to be a matching
+  pair, like "\begin_layout" and "\end_layout". We look for
+  the start_token that pairs with the end_token that occurs
+  on or after line i. Returns -1 if not found.
+  So, in the layout case, this would find the \begin_layout
+  for the layout line i is in.
+  Example:
+    ec = find_token(document.body, "</cell", i)
+    bc = find_beginning_of(document.body, ec, "<cell", "</cell")
+  Here, bc will be the beginning of the cell that ends on ec.
+
+find_end_of(lines, i, start_token, end_token):
+  Like find_beginning_of, but in the other direction: it finds
+  the end_token that pairs with the start_token on or before
+  line i.
+
+find_end_of_inset(lines, i):
+  Specialization of find_end_of for insets.
+
+find_end_of_layout(lines, i):
+  Specialization of find_end_of for layouts.
+
+get_containing_inset(lines, i):
+  Finds out what kind of inset line i is within. Returns a
+  list containing what follows \begin_inset on the line on
+  which the inset begins, plus the starting and ending line.
+  Returns False on any kind of error.
+
+get_containing_layout(lines, i):
+  As get_containing_inset, but for layouts.
+
+count_pars_in_inset(lines, i):
+  Counts the paragraphs within the inset containing line i.
+
+find_end_of_sequence(lines, i):
+  Returns the end of a sequence of identical layouts.
+
+'''
+
-import string
 import re
 
 
+# Utilities for one line
 def check_token(line, token):
-    return line[:len(token)] == token
+    """ check_token(line, token) -> bool
+    Return True if token is present in line and is the first element
+    else returns False."""
 
-# We need to check that the char after the token is space, but I think
-# we can ignore this
-def find_token(lines, token, start, end = 0):
-    if end == 0:
-        end = len(lines)
+    return line[:len(token)] == token
+
+
+def is_nonempty_line(line):
+    """ is_nonempty_line(line) -> bool
+
+    Return False if line is either empty or it has only whitespaces,
+    else return True."""
+    return line != " "*len(line)
+
+
+# Utilities for a list of lines
+def find_token(lines, token, start, end = 0, ignorews = False):
+    """ find_token(lines, token, start[[, end], ignorews]) -> int
+
+    Return the lowest line where token is found, and is the first
+    element, in lines[start, end].
+
+    If ignorews is True (default is False), then differences in
+    whitespace are ignored, except that there must be no extra
+    whitespace following token itself.
+
+    Return -1 on failure."""
+
+    if end == 0 or end > len(lines):
+        end = len(lines)
     m = len(token)
     for i in xrange(start, end):
-        if lines[i][:m] == token:
-            return i
+        if ignorews:
+            x = lines[i].split()
+            y = token.split()
+            if len(x) < len(y):
+                continue
+            if x[:len(y)] == y:
+                return i
+        else:
+            if lines[i][:m] == token:
+                return i
     return -1
 
 
-def find_token2(lines, token, start, end = 0):
-    if end == 0:
-        end = len(lines)
-    for i in xrange(start, end):
-        x = string.split(lines[i])
-        if len(x) > 0 and x[0] == token:
-            return i
-    return -1
+def find_token_exact(lines, token, start, end = 0):
+    return find_token(lines, token, start, end, True)
 
 
-def find_tokens(lines, tokens, start, end = 0):
-    if end == 0:
-        end = len(lines)
+def find_tokens(lines, tokens, start, end = 0, ignorews = False):
+    """ find_tokens(lines, tokens, start[[, end], ignorews]) -> int
+
+    Return the lowest line where one of the tokens in tokens is found,
+    and is the first element, in lines[start, end].
+
+    Return -1 on failure."""
+    if end == 0 or end > len(lines):
+        end = len(lines)
+
     for i in xrange(start, end):
-        line = lines[i]
-        for token in tokens:
-            if line[:len(token)] == token:
-                return i
+        for token in tokens:
+            if ignorews:
+                x = lines[i].split()
+                y = token.split()
+                if len(x) < len(y):
+                    continue
+                if x[:len(y)] == y:
+                    return i
+            else:
+                if lines[i][:len(token)] == token:
+                    return i
     return -1
 
 
+def find_tokens_exact(lines, tokens, start, end = 0):
+    return find_tokens(lines, tokens, start, end, True)
+
+
 def find_re(lines, rexp, start, end = 0):
-    if end == 0:
-        end = len(lines)
+    """ find_re(lines, rexp, start[, end]) -> int
+
+    Return the lowest line where rexp, a regular expression, is found
+    in lines[start, end].
+
+    Return -1 on failure."""
+
+    if end == 0 or end > len(lines):
+        end = len(lines)
     for i in xrange(start, end):
-        if rexp.match(lines[i]):
-            return i
+        if rexp.match(lines[i]):
+            return i
     return -1
 
 
 def find_token_backwards(lines, token, start):
+    """ find_token_backwards(lines, token, start) -> int
+
+    Return the highest line where token is found, and is the first
+    element, in lines[0, start].
+
+    Return -1 on failure."""
     m = len(token)
     for i in xrange(start, -1, -1):
-        line = lines[i]
-        if line[:m] == token:
-            return i
+        line = lines[i]
+        if line[:m] == token:
+            return i
     return -1
 
 
 def find_tokens_backwards(lines, tokens, start):
+    """ find_tokens_backwards(lines, tokens, start) -> int
+
+    Return the highest line where one of the tokens is found, and is
+    the first element, in lines[0, start].
+
+    Return -1 on failure."""
    for i in xrange(start, -1, -1):
-        line = lines[i]
-        for token in tokens:
-            if line[:len(token)] == token:
-                return i
+        line = lines[i]
+        for token in tokens:
+            if line[:len(token)] == token:
+                return i
     return -1
 
 
-def get_value(lines, token, start, end = 0):
-    i = find_token2(lines, token, start, end)
-    if i == -1:
-        return ""
-    if len(string.split(lines[i])) > 1:
-        return string.split(lines[i])[1]
-    else:
-        return ""
+def get_value(lines, token, start, end = 0, default = ""):
+    """ get_value(lines, token, start[[, end], default]) -> string
 
+    Find the next line that looks like:
+      token followed by other stuff
+    Returns "followed by other stuff" with leading and trailing
+    whitespace removed.
+    """
 
-def del_token(lines, token, i, j):
-    k = find_token2(lines, token, i, j)
+    i = find_token_exact(lines, token, start, end)
+    if i == -1:
+        return default
+    l = lines[i].split(None, 1)
+    if len(l) > 1:
+        return l[1].strip()
+    return default
+
+
+def get_quoted_value(lines, token, start, end = 0, default = ""):
+    """ get_quoted_value(lines, token, start[[, end], default]) -> string
+
+    Find the next line that looks like:
+      token "followed by other stuff"
+    Returns "followed by other stuff" with leading and trailing
+    whitespace and quotes removed. If there are no quotes, that is OK too.
+    So use get_value to preserve possible quotes, this one to remove them,
+    if they are there.
+    Note that we will NOT strip quotes from default!
+    """
+    val = get_value(lines, token, start, end, "")
+    if not val:
+      return default
+    return val.strip('"')
+
+
+def get_option_value(line, option):
+    rx = option + '\s*=\s*"([^"]+)"'
+    rx = re.compile(rx)
+    m = rx.search(line)
+    if not m:
+      return ""
+    return m.group(1)
+
+
+def set_option_value(line, option, value):
+    rx = '(' + option + '\s*=\s*")[^"]+"'
+    rx = re.compile(rx)
+    m = rx.search(line)
+    if not m:
+        return line
+    return re.sub(rx, '\g<1>' + value + '"', line)
+
+
+def del_token(lines, token, start, end = 0):
+    """ del_token(lines, token, start[, end]) -> bool
+
+    Find the first line in lines where token is the first element
+    and delete that line. Returns True if we deleted a line, False
+    if we did not."""
+
+    k = find_token_exact(lines, token, start, end)
     if k == -1:
-        return j
-    else:
-        del lines[k]
-        return j-1
-
-
-# Finds the paragraph that contains line i.
-def get_paragraph(lines, i):
-    while i != -1:
-        i = find_tokens_backwards(lines, ["\\end_inset", "\\layout"], i)
-        if i == -1: return -1
-        if check_token(lines[i], "\\layout"):
-            return i
-        i = find_beginning_of_inset(lines, i)
-    return -1
+        return False
+    del lines[k]
+    return True
 
 
-# Finds the paragraph after the paragraph that contains line i.
-def get_next_paragraph(lines, i):
-    while i != -1:
-        i = find_tokens(lines, ["\\begin_inset", "\\layout", "\\end_float", "\\the_end"], i)
-        if not check_token(lines[i], "\\begin_inset"):
-            return i
-        i = find_end_of_inset(lines, i)
+def find_beginning_of(lines, i, start_token, end_token):
+    count = 1
+    while i > 0:
+        i = find_tokens_backwards(lines, [start_token, end_token], i-1)
+        if i == -1:
+            return -1
+        if check_token(lines[i], end_token):
+            count = count+1
+        else:
+            count = count-1
+        if count == 0:
+            return i
     return -1
 
 
@@ -130,263 +367,163 @@ def find_end_of(lines, i, start_token, end_token):
     count = 1
     n = len(lines)
     while i < n:
-        i = find_tokens(lines, [end_token, start_token], i+1)
-        if check_token(lines[i], start_token):
-            count = count+1
-        else:
-            count = count-1
-        if count == 0:
-            return i
-    return -1
-
-
-# Finds the matching \end_inset
-def find_beginning_of(lines, i, start_token, end_token):
-    count = 1
-    while i > 0:
-        i = find_tokens_backwards(lines, [start_token, end_token], i-1)
-        if check_token(lines[i], end_token):
-            count = count+1
-        else:
-            count = count-1
-        if count == 0:
-            return i
+        i = find_tokens(lines, [end_token, start_token], i+1)
+        if i == -1:
+            return -1
+        if check_token(lines[i], start_token):
+            count = count+1
+        else:
+            count = count-1
+        if count == 0:
+            return i
     return -1
 
 
-# Finds the matching \end_inset
-def find_end_of_inset(lines, i):
-    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
-
-
-# Finds the matching \end_inset
-def find_beginning_of_inset(lines, i):
-    return find_beginning_of(lines, i, "\\begin_inset", "\\end_inset")
-
-
-def find_end_of_tabular(lines, i):
-    return find_end_of(lines, i, "<lyxtabular", "</lyxtabular>")
+def find_end_of_inset(lines, i):
+    return find_end_of(lines, i, "\\begin_inset", "\\end_inset")
+
+
+def find_end_of_layout(lines, i):
+    return find_end_of(lines, i, "\\begin_layout", "\\end_layout")
+
+
+def get_containing_inset(lines, i):
+    '''
+    Finds out what kind of inset line i is within. Returns a
+    list containing what follows \begin_inset on the line on
+    which the inset begins, plus the starting and ending line.
+    Returns False on any kind of error.
+    '''
+    j = i
+    while True:
+        stins = find_token_backwards(lines, "\\begin_inset", j)
+        if stins == -1:
+            return False
+        endins = find_end_of_inset(lines, stins)
+        if endins > j:
+            break
+        j = stins - 1
+
+    inset = get_value(lines, "\\begin_inset", stins)
+    if inset == "":
+        # shouldn't happen
+        return False
+    return (inset, stins, endins)
+
+
+def get_containing_layout(lines, i):
+    '''
+    Finds out what kind of layout line i is within. Returns a
+    list containing what follows \begin_layout on the line
+    on which the layout begins, plus the starting and ending line
+    and the start of the paragraph (after all params).
+    Returns False on any kind of error.
+    '''
+    j = i
+    while True:
+        stlay = find_token_backwards(lines, "\\begin_layout", j)
+        if stlay == -1:
+            return False
+        endlay = find_end_of_layout(lines, stlay)
+        if endlay > i:
+            break
+        j = stlay - 1
+
+    lay = get_value(lines, "\\begin_layout", stlay)
+    if lay == "":
+        # shouldn't happen
+        return False
+    par_params = ["\\noindent", "\\indent", "\\indent-toggle", "\\leftindent",
+        "\\start_of_appendix", "\\paragraph_spacing single",
+        "\\paragraph_spacing onehalf", "\\paragraph_spacing double",
+        "\\paragraph_spacing other", "\\align", "\\labelwidthstring"]
+    stpar = stlay
+    while True:
+        stpar += 1
+        if lines[stpar] not in par_params:
+            break
+    return (lay, stlay, endlay, stpar)
+
+
+def count_pars_in_inset(lines, i):
+    '''
+    Counts the paragraphs within this inset
+    '''
+    ins = get_containing_inset(lines, i)
+    if ins == -1:
+        return -1
+    pars = 0
+    for j in range(ins[1], ins[2]):
+        m = re.match(r'\\begin_layout (.*)', lines[j])
+        if m and get_containing_inset(lines, j)[0] == ins[0]:
+            pars += 1
+
+    return pars
+
+
+def find_end_of_sequence(lines, i):
+    '''
+    Returns the end of a sequence of identical layouts.
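+    A sequence here means a run of consecutive paragraphs that all use
+    the layout of the paragraph containing line i (for example, the
+    items of a single list); material nested inside \begin_deeper blocks
+    is skipped over and treated as part of the sequence. The value
+    returned is the line that closes the last element of the sequence.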
+ ''' + lay = get_containing_layout(lines, i) + if lay == False: + return -1 + layout = lay[0] + endlay = lay[2] + i = endlay + while True: + m = re.match(r'\\begin_layout (.*)', lines[i]) + if m and m.group(1) != layout: + return endlay + elif lines[i] == "\\begin_deeper": + j = find_end_of(lines, i, "\\begin_deeper", "\\end_deeper") + if j != -1: + i = j + endlay = j + continue + if m and m.group(1) == layout: + endlay = find_end_of_layout(lines, i) + i = endlay + continue + if i == len(lines) - 1: + break + i = i + 1 + + return endlay -## -# file format version -# -format_re = re.compile(r"(\d)[\.,]?(\d\d)") -fileformat = re.compile(r"\\lyxformat\s*(\S*)") -lst_ft = [210, 215, 216, 217, 218, 220, 221, 223, 224, 225, 226, 227, 228, 229, - 230, 231, 232, 233, 234] - -format_relation = [("0_10", [210], ["0.10.7","0.10"]), - ("0_12", [215], ["0.12","0.12.1","0.12"]), - ("1_0_0", [215], ["1.0.0","1.0"]), - ("1_0_1", [215], ["1.0.1","1.0.2","1.0.3","1.0.4", "1.1.2","1.1"]), - ("1_1_4", [215], ["1.1.4","1.1"]), - ("1_1_5", [216], ["1.1.5","1.1.5fix1","1.1.5fix2","1.1"]), - ("1_1_6", [217], ["1.1.6","1.1.6fix1","1.1.6fix2","1.1"]), - ("1_1_6fix3", [218], ["1.1.6fix3","1.1.6fix4","1.1"]), - ("1_2", [220], ["1.2.0","1.2.1","1.2.3","1.2.4","1.2"]), - ("1_3", [221], ["1.3.0","1.3.1","1.3.2","1.3.3","1.3.4","1.3"]), - ("1_4", [223,224,225,226,227,228,229,230,231,232,233,234], ["1.4.0cvs","1.4"])] - - -def lyxformat(format, opt): - result = format_re.match(format) - if result: - format = int(result.group(1) + result.group(2)) - else: - opt.error(str(format) + ": " + "Invalid LyX file.") - - if format in lst_ft: - return format - - opt.error(str(format) + ": " + "Format not supported.") - return None - - -def read_format(header, opt): - for line in header: - result = fileformat.match(line) - if result: - return lyxformat(result.group(1), opt) - else: - opt.error("Invalid LyX File.") - return None - - -def set_format(lines, number): - if int(number) <= 217: - number = float(number)/100 - i = find_token(lines, "\\lyxformat", 0) - lines[i] = "\\lyxformat %s" % number - - -def get_end_format(): - return format_relation[-1:][0][1][-1:][0] - - -def get_backend(textclass): - if textclass == "linuxdoc" or textclass == "manpage": - return "linuxdoc" - if textclass[:7] == "docbook": - return "docbook" - return "latex" - - -def chain(opt, initial_version): - """ This is where all the decisions related with the convertion are taken""" - - format = opt.format - if opt.start: - if opt.start != format: - opt.warning("%s: %s %s" % ("Proposed file format and input file formats do not match:", opt.start, format)) - else: - opt.start = format - - if not opt.end: - opt.end = get_end_format() - - correct_version = 0 - - for rel in format_relation: - if initial_version in rel[2]: - if format in rel[1]: - initial_step = rel[0] - correct_version = 1 - break - - if not correct_version: - if format <= 215: - opt.warning("Version does not match file format, discarding it.") - for rel in format_relation: - if format in rel[1]: - initial_step = rel[0] - break - else: - # This should not happen, really. 
- opt.error("Format not supported.") - - # Find the final step - for rel in format_relation: - if opt.end in rel[1]: - final_step = rel[0] - break - else: - opt.error("Format not supported.") - - # Convertion mode, back or forth - steps = [] - if (initial_step, opt.start) < (final_step, opt.end): - mode = "convert" - first_step = 1 - for step in format_relation: - if initial_step <= step[0] <= final_step: - if first_step and len(step[1]) == 1: - first_step = 0 - continue - steps.append(step[0]) - else: - mode = "revert" - relation_format = format_relation - relation_format.reverse() - last_step = None - - for step in relation_format: - if final_step <= step[0] <= initial_step: - steps.append(step[0]) - last_step = step - - if last_step[1][-1] == opt.end: - steps.pop() - - return mode, steps
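
Usage sketch (illustrative only, not part of the patch above): every helper operates on a plain list of strings, so a conversion routine can exercise them on hand-built fragments. The sample header and body lines below, including the format number 413, are made up; the module is assumed to be importable as parser_tools, as it is inside lib/lyx2lyx, and the calls follow the docstrings in the diff.

    # Illustrative example; mirrors the docstrings in the diff above.
    from parser_tools import find_token, find_end_of, get_value, del_token

    header = ["\\lyxformat 413", "\\use_xetex true"]
    body = ["\\begin_body",
            "\\begin_inset Note Note",
            "status open",
            "\\end_inset",
            "\\end_body"]

    # get_value returns whatever follows the token, whitespace stripped.
    assert get_value(header, "\\use_xetex", 0) == "true"

    # del_token removes the matching line and reports whether it did.
    assert del_token(header, "\\use_xetex", 0)
    assert header == ["\\lyxformat 413"]

    # find_token and find_end_of pair up an inset's begin and end lines.
    i = find_token(body, "\\begin_inset Note", 0)
    j = find_end_of(body, i, "\\begin_inset", "\\end_inset")
    assert (i, j) == (1, 3)

Because the helpers take and return nothing more than lists and indices, a sketch like this runs unchanged against the revised module under Python 2, which is what the xrange calls in the diff target.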