X-Git-Url: https://git.lyx.org/gitweb/?a=blobdiff_plain;f=lib%2Flyx2lyx%2Fparser_tools.py;h=e32ac5dc4fca7d879a933e8f9c0d7550aa800716;hb=b33b352171813f99a8fbf8e8b6f6c445025d94f8;hp=9a01514ef8ce43955f9bd066ce55be450444ba5d;hpb=3f47eb1883907f99b5e62e9082c3bb56d4da201a;p=lyx.git

diff --git a/lib/lyx2lyx/parser_tools.py b/lib/lyx2lyx/parser_tools.py
index 9a01514ef8..e32ac5dc4f 100644
--- a/lib/lyx2lyx/parser_tools.py
+++ b/lib/lyx2lyx/parser_tools.py
@@ -1,6 +1,6 @@
 # This file is part of lyx2lyx
 # -*- coding: utf-8 -*-
-# Copyright (C) 2002-2010 Dekel Tsur ,
+# Copyright (C) 2002-2011 Dekel Tsur ,
 # José Matos , Richard Heck
 #
 # This program is free software; you can redistribute it and/or
@@ -15,7 +15,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA


 '''
@@ -23,23 +23,27 @@ This module offers several free functions to help parse lines.
 More documentation is below, but here is a quick guide to what
 they do. Optional arguments are marked by brackets.

-find_token(lines, token, start[, end[, exact]]):
+find_token(lines, token, start[, end[, ignorews]]):
   Returns the first line i, start <= i < end, on which
   token is found at the beginning. Returns -1 if not
-  found. If exact is (given and) True, then differences
-  in whitespace do not count.
+  found.
+  If ignorews is (given and) True, then differences
+  in whitespace do not count, except that there must be no
+  extra whitespace following token itself.

 find_token_exact(lines, token, start[, end]):
-  Badly named. As find_token, but with ignorews True.
+  As find_token, but with ignorews True.

 find_tokens(lines, tokens, start[, end[, ignorews]]):
   Returns the first line i, start <= i < end, on which
   one of the tokens in tokens is found at the beginning.
-  Returns -1 if not found. If ignorews is (given and) True,
-  then differences in whitespace do not count.
+  Returns -1 if not found.
+  If ignorews is (given and) True, then differences
+  in whitespace do not count, except that there must be no
+  extra whitespace following token itself.

 find_tokens_exact(lines, token, start[, end]):
-  Badly named. As find_tokens, but with ignorews True.
+  As find_tokens, but with ignorews True.

 find_token_backwards(lines, token, start):
 find_tokens_backwards(lines, tokens, start):
@@ -65,6 +69,11 @@ get_quoted_value(lines, token, start[, end[, default]]):
   value, if they are present. So use this one for cases
   where the value is normally quoted.

+get_option_value(line, option):
+  This assumes we have a line with something like:
+      option="value"
+  and returns value. Returns "" if not found.
+
 del_token(lines, token, start[, end]):
   Like find_token, but deletes the line if it finds one.
   Returns True if a line got deleted, otherwise False.
@@ -133,6 +142,8 @@ is_nonempty_line(line):

 '''

+import re
+
 # Utilities for one line
 def check_token(line, token):
     """ check_token(line, token) -> bool
@@ -152,14 +163,15 @@ def is_nonempty_line(line):


 # Utilities for a list of lines
-def find_token(lines, token, start, end = 0, exact = False):
-    """ find_token(lines, token, start[[, end], exact]) -> int
+def find_token(lines, token, start, end = 0, ignorews = False):
+    """ find_token(lines, token, start[[, end], ignorews]) -> int

     Return the lowest line where token is found, and is the first
     element, in lines[start, end].
-    If exact is True (default is False), then differences in
-    whitespace are ignored.
+    If ignorews is True (default is False), then differences in
+    whitespace are ignored, except that there must be no extra
+    whitespace following token itself.

     Return -1 on failure."""

@@ -167,7 +179,7 @@ def find_token(lines, token, start, end = 0, exact = False):
         end = len(lines)
     m = len(token)
     for i in xrange(start, end):
-        if exact:
+        if ignorews:
             x = lines[i].split()
             y = token.split()
             if len(x) < len(y):
@@ -184,8 +196,8 @@ def find_token_exact(lines, token, start, end = 0):
     return find_token(lines, token, start, end, True)


-def find_tokens(lines, tokens, start, end = 0, exact = False):
-    """ find_tokens(lines, tokens, start[[, end], exact]) -> int
+def find_tokens(lines, tokens, start, end = 0, ignorews = False):
+    """ find_tokens(lines, tokens, start[[, end], ignorews]) -> int

     Return the lowest line where one token in tokens is found, and is
     the first element, in lines[start, end].
@@ -196,7 +208,7 @@

     for i in xrange(start, end):
         for token in tokens:
-            if exact:
+            if ignorews:
                 x = lines[i].split()
                 y = token.split()
                 if len(x) < len(y):
@@ -294,6 +306,24 @@ def get_quoted_value(lines, token, start, end = 0, default = ""):
     return val.strip('"')


+def get_option_value(line, option):
+    rx = option + '\s*=\s*"([^"]+)"'
+    rx = re.compile(rx)
+    m = rx.search(line)
+    if not m:
+        return ""
+    return m.group(1)
+
+
+def set_option_value(line, option, value):
+    rx = '(' + option + '\s*=\s*")[^"]+"'
+    rx = re.compile(rx)
+    m = rx.search(line)
+    if not m:
+        return line
+    return re.sub(rx, '\g<1>' + value + '"', line)
+
+
 def del_token(lines, token, start, end = 0):
     """ del_token(lines, token, start, end) -> int
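For orientation, here is a minimal usage sketch of the helpers this patch touches. It is not part of the patch: it assumes lib/lyx2lyx is on sys.path (so parser_tools can be imported under the Python 2 interpreter lyx2lyx targets at this point), and the sample document lines are invented for illustration.

    # Hypothetical usage sketch, not from the patch; sample data is made up.
    from parser_tools import find_token, get_option_value, set_option_value

    lines = ['\\begin_inset Foo bar="baz"',   # invented document lines
             '\\end_inset']

    # ignorews=True compares whitespace-split words, so extra spaces inside
    # the token do not prevent a match against line 0.
    i = find_token(lines, '\\begin_inset   Foo', 0, 0, True)   # -> 0

    # get_option_value extracts the quoted value; set_option_value rewrites it.
    value = get_option_value(lines[i], 'bar')                  # -> 'baz'
    lines[i] = set_option_value(lines[i], 'bar', 'qux')        # now bar="qux"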