+ while document.body[i] == "":
+ i = i + 1
+
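+ # Expect the Author paragraph's \end_layout followed by a \begin_deeper
+ # block holding the FirstName/Surname paragraphs; otherwise keep scanning.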
+ if document.body[i][:11] != "\\end_layout" or document.body[i+2][:13] != "\\begin_deeper":
+ i = i + 1
+ continue
+
+ k = i
+ i = find_end_of( document.body, i+3, "\\begin_deeper","\\end_deeper")
+ if i == -1:
+ # something is really wrong, abort
+ document.warning("Missing \\end_deeper, after style Author.")
+ document.warning("Aborted attempt to parse FirstName and Surname.")
+ return
+ firstname, surname = "", ""
+
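+ # name holds everything from the Author's \end_layout up to (but not
+ # including) the matching \end_deeper.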
+ name = document.body[k:i]
+
+ j = find_token(name, "\\begin_layout FirstName", 0)
+ if j != -1:
+ j = j + 1
+ while name[j] != "\\end_layout":
+ firstname = firstname + name[j]
+ j = j + 1
+
+ j = find_token(name, "\\begin_layout Surname", 0)
+ if j != -1:
+ j = j + 1
+ while name[j] != "\\end_layout":
+ surname = surname + name[j]
+ j = j + 1
+
+ # delete the \begin_deeper...\end_deeper block that held the name
+ del document.body[k+2:i+1]
+
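+ # Re-insert the extracted name as Firstname and Surname character style
+ # insets inside the Author paragraph.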
+ document.body[k-1:k-1] = ["", "",
+ "\\begin_inset CharStyle Firstname",
+ "status inlined",
+ "",
+ '\\begin_layout %s' % document.default_layout,
+ "",
+ "%s" % firstname,
+ "\end_layout",
+ "",
+ "\end_inset",
+ "",
+ "",
+ "\\begin_inset CharStyle Surname",
+ "status inlined",
+ "",
+ '\\begin_layout %s' % document.default_layout,
+ "",
+ "%s" % surname,
+ "\\end_layout",
+ "",
+ "\\end_inset",
+ ""]
+
+
+def revert_names(document):
+ """ Revert in the docbook backend from firstname and surname char style
+ to styles.
+ """
+ if document.backend != "docbook":
+ return
+
+
+def convert_cite_engine(document):
+ r""" \use_natbib 1 \cite_engine <style>
+ \use_numerical_citations 0 -> where <style> is one of
+ \use_jurabib 0 "basic", "natbib_authoryear","""
+
+ a = find_token(document.header, "\\use_natbib", 0)
+ if a == -1:
+ document.warning("Malformed lyx document: Missing '\\use_natbib'.")
+ return
+
+ b = find_token(document.header, "\\use_numerical_citations", 0)
+ if b == -1 or b != a+1:
+ document.warning("Malformed lyx document: Missing '\\use_numerical_citations'.")
+ return
+
+ c = find_token(document.header, "\\use_jurabib", 0)
+ if c == -1 or c != b+1:
+ document.warning("Malformed lyx document: Missing '\\use_jurabib'.")
+ return
+
+ use_natbib = int(document.header[a].split()[1])
+ use_numerical_citations = int(document.header[b].split()[1])
+ use_jurabib = int(document.header[c].split()[1])
+
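+ # Collapse the three flags into one engine name; natbib settings take
+ # precedence over jurabib.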
+ cite_engine = "basic"
+ if use_natbib:
+ if use_numerical_citations:
+ cite_engine = "natbib_numerical"
+ else:
+ cite_engine = "natbib_authoryear"
+ elif use_jurabib:
+ cite_engine = "jurabib"
+
+ del document.header[a:c+1]
+ document.header.insert(a, "\\cite_engine " + cite_engine)
+
+
+def revert_cite_engine(document):
+ " Revert the cite engine. "
+ i = find_token(document.header, "\\cite_engine", 0)
+ if i == -1:
+ document.warning("Malformed lyx document: Missing '\\cite_engine'.")
+ return
+
+ cite_engine = document.header[i].split()[1]
+
+ use_natbib = '0'
+ use_numerical = '0'
+ use_jurabib = '0'
+ if cite_engine == "natbib_numerical":
+ use_natbib = '1'
+ use_numerical = '1'
+ elif cite_engine == "natbib_authoryear":
+ use_natbib = '1'
+ elif cite_engine == "jurabib":
+ use_jurabib = '1'
+
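+ # Replace \cite_engine with the three old flags; inserting at the same
+ # index in reverse order leaves them in natbib, numerical, jurabib order.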
+ del document.header[i]
+ document.header.insert(i, "\\use_jurabib " + use_jurabib)
+ document.header.insert(i, "\\use_numerical_citations " + use_numerical)
+ document.header.insert(i, "\\use_natbib " + use_natbib)
+
+
+def convert_paperpackage(document):
+ " Convert paper package. "
+ i = find_token(document.header, "\\paperpackage", 0)
+ if i == -1:
+ return
+
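+ # Map each old value to its new name; a bare \paperpackage line
+ # becomes widemarginsa4.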
+ packages = {'default': 'none', 'a4': 'none', 'a4wide': 'a4', 'widemarginsa4': 'a4wide'}
+ if len(document.header[i].split()) > 1:
+ paperpackage = document.header[i].split()[1]
+ document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
+ else:
+ document.header[i] = document.header[i] + ' widemarginsa4'
+
+
+def revert_paperpackage(document):
+ " Revert paper package. "
+ i = find_token(document.header, "\\paperpackage", 0)
+ if i == -1:
+ return
+
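+ # Map each new value back to its old name; widemarginsa4 reverts to a
+ # bare \paperpackage line.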
+ packages = {'none': 'a4', 'a4': 'a4wide', 'a4wide': 'widemarginsa4',
+ 'widemarginsa4': '', 'default': 'default'}
+ if len(document.header[i].split()) > 1:
+ paperpackage = document.header[i].split()[1]
+ else:
+ paperpackage = 'default'
+ document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
+
+
+def convert_bullets(document):
+ " Convert bullets. "
+ i = 0
+ while 1:
+ i = find_token(document.header, "\\bullet", i)
+ if i == -1:
+ return
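+ # A \bulletLaTeX entry carries one value line, a plain \bullet carries
+ # three; fold them onto the \bullet line and drop them plus \end_bullet.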
+ if document.header[i][:12] == '\\bulletLaTeX':
+ document.header[i] = document.header[i] + ' ' + document.header[i+1].strip()
+ n = 3
+ else:
+ document.header[i] = document.header[i] + ' ' + document.header[i+1].strip() +\
+ ' ' + document.header[i+2].strip() + ' ' + document.header[i+3].strip()
+ n = 5
+ del document.header[i+1:i + n]
+ i = i + 1
+
+
+def revert_bullets(document):
+ " Revert bullets. "
+ i = 0
+ while 1:
+ i = find_token(document.header, "\\bullet", i)
+ if i == -1:
+ return
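+ # Undo convert_bullets: split the folded values back onto their own
+ # lines and restore the closing \end_bullet.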
+ if document.header[i][:12] == '\\bulletLaTeX':
+ n = document.header[i].find('"')
+ if n == -1:
+ document.warning("Malformed header.")
+ return
+ else:
+ document.header[i:i+1] = [document.header[i][:n-1], '\t' + document.header[i][n:], '\\end_bullet']
+ i = i + 3
+ else:
+ frag = document.header[i].split()
+ if len(frag) != 5:
+ document.warning("Malformed header.")
+ return
+ else:
+ document.header[i:i+1] = [frag[0] + ' ' + frag[1],
+ '\t' + frag[2],
+ '\t' + frag[3],
+ '\t' + frag[4],
+ '\\end_bullet']
+ i = i + 5
+
+
+def add_begin_header(document):
+ r" Add \begin_header and \begin_document. "
+ i = find_token(document.header, '\\lyxformat', 0)
+ document.header.insert(i+1, '\\begin_header')
+ document.header.insert(i+1, '\\begin_document')
+
+
+def remove_begin_header(document):
+ r" Remove \begin_header and \begin_document. "
+ i = find_token(document.header, "\\begin_document", 0)
+ if i != -1:
+ del document.header[i]
+ i = find_token(document.header, "\\begin_header", 0)
+ if i != -1:
+ del document.header[i]
+
+
+def add_begin_body(document):
+ r" Add and \begin_document and \end_document"
+ document.body.insert(0, '\\begin_body')
+ document.body.insert(1, '')
+ i = find_token(document.body, "\\end_document", 0)
+ document.body.insert(i, '\\end_body')
+
+
+def remove_begin_body(document):
+ r" Remove \begin_body and \end_body"
+ i = find_token(document.body, "\\begin_body", 0)
+ if i != -1:
+ del document.body[i]
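+ # also drop the blank line that followed \begin_body, if any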
+ if not document.body[i]:
+ del document.body[i]
+ i = find_token(document.body, "\\end_body", 0)
+ if i != -1:
+ del document.body[i]
+
+
+def normalize_papersize(document):
+ r" Normalize \papersize"
+ i = find_token(document.header, '\\papersize', 0)
+ if i == -1:
+ return
+
+ tmp = document.header[i].split()
+ if tmp[1] == "Default":
+ document.header[i] = '\\papersize default'
+ return
+ if tmp[1] == "Custom":
+ document.header[i] = '\\papersize custom'
+
+
+def denormalize_papersize(document):
+ r" Revert \papersize"
+ i = find_token(document.header, '\\papersize', 0)
+ if i == -1:
+ return
+
+ tmp = document.header[i].split()
+ if tmp[1] == "custom":
+ document.header[i] = '\\papersize Custom'
+
+
+def strip_end_space(document):
+ " Strip spaces at end of command line. "
+ for i in range(len(document.body)):
+ if document.body[i][:1] == '\\':
+ document.body[i] = document.body[i].strip()
+
+
+def use_x_boolean(document):
+ r" Use boolean values for \use_geometry, \use_bibtopic and \tracking_changes"
+ bin2bool = {'0': 'false', '1': 'true'}
+ for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
+ i = find_token(document.header, use, 0)
+ if i == -1:
+ continue
+ decompose = document.header[i].split()
+ document.header[i] = decompose[0] + ' ' + bin2bool[decompose[1]]
+
+
+def use_x_binary(document):
+ r" Use digit values for \use_geometry, \use_bibtopic and \tracking_changes"
+ bool2bin = {'false': '0', 'true': '1'}
+ for use in '\\use_geometry', '\\use_bibtopic', '\\tracking_changes':
+ i = find_token(document.header, use, 0)
+ if i == -1:
+ continue
+ decompose = document.header[i].split()
+ document.header[i] = decompose[0] + ' ' + bool2bin[decompose[1]]
+
+
+def normalize_paragraph_params(document):
+ " Place all the paragraph parameters in their own line. "
+ body = document.body
+
+ allowed_parameters = '\\paragraph_spacing', '\\noindent', \
+ '\\align', '\\labelwidthstring', "\\start_of_appendix", \
+ "\\leftindent"
+
+ i = 0
+ while 1:
+ i = find_token(document.body, '\\begin_layout', i)
+ if i == -1:
+ return
+
+ i = i + 1
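+ # Walk the parameter lines after \begin_layout and split any line that
+ # carries extra content after its parameter onto two lines.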
+ while 1:
+ if body[i].strip() and body[i].split()[0] not in allowed_parameters:
+ break
+
+ j = body[i].find('\\', 1)
+
+ if j != -1:
+ body[i:i+1] = [body[i][:j].strip(), body[i][j:]]
+
+ i = i + 1
+
+
+def convert_output_changes(document):
+ " Add output_changes parameter. "
+ i = find_token(document.header, '\\tracking_changes', 0)
+ if i == -1:
+ document.warning("Malformed lyx document: Missing '\\tracking_changes'.")
+ return
+ document.header.insert(i+1, '\\output_changes true')
+
+
+def revert_output_changes(document):
+ " Remove output_changes parameter. "
+ i = find_token(document.header, '\\output_changes', 0)
+ if i == -1:
+ return
+ del document.header[i]
+
+
+def convert_ert_paragraphs(document):
+ " Convert paragraph breaks and sanitize paragraphs. "
+ forbidden_settings = [
+ # paragraph parameters
+ '\\paragraph_spacing', '\\labelwidthstring',
+ '\\start_of_appendix', '\\noindent',
+ '\\leftindent', '\\align',
+ # font settings
+ '\\family', '\\series', '\\shape', '\\size',
+ '\\emph', '\\numeric', '\\bar', '\\noun',
+ '\\color', '\\lang']
+ i = 0
+ while 1:
+ i = find_token(document.body, '\\begin_inset ERT', i)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed lyx document: Missing '\\end_inset'.")
+ i = i + 1
+ continue
+
+ # convert non-standard paragraphs to standard
+ k = i
+ while 1:
+ k = find_token(document.body, "\\begin_layout", k, j)
+ if k == -1:
+ break
+ document.body[k] = '\\begin_layout %s' % document.default_layout
+ k = k + 1
+
+ # remove all paragraph parameters and font settings
+ k = i
+ while k < j:
+ if (document.body[k].strip() and
+ document.body[k].split()[0] in forbidden_settings):
+ del document.body[k]
+ j = j - 1
+ else:
+ k = k + 1
+
+ # insert an empty paragraph before each paragraph but the first
+ k = i
+ first_paragraph = 1
+ while 1:
+ k = find_token(document.body, "\\begin_layout", k, j)
+ if k == -1:
+ break
+ if first_paragraph:
+ first_paragraph = 0
+ k = k + 1
+ continue
+ document.body[k:k] = ['\\begin_layout %s' % document.default_layout, "",
+ "\\end_layout", ""]
+ k = k + 5
+ j = j + 4
+
+ # convert \\newline to new paragraph
+ k = i
+ while 1:
+ k = find_token(document.body, "\\newline", k, j)
+ if k == -1:
+ break
+ document.body[k:k+1] = ["\\end_layout", "", '\\begin_layout %s' % document.default_layout]
+ k = k + 3
+ j = j + 2
+ # We need an empty line if document.default_layout == ''
+ if document.body[k] != '':
+ document.body.insert(k, '')
+ k = k + 1
+ j = j + 1
+ i = i + 1
+
+
+def revert_ert_paragraphs(document):
+ " Remove double paragraph breaks. "
+ i = 0
+ while 1:
+ i = find_token(document.body, '\\begin_inset ERT', i)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed lyx document: Missing '\\end_inset'.")
+ i = i + 1
+ continue
+
+ # replace paragraph breaks with \newline
+ k = i
+ while 1:
+ k = find_token(document.body, "\\end_layout", k, j)
+ l = find_token(document.body, "\\begin_layout", k, j)
+ if k == -1 or l == -1:
+ break
+ document.body[k:l+1] = ["\\newline"]
+ j = j - l + k
+ k = k + 1
+
+ # replace double \newlines with paragraph breaks
+ k = i
+ while 1:
+ k = find_token(document.body, "\\newline", k, j)
+ if k == -1:
+ break
+ l = k + 1
+ while document.body[l] == "":
+ l = l + 1
+ if document.body[l].strip() and document.body[l].split()[0] == "\\newline":
+ document.body[k:l+1] = ["\\end_layout", "",
+ '\\begin_layout %s' % document.default_layout]
+ j = j - l + k + 2
+ k = k + 3
+ # We need an empty line if document.default_layout == ''
+ if document.body[l+1] != '':
+ document.body.insert(l+1, '')
+ k = k + 1
+ j = j + 1
+ else:
+ k = k + 1
+ i = i + 1
+
+
+def convert_french(document):
+ " Convert frenchb. "
+ regexp = re.compile(r'^\\language\s+frenchb')
+ i = find_re(document.header, regexp, 0)
+ if i != -1:
+ document.header[i] = "\\language french"
+
+ # Change language in the document body
+ regexp = re.compile(r'^\\lang\s+frenchb')
+ i = 0
+ while 1:
+ i = find_re(document.body, regexp, i)
+ if i == -1:
+ break
+ document.body[i] = "\\lang french"
+ i = i + 1
+
+
+def remove_paperpackage(document):
+ " Remove paper package. "
+ i = find_token(document.header, '\\paperpackage', 0)
+
+ if i == -1:
+ return
+
+ paperpackage = document.header[i].split()[1]
+
+ del document.header[i]
+
+ if paperpackage not in ("a4", "a4wide", "widemarginsa4"):
+ return
+
+ conv = {"a4":"\\usepackage{a4}","a4wide": "\\usepackage{a4wide}",
+ "widemarginsa4": "\\usepackage[widemargins]{a4}"}
+ # for compatibility we ensure it is the first entry in preamble
+ document.preamble[0:0] = [conv[paperpackage]]
+
+ i = find_token(document.header, '\\papersize', 0)
+ if i != -1:
+ document.header[i] = "\\papersize default"
+
+
+def remove_quotestimes(document):
+ " Remove quotestimes. "
+ i = find_token(document.header, '\\quotes_times', 0)
+ if i == -1:
+ return
+ del document.header[i]
+
+
+def convert_sgml_paragraphs(document):
+ " Convert SGML paragraphs. "
+ if document.backend != "docbook":
+ return
+
+ i = 0
+ while 1:
+ i = find_token(document.body, "\\begin_layout SGML", i)
+
+ if i == -1:
+ return
+
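+ # Rewrite the SGML paragraph as a Standard paragraph wrapped in an ERT
+ # inset; insert the closing lines first so the later insertion at i+1
+ # does not shift j.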
+ document.body[i] = "\\begin_layout Standard"
+ j = find_token(document.body, "\\end_layout", i)
+
+ document.body[j+1:j+1] = ['', '\\end_inset', '', '', '\\end_layout']
+ document.body[i+1:i+1] = ['\\begin_inset ERT', 'status inlined', '', '\\begin_layout Standard', '']
+
+ i = i + 10