+ 'widemarginsa4':'', 'default': 'default'}
+ if len(document.header[i].split()) > 1:
+ paperpackage = document.header[i].split()[1]
+ else:
+ paperpackage = 'default'
+ document.header[i] = document.header[i].replace(paperpackage, packages[paperpackage])
+
+
def convert_bullets(document):
    " Convert bullets. "
    # Collapse each multi-line \bullet / \bulletLaTeX header entry into a
    # single line.  The inverse transformation (revert_bullets below) emits a
    # trailing '\end_bullet' line, so the lines deleted here are presumably
    # the merged value line(s) plus that terminator — TODO confirm against a
    # pre-conversion sample document.
    i = 0
    while 1:
        i = find_token(document.header, "\\bullet", i)
        if i == -1:
            return
        if document.header[i][:12] == '\\bulletLaTeX':
            # \bulletLaTeX carries a single value line: merge it in and
            # delete two following lines (n = 3 -> del header[i+1:i+3]).
            document.header[i] = document.header[i] + ' ' + document.header[i+1].strip()
            n = 3
        else:
            # Plain \bullet carries three value lines: merge all of them and
            # delete four following lines (n = 5 -> del header[i+1:i+5]).
            document.header[i] = document.header[i] + ' ' + document.header[i+1].strip() +\
                ' ' + document.header[i+2].strip() + ' ' + document.header[i+3].strip()
            n = 5
        del document.header[i+1:i + n]
        i = i + 1
+
+
def revert_bullets(document):
    " Revert bullets. "
    # Split each single-line bullet header entry back into its multi-line
    # form terminated by '\end_bullet' (inverse of convert_bullets).
    i = 0
    while 1:
        i = find_token(document.header, "\\bullet", i)
        if i == -1:
            return
        if document.header[i][:12] == '\\bulletLaTeX':
            # The value is the double-quoted part of the line; locate the
            # opening quote.
            n = document.header[i].find('"')
            if n == -1:
                document.warning("Malformed header.")
                return
            else:
                # [:n-1] drops the separator character before the quote as
                # well; the quoted value moves to its own tab-indented line.
                document.header[i:i+1] = [document.header[i][:n-1],'\t' + document.header[i][n:], '\\end_bullet']
                i = i + 3
        else:
            # Expect exactly 5 whitespace-separated fields; keep the first
            # two together and give each remaining field its own line.
            frag = document.header[i].split()
            if len(frag) != 5:
                document.warning("Malformed header.")
                return
            else:
                document.header[i:i+1] = [frag[0] + ' ' + frag[1],
                                          '\t' + frag[2],
                                          '\t' + frag[3],
                                          '\t' + frag[4],
                                          '\\end_bullet']
                # Skip past the 5 lines just written before searching again.
                i = i + 5
+
+
def add_begin_header(document):
    r" Add \begin_header and \begin_document. "
    # Both markers go directly after the \lyxformat line; the resulting
    # order is: \lyxformat, \begin_document, \begin_header.
    pos = find_token(document.header, '\\lyxformat', 0)
    document.header[pos+1:pos+1] = ['\\begin_document', '\\begin_header']
+
+
def remove_begin_header(document):
    r" Remove \begin_header and \begin_document. "
    # Drop each marker if present; \begin_document is removed first,
    # matching the order used when the markers were added.
    for marker in ('\\begin_document', '\\begin_header'):
        pos = find_token(document.header, marker, 0)
        if pos != -1:
            del document.header[pos]
+
+
def add_begin_body(document):
    r" Add \begin_body and \end_body markers around the document body. "
    # \begin_body plus a blank separator line open the body; \end_body is
    # placed immediately before the existing \end_document line.
    document.body[0:0] = ['\\begin_body', '']
    pos = find_token(document.body, "\\end_document", 0)
    document.body.insert(pos, '\\end_body')
+
def remove_begin_body(document):
    r" Remove \begin_body and \end_body"
    pos = find_token(document.body, "\\begin_body", 0)
    if pos != -1:
        del document.body[pos]
        # The conversion inserted a blank line after \begin_body; if the
        # line that slid into this slot is empty, drop it as well.
        if not document.body[pos]:
            del document.body[pos]
    pos = find_token(document.body, "\\end_body", 0)
    if pos != -1:
        del document.body[pos]
+
+
def normalize_papersize(document):
    r" Normalize \papersize"
    # Lower-case the capitalized values "Default" / "Custom".
    pos = find_token(document.header, '\\papersize', 0)
    if pos == -1:
        return
    value = document.header[pos].split()[1]
    if value == "Default":
        document.header[pos] = '\\papersize default'
    elif value == "Custom":
        document.header[pos] = '\\papersize custom'
+
+
def denormalize_papersize(document):
    r" Revert \papersize"
    # Restore the capitalized "Custom" value ("default" is left untouched).
    pos = find_token(document.header, '\\papersize', 0)
    if pos == -1:
        return
    if document.header[pos].split()[1] == "custom":
        document.header[pos] = '\\papersize Custom'
+
+
def strip_end_space(document):
    " Strip spaces at end of command line. "
    # Only lines that start with a backslash (LyX commands) are touched;
    # ordinary text lines keep their whitespace.
    for idx, line in enumerate(document.body):
        if line.startswith('\\'):
            document.body[idx] = line.strip()
+
+
def use_x_boolean(document):
    r" Use boolean values for \use_geometry, \use_bibtopic and \tracking_changes"
    as_bool = {'0': 'false', '1': 'true'}
    for keyword in ('\\use_geometry', '\\use_bibtopic', '\\tracking_changes'):
        pos = find_token(document.header, keyword, 0)
        if pos == -1:
            continue
        # Rewrite "\keyword 0|1" as "\keyword false|true".
        parts = document.header[pos].split()
        document.header[pos] = parts[0] + ' ' + as_bool[parts[1]]
+
+
def use_x_binary(document):
    r" Use digit values for \use_geometry, \use_bibtopic and \tracking_changes"
    as_digit = {'false': '0', 'true': '1'}
    for keyword in ('\\use_geometry', '\\use_bibtopic', '\\tracking_changes'):
        pos = find_token(document.header, keyword, 0)
        if pos == -1:
            continue
        # Rewrite "\keyword false|true" as "\keyword 0|1".
        parts = document.header[pos].split()
        document.header[pos] = parts[0] + ' ' + as_digit[parts[1]]
+
+
def normalize_paragraph_params(document):
    " Place all the paragraph parameters in their own line. "
    body = document.body

    allowed_parameters = '\\paragraph_spacing', '\\noindent', \
                         '\\align', '\\labelwidthstring', "\\start_of_appendix", \
                         "\\leftindent"

    i = 0
    while 1:
        i = find_token(document.body, '\\begin_layout', i)
        if i == -1:
            return

        i = i + 1
        while 1:
            # Stop at the first line after \begin_layout that is neither
            # blank nor a paragraph parameter.  NOTE(review): blank lines do
            # not terminate the scan, they are simply stepped over.
            if body[i].strip() and body[i].split()[0] not in allowed_parameters:
                break

            # A second backslash on the same line starts another parameter:
            # split it onto its own line (the tail is re-examined on the
            # next iteration, so chains of parameters unfold one by one).
            j = body[i].find('\\', 1)

            if j != -1:
                body[i:i+1] = [body[i][:j].strip(), body[i][j:]]

            i = i + 1
+
+
def convert_output_changes(document):
    " Add output_changes parameter. "
    # The new flag lives right after \tracking_changes in the header.
    pos = find_token(document.header, '\\tracking_changes', 0)
    if pos == -1:
        document.warning("Malformed lyx document: Missing '\\tracking_changes'.")
        return
    document.header.insert(pos + 1, '\\output_changes true')
+
+
def revert_output_changes(document):
    " Remove output_changes parameter. "
    pos = find_token(document.header, '\\output_changes', 0)
    if pos != -1:
        del document.header[pos]
+
+
def convert_ert_paragraphs(document):
    " Convert paragraph breaks and sanitize paragraphs. "
    # Four passes over each ERT inset, in order: (1) force every layout to
    # the default layout, (2) strip paragraph/font settings, (3) separate
    # consecutive paragraphs with an empty paragraph, (4) turn \newline
    # into a paragraph break.  i marks the inset start, j its \end_inset;
    # j is kept in sync with every insertion/deletion.
    forbidden_settings = [
                          # paragraph parameters
                          '\\paragraph_spacing', '\\labelwidthstring',
                          '\\start_of_appendix', '\\noindent',
                          '\\leftindent', '\\align',
                          # font settings
                          '\\family', '\\series', '\\shape', '\\size',
                          '\\emph', '\\numeric', '\\bar', '\\noun',
                          '\\color', '\\lang']
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset ERT', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue

        # convert non-standard paragraphs to standard
        k = i
        while 1:
            k = find_token(document.body, "\\begin_layout", k, j)
            if k == -1:
                break
            document.body[k] = '\\begin_layout %s' % document.default_layout
            k = k + 1

        # remove all paragraph parameters and font settings
        k = i
        while k < j:
            if (document.body[k].strip() and
                document.body[k].split()[0] in forbidden_settings):
                del document.body[k]
                # keep j pointing at \end_inset after the deletion
                j = j - 1
            else:
                k = k + 1

        # insert an empty paragraph before each paragraph but the first
        k = i
        first_pagraph = 1
        while 1:
            k = find_token(document.body, "\\begin_layout", k, j)
            if k == -1:
                break
            if first_pagraph:
                first_pagraph = 0
                k = k + 1
                continue
            document.body[k:k] = ['\\begin_layout %s' % document.default_layout, "",
                                  "\\end_layout", ""]
            # skip the 4 inserted lines plus the \begin_layout just found
            k = k + 5
            j = j + 4

        # convert \\newline to new paragraph
        k = i
        while 1:
            k = find_token(document.body, "\\newline", k, j)
            if k == -1:
                break
            document.body[k:k+1] = ["\\end_layout", "", '\\begin_layout %s' % document.default_layout]
            k = k + 4
            j = j + 3
            # We need an empty line if document.default_layout == ''
            if document.body[k-1] != '':
                document.body.insert(k-1, '')
                k = k + 1
                j = j + 1
        i = i + 1
+
+
def revert_ert_paragraphs(document):
    " Remove double paragraph breaks. "
    # Inverse of convert_ert_paragraphs.  Two passes per ERT inset:
    # (1) collapse every \end_layout ... \begin_layout pair into a single
    # \newline, (2) turn each run of two consecutive \newline markers back
    # into a real paragraph break.  j tracks \end_inset and is adjusted
    # after every splice.
    i = 0
    while 1:
        i = find_token(document.body, '\\begin_inset ERT', i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Missing '\\end_inset'.")
            i = i + 1
            continue

        # replace paragraph breaks with \newline
        k = i
        while 1:
            k = find_token(document.body, "\\end_layout", k, j)
            l = find_token(document.body, "\\begin_layout", k, j)
            if k == -1 or l == -1:
                break
            # lines k..l (inclusive) collapse into a single \newline
            document.body[k:l+1] = ["\\newline"]
            j = j - l + k
            k = k + 1

        # replace double \newlines with paragraph breaks
        k = i
        while 1:
            k = find_token(document.body, "\\newline", k, j)
            if k == -1:
                break
            # l: first non-empty line after the \newline at k
            l = k + 1
            while document.body[l] == "":
                l = l + 1
            if document.body[l].strip() and document.body[l].split()[0] == "\\newline":
                document.body[k:l+1] = ["\\end_layout", "",
                                        '\\begin_layout %s' % document.default_layout]
                j = j - l + k + 2
                k = k + 3
                # We need an empty line if document.default_layout == ''
                if document.body[l+1] != '':
                    document.body.insert(l+1, '')
                    k = k + 1
                    j = j + 1
            else:
                k = k + 1
        i = i + 1
+
+
def convert_french(document):
    " Convert frenchb. "
    # The header holds at most one \language line.
    header_re = re.compile(r'^\\language\s+frenchb')
    pos = find_re(document.header, header_re, 0)
    if pos != -1:
        document.header[pos] = "\\language french"

    # The body may switch languages many times; fix every occurrence.
    body_re = re.compile(r'^\\lang\s+frenchb')
    pos = 0
    while 1:
        pos = find_re(document.body, body_re, pos)
        if pos == -1:
            break
        document.body[pos] = "\\lang french"
        pos = pos + 1
+
+
def remove_paperpackage(document):
    " Remove paper package. "
    pos = find_token(document.header, '\\paperpackage', 0)
    if pos == -1:
        return

    paperpackage = document.header[pos].split()[1]
    del document.header[pos]

    # Only the three real a4 packages get a preamble replacement; any
    # other value (e.g. "default") is simply dropped.
    replacements = {"a4": "\\usepackage{a4}",
                    "a4wide": "\\usepackage{a4wide}",
                    "widemarginsa4": "\\usepackage[widemargins]{a4}"}
    if paperpackage not in replacements:
        return

    # for compatibility we ensure it is the first entry in preamble
    document.preamble.insert(0, replacements[paperpackage])

    pos = find_token(document.header, '\\papersize', 0)
    if pos != -1:
        document.header[pos] = "\\papersize default"
+
+
def remove_quotestimes(document):
    " Remove quotestimes. "
    pos = find_token(document.header, '\\quotes_times', 0)
    if pos != -1:
        del document.header[pos]
+
+
+def convert_sgml_paragraphs(document):
+ " Convert SGML paragraphs. "
+ if document.backend != "docbook":
+ return
+
+ i = 0
+ while 1:
+ i = find_token(document.body, "\\begin_layout SGML", i)
+
+ if i == -1:
+ return
+
+ document.body[i] = "\\begin_layout Standard"
+ j = find_token(document.body, "\\end_layout", i)
+
+ document.body[j+1:j+1] = ['','\\end_inset','','','\\end_layout']
+ document.body[i+1:i+1] = ['\\begin_inset ERT','status inlined','','\\begin_layout Standard','']