# Uncomment only what you need to import, please.
-from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
- find_end_of_layout, find_token, find_token_backwards, find_token_exact,
- find_re, get_bool_value,
- get_containing_layout, get_option_value, get_value, get_quoted_value)
-# del_value, del_complete_lines,
-# find_complete_lines, find_end_of,
+from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
+ find_end_of, find_end_of_inset, find_end_of_layout, find_token,
+ find_token_backwards, find_token_exact, find_re, get_bool_value,
+ get_containing_inset, get_containing_layout, get_option_value, get_value,
+ get_quoted_value)
+# del_value,
+# find_complete_lines,
# find_re, find_substring,
-# get_containing_inset,
# is_in_inset, set_bool_value
# find_tokens, check_token
# Private helper functions
def add_preamble_fonts(document, fontmap):
- " Add collected font-packages with their option to user-preamble"
+ """Add collected font-packages with their option to user-preamble"""
for pkg in fontmap:
if len(fontmap[pkg]) > 0:
self.package = None
self.options = []
self.pkgkey = None # key into pkg2fontmap
- self.osfopt = None # None, string
+ self.osfopt = None # None, string
+ self.osfdef = "false" # "false" or "true"
def addkey(self):
self.pkgkey = createkey(self.package, self.options)
self.pkg2fontmap = dict()
self.pkginmap = dict() # defines, if a map for package exists
- def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
- " Expand fontinfo mapping"
+ def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
+ """Expand fontinfo mapping"""
#
# fontlist: list of fontnames, each element
# may contain a ','-separated list of needed options
# scaleopt: one of None, 'scale', 'scaled', or some other string
# to be used in scale option (e.g. scaled=0.7)
# osfopt: None or some other string to be used in osf option
+ # osfdef: "true" if osf is default
for fl in font_list:
fe = fontinfo()
fe.fonttype = font_type
fe.options = flt[1:]
fe.scaleopt = scaleopt
fe.osfopt = osfopt
+ fe.osfdef = osfdef
if pkg == None:
fe.package = font_name
else:
"typewriter", "tt", "plex-mono", "scale")
elif font == 'Adobe':
fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
- fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled")
- fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled")
+ fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
+ fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
elif font == 'Noto':
fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
'NotoSerifThin,thin', 'NotoSerifLight,light',
'NotoSansExtralight,extralight'],
"sans", "sf", "noto-sans", "scaled")
fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
+ elif font == 'Cantarell':
+ fm.expandFontMapping(['cantarell,defaultsans'],
+ "sans", "sf", "cantarell", "scaled", "oldstyle")
+ elif font == 'Chivo':
+ fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
+ 'Chivo,regular', 'ChivoMedium,medium'],
+ "sans", "sf", "Chivo", "scale", "oldstyle")
+ elif font == 'CrimsonPro':
+ fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
+ 'CrimsonProMedium,medium'],
+ "roman", None, "CrimsonPro", None, "lf", "true")
+ elif font == 'Fira':
+ fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
+ 'FiraSansThin,thin', 'FiraSansLight,light',
+ 'FiraSansExtralight,extralight',
+ 'FiraSansUltralight,ultralight'],
+ "sans", "sf", "FiraSans", "scaled", "lf", "true")
+ fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
+ elif font == 'libertinus':
+ fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
+ fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
return fm
-def convert_fonts(document, fm):
- " Handle font definition (LaTeX preamble -> native) "
-
+def convert_fonts(document, fm, osfoption = "osf"):
+ """Handle font definition (LaTeX preamble -> native)"""
rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
rscaleopt = re.compile(r'^scaled?=(.*)')
+ # Check whether we go beyond font option feature introduction
+ haveFontOpts = document.end_format > 580
+
i = 0
- while i < len(document.preamble):
+ while True:
i = find_re(document.preamble, rpkg, i+1)
if i == -1:
return
pkg = mo.group(3)
o = 0
oscale = 1
- osfoption = "osf"
has_osf = False
while o < len(options):
if options[o] == osfoption:
if not pkg in fm.pkginmap:
continue
# determine fontname
- fn = fm.getfontname(pkg, options)
+ fn = None
+ if haveFontOpts:
+ # Try with name-option combination first
+ # (only one default option supported currently)
+ o = 0
+ while o < len(options):
+ opt = options[o]
+ fn = fm.getfontname(pkg, [opt])
+ if fn != None:
+ del options[o]
+ break
+ o += 1
+ continue
+ if fn == None:
+ fn = fm.getfontname(pkg, [])
+ else:
+ fn = fm.getfontname(pkg, options)
if fn == None:
continue
del document.preamble[i]
else:
fontscale = "\\font_" + fontinfo.scaletype + "_scale"
fontinfo.scaleval = oscale
- if has_osf:
- if fontinfo.osfopt == None:
- options.extend("osf")
- continue
- osf = find_token(document.header, "\\font_osf false")
- if osf != -1:
- document.header[osf] = "\\font_osf true"
+ if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
+ if fontinfo.osfopt == None:
+ options.extend(osfoption)
+ continue
+ osf = find_token(document.header, "\\font_osf false")
+ osftag = "\\font_osf"
+ if osf == -1 and fontinfo.fonttype != "math":
+ # Try with newer format
+ osftag = "\\font_" + fontinfo.fonttype + "_osf"
+ osf = find_token(document.header, osftag + " false")
+ if osf != -1:
+ document.header[osf] = osftag + " true"
if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
del document.preamble[i-1]
i -= 1
words = val.split() # ! splits also values like '"DejaVu Sans"'
words[0] = '"' + fn + '"'
document.header[j] = ft + ' ' + ' '.join(words)
+ if haveFontOpts and fontinfo.fonttype != "math":
+ fotag = "\\font_" + fontinfo.fonttype + "_opts"
+ fo = find_token(document.header, fotag)
+ if fo != -1:
+ document.header[fo] = fotag + " \"" + ",".join(options) + "\""
+ else:
+ # Sensible place to insert tag
+ fo = find_token(document.header, "\\font_sf_scale")
+ if fo == -1:
+ document.warning("Malformed LyX document! Missing \\font_sf_scale")
+ else:
+ document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
+
-def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
- " Revert native font definition to LaTeX "
+def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
+ """Revert native font definition to LaTeX"""
# fonlist := list of fonts created from the same package
# Empty package means that the font-name is the same as the package-name
# fontmap (key = package, val += found options) will be filled
if not val in fontmap:
fontmap[val] = []
x = -1
- if OnlyWithXOpts:
+ if OnlyWithXOpts or WithXOpts:
if ft == "\\font_math":
return False
regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
elif ft == "\\font_typewriter":
regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
x = find_re(document.header, regexp, 0)
- if x == -1:
+ if x == -1 and OnlyWithXOpts:
return False
- # We need to use this regex since split() does not handle quote protection
- xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
- opts = xopts[1].strip('"').split(",")
- fontmap[val].extend(opts)
- del document.header[x]
+ if x != -1:
+ # We need to use this regex since split() does not handle quote protection
+ xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ opts = xopts[1].strip('"').split(",")
+ fontmap[val].extend(opts)
+ del document.header[x]
words[0] = '"default"'
document.header[i] = ft + ' ' + ' '.join(words)
if fontinfo.scaleopt != None:
if xval1 != "100":
# set correct scale option
fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
- if fontinfo.osfopt != None and fontinfo.fonttype == "roman":
- osf = find_token(document.header, "\\font_osf true")
+ if fontinfo.osfopt != None:
+ oldval = "true"
+ if fontinfo.osfdef == "true":
+ oldval = "false"
+ osf = find_token(document.header, "\\font_osf " + oldval)
+ if osf == -1 and ft != "\\font_math":
+ # Try with newer format
+ osftag = "\\font_roman_osf " + oldval
+ if ft == "\\font_sans":
+ osftag = "\\font_sans_osf " + oldval
+ elif ft == "\\font_typewriter":
+ osftag = "\\font_typewriter_osf " + oldval
+ osf = find_token(document.header, osftag)
if osf != -1:
- document.header[osf] = "\\font_osf false"
fontmap[val].extend([fontinfo.osfopt])
if len(fontinfo.options) > 0:
fontmap[val].extend(fontinfo.options)
###############################################################################
def convert_inputencoding_namechange(document):
- " Rename inputencoding settings. "
+ """Rename inputencoding settings."""
i = find_token(document.header, "\\inputencoding", 0)
if i == -1:
return
document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
- " Rename inputencoding settings. "
+ """Rename inputencoding settings."""
i = find_token(document.header, "\\inputencoding", 0)
if i == -1:
return
document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to convert.
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to revert.
        return
    fontmap = dict()
    if revert_fonts(document, createFontMapping(['Noto']), fontmap):
        add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to convert.
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to revert.
        return
    fontmap = dict()
    if revert_fonts(document, createFontMapping(['DejaVu', 'IBM']), fontmap):
        add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to convert.
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Native (non-TeX) fonts in use: nothing to revert.
        return
    fontmap = dict()
    if revert_fonts(document, createFontMapping(['Adobe']), fontmap):
        add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
- " Remove styles Begin/EndFrontmatter"
+ """Remove styles Begin/EndFrontmatter"""
layouts = ['BeginFrontmatter', 'EndFrontmatter']
tokenend = len('\\begin_layout ')
document.body[i:j+1] = []
def addFrontMatterStyles(document):
- " Use styles Begin/EndFrontmatter for elsarticle"
+ """Use styles Begin/EndFrontmatter for elsarticle"""
if document.textclass != "elsarticle":
return
def convert_lst_literalparam(document):
- " Add param literal to include inset "
+ """Add param literal to include inset"""
i = 0
while True:
def revert_lst_literalparam(document):
- " Remove param literal from include inset "
+ """Remove param literal from include inset"""
i = 0
while True:
def revert_paratype(document):
- " Revert ParaType font definitions to LaTeX "
+ """Revert ParaType font definitions to LaTeX"""
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
preamble = ""
i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
i2 = find_token(document.header, "\\font_sans \"default\"", 0)
i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
- sfval = get_value(document.header, "\\font_sf_scale", 0)
- # cutoff " 100"
- sfval = sfval[:-4]
+
+ sf_scale = 100.0
+ sfval = find_token(document.header, "\\font_sf_scale", 0)
+ if sfval == -1:
+ document.warning("Malformed LyX document: Missing \\font_sf_scale.")
+ else:
+ sfscale = document.header[sfval].split()
+ val = sfscale[1]
+ sfscale[1] = "100"
+ document.header[sfval] = " ".join(sfscale)
+ try:
+ # float() can throw
+ sf_scale = float(val)
+ except:
+ document.warning("Invalid font_sf_scale value: " + val)
+
sfoption = ""
- if sfval != "100":
- sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
+ if sf_scale != "100.0":
+ sfoption = "scaled=" + str(sf_scale / 100.0)
k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
ttval = get_value(document.header, "\\font_tt_scale", 0)
# cutoff " 100"
def revert_xcharter(document):
- " Revert XCharter font definitions to LaTeX "
+ """Revert XCharter font definitions to LaTeX"""
i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
if i == -1:
def revert_lscape(document):
- " Reverts the landscape environment (Landscape module) to TeX-code "
+ """Reverts the landscape environment (Landscape module) to TeX-code"""
if not "landscape" in document.get_module_list():
return
while True:
i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
if i == -1:
- return
+ break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Landscape inset")
document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{pdflscape}"])
+ document.del_module("landscape")
def convert_fontenc(document):
- " Convert default fontenc setting "
+ """Convert default fontenc setting"""
i = find_token(document.header, "\\fontencoding global", 0)
if i == -1:
def revert_fontenc(document):
- " Revert default fontenc setting "
+ """Revert default fontenc setting"""
i = find_token(document.header, "\\fontencoding auto", 0)
if i == -1:
def revert_nospellcheck(document):
- " Remove nospellcheck font info param "
+ """Remove nospellcheck font info param"""
i = 0
while True:
def revert_floatpclass(document):
- " Remove float placement params 'document' and 'class' "
+ """Remove float placement params 'document' and 'class'"""
del_token(document.header, "\\float_placement class")
i = 0
while True:
- i = find_token(document.body, '\\begin_inset Float', i+1)
+ i = find_token(document.body, '\\begin_inset Float', i + 1)
if i == -1:
break
j = find_end_of_inset(document.body, i)
- k = find_token(document.body, 'placement class', i, i + 2)
+ k = find_token(document.body, 'placement class', i, j)
if k == -1:
- k = find_token(document.body, 'placement document', i, i + 2)
+ k = find_token(document.body, 'placement document', i, j)
if k != -1:
del document.body[k]
continue
def revert_floatalignment(document):
- " Remove float alignment params "
+ """Remove float alignment params"""
galignment = get_value(document.header, "\\float_alignment", delete=True)
i = 0
while True:
- i = find_token(document.body, '\\begin_inset Float', i+1)
+ i = find_token(document.body, '\\begin_inset Float', i + 1)
if i == -1:
break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
continue
- k = find_token(document.body, 'alignment', i, i+4)
+ k = find_token(document.body, 'alignment', i, j)
if k == -1:
i = j
continue
alcmd = put_cmd_in_ert("\\raggedleft{}")
if len(alcmd) > 0:
document.body[l+1:l+1] = alcmd
- i = j
+ # There might be subfloats, so we do not want to move past
+ # the end of the inset.
+ i += 1
def revert_tuftecite(document):
- " Revert \cite commands in tufte classes "
+ """Revert \cite commands in tufte classes"""
tufte = ["tufte-book", "tufte-handout"]
if document.textclass not in tufte:
i = j
+
def revert_stretchcolumn(document):
- " We remove the column varwidth flags or everything else will become a mess. "
+ """We remove the column varwidth flags or everything else will become a mess."""
i = 0
while True:
i = find_token(document.body, "\\begin_inset Tabular", i+1)
def revert_vcolumns(document):
- " Revert standard columns with line breaks etc. "
+ """Revert standard columns with line breaks etc."""
i = 0
needvarwidth = False
needarray = False
def revert_bibencoding(document):
- " Revert bibliography encoding "
+ """Revert bibliography encoding"""
# Get cite engine
engine = "basic"
def convert_vcsinfo(document):
- " Separate vcs Info inset from buffer Info inset. "
+ """Separate vcs Info inset from buffer Info inset."""
types = {
"vcs-revision" : "revision",
def revert_vcsinfo(document):
- " Merge vcs Info inset to buffer Info inset. "
+ """Merge vcs Info inset to buffer Info inset."""
args = ["revision", "tree-revision", "author", "time", "date" ]
i = 0
document.body[tp] = "type \"buffer\""
document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    """Convert abbreviated revisions to regular revisions."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        if tp == -1:
            # Malformed inset without a "type" line: cannot be a vcs inset.
            continue
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, 'arg', i, j)
        if arg == -1:
            # Malformed vcs inset without an "arg" line: nothing to rewrite.
            continue
        argv = get_quoted_value(document.body, "arg", arg)
        if argv == "revision-abbrev":
            # Abbreviated revisions are no longer supported; fall back
            # to the plain revision info type.
            document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
- " Revert date info insets to static text. "
+ """Revert date info insets to static text."""
# FIXME This currently only considers the main language and uses the system locale
# Ideally, it should honor context languages and switch the locale accordingly.
def revert_timeinfo(document):
- " Revert time info insets to static text. "
+ """Revert time info insets to static text."""
# FIXME This currently only considers the main language and uses the system locale
# Ideally, it should honor context languages and switch the locale accordingly.
}
types = ["time", "fixtime", "modtime" ]
- i = 0
i = find_token(document.header, "\\language", 0)
if i == -1:
# this should not happen
def revert_namenoextinfo(document):
- " Merge buffer Info inset type name-noext to name. "
+ """Merge buffer Info inset type name-noext to name."""
i = 0
while True:
def revert_l7ninfo(document):
- " Revert l7n Info inset to text. "
+ """Revert l7n Info inset to text."""
i = 0
while True:
def revert_listpargs(document):
- " Reverts listpreamble arguments to TeX-code "
+ """Reverts listpreamble arguments to TeX-code"""
i = 0
while True:
i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
def revert_lformatinfo(document):
- " Revert layout format Info inset to text. "
+ """Revert layout format Info inset to text."""
i = 0
while True:
def revert_hebrew_parentheses(document):
- " Store parentheses in Hebrew text reversed"
+ """Store parentheses in Hebrew text reversed"""
# This only exists to keep the convert/revert naming convention
convert_hebrew_parentheses(document)
def revert_malayalam(document):
- " Set the document language to English but assure Malayalam output "
+ """Set the document language to English but assure Malayalam output"""
revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
- " Revert soul module flex insets to ERT "
+ """Revert soul module flex insets to ERT"""
flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
def revert_tablestyle(document):
- " Remove tablestyle params "
+ """Remove tablestyle params"""
- i = 0
i = find_token(document.header, "\\tablestyle")
if i != -1:
del document.header[i]
def revert_bibfileencodings(document):
- " Revert individual Biblatex bibliography encodings "
+ """Revert individual Biblatex bibliography encodings"""
# Get cite engine
engine = "basic"
def revert_cmidruletrimming(document):
- " Remove \\cmidrule trimming "
+ """Remove \\cmidrule trimming"""
# FIXME: Revert to TeX code?
i = 0
r'End',
]
+
def convert_ruby_module(document):
- " Use ruby module instead of local module definition "
+ """Use ruby module instead of local module definition"""
if document.del_local_layout(ruby_inset_def):
document.add_module("ruby")
+
def revert_ruby_module(document):
- " Replace ruby module with local module definition "
+ """Replace ruby module with local module definition"""
if document.del_module("ruby"):
document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
- " Use generic utf8 with Japanese documents."
+ """Use generic utf8 with Japanese documents."""
lang = get_value(document.header, "\\language")
if not lang.startswith("japanese"):
return
or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
document.set_parameter("inputencoding", "utf8")
+
def revert_utf8_japanese(document):
- " Use Japanese utf8 variants with Japanese documents."
+ """Use Japanese utf8 variants with Japanese documents."""
inputenc = get_value(document.header, "\\inputencoding")
if inputenc != "utf8":
return
"\\lineno_options %s" % options]
def convert_aaencoding(document):
    """Convert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if val in ("auto-legacy", "latin9"):
        # The aa class now defaults to utf8, so legacy-encoded documents
        # must request latin9 explicitly instead of the default options.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            # No \options header yet: insert one before the (now false)
            # \use_default_options line.
            document.header.insert(i, "\\options latin9")
        else:
            document.header[k] += ",latin9"
+
+
def revert_aaencoding(document):
    """Revert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if val == "utf8":
        # Older aa classes default to latin9, so utf8 documents must
        # request the encoding explicitly instead of the default options.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options", 0)
        if k == -1:
            # No \options header yet: insert one before the (now false)
            # \use_default_options line.
            document.header.insert(i, "\\options utf8")
        else:
            document.header[k] += ",utf8"
+
+
def revert_new_languages(document):
"""Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
and Russian (Petrine orthography)."""
"oldrussian": ("", "russian"),
"korean": ("", "korean"),
}
- used_languages = set()
if document.language in new_languages:
- used_languages.add(document.language)
+ used_languages = set((document.language, ))
+ else:
+ used_languages = set()
i = 0
while True:
i = find_token(document.body, "\\lang", i+1)
if i == -1:
break
- if document.body[i][6:].strip() in new_languages:
- used_languages.add(document.language)
+ val = get_value(document.body, "\\lang", i)
+ if val in new_languages:
+ used_languages.add(val)
# Korean is already supported via CJK, so leave as-is for Babel
if ("korean" in used_languages
- and get_bool_value(document.header, "\\use_non_tex_fonts")
- and get_value(document.header, "\\language_package") in ("default", "auto")):
- revert_language(document, "korean", "", "korean")
- used_languages.discard("korean")
+ and (not get_bool_value(document.header, "\\use_non_tex_fonts")
+ or get_value(document.header, "\\language_package") == "babel")):
+ used_languages.discard("korean")
for lang in used_languages:
- revert(lang, *new_languages[lang])
+ revert_language(document, lang, *new_languages[lang])
gloss_inset_def = [
beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
endInset = find_end_of_inset(document.body, i)
- endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
+ endPlain = find_end_of_layout(document.body, beginPlain)
precontent = put_cmd_in_ert(cmd)
if len(optargcontent) > 0:
precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
def revert_babelfont(document):
" Reverts the use of \\babelfont to user preamble "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
return
+
i = find_token(document.header, '\\language_package', 0)
if i == -1:
document.warning("Malformed LyX document: Missing \\language_package.")
def revert_minionpro(document):
" Revert native MinionPro font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
regexp = re.compile(r'(\\font_roman_opts)')
def revert_font_opts(document):
" revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
- i = find_token(document.header, '\\language_package', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\language_package.")
- return
- Babel = (get_value(document.header, "\\language_package", 0) == "babel")
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
+ Babel = (get_value(document.header, "\\language_package") == "babel")
# 1. Roman
regexp = re.compile(r'(\\font_roman_opts)')
def revert_plainNotoFonts_xopts(document):
" Revert native (straight) Noto font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
osf = False
def revert_notoFonts_xopts(document):
" Revert native (extended) Noto font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
def revert_IBMFonts_xopts(document):
" Revert native IBM font definition (with extra options) to LaTeX "
-
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
def revert_AdobeFonts_xopts(document):
" Revert native Adobe font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
add_preamble_fonts(document, fontmap)
+def convert_osf(document):
+ """Convert the single \\font_osf param to the new per-family format."""
+
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
+
+ i = find_token(document.header, '\\font_osf', 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_osf.")
+ return
+
+ # TeX font names whose osf variant lives in the sans resp. typewriter family
+ osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
+ osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
+
+ osfval = str2bool(get_value(document.header, "\\font_osf", i))
+ # The old param becomes the roman-only param
+ document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
+
+ if NonTeXFonts:
+ # With non-TeX fonts, osf was a roman-only setting
+ document.header.insert(i, "\\font_sans_osf false")
+ document.header.insert(i + 1, "\\font_typewriter_osf false")
+ return
+
+ if osfval:
+ x = find_token(document.header, "\\font_sans", 0)
+ if x == -1:
+ document.warning("Malformed LyX document: Missing \\font_sans.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ sf = sffont[1].strip('"')
+ if sf in osfsf:
+ document.header.insert(i, "\\font_sans_osf true")
+ else:
+ document.header.insert(i, "\\font_sans_osf false")
+
+ x = find_token(document.header, "\\font_typewriter", 0)
+ if x == -1:
+ document.warning("Malformed LyX document: Missing \\font_typewriter.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ tt = ttfont[1].strip('"')
+ if tt in osftt:
+ document.header.insert(i + 1, "\\font_typewriter_osf true")
+ else:
+ document.header.insert(i + 1, "\\font_typewriter_osf false")
+
+ else:
+ document.header.insert(i, "\\font_sans_osf false")
+ document.header.insert(i + 1, "\\font_typewriter_osf false")
+
+
+def revert_osf(document):
+ """Revert the per-family \\font_*_osf params to the single \\font_osf."""
+
+ # NOTE(review): NonTeXFonts is set but never used in this function — verify
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
+
+ i = find_token(document.header, '\\font_roman_osf', 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_roman_osf.")
+ return
+
+ osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
+ # The in-place rename keeps the roman value in the resulting \\font_osf line
+ document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
+
+ i = find_token(document.header, '\\font_sans_osf', 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_sans_osf.")
+ return
+
+ # NOTE(review): this overwrites the roman value read above; only the sans
+ # and typewriter values decide whether \\font_osf is forced true below
+ osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
+ del document.header[i]
+
+ i = find_token(document.header, '\\font_typewriter_osf', 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
+ return
+
+ osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
+ del document.header[i]
+
+ if osfval:
+ i = find_token(document.header, '\\font_osf', 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_osf.")
+ return
+ document.header[i] = "\\font_osf true"
+
+
+def revert_texfontopts(document):
+ """Revert native TeX font definitions (with extra options) to LaTeX."""
+
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
+ return
+
+ # Roman TeX fonts handled by the second half of this function
+ rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
+
+ # First the sf (biolinum only)
+ regexp = re.compile(r'(\\font_sans_opts)')
+ x = find_re(document.header, regexp, 0)
+ if x != -1:
+ # We need to use this regex since split() does not handle quote protection
+ sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ opts = sfopts[1].strip('"')
+ i = find_token(document.header, "\\font_sans", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_sans.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ sans = sffont[1].strip('"')
+ if sans == "biolinum":
+ sf_scale = 100.0
+ sffont[1] = '"default"'
+ document.header[i] = " ".join(sffont)
+ osf = False
+ j = find_token(document.header, "\\font_sans_osf true", 0)
+ if j != -1:
+ osf = True
+ k = find_token(document.header, "\\font_sf_scale", 0)
+ if k == -1:
+ document.warning("Malformed LyX document: Missing \\font_sf_scale.")
+ else:
+ sfscale = document.header[k].split()
+ val = sfscale[1]
+ sfscale[1] = "100"
+ document.header[k] = " ".join(sfscale)
+ try:
+ # float() can throw
+ sf_scale = float(val)
+ except:
+ document.warning("Invalid font_sf_scale value: " + val)
+ preamble = "\\usepackage["
+ if osf:
+ document.header[j] = "\\font_sans_osf false"
+ preamble += "osf,"
+ if sf_scale != 100.0:
+ preamble += 'scaled=' + str(sf_scale / 100.0) + ','
+ preamble += opts
+ preamble += "]{biolinum}"
+ add_to_preamble(document, [preamble])
+ del document.header[x]
+
+ regexp = re.compile(r'(\\font_roman_opts)')
+ x = find_re(document.header, regexp, 0)
+ if x == -1:
+ return
+
+ # We need to use this regex since split() does not handle quote protection
+ romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ opts = romanopts[1].strip('"')
+
+ i = find_token(document.header, "\\font_roman", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_roman.")
+ return
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ roman = romanfont[1].strip('"')
+ if not roman in rmfonts:
+ return
+ romanfont[1] = '"default"'
+ document.header[i] = " ".join(romanfont)
+ package = roman
+ # Font name and LaTeX package name differ for some fonts
+ if roman == "utopia":
+ package = "fourier"
+ elif roman == "palatino":
+ package = "mathpazo"
+ elif roman == "times":
+ package = "mathptmx"
+ elif roman == "xcharter":
+ package = "XCharter"
+ osf = ""
+ # The name of the osf package option differs per package
+ j = find_token(document.header, "\\font_roman_osf true", 0)
+ if j != -1:
+ if roman == "cochineal":
+ osf = "proportional,osf,"
+ elif roman == "utopia":
+ osf = "oldstyle,"
+ elif roman == "garamondx":
+ osf = "osfI,"
+ elif roman == "libertine":
+ osf = "osf,"
+ elif roman == "palatino":
+ osf = "osf,"
+ elif roman == "xcharter":
+ osf = "osf,"
+ document.header[j] = "\\font_roman_osf false"
+ k = find_token(document.header, "\\font_sc true", 0)
+ if k != -1:
+ if roman == "utopia":
+ osf += "expert,"
+ if roman == "palatino" and osf == "":
+ osf = "sc,"
+ document.header[k] = "\\font_sc false"
+ preamble = "\\usepackage["
+ preamble += osf
+ preamble += opts
+ preamble += "]{" + package + "}"
+ add_to_preamble(document, [preamble])
+ del document.header[x]
+
+
+def convert_CantarellFont(document):
+ " Handle Cantarell font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fm = createFontMapping(['Cantarell'])
+ convert_fonts(document, fm, "oldstyle")
+
+def revert_CantarellFont(document):
+ " Revert native Cantarell font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fontmap = dict()
+ fm = createFontMapping(['Cantarell'])
+ if revert_fonts(document, fm, fontmap, False, True):
+ add_preamble_fonts(document, fontmap)
+
+def convert_ChivoFont(document):
+ " Handle Chivo font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fm = createFontMapping(['Chivo'])
+ convert_fonts(document, fm, "oldstyle")
+
+def revert_ChivoFont(document):
+ " Revert native Chivo font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fontmap = dict()
+ fm = createFontMapping(['Chivo'])
+ if revert_fonts(document, fm, fontmap, False, True):
+ add_preamble_fonts(document, fontmap)
+
+
+def convert_FiraFont(document):
+ " Handle Fira font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fm = createFontMapping(['Fira'])
+ convert_fonts(document, fm, "lf")
+
+def revert_FiraFont(document):
+ " Revert native Fira font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fontmap = dict()
+ fm = createFontMapping(['Fira'])
+ if revert_fonts(document, fm, fontmap, False, True):
+ add_preamble_fonts(document, fontmap)
+
+
+def convert_Semibolds(document):
+ " Move semibold options to extraopts "
+
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
+
+ i = find_token(document.header, "\\font_roman", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_roman.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ roman = romanfont[1].strip('"')
+ if roman == "IBMPlexSerifSemibold":
+ romanfont[1] = '"IBMPlexSerif"'
+ document.header[i] = " ".join(romanfont)
+
+ if NonTeXFonts == False:
+ regexp = re.compile(r'(\\font_roman_opts)')
+ x = find_re(document.header, regexp, 0)
+ if x == -1:
+ # Sensible place to insert tag
+ fo = find_token(document.header, "\\font_sf_scale")
+ if fo == -1:
+ document.warning("Malformed LyX document! Missing \\font_sf_scale")
+ else:
+ document.header.insert(fo, "\\font_roman_opts \"semibold\"")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""
+
+ i = find_token(document.header, "\\font_sans", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_sans.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ sf = sffont[1].strip('"')
+ if sf == "IBMPlexSansSemibold":
+ sffont[1] = '"IBMPlexSans"'
+ document.header[i] = " ".join(sffont)
+
+ if NonTeXFonts == False:
+ regexp = re.compile(r'(\\font_sans_opts)')
+ x = find_re(document.header, regexp, 0)
+ if x == -1:
+ # Sensible place to insert tag
+ fo = find_token(document.header, "\\font_sf_scale")
+ if fo == -1:
+ document.warning("Malformed LyX document! Missing \\font_sf_scale")
+ else:
+ document.header.insert(fo, "\\font_sans_opts \"semibold\"")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""
+
+ i = find_token(document.header, "\\font_typewriter", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_typewriter.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ tt = ttfont[1].strip('"')
+ if tt == "IBMPlexMonoSemibold":
+ ttfont[1] = '"IBMPlexMono"'
+ document.header[i] = " ".join(ttfont)
+
+ if NonTeXFonts == False:
+ regexp = re.compile(r'(\\font_typewriter_opts)')
+ x = find_re(document.header, regexp, 0)
+ if x == -1:
+ # Sensible place to insert tag
+ fo = find_token(document.header, "\\font_tt_scale")
+ if fo == -1:
+ document.warning("Malformed LyX document! Missing \\font_tt_scale")
+ else:
+ document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ document.header[x] = "\\font_typewriter_opts \"semibold, " + sfopts[1].strip('"') + "\""
+
+
+def convert_NotoRegulars(document):
+ """Merge the diverse Noto regular fonts into the *Regular names."""
+
+ i = find_token(document.header, "\\font_roman", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_roman.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ roman = romanfont[1].strip('"')
+ if roman == "NotoSerif-TLF":
+ romanfont[1] = '"NotoSerifRegular"'
+ document.header[i] = " ".join(romanfont)
+
+ i = find_token(document.header, "\\font_sans", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_sans.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ sf = sffont[1].strip('"')
+ if sf == "NotoSans-TLF":
+ sffont[1] = '"NotoSansRegular"'
+ document.header[i] = " ".join(sffont)
+
+ i = find_token(document.header, "\\font_typewriter", 0)
+ if i == -1:
+ document.warning("Malformed LyX document: Missing \\font_typewriter.")
+ else:
+ # We need to use this regex since split() does not handle quote protection
+ ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
+ tt = ttfont[1].strip('"')
+ if tt == "NotoMono-TLF":
+ ttfont[1] = '"NotoMonoRegular"'
+ document.header[i] = " ".join(ttfont)
+
+
+def convert_CrimsonProFont(document):
+ " Handle CrimsonPro font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fm = createFontMapping(['CrimsonPro'])
+ convert_fonts(document, fm, "lf")
+
+def revert_CrimsonProFont(document):
+ " Revert native CrimsonPro font definition to LaTeX "
+
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+ fontmap = dict()
+ fm = createFontMapping(['CrimsonPro'])
+ if revert_fonts(document, fm, fontmap, False, True):
+ add_preamble_fonts(document, fontmap)
+
+
+def revert_pagesizes(document):
+ " Revert new page sizes in memoir and KOMA to options "
+
+ if document.textclass != "memoir" and document.textclass[:2] != "scr":
+ return
+
+ i = find_token(document.header, "\\use_geometry true", 0)
+ if i != -1:
+ return
+
+ defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
+
+ i = find_token(document.header, "\\papersize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\papersize header.")
+ return
+ val = get_value(document.header, "\\papersize", i)
+ if val in defsizes:
+ # nothing to do
+ return
+
+ document.header[i] = "\\papersize default"
+
+ i = find_token(document.header, "\\options", 0)
+ if i == -1:
+ i = find_token(document.header, "\\textclass", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\textclass header.")
+ return
+ document.header.insert(i, "\\options " + val)
+ return
+ document.header[i] = document.header[i] + "," + val
+
+
+def convert_pagesizes(document):
+ " Convert to new page sizes in memoir and KOMA to options "
+
+ if document.textclass != "memoir" and document.textclass[:3] != "scr":
+ return
+
+ i = find_token(document.header, "\\use_geometry true", 0)
+ if i != -1:
+ return
+
+ defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
+
+ i = find_token(document.header, "\\papersize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\papersize header.")
+ return
+ val = get_value(document.header, "\\papersize", i)
+ if val in defsizes:
+ # nothing to do
+ return
+
+ i = find_token(document.header, "\\use_geometry false", 0)
+ if i != -1:
+ # Maintain use of geometry
+ document.header[1] = "\\use_geometry true"
+
+def revert_komafontsizes(document):
+ """Revert new font sizes in KOMA to class options."""
+
+ if document.textclass[:3] != "scr":
+ return
+
+ i = find_token(document.header, "\\paperfontsize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\paperfontsize header.")
+ return
+
+ # Sizes the old format can express directly
+ defsizes = ["default", "10", "11", "12"]
+
+ val = get_value(document.header, "\\paperfontsize", i)
+ if val in defsizes:
+ # nothing to do
+ return
+
+ document.header[i] = "\\paperfontsize default"
+
+ # Pass the size as a KOMA fontsize= class option instead
+ fsize = "fontsize=" + val
+
+ i = find_token(document.header, "\\options", 0)
+ if i == -1:
+ i = find_token(document.header, "\\textclass", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\textclass header.")
+ return
+ document.header.insert(i, "\\options " + fsize)
+ return
+ document.header[i] = document.header[i] + "," + fsize
+
+
+def revert_dupqualicites(document):
+ """Revert qualified citation list commands with duplicate keys to ERT."""
+
+ # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
+ # we need to revert those with multiple uses of the same key.
+
+ # Get cite engine
+ engine = "basic"
+ i = find_token(document.header, "\\cite_engine", 0)
+ if i == -1:
+ document.warning("Malformed document! Missing \\cite_engine")
+ else:
+ engine = get_value(document.header, "\\cite_engine", i)
+
+ if not engine in ["biblatex", "biblatex-natbib"]:
+ return
+
+ # Citation insets that support qualified lists, with their LaTeX code
+ ql_citations = {
+ "cite" : "cites",
+ "Cite" : "Cites",
+ "citet" : "textcites",
+ "Citet" : "Textcites",
+ "citep" : "parencites",
+ "Citep" : "Parencites",
+ "Footcite" : "Smartcites",
+ "footcite" : "smartcites",
+ "Autocite" : "Autocites",
+ "autocite" : "autocites",
+ }
+
+ i = 0
+ while (True):
+ i = find_token(document.body, "\\begin_inset CommandInset citation", i)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Can't find end of citation inset at line %d!!" %(i))
+ i += 1
+ continue
+
+ k = find_token(document.body, "LatexCommand", i, j)
+ if k == -1:
+ document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
+ i = j + 1
+ continue
+
+ cmd = get_value(document.body, "LatexCommand", k)
+ if not cmd in list(ql_citations.keys()):
+ i = j + 1
+ continue
+
+ pres = find_token(document.body, "pretextlist", i, j)
+ posts = find_token(document.body, "posttextlist", i, j)
+ if pres == -1 and posts == -1:
+ # nothing to do.
+ i = j + 1
+ continue
+
+ key = get_quoted_value(document.body, "key", i, j)
+ if not key:
+ document.warning("Citation inset at line %d does not have a key!" %(i))
+ i = j + 1
+ continue
+
+ keys = key.split(",")
+ ukeys = list(set(keys))
+ if len(keys) == len(ukeys):
+ # no duplicates.
+ i = j + 1
+ continue
+
+ pretexts = get_quoted_value(document.body, "pretextlist", pres)
+ posttexts = get_quoted_value(document.body, "posttextlist", posts)
+
+ pre = get_quoted_value(document.body, "before", i, j)
+ post = get_quoted_value(document.body, "after", i, j)
+ # Map each citation key to its tab-separated list of prenotes
+ prelist = pretexts.split("\t")
+ premap = dict()
+ for pp in prelist:
+ ppp = pp.split(" ", 1)
+ val = ""
+ if len(ppp) > 1:
+ val = ppp[1]
+ else:
+ val = ""
+ if ppp[0] in premap:
+ premap[ppp[0]] = premap[ppp[0]] + "\t" + val
+ else:
+ premap[ppp[0]] = val
+ # Same for the postnotes
+ postlist = posttexts.split("\t")
+ postmap = dict()
+ # NOTE(review): num appears unused — candidate for removal
+ num = 1
+ for pp in postlist:
+ ppp = pp.split(" ", 1)
+ val = ""
+ if len(ppp) > 1:
+ val = ppp[1]
+ else:
+ val = ""
+ if ppp[0] in postmap:
+ postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
+ else:
+ postmap[ppp[0]] = val
+ # Replace known new commands with ERT
+ # Protect parentheses within the global pre/postnote
+ if "(" in pre or ")" in pre:
+ pre = "{" + pre + "}"
+ if "(" in post or ")" in post:
+ post = "{" + post + "}"
+ res = "\\" + ql_citations[cmd]
+ if pre:
+ res += "(" + pre + ")"
+ if post:
+ res += "(" + post + ")"
+ elif pre:
+ # A single (...) would be read as postnote, so add an empty one
+ res += "()"
+ for kk in keys:
+ if premap.get(kk, "") != "":
+ akeys = premap[kk].split("\t", 1)
+ akey = akeys[0]
+ if akey != "":
+ res += "[" + akey + "]"
+ if len(akeys) > 1:
+ premap[kk] = "\t".join(akeys[1:])
+ else:
+ premap[kk] = ""
+ if postmap.get(kk, "") != "":
+ akeys = postmap[kk].split("\t", 1)
+ akey = akeys[0]
+ if akey != "":
+ res += "[" + akey + "]"
+ if len(akeys) > 1:
+ postmap[kk] = "\t".join(akeys[1:])
+ else:
+ postmap[kk] = ""
+ elif premap.get(kk, "") != "":
+ # NOTE(review): premap may already have been consumed above for this
+ # key, in which case no empty [] postnote is emitted — verify intent
+ res += "[]"
+ res += "{" + kk + "}"
+ document.body[i:j+1] = put_cmd_in_ert([res])
+
+
+def convert_pagesizenames(document):
+ """Convert LyX page size names (strip the old 'paper' suffix)."""
+
+ i = find_token(document.header, "\\papersize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\papersize header.")
+ return
+ oldnames = ["letterpaper", "legalpaper", "executivepaper", \
+ "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
+ "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
+ "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
+ val = get_value(document.header, "\\papersize", i)
+ if val in oldnames:
+ newval = val.replace("paper", "")
+ document.header[i] = "\\papersize " + newval
+
+def revert_pagesizenames(document):
+ """Revert LyX page size names (re-append the old 'paper' suffix)."""
+
+ i = find_token(document.header, "\\papersize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\papersize header.")
+ return
+ newnames = ["letter", "legal", "executive", \
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
+ "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
+ "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
+ val = get_value(document.header, "\\papersize", i)
+ if val in newnames:
+ newval = val + "paper"
+ document.header[i] = "\\papersize " + newval
+
+
+def revert_theendnotes(document):
+ """Reverts native support of \\theendnotes to TeX-code."""
+
+ if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
+ return
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
+
+
+def revert_enotez(document):
+ """Reverts native support of enotez package to TeX-code."""
+
+ if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
+ return
+
+ # The package is only loaded if an endnote or an endnote list is present
+ use = False
+ if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
+ use = True
+
+ revert_flex_inset(document.body, "Endnote", "\\endnote")
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ use = True
+ document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
+
+ if use:
+ add_to_preamble(document, ["\\usepackage{enotez}"])
+ document.del_module("enotez")
+ document.del_module("foottoenotez")
+
+
+def revert_memoir_endnotes(document):
+ """Reverts native support of memoir endnotes to TeX-code."""
+
+ if document.textclass != "memoir":
+ return
+
+ # If an endnote module is also active, memoir endnotes become \endnote
+ encommand = "\\pagenote"
+ modules = document.get_module_list()
+ if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
+ encommand = "\\endnote"
+
+ revert_flex_inset(document.body, "Endnote", encommand)
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ if document.body[i] == "\\begin_inset FloatList pagenote*":
+ document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
+ else:
+ document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
+ add_to_preamble(document, ["\\makepagenote"])
+
+
+def revert_totalheight(document):
+ """Reverts graphics height parameter from totalheight to height."""
+
+ # LyX relative units and their LaTeX length counterparts
+ relative_heights = {
+ "\\textwidth" : "text%",
+ "\\columnwidth" : "col%",
+ "\\paperwidth" : "page%",
+ "\\linewidth" : "line%",
+ "\\textheight" : "theight%",
+ "\\paperheight" : "pheight%",
+ "\\baselineskip " : "baselineskip%"
+ }
+ i = 0
+ while (True):
+ i = find_token(document.body, "\\begin_inset Graphics", i)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Can't find end of graphics inset at line %d!!" %(i))
+ i += 1
+ continue
+
+ # Extract a height= entry from the inset's "special" parameter, if any
+ rx = re.compile(r'\s*special\s*(\S+)$')
+ rxx = re.compile(r'(\d*\.*\d+)(\S+)$')
+ k = find_re(document.body, rx, i, j)
+ special = ""
+ oldheight = ""
+ if k != -1:
+ m = rx.match(document.body[k])
+ if m:
+ special = m.group(1)
+ mspecial = special.split(',')
+ for spc in mspecial:
+ if spc.startswith("height="):
+ oldheight = spc.split('=')[1]
+ ms = rxx.search(oldheight)
+ if ms:
+ oldunit = ms.group(2)
+ if oldunit in list(relative_heights.keys()):
+ oldval = str(float(ms.group(1)) * 100)
+ oldunit = relative_heights[oldunit]
+ oldheight = oldval + oldunit
+ mspecial.remove(spc)
+ break
+ if len(mspecial) > 0:
+ special = ",".join(mspecial)
+ else:
+ special = ""
+
+ rx = re.compile(r'(\s*height\s*)(\S+)$')
+ kk = find_re(document.body, rx, i, j)
+ if kk != -1:
+ m = rx.match(document.body[kk])
+ val = ""
+ if m:
+ val = m.group(2)
+ if k != -1:
+ if special != "":
+ val = val + "," + special
+ document.body[k] = "\tspecial " + "totalheight=" + val
+ else:
+ # NOTE(review): inserting at kk and then deleting body[kk] below
+ # looks like it may cancel out when no special line existed —
+ # convert_totalheight inserts at kk + 1; verify
+ document.body.insert(kk, "\tspecial totalheight=" + val)
+ if oldheight != "":
+ document.body[kk] = m.group(1) + oldheight
+ else:
+ del document.body[kk]
+ elif oldheight != "":
+ if special != "":
+ document.body[k] = "\tspecial " + special
+ document.body.insert(k, "\theight " + oldheight)
+ else:
+ document.body[k] = "\theight " + oldheight
+ i = j + 1
+
+
+def convert_totalheight(document):
+ """Converts graphics height parameter from totalheight to height."""
+
+ # LaTeX lengths and their LyX relative-unit counterparts
+ relative_heights = {
+ "text%" : "\\textwidth",
+ "col%" : "\\columnwidth",
+ "page%" : "\\paperwidth",
+ "line%" : "\\linewidth",
+ "theight%" : "\\textheight",
+ "pheight%" : "\\paperheight",
+ "baselineskip%" : "\\baselineskip"
+ }
+ i = 0
+ while (True):
+ i = find_token(document.body, "\\begin_inset Graphics", i)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Can't find end of graphics inset at line %d!!" %(i))
+ i += 1
+ continue
+
+ # Extract a totalheight= entry from the "special" parameter, if any
+ rx = re.compile(r'\s*special\s*(\S+)$')
+ k = find_re(document.body, rx, i, j)
+ special = ""
+ newheight = ""
+ if k != -1:
+ m = rx.match(document.body[k])
+ if m:
+ special = m.group(1)
+ mspecial = special.split(',')
+ for spc in mspecial:
+ if spc[:12] == "totalheight=":
+ newheight = spc.split('=')[1]
+ mspecial.remove(spc)
+ break
+ if len(mspecial) > 0:
+ special = ",".join(mspecial)
+ else:
+ special = ""
+
+ rx = re.compile(r'(\s*height\s*)(\d+)(\S+)$')
+ kk = find_re(document.body, rx, i, j)
+ if kk != -1:
+ m = rx.match(document.body[kk])
+ val = ""
+ if m:
+ val = m.group(2)
+ unit = m.group(3)
+ if unit in list(relative_heights.keys()):
+ val = str(float(val) / 100)
+ unit = relative_heights[unit]
+ if k != -1:
+ if special != "":
+ val = val + unit + "," + special
+ document.body[k] = "\tspecial " + "height=" + val
+ else:
+ document.body.insert(kk + 1, "\tspecial height=" + val + unit)
+ if newheight != "":
+ document.body[kk] = m.group(1) + newheight
+ else:
+ del document.body[kk]
+ elif newheight != "":
+ # NOTE(review): k may be -1 here only if newheight were set without a
+ # special line, which the parsing above precludes — verify
+ document.body.insert(k, "\theight " + newheight)
+ i = j + 1
+
+
+def convert_changebars(document):
+ " Converts the changebars module to native solution "
+
+ if not "changebars" in document.get_module_list():
+ return
+
+ i = find_token(document.header, "\\output_changes", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\output_changes header.")
+ document.del_module("changebars")
+ return
+
+ document.header.insert(i, "\\change_bars true")
+ document.del_module("changebars")
+
+
+def revert_changebars(document):
+ " Converts native changebar param to module "
+
+ i = find_token(document.header, "\\change_bars", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\change_bars header.")
+ return
+
+ val = get_value(document.header, "\\change_bars", i)
+
+ if val == "true":
+ document.add_module("changebars")
+
+ del document.header[i]
+
+
+def convert_postpone_fragile(document):
+ " Adds false \\postpone_fragile_content buffer param "
+
+ i = find_token(document.header, "\\output_changes", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\output_changes header.")
+ return
+ # Set this to false for old documents (see #2154)
+ document.header.insert(i, "\\postpone_fragile_content false")
+
+
+def revert_postpone_fragile(document):
+ " Remove \\postpone_fragile_content buffer param "
+
+ i = find_token(document.header, "\\postpone_fragile_content", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
+ return
+
+ del document.header[i]
+
+
+def revert_colrow_tracking(document):
+ """Remove change tag from tabular columns/rows."""
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset Tabular", i+1)
+ if i == -1:
+ return
+ # NOTE(review): the end search starts at i+1 — presumably fine since
+ # line i is the begin_inset itself; verify against find_end_of_inset
+ j = find_end_of_inset(document.body, i+1)
+ if j == -1:
+ document.warning("Malformed LyX document: Could not find end of tabular.")
+ continue
+ # Strip the change="..." attribute from <column .../> and <row .../> tags
+ for k in range(i, j):
+ m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
+ if m:
+ document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
+ m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
+ if m:
+ document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
+
+
+def convert_counter_maintenance(document):
+ """Convert \\maintain_unincluded_children buffer param from boolean value to tristate."""
+
+ i = find_token(document.header, "\\maintain_unincluded_children", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
+ return
+
+ val = get_value(document.header, "\\maintain_unincluded_children", i)
+
+ # true -> strict, anything else -> no
+ if val == "true":
+ document.header[i] = "\\maintain_unincluded_children strict"
+ else:
+ document.header[i] = "\\maintain_unincluded_children no"
+
+
+def revert_counter_maintenance(document):
+ " Revert \\maintain_unincluded_children buffer param to previous boolean value "
+
+ i = find_token(document.header, "\\maintain_unincluded_children", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
+ return
+
+ val = get_value(document.header, "\\maintain_unincluded_children", i)
+
+ if val == "no":
+ document.header[i] = "\\maintain_unincluded_children false"
+ else:
+ document.header[i] = "\\maintain_unincluded_children true"
+
+
def revert_counter_inset(document):
    """Revert counter insets to ERT, where possible.

    "set"/"addto"/"reset" become \\setcounter / \\addtocounter calls;
    "save"/"restore" use helper counters ("LyXSave<name>") that are
    declared in the preamble afterwards.  Insets marked "lyxonly"
    affect only LyX's internal counters and are simply removed.
    """
    i = 0
    needed_counters = {}
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            # subsequent lines moved up to index i; resume the search there
            # (the old `i = j + 1` skipped over j + 1 - i lines)
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
        else:
            # report the unknown *command* (previously printed the counter name)
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))

        if ert:
            document.body[i : j + 1] = ert
            i += 1
        else:
            # Skip the inset we could not convert.  Previously i was left
            # unchanged here, so the same inset was found again and the
            # loop never terminated.
            i = j + 1

    # Declare the helper counters required by save/restore commands.
    pretext = ["\\newcounter{LyXSave%s}" % cnt for cnt in needed_counters]
    if pretext:
        add_to_preamble(document, pretext)
+
def revert_ams_spaces(document):
    "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
    # The previous version replaced only the FIRST occurrence of each
    # inset type; loop until find_token fails so all of them are reverted.
    found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning("Malformed LyX document: Could not find end of space inset.")
                i += 1
                continue
            document.body[i : end + 1] = put_cmd_in_ert(inset)
            found = True

    if found:
        # load amsmath in the preamble if not already loaded
        if find_token(document.header, "\\use_package amsmath 2", 0) == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
    return
+
+
def convert_parskip(document):
    """Move old parskip settings to the preamble.

    Translates "\\paragraph_separation skip" plus "\\defskip" into
    explicit \\parskip/\\parindent preamble code and resets the header
    to indentation defaults.
    """
    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return

    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    size = get_value(document.header, "\\defskip", skip)
    # Named skips map onto the corresponding LaTeX length macros;
    # anything else is taken as an explicit length value.
    if size in ("smallskip", "medskip", "bigskip"):
        amount = "\\" + size + "amount"
    else:
        amount = size

    add_to_preamble(document, ["\\setlength{\\parskip}{" + amount + "}", "\\setlength{\\parindent}{0pt}"])

    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
+
+
def revert_parskip(document):
    """Revert new parskip settings to a preamble package load.

    Emits \\usepackage[skip=...]{parskip} matching the "\\defskip"
    header value and resets the header to indentation defaults.
    """
    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return

    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    size = get_value(document.header, "\\defskip", skip)
    # "halfline" is the package default, so it needs no option at all.
    opts = ""
    if size in ("smallskip", "medskip", "bigskip"):
        opts = "[skip=\\" + size + "amount]"
    elif size == "fullline":
        opts = "[skip=\\baselineskip]"
    elif size != "halfline":
        opts = "[skip={" + size + "}]"

    add_to_preamble(document, ["\\usepackage" + opts + "{parskip}"])

    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
+
+
def revert_line_vspaces(document):
    " Revert fulline and halfline vspaces to TeX "
    # Keep the starred variants first: find_token matches line prefixes,
    # so "fullline*"/"halfline*" must be consumed before "fullline"/"halfline".
    insets = {
        "fullline*" : "\\vspace*{\\baselineskip}",
        "fullline" : "\\vspace{\\baselineskip}",
        "halfline*" : "\\vspace*{0.5\\baselineskip}",
        "halfline" : "\\vspace{0.5\\baselineskip}",
    }
    for inset, cmd in insets.items():
        i = 0
        # The previous version handled only the first occurrence of each
        # inset type; loop until all are reverted.
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning("Malformed LyX document: Could not find end of VSpace inset.")
                i += 1
                continue
            document.body[i : end + 1] = put_cmd_in_ert(cmd)
+
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX"""

    # Only act on documents using TeX fonts; with non-TeX (system) fonts
    # the font name is stored verbatim and needs no conversion.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): the revert counterpart looks the mapping up with the
        # lower-case key 'libertinus' — confirm createFontMapping also
        # registers the capitalized 'Libertinus' used here.
        fm = createFontMapping(['Libertinus'])
        convert_fonts(document, fm)
+
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX"""

    # Nothing to do when system (non-TeX) fonts are in use.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    collected = dict()
    mapping = createFontMapping(['libertinus'])
    if revert_fonts(document, mapping, collected):
        add_preamble_fonts(document, collected)
+
def revert_libertinus_sftt_fonts(document):
    """Revert Libertinus sans and tt font definitions to LaTeX.

    Moves the LibertinusSans/LibertinusMono selections (including OSF
    and scaling) from the header into explicit preamble code.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # first sf font
        i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
        if i != -1:
            # OSF variant selects the -OsF family, otherwise -LF
            j = find_token(document.header, "\\font_sans_osf true", 0)
            if j != -1:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
                document.header[j] = "\\font_sans_osf false"
            else:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
            document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
            sf_scale = 100.0
            sfval = find_token(document.header, "\\font_sf_scale", 0)
            if sfval == -1:
                document.warning("Malformed LyX document: Missing \\font_sf_scale.")
            else:
                sfscale = document.header[sfval].split()
                val = sfscale[1]
                sfscale[1] = "100"
                document.header[sfval] = " ".join(sfscale)
                try:
                    # float() can throw
                    sf_scale = float(val)
                except:
                    document.warning("Invalid font_sf_scale value: " + val)
            # Emit the scale macro only for a real scaling.  The old test
            # compared the float against the *string* "100.0" and was
            # therefore always true.
            if sf_scale != 100.0:
                add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])
        # now tt font
        i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
            document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
            tt_scale = 100.0
            ttval = find_token(document.header, "\\font_tt_scale", 0)
            if ttval == -1:
                document.warning("Malformed LyX document: Missing \\font_tt_scale.")
            else:
                ttscale = document.header[ttval].split()
                val = ttscale[1]
                ttscale[1] = "100"
                document.header[ttval] = " ".join(ttscale)
                try:
                    # float() can throw
                    tt_scale = float(val)
                except:
                    document.warning("Invalid font_tt_scale value: " + val)
            # same string-vs-float fix as for the sans scale above
            if tt_scale != 100.0:
                add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
+
+
def revert_docbook_table_output(document):
    """Drop the \\docbook_table_output header setting (unknown to 2.3)."""
    pos = find_token(document.header, '\\docbook_table_output')
    if pos == -1:
        return
    del document.header[pos]
+
+
def revert_nopagebreak(document):
    """Revert Newpage nopagebreak insets to \\nopagebreak TeX code."""
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        # The old code tested `end == 1` (typo for -1) and then continued,
        # which re-found the same inset forever.  Bail out instead.
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            return
        subst = put_cmd_in_ert("\\nopagebreak{}")
        document.body[i : end + 1] = subst
+
+
def revert_hrquotes(document):
    " Revert Hungarian Quotation marks "

    i = find_token(document.header, "\\quotes_style hungarian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style polish"

    i = 0
    while True:
        # Track the search position.  The old loop always restarted at the
        # top of the body and never advanced i, so any "Quotes h..." line
        # not matching one of the four cases below hung the converter.
        i = find_token(document.body, "\\begin_inset Quotes h", i)
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes hld":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes hrd":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes hls":
            document.body[i] = "\\begin_inset Quotes ald"
        elif document.body[i] == "\\begin_inset Quotes hrs":
            document.body[i] = "\\begin_inset Quotes ard"
        i += 1
+
+
def convert_math_refs(document):
    """Rename \\prettyref to \\formatted inside all math insets."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Formula", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Can't find end of inset at line %d of body!" % pos)
            pos += 1
            continue
        for k in range(pos, inset_end):
            document.body[k] = document.body[k].replace("\\prettyref", "\\formatted")
        pos = inset_end
+
+
def revert_math_refs(document):
    """Rename \\formatted back to \\prettyref and unwrap \\labelonly refs
    inside all math insets."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Formula", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Can't find end of inset at line %d of body!" % pos)
            pos += 1
            continue
        for k in range(pos, inset_end):
            line = document.body[k].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in line:
                # keep only the label argument
                line = re.sub("\\\\labelonly{([^}]+?)}", "\\1", line)
            document.body[k] = line
        pos = inset_end
+
+
def convert_branch_colors(document):
    """Convert branch colors to semantic values."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\branch", pos)
        if pos == -1:
            break
        end = find_token(document.header, "\\end_branch", pos)
        if end == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        cline = find_token(document.header, "\\color #faf0e6", pos, end)
        if cline != -1:
            document.header[cline] = "\\color background"
        pos += 1
+
+
def revert_branch_colors(document):
    " Revert semantic branch colors "

    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            # Hex colors ("#rrggbb") are understood by LyX 2.3 and can stay;
            # semantic names must be reverted.  The old check `bcolor[1] != "#"`
            # inspected the wrong character (the '#' is at index 0), so hex
            # colors were clobbered too, and it raised IndexError on an empty
            # value.
            if not bcolor.startswith("#"):
                # this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1
+
+
def revert_darkmode_graphics(document):
    """Revert the darkModeSensitive InsetGraphics parameter."""
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Graphics", pos)
        if pos == -1:
            break
        inset_end = find_end_of_inset(document.body, pos)
        if inset_end == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % pos)
            pos += 1
            continue
        param = find_token(document.body, "\tdarkModeSensitive", pos, inset_end)
        if param != -1:
            del document.body[param]
        pos += 1
+
+
def revert_branch_darkcols(document):
    """Revert dark branch colors (keep only the light color value)."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\branch", pos)
        if pos == -1:
            break
        end = find_token(document.header, "\\end_branch", pos)
        if end == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        cline = find_token(document.header, "\\color", pos, end)
        if cline != -1:
            # two color values => drop the second (dark) one
            match = re.search('\\\\color (\S+) (\S+)', document.header[cline])
            if match:
                document.header[cline] = "\\color " + match.group(1)
        pos += 1
+
+
def revert_vcolumns2(document):
    """Revert varwidth columns with line breaks etc.

    Walks every Tabular inset, detects cells that rely on 2.4's varwidth
    support (line breaks, multiple paragraphs, non-Plain layouts) and
    rewrites them as raw LaTeX (V{} column specials or a cellvarwidth
    environment, \\linebreak/\\\\ and \\endgraf).  The preamble code the
    generated LaTeX needs is added in the finally clause.
    """
    i = 0
    needvarwidth = False
    needarray = False
    needcellvarwidth = False
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i+1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue

            # Collect necessary column information
            m = i + 1
            # row/column counts are quoted attributes on the <lyxtabular> line
            nrows = int(document.body[i+1].split('"')[3])
            ncols = int(document.body[i+1].split('"')[5])
            col_info = []
            for k in range(ncols):
                m = find_token(document.body, "<column", m)
                width = get_option_value(document.body[m], 'width')
                varwidth = get_option_value(document.body[m], 'varwidth')
                alignment = get_option_value(document.body[m], 'alignment')
                valignment = get_option_value(document.body[m], 'valignment')
                special = get_option_value(document.body[m], 'special')
                col_info.append([width, varwidth, alignment, valignment, special, m])
                m += 1

            # Now parse cells
            m = i + 1
            lines = []
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multicolumn = get_option_value(document.body[m], 'multicolumn') != ""
                    multirow = get_option_value(document.body[m], 'multirow') != ""
                    fixedwidth = get_option_value(document.body[m], 'width') != ""
                    rotate = get_option_value(document.body[m], 'rotate')
                    cellalign = get_option_value(document.body[m], 'alignment')
                    cellvalign = get_option_value(document.body[m], 'valignment')
                    # Check for: linebreaks, multipars, non-standard environments
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
                        vcand = not fixedwidth
                    elif count_pars_in_inset(document.body, begcell + 2) > 1:
                        vcand = not fixedwidth
                    elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
                        vcand = not fixedwidth
                    colalignment = col_info[col][2]
                    colvalignment = col_info[col][3]
                    if vcand:
                        # Simple case (left/top alignment, no width/special):
                        # a V{\linewidth} column special suffices.
                        if rotate == "" and ((colalignment == "left" and colvalignment == "top") or (multicolumn == True and cellalign == "left" and cellvalign == "top")):
                            if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][4] == "":
                                needvarwidth = True
                                col_line = col_info[col][5]
                                needarray = True
                                vval = "V{\\linewidth}"
                                if multicolumn:
                                    document.body[m] = document.body[m][:-1] + " special=\"" + vval + "\">"
                                else:
                                    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                        else:
                            # Otherwise wrap the cell content in a
                            # cellvarwidth environment via ERT.
                            alarg = ""
                            if multicolumn or multirow:
                                if cellvalign == "middle":
                                    alarg = "[m]"
                                elif cellvalign == "bottom":
                                    alarg = "[b]"
                            else:
                                if colvalignment == "middle":
                                    alarg = "[m]"
                                elif colvalignment == "bottom":
                                    alarg = "[b]"
                            flt = find_token(document.body, "\\begin_layout", begcell, endcell)
                            elt = find_token_backwards(document.body, "\\end_layout", endcell)
                            if flt != -1 and elt != -1:
                                extralines = []
                                # we need to reset character layouts if necessary
                                el = find_token(document.body, '\\emph on', flt, elt)
                                if el != -1:
                                    extralines.append("\\emph default")
                                el = find_token(document.body, '\\noun on', flt, elt)
                                if el != -1:
                                    extralines.append("\\noun default")
                                el = find_token(document.body, '\\series', flt, elt)
                                if el != -1:
                                    extralines.append("\\series default")
                                el = find_token(document.body, '\\family', flt, elt)
                                if el != -1:
                                    extralines.append("\\family default")
                                el = find_token(document.body, '\\shape', flt, elt)
                                if el != -1:
                                    extralines.append("\\shape default")
                                el = find_token(document.body, '\\color', flt, elt)
                                if el != -1:
                                    extralines.append("\\color inherit")
                                el = find_token(document.body, '\\size', flt, elt)
                                if el != -1:
                                    extralines.append("\\size default")
                                el = find_token(document.body, '\\bar under', flt, elt)
                                if el != -1:
                                    extralines.append("\\bar default")
                                el = find_token(document.body, '\\uuline on', flt, elt)
                                if el != -1:
                                    extralines.append("\\uuline default")
                                el = find_token(document.body, '\\uwave on', flt, elt)
                                if el != -1:
                                    extralines.append("\\uwave default")
                                el = find_token(document.body, '\\strikeout on', flt, elt)
                                if el != -1:
                                    extralines.append("\\strikeout default")
                                document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + ["\end_layout"]
                                # insert \begin{cellvarwidth} after a leading
                                # \lang line if there is one (it must stay first)
                                parlang = -1
                                for q in range(flt, elt):
                                    if document.body[q] != "" and document.body[q][0] != "\\":
                                        break
                                    if document.body[q][:5] == "\\lang":
                                        parlang = q
                                        break
                                if parlang != -1:
                                    document.body[parlang+1:parlang+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
                                else:
                                    document.body[flt+1:flt+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
                                needcellvarwidth = True
                                needvarwidth = True
                        # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                        # with newlines, and we do not want that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            linebreak = False
                            nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                            if nl == -1:
                                nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                                if nl == -1:
                                    break
                                linebreak = True
                            nle = find_end_of_inset(document.body, nl)
                            del(document.body[nle:nle+1])
                            if linebreak:
                                document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                            else:
                                document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
                        # Replace parbreaks in multirow with \\endgraf
                        if multirow == True:
                            flt = find_token(document.body, "\\begin_layout", begcell, endcell)
                            if flt != -1:
                                while True:
                                    elt = find_end_of_layout(document.body, flt)
                                    if elt == -1:
                                        document.warning("Malformed LyX document! Missing layout end.")
                                        break
                                    endcell = find_token(document.body, "</cell>", begcell)
                                    flt = find_token(document.body, "\\begin_layout", elt, endcell)
                                    if flt == -1:
                                        break
                                    document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
                    m += 1

            i = j

    finally:
        # preamble support code for what was generated above
        if needarray == True:
            add_to_preamble(document, ["\\usepackage{array}"])
        if needcellvarwidth == True:
            add_to_preamble(document, ["%% Variable width box for table cells",
                                       "\\newenvironment{cellvarwidth}[1][t]",
                                       "    {\\begin{varwidth}[#1]{\\linewidth}}",
                                       "    {\\@finalstrut\\@arstrutbox\\end{varwidth}}"])
        if needvarwidth == True:
            add_to_preamble(document, ["\\usepackage{varwidth}"])
+
+
def convert_vcolumns2(document):
    """Convert varwidth ERT to native.

    Inverse of revert_vcolumns2: strips the cellvarwidth ERT insets,
    converts ERT "\\\\" / \\linebreak back to Newline insets and
    \\endgraf back to paragraph breaks, and removes the helper preamble
    code in the finally clause.
    """
    i = 0
    try:
        while True:
            i = find_token(document.body, "\\begin_inset Tabular", i+1)
            if i == -1:
                return
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Could not find end of tabular.")
                continue

            # Parse cells
            nrows = int(document.body[i+1].split('"')[3])
            ncols = int(document.body[i+1].split('"')[5])
            m = i + 1
            lines = []
            for row in range(nrows):
                for col in range(ncols):
                    m = find_token(document.body, "<cell", m)
                    multirow = get_option_value(document.body[m], 'multirow') != ""
                    begcell = m
                    endcell = find_token(document.body, "</cell>", begcell)
                    vcand = False
                    # a cell is a candidate if it contains an ERT
                    # "\begin{cellvarwidth}" (preceded by \backslash)
                    cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                    if cvw != -1:
                        vcand = document.body[cvw - 1] == "\\backslash" and get_containing_inset(document.body, cvw)[0] == "ERT"
                    if vcand:
                        # Remove ERTs with cellvarwidth env
                        ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
                        if ecvw != -1:
                            if document.body[ecvw - 1] == "\\backslash":
                                eertins = get_containing_inset(document.body, ecvw)
                                if eertins and eertins[0] == "ERT":
                                    del document.body[eertins[1] : eertins[2] + 1]

                        cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
                        ertins = get_containing_inset(document.body, cvw)
                        if ertins and ertins[0] == "ERT":
                            del(document.body[ertins[1] : ertins[2] + 1])

                        # Convert ERT newlines (as cellvarwidth detection relies on that)
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "\\backslash", begcell, endcell)
                            if nl == -1 or document.body[nl + 2] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline newline", "", "\\end_inset"]

                        # Same for linebreaks
                        while True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "linebreak", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline linebreak", "", "\\end_inset"]

                        # And \\endgraf
                        if multirow == True:
                            endcell = find_token(document.body, "</cell>", begcell)
                            nl = find_token(document.body, "endgraf{}", begcell, endcell)
                            if nl == -1 or document.body[nl - 1] != "\\backslash":
                                # NOTE(review): this break exits the *column*
                                # loop, not merely the endgraf handling —
                                # remaining cells of the row are then skipped.
                                # Confirm this is intended.
                                break
                            ertins = get_containing_inset(document.body, nl)
                            if ertins and ertins[0] == "ERT":
                                document.body[ertins[1] : ertins[2] + 1] = ["\\end_layout", "", "\\begin_layout Plain Layout"]
                    m += 1

            i += 1

    finally:
        # remove the preamble code revert_vcolumns2 added
        del_complete_lines(document.preamble,
                           ['% Added by lyx2lyx',
                            '%% Variable width box for table cells',
                            r'\newenvironment{cellvarwidth}[1][t]',
                            r'    {\begin{varwidth}[#1]{\linewidth}}',
                            r'    {\@finalstrut\@arstrutbox\end{varwidth}}'])
        del_complete_lines(document.preamble,
                           ['% Added by lyx2lyx',
                            r'\usepackage{varwidth}'])
+
+
# Local layout that older documents carry for the Frontispiece style;
# used by convert_koma_frontispiece/revert_koma_frontispiece below.
frontispiece_def = [
    r'### Inserted by lyx2lyx (frontispiece layout) ###',
    r'Style Frontispiece',
    r'  CopyStyle             Titlehead',
    r'  LatexName             frontispiece',
    r'End',
]
+
+
def convert_koma_frontispiece(document):
    """Remove local KOMA frontispiece definition.

    Frontispiece is a native style of the KOMA-Script (scr*) classes in
    this format, so a locally inserted copy of the layout can simply be
    dropped.
    """
    if document.textclass[:3] != "scr":
        return

    # The previous code additionally called document.add_module("ruby")
    # when the layout was found — an unrelated leftover copy-pasted from
    # convert_ruby_module; deleting the local layout is all that is needed.
    document.del_local_layout(frontispiece_def)
+
+
def revert_koma_frontispiece(document):
    """Add local KOMA frontispiece definition"""
    if document.textclass[:3] != "scr":
        return
    # Only documents that actually use the style need the local layout.
    if find_token(document.body, "\\begin_layout Frontispiece", 0) == -1:
        return
    document.append_local_layout(frontispiece_def)
+
+
def revert_spellchecker_ignore(document):
    """Remove all \\spellchecker_ignore lines from the header."""
    pos = find_token(document.header, "\\spellchecker_ignore")
    while pos != -1:
        del document.header[pos]
        pos = find_token(document.header, "\\spellchecker_ignore")
+
+
def revert_docbook_mathml_prefix(document):
    """Revert the DocBook parameter to choose the prefix for the MathML name space"""
    pos = find_token(document.header, "\\docbook_mathml_prefix")
    while pos != -1:
        del document.header[pos]
        pos = find_token(document.header, "\\docbook_mathml_prefix")
+
def revert_document_metadata(document):
    """Remove \\begin_metadata ... \\end_metadata sections from the header."""
    pos = 0
    while True:
        pos = find_token(document.header, "\\begin_metadata", pos)
        if pos == -1:
            return
        end = find_end_of(document.header, pos, "\\begin_metadata", "\\end_metadata")
        if end == -1:
            # this should not happen
            break
        del document.header[pos : end + 1]
+
##
# Conversion hub
#
[558, [removeFrontMatterStyles]],
[559, []],
[560, []],
- [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
+ [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
[562, []],
[563, []],
[564, []],
- [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
+ [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
[566, [convert_hebrew_parentheses]],
[567, []],
[568, []],
[572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
[573, [convert_inputencoding_namechange]],
[574, [convert_ruby_module, convert_utf8_japanese]],
- [575, [convert_lineno]],
+ [575, [convert_lineno, convert_aaencoding]],
[576, []],
[577, [convert_linggloss]],
[578, []],
[579, []],
- [580, []]
+ [580, []],
+ [581, [convert_osf]],
+ [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
+ [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
+ [584, []],
+ [585, [convert_pagesizes]],
+ [586, []],
+ [587, [convert_pagesizenames]],
+ [588, []],
+ [589, [convert_totalheight]],
+ [590, [convert_changebars]],
+ [591, [convert_postpone_fragile]],
+ [592, []],
+ [593, [convert_counter_maintenance]],
+ [594, []],
+ [595, []],
+ [596, [convert_parskip]],
+ [597, [convert_libertinus_rm_fonts]],
+ [598, []],
+ [599, []],
+ [600, []],
+ [601, [convert_math_refs]],
+ [602, [convert_branch_colors]],
+ [603, []],
+ [604, []],
+ [605, [convert_vcolumns2]],
+ [606, [convert_koma_frontispiece]],
+ [607, []],
+ [608, []],
+ [609, []]
]
-revert = [[579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
+revert = [[608, [revert_document_metadata]],
+ [607, [revert_docbook_mathml_prefix]],
+ [606, [revert_spellchecker_ignore]],
+ [605, [revert_koma_frontispiece]],
+ [604, [revert_vcolumns2]],
+ [603, [revert_branch_darkcols]],
+ [602, [revert_darkmode_graphics]],
+ [601, [revert_branch_colors]],
+ [600, []],
+ [599, [revert_math_refs]],
+ [598, [revert_hrquotes]],
+ [598, [revert_nopagebreak]],
+ [597, [revert_docbook_table_output]],
+ [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
+ [595, [revert_parskip,revert_line_vspaces]],
+ [594, [revert_ams_spaces]],
+ [593, [revert_counter_inset]],
+ [592, [revert_counter_maintenance]],
+ [591, [revert_colrow_tracking]],
+ [590, [revert_postpone_fragile]],
+ [589, [revert_changebars]],
+ [588, [revert_totalheight]],
+ [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
+ [586, [revert_pagesizenames]],
+ [585, [revert_dupqualicites]],
+ [584, [revert_pagesizes,revert_komafontsizes]],
+ [583, [revert_vcsinfo_rev_abbrev]],
+ [582, [revert_ChivoFont,revert_CrimsonProFont]],
+ [581, [revert_CantarellFont,revert_FiraFont]],
+ [580, [revert_texfontopts,revert_osf]],
+ [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
[578, [revert_babelfont]],
[577, [revert_drs]],
[576, [revert_linggloss, revert_subexarg]],
[575, [revert_new_languages]],
- [574, [revert_lineno]],
+ [574, [revert_lineno, revert_aaencoding]],
[573, [revert_ruby_module, revert_utf8_japanese]],
[572, [revert_inputencoding_namechange]],
[571, [revert_notoFonts]],
[563, [revert_lformatinfo]],
[562, [revert_listpargs]],
[561, [revert_l7ninfo]],
- [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
+ [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
[559, [revert_timeinfo, revert_namenoextinfo]],
[558, [revert_dateinfo]],
[557, [addFrontMatterStyles]],
[551, [revert_floatpclass, revert_floatalignment]],
[550, [revert_nospellcheck]],
[549, [revert_fontenc]],
- [548, []],# dummy format change
+ [548, []], # dummy format change
[547, [revert_lscape]],
[546, [revert_xcharter]],
[545, [revert_paratype]],