self.package = None
self.options = []
self.pkgkey = None # key into pkg2fontmap
- self.osfopt = None # None, string
+ self.osfopt = None # None, string
+ self.osfdef = "false" # "false" or "true"
def addkey(self):
self.pkgkey = createkey(self.package, self.options)
self.pkg2fontmap = dict()
self.pkginmap = dict() # defines, if a map for package exists
- def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None):
+ def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
" Expand fontinfo mapping"
#
# fontlist: list of fontnames, each element
# scaleopt: one of None, 'scale', 'scaled', or some other string
# to be used in scale option (e.g. scaled=0.7)
# osfopt: None or some other string to be used in osf option
+ # osfdef: "true" if osf is default
for fl in font_list:
fe = fontinfo()
fe.fonttype = font_type
fe.options = flt[1:]
fe.scaleopt = scaleopt
fe.osfopt = osfopt
+ fe.osfdef = osfdef
if pkg == None:
fe.package = font_name
else:
'NotoSansExtralight,extralight'],
"sans", "sf", "noto-sans", "scaled")
fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
+ elif font == 'Cantarell':
+ fm.expandFontMapping(['cantarell,defaultsans'],
+ "sans", "sf", "cantarell", "scaled", "oldstyle")
+ elif font == 'Chivo':
+ fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
+ 'Chivo,regular', 'ChivoMedium,medium'],
+ "sans", "sf", "Chivo", "scale", "oldstyle")
+ elif font == 'CrimsonPro':
+ fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
+ 'CrimsonProMedium,medium'],
+ "roman", None, "CrimsonPro", None, "lf", "true")
+ elif font == 'Fira':
+ fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
+ 'FiraSansThin,thin', 'FiraSansLight,light',
+ 'FiraSansExtralight,extralight',
+ 'FiraSansUltralight,ultralight'],
+ "sans", "sf", "FiraSans", "scaled", "lf", "true")
+ fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
return fm
-def convert_fonts(document, fm):
+def convert_fonts(document, fm, osfoption = "osf"):
" Handle font definition (LaTeX preamble -> native) "
rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
rscaleopt = re.compile(r'^scaled?=(.*)')
+ # Check whether we go beyond font option feature introduction
+ haveFontOpts = document.end_format > 580
+
i = 0
- while i < len(document.preamble):
+ while True:
i = find_re(document.preamble, rpkg, i+1)
if i == -1:
return
pkg = mo.group(3)
o = 0
oscale = 1
- osfoption = "osf"
has_osf = False
while o < len(options):
if options[o] == osfoption:
if not pkg in fm.pkginmap:
continue
# determine fontname
- fn = fm.getfontname(pkg, options)
+ fn = None
+ if haveFontOpts:
+ # Try with name-option combination first
+ # (only one default option supported currently)
+ o = 0
+ while o < len(options):
+ opt = options[o]
+ fn = fm.getfontname(pkg, [opt])
+ if fn != None:
+ del options[o]
+ break
+ o += 1
+ continue
+ if fn == None:
+ fn = fm.getfontname(pkg, [])
+ else:
+ fn = fm.getfontname(pkg, options)
if fn == None:
continue
del document.preamble[i]
else:
fontscale = "\\font_" + fontinfo.scaletype + "_scale"
fontinfo.scaleval = oscale
- if has_osf:
- if fontinfo.osfopt == None:
- options.extend("osf")
- continue
- osf = find_token(document.header, "\\font_osf false")
- if osf != -1:
- document.header[osf] = "\\font_osf true"
+ if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
+ if fontinfo.osfopt == None:
+ options.extend(osfoption)
+ continue
+ osf = find_token(document.header, "\\font_osf false")
+ osftag = "\\font_osf"
+ if osf == -1 and fontinfo.fonttype != "math":
+ # Try with newer format
+ osftag = "\\font_" + fontinfo.fonttype + "_osf"
+ osf = find_token(document.header, osftag + " false")
+ if osf != -1:
+ document.header[osf] = osftag + " true"
if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
del document.preamble[i-1]
i -= 1
words = val.split() # ! splits also values like '"DejaVu Sans"'
words[0] = '"' + fn + '"'
document.header[j] = ft + ' ' + ' '.join(words)
+ if haveFontOpts and fontinfo.fonttype != "math":
+ fotag = "\\font_" + fontinfo.fonttype + "_opts"
+ fo = find_token(document.header, fotag)
+ if fo != -1:
+ document.header[fo] = fotag + " \"" + ",".join(options) + "\""
+ else:
+ # Sensible place to insert tag
+ fo = find_token(document.header, "\\font_sf_scale")
+ if fo == -1:
+ document.warning("Malformed LyX document! Missing \\font_sf_scale")
+ else:
+ document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
-def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False):
+
+
+def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
" Revert native font definition to LaTeX "
# fonlist := list of fonts created from the same package
# Empty package means that the font-name is the same as the package-name
if not val in fontmap:
fontmap[val] = []
x = -1
- if OnlyWithXOpts:
+ if OnlyWithXOpts or WithXOpts:
if ft == "\\font_math":
return False
regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
elif ft == "\\font_typewriter":
regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
x = find_re(document.header, regexp, 0)
- if x == -1:
+ if x == -1 and OnlyWithXOpts:
return False
- # We need to use this regex since split() does not handle quote protection
- xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
- opts = xopts[1].strip('"').split(",")
- fontmap[val].extend(opts)
- del document.header[x]
+ if x != -1:
+ # We need to use this regex since split() does not handle quote protection
+ xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+ opts = xopts[1].strip('"').split(",")
+ fontmap[val].extend(opts)
+ del document.header[x]
words[0] = '"default"'
document.header[i] = ft + ' ' + ' '.join(words)
if fontinfo.scaleopt != None:
# set correct scale option
fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
if fontinfo.osfopt != None:
- osf = find_token(document.header, "\\font_osf true")
+ oldval = "true"
+ if fontinfo.osfdef == "true":
+ oldval = "false"
+ osf = find_token(document.header, "\\font_osf " + oldval)
+ if osf == -1 and ft != "\\font_math":
+ # Try with newer format
+ osftag = "\\font_roman_osf " + oldval
+ if ft == "\\font_sans":
+ osftag = "\\font_sans_osf " + oldval
+ elif ft == "\\font_typewriter":
+ osftag = "\\font_typewriter_osf " + oldval
+ osf = find_token(document.header, osftag)
if osf != -1:
fontmap[val].extend([fontinfo.osfopt])
if len(fontinfo.options) > 0:
def convert_notoFonts(document):
    """Translate a Noto LaTeX font preamble into native document settings."""
    # Only relevant when TeX fonts are in use.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
" Revert native Noto font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
fontmap = dict()
fm = createFontMapping(['Noto'])
if revert_fonts(document, fm, fontmap):
def convert_latexFonts(document):
    """Translate DejaVu and IBM Plex LaTeX font preambles into native settings."""
    # Only relevant when TeX fonts are in use.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
" Revert native DejaVu font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
fontmap = dict()
fm = createFontMapping(['DejaVu', 'IBM'])
if revert_fonts(document, fm, fontmap):
def convert_AdobeFonts(document):
    """Translate an Adobe Source LaTeX font preamble into native settings."""
    # Only relevant when TeX fonts are in use.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
" Revert Adobe Source font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
fontmap = dict()
fm = createFontMapping(['Adobe'])
if revert_fonts(document, fm, fontmap):
def revert_paratype(document):
" Revert ParaType font definitions to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
preamble = ""
i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
i2 = find_token(document.header, "\\font_sans \"default\"", 0)
while True:
i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
if i == -1:
- return
+ break
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Landscape inset")
document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
add_to_preamble(document, ["\\usepackage{pdflscape}"])
+ document.del_module("landscape")
def convert_fontenc(document):
i = j
+
def revert_stretchcolumn(document):
" We remove the column varwidth flags or everything else will become a mess. "
i = 0
document.body[tp] = "type \"buffer\""
document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "

    # Walk every Info inset in the body; any VCS info inset that requests the
    # abbreviated revision is downgraded to the plain "revision" argument,
    # which older formats understand.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        # 'type' and 'arg' parameter lines live inside the inset span [i, j].
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if( argv == "revision-abbrev" ):
            document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
" Revert date info insets to static text. "
"\\lineno_options %s" % options]
def convert_aaencoding(document):
    """Adjust the default-options flag for the aa class encoding change.

    The aa class switched its default encoding; documents that relied on the
    class default with a latin9/legacy input encoding must now pass the
    'latin9' class option explicitly.
    """
    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if val not in ("auto-legacy", "latin9"):
        return
    # Switch off default options and carry 'latin9' explicitly.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options")
    if k == -1:
        document.header.insert(i, "\\options latin9")
    else:
        document.header[k] += ",latin9"
+
+
def revert_aaencoding(document):
    """Undo the default-options adjustment for the aa class encoding change.

    When a document uses utf8 with the aa class, older formats need the
    'utf8' class option spelled out instead of relying on class defaults.
    """
    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    val = get_value(document.header, "\\inputencoding")
    if not val:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if val != "utf8":
        return
    # Switch off default options and carry 'utf8' explicitly.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
    if k == -1:
        document.header.insert(i, "\\options utf8")
    else:
        document.header[k] += ",utf8"
+
+
def revert_new_languages(document):
"""Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
and Russian (Petrine orthography)."""
"oldrussian": ("", "russian"),
"korean": ("", "korean"),
}
- used_languages = set()
if document.language in new_languages:
- used_languages.add(document.language)
+ used_languages = set((document.language, ))
+ else:
+ used_languages = set()
i = 0
while True:
i = find_token(document.body, "\\lang", i+1)
if i == -1:
break
- if document.body[i][6:].strip() in new_languages:
- used_languages.add(document.language)
+ val = get_value(document.body, "\\lang", i)
+ if val in new_languages:
+ used_languages.add(val)
# Korean is already supported via CJK, so leave as-is for Babel
if ("korean" in used_languages
- and get_bool_value(document.header, "\\use_non_tex_fonts")
- and get_value(document.header, "\\language_package") in ("default", "auto")):
- revert_language(document, "korean", "", "korean")
- used_languages.discard("korean")
+ and (not get_bool_value(document.header, "\\use_non_tex_fonts")
+ or get_value(document.header, "\\language_package") == "babel")):
+ used_languages.discard("korean")
for lang in used_languages:
- revert(lang, *new_languages[lang])
+ revert_language(document, lang, *new_languages[lang])
gloss_inset_def = [
beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
endInset = find_end_of_inset(document.body, i)
- endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
+ endPlain = find_end_of_layout(document.body, beginPlain)
precontent = put_cmd_in_ert(cmd)
if len(optargcontent) > 0:
precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
def revert_babelfont(document):
" Reverts the use of \\babelfont to user preamble "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if not get_bool_value(document.header, "\\use_non_tex_fonts"):
return
+
i = find_token(document.header, '\\language_package', 0)
if i == -1:
document.warning("Malformed LyX document: Missing \\language_package.")
def revert_minionpro(document):
" Revert native MinionPro font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
regexp = re.compile(r'(\\font_roman_opts)')
def revert_font_opts(document):
" revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
- i = find_token(document.header, '\\language_package', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\language_package.")
- return
- Babel = (get_value(document.header, "\\language_package", 0) == "babel")
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
+ Babel = (get_value(document.header, "\\language_package") == "babel")
# 1. Roman
regexp = re.compile(r'(\\font_roman_opts)')
def revert_plainNotoFonts_xopts(document):
" Revert native (straight) Noto font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
osf = False
def revert_notoFonts_xopts(document):
" Revert native (extended) Noto font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
def revert_IBMFonts_xopts(document):
" Revert native IBM font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
def revert_AdobeFonts_xopts(document):
" Revert native Adobe font definition (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
fontmap = dict()
def convert_osf(document):
" Convert \\font_osf param to new format "
- NonTeXFonts = False
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- else:
- NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
i = find_token(document.header, '\\font_osf', 0)
if i == -1:
ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
tt = ttfont[1].strip('"')
if tt in osftt:
- document.header.insert(i + 1, "\\font_sans_osf true")
+ document.header.insert(i + 1, "\\font_typewriter_osf true")
else:
- document.header.insert(i + 1, "\\font_sans_osf false")
+ document.header.insert(i + 1, "\\font_typewriter_osf false")
else:
document.header.insert(i, "\\font_sans_osf false")
def revert_osf(document):
" Revert \\font_*_osf params "
- NonTeXFonts = False
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- else:
- NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
+ NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
i = find_token(document.header, '\\font_roman_osf', 0)
if i == -1:
def revert_texfontopts(document):
" Revert native TeX font definitions (with extra options) to LaTeX "
- i = find_token(document.header, '\\use_non_tex_fonts', 0)
- if i == -1:
- document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
- return
- if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
+ if get_bool_value(document.header, "\\use_non_tex_fonts"):
return
rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
del document.header[x]
def convert_CantarellFont(document):
    """Translate a Cantarell LaTeX font preamble into native settings."""
    # Only relevant when TeX fonts are in use; cantarell's osf option
    # is named "oldstyle".
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Cantarell']), "oldstyle")
+
def revert_CantarellFont(document):
    """Revert native Cantarell font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = dict()
    fm = createFontMapping(['Cantarell'])
    # WithXOpts=True: also pick up extra font options if present.
    if revert_fonts(document, fm, fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
def convert_ChivoFont(document):
    """Translate a Chivo LaTeX font preamble into native settings."""
    # Only relevant when TeX fonts are in use; Chivo's osf option
    # is named "oldstyle".
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Chivo']), "oldstyle")
+
def revert_ChivoFont(document):
    """Revert native Chivo font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = dict()
    fm = createFontMapping(['Chivo'])
    # WithXOpts=True: also pick up extra font options if present.
    if revert_fonts(document, fm, fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def convert_FiraFont(document):
    """Translate a Fira LaTeX font preamble into native settings."""
    # Only relevant when TeX fonts are in use; Fira's osf option
    # is named "lf" (lining figures are the non-default).
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Fira']), "lf")
+
def revert_FiraFont(document):
    """Revert native Fira font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = dict()
    fm = createFontMapping(['Fira'])
    # WithXOpts=True: also pick up extra font options if present.
    if revert_fonts(document, fm, fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def _convert_semibold_family(document, fonttoken, sbname, plainname,
                             optstoken, scaletoken, NonTeXFonts):
    """Rename one semibold font to its regular family and move 'semibold' to its *_opts tag.

    fonttoken  -- header token of the font slot (e.g. "\\font_roman")
    sbname     -- the old semibold font name to look for
    plainname  -- the regular family name to substitute
    optstoken  -- the matching *_opts header token
    scaletoken -- header token before which a new *_opts line is inserted
    NonTeXFonts -- True when non-TeX fonts are used (no opts handling needed)
    """
    i = find_token(document.header, fonttoken, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing %s." % fonttoken)
        return
    # We need to use this regex since split() does not handle quote protection
    font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if font[1].strip('"') != sbname:
        return
    font[1] = '"' + plainname + '"'
    document.header[i] = " ".join(font)

    if NonTeXFonts:
        # With non-TeX fonts the *_opts machinery is not used.
        return
    regexp = re.compile("(" + re.escape(optstoken) + ")")
    x = find_re(document.header, regexp, 0)
    if x == -1:
        # Sensible place to insert the tag
        fo = find_token(document.header, scaletoken)
        if fo == -1:
            document.warning("Malformed LyX document! Missing %s" % scaletoken)
        else:
            document.header.insert(fo, optstoken + " \"semibold\"")
    else:
        # We need to use this regex since split() does not handle quote protection
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = optstoken + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    " Move semibold options to extraopts "

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    _convert_semibold_family(document, "\\font_roman", "IBMPlexSerifSemibold",
                             "IBMPlexSerif", "\\font_roman_opts",
                             "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_family(document, "\\font_sans", "IBMPlexSansSemibold",
                             "IBMPlexSans", "\\font_sans_opts",
                             "\\font_sf_scale", NonTeXFonts)
    # Bug fix: the original wrote the *sans* options (sfopts) into the
    # typewriter tag here; the helper uses the typewriter options.
    _convert_semibold_family(document, "\\font_typewriter", "IBMPlexMonoSemibold",
                             "IBMPlexMono", "\\font_typewriter_opts",
                             "\\font_tt_scale", NonTeXFonts)
+
+
def convert_NotoRegulars(document):
    """Merge the diverse Noto regular font names onto a single naming scheme."""
    # Map each header slot to its old TLF name and the unified regular name.
    renames = (
        ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
        ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
        ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular"),
    )
    for token, oldname, newname in renames:
        i = find_token(document.header, token, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % token)
            continue
        # We need to use this regex since split() does not handle quote protection
        words = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if words[1].strip('"') == oldname:
            words[1] = '"' + newname + '"'
            document.header[i] = " ".join(words)
+
+
def convert_CrimsonProFont(document):
    """Translate a CrimsonPro LaTeX font preamble into native settings."""
    # Only relevant when TeX fonts are in use; CrimsonPro's osf option
    # is named "lf" (lining figures are the non-default).
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['CrimsonPro']), "lf")
+
def revert_CrimsonProFont(document):
    """Revert native CrimsonPro font settings to LaTeX preamble code."""
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = dict()
    fm = createFontMapping(['CrimsonPro'])
    # WithXOpts=True: also pick up extra font options if present.
    if revert_fonts(document, fm, fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA classes to class options.

    If the document uses a paper size that older formats do not know,
    reset \\papersize to 'default' and pass the size as a class option.
    """
    # Bug fix: the original tested document.textclass[:2] != "scr" — a
    # two-character slice can never equal "scr", so the guard returned for
    # every KOMA (scr*) class.  Use [:3] as in convert_pagesizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry, the paper size is handled elsewhere.
    if find_token(document.header, "\\use_geometry true", 0) != -1:
        return

    # Sizes that older formats already understand.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Carry the real size as a class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
+
+
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA classes.

    When a non-standard paper size is used without geometry, switch
    geometry on so the size keeps being honored in the new format.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # Geometry already active: nothing to maintain.
    if find_token(document.header, "\\use_geometry true", 0) != -1:
        return

    # Sizes supported natively; no geometry needed for these.
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry.
        # Bug fix: the original wrote to document.header[1], clobbering a
        # fixed header line instead of the \use_geometry line just found.
        document.header[i] = "\\use_geometry true"
+
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    # Only KOMA-script classes (scr*) support the fontsize=<val> class option.
    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes that older formats already understand; nothing to revert for these.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    # Reset the header to a supported value and pass the real size
    # as a class option instead.
    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        # No \options header yet: insert one right before \textclass.
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
+
+
def revert_dupqualicites(document):
    " Revert qualified citation list commands with duplicate keys to ERT "

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Qualified citation lists exist only for the biblatex engines.
    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        # Only the qualified-list commands above are affected.
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        # Qualified lists carry tab-separated pre/post text lists.
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            i = j + 1
            continue

        # Only revert if the same key occurs more than once.
        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicates.
            i = j + 1
            continue

        # NOTE(review): if only one of pres/posts was found, the other is -1
        # and get_quoted_value then reads the last body line — presumably
        # yielding ""; verify against get_quoted_value's contract.
        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        # Pre/post text of the list as a whole.
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # Build key -> tab-joined list of pretexts (a key may repeat, so the
        # values for one key are queued with "\t" as separator).
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            # Each entry is "<key> <text>"; text may be empty.
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        # Same bookkeeping for the posttexts.
        postlist = posttexts.split("\t")
        postmap = dict()
        num = 1  # NOTE(review): unused
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        # Parentheses inside global pre/post text must be braced, since the
        # multicite syntax uses () as delimiters.
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # A lone (pre) would be parsed as (post); emit an empty (post).
            res += "()"
        for kk in keys:
            # Pop the next queued pretext for this key, if any.
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            # Pop the next queued posttext for this key, if any.
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # Pretext without posttext: keep an empty [] so the single
                # bracket is read as pretext, not posttext.
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
+
+
def convert_pagesizenames(document):
    """Strip the "paper" suffix from LyX paper size names (new format)."""
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # Old names carried a "paper" suffix (letterpaper, a4paper, ...).
    oldnames = ["letterpaper", "legalpaper", "executivepaper", \
                "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
                "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
                "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        document.header[i] = "\\papersize " + val.replace("paper", "")
+
+def revert_pagesizenames(document):
+ " Convert LyX page sizes names "
+
+ i = find_token(document.header, "\\papersize", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\papersize header.")
+ return
+ newnames = ["letter", "legal", "executive", \
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
+ "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
+ "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
+ val = get_value(document.header, "\\papersize", i)
+ if val in newnames:
+ newval = val + "paper"
+ document.header[i] = "\\papersize " + newval
+
+
+def revert_theendnotes(document):
+ " Reverts native support of \\theendnotes to TeX-code "
+
+ if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
+ return
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
+
+
+def revert_enotez(document):
+ " Reverts native support of enotez package to TeX-code "
+
+ if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
+ return
+
+ use = False
+ if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
+ use = True
+
+ revert_flex_inset(document.body, "Endnote", "\\endnote")
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ use = True
+ document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
+
+ if use:
+ add_to_preamble(document, ["\\usepackage{enotez}"])
+ document.del_module("enotez")
+ document.del_module("foottoenotez")
+
+
+def revert_memoir_endnotes(document):
+ " Reverts native support of memoir endnotes to TeX-code "
+
+ if document.textclass != "memoir":
+ return
+
+ encommand = "\\pagenote"
+ modules = document.get_module_list()
+ if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
+ encommand = "\\endnote"
+
+ revert_flex_inset(document.body, "Endnote", encommand)
+
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: Can't find end of FloatList inset")
+ continue
+
+ if document.body[i] == "\\begin_inset FloatList pagenote*":
+ document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
+ else:
+ document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
+ add_to_preamble(document, ["\\makepagenote"])
+
+
+def revert_totalheight(document):
+ " Reverts graphics height parameter from totalheight to height "
+
+ i = 0
+ while (True):
+ i = find_token(document.body, "\\begin_inset Graphics", i)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Can't find end of graphics inset at line %d!!" %(i))
+ i += 1
+ continue
+
+ rx = re.compile(r'\s*special\s*(\S+)$')
+ k = find_re(document.body, rx, i, j)
+ special = ""
+ oldheight = ""
+ if k != -1:
+ m = rx.match(document.body[k])
+ if m:
+ special = m.group(1)
+ mspecial = special.split(',')
+ for spc in mspecial:
+ if spc[:7] == "height=":
+ oldheight = spc.split('=')[1]
+ mspecial.remove(spc)
+ break
+ if len(mspecial) > 0:
+ special = ",".join(mspecial)
+ else:
+ special = ""
+
+ rx = re.compile(r'(\s*height\s*)(\S+)$')
+ kk = find_re(document.body, rx, i, j)
+ if kk != -1:
+ m = rx.match(document.body[kk])
+ val = ""
+ if m:
+ val = m.group(2)
+ if k != -1:
+ if special != "":
+ val = val + "," + special
+ document.body[k] = "\tspecial " + "totalheight=" + val
+ else:
+ document.body.insert(kk, "\tspecial totalheight=" + val)
+ if oldheight != "":
+ document.body[kk] = m.group(1) + oldheight
+ else:
+ del document.body[kk]
+ elif oldheight != "":
+ if special != "":
+ document.body[k] = "\tspecial " + special
+ document.body.insert(k, "\theight " + oldheight)
+ else:
+ document.body[k] = "\theight " + oldheight
+ i = j + 1
+
+
+def convert_totalheight(document):
+ " Converts graphics height parameter from totalheight to height "
+
+ i = 0
+ while (True):
+ i = find_token(document.body, "\\begin_inset Graphics", i)
+ if i == -1:
+ break
+ j = find_end_of_inset(document.body, i)
+ if j == -1:
+ document.warning("Can't find end of graphics inset at line %d!!" %(i))
+ i += 1
+ continue
+
+ rx = re.compile(r'\s*special\s*(\S+)$')
+ k = find_re(document.body, rx, i, j)
+ special = ""
+ newheight = ""
+ if k != -1:
+ m = rx.match(document.body[k])
+ if m:
+ special = m.group(1)
+ mspecial = special.split(',')
+ for spc in mspecial:
+ if spc[:12] == "totalheight=":
+ newheight = spc.split('=')[1]
+ mspecial.remove(spc)
+ break
+ if len(mspecial) > 0:
+ special = ",".join(mspecial)
+ else:
+ special = ""
+
+ rx = re.compile(r'(\s*height\s*)(\S+)$')
+ kk = find_re(document.body, rx, i, j)
+ if kk != -1:
+ m = rx.match(document.body[kk])
+ val = ""
+ if m:
+ val = m.group(2)
+ if k != -1:
+ if special != "":
+ val = val + "," + special
+ document.body[k] = "\tspecial " + "height=" + val
+ else:
+ document.body.insert(kk + 1, "\tspecial height=" + val)
+ if newheight != "":
+ document.body[kk] = m.group(1) + newheight
+ else:
+ del document.body[kk]
+ elif newheight != "":
+ document.body.insert(k, "\theight " + newheight)
+ i = j + 1
+
+
+def convert_changebars(document):
+ " Converts the changebars module to native solution "
+
+ if not "changebars" in document.get_module_list():
+ return
+
+ i = find_token(document.header, "\\output_changes", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\output_changes header.")
+ document.del_module("changebars")
+ return
+
+ document.header.insert(i, "\\change_bars true")
+ document.del_module("changebars")
+
+
+def revert_changebars(document):
+ " Converts native changebar param to module "
+
+ i = find_token(document.header, "\\change_bars", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\change_bars header.")
+ return
+
+ val = get_value(document.header, "\\change_bars", i)
+
+ if val == "true":
+ document.add_module("changebars")
+
+ del document.header[i]
+
+
+def convert_postpone_fragile(document):
+ " Adds false \\postpone_fragile_content buffer param "
+
+ i = find_token(document.header, "\\output_changes", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\output_changes header.")
+ return
+ # Set this to false for old documents (see #2154)
+ document.header.insert(i, "\\postpone_fragile_content false")
+
+
+def revert_postpone_fragile(document):
+ " Remove \\postpone_fragile_content buffer param "
+
+ i = find_token(document.header, "\\postpone_fragile_content", 0)
+ if i == -1:
+ document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
+ return
+
+ del document.header[i]
+
+def revert_colrow_tracking(document):
+ " Remove change tag from tabular columns/rows "
+ i = 0
+ while True:
+ i = find_token(document.body, "\\begin_inset Tabular", i+1)
+ if i == -1:
+ return
+ j = find_end_of_inset(document.body, i+1)
+ if j == -1:
+ document.warning("Malformed LyX document: Could not find end of tabular.")
+ continue
+ for k in range(i, j):
+ m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
+ if m:
+ document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
+ m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
+ if m:
+ document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
+
##
# Conversion hub
#
[572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
[573, [convert_inputencoding_namechange]],
[574, [convert_ruby_module, convert_utf8_japanese]],
- [575, [convert_lineno]],
+ [575, [convert_lineno, convert_aaencoding]],
[576, []],
[577, [convert_linggloss]],
[578, []],
[579, []],
[580, []],
- [581, [convert_osf]]
+ [581, [convert_osf]],
+          [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old fonts re-converted due to extra options
+ [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
+ [584, []],
+ [585, [convert_pagesizes]],
+ [586, []],
+ [587, [convert_pagesizenames]],
+ [588, []],
+ [589, [convert_totalheight]],
+ [590, [convert_changebars]],
+ [591, [convert_postpone_fragile]],
+ [592, []]
]
-revert = [[580, [revert_texfontopts,revert_osf]],
+revert = [[591, [revert_colrow_tracking]],
+ [590, [revert_postpone_fragile]],
+ [589, [revert_changebars]],
+ [588, [revert_totalheight]],
+ [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
+ [586, [revert_pagesizenames]],
+ [585, [revert_dupqualicites]],
+ [584, [revert_pagesizes,revert_komafontsizes]],
+ [583, [revert_vcsinfo_rev_abbrev]],
+ [582, [revert_ChivoFont,revert_CrimsonProFont]],
+ [581, [revert_CantarellFont,revert_FiraFont]],
+ [580, [revert_texfontopts,revert_osf]],
[579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
[578, [revert_babelfont]],
[577, [revert_drs]],
[576, [revert_linggloss, revert_subexarg]],
[575, [revert_new_languages]],
- [574, [revert_lineno]],
+ [574, [revert_lineno, revert_aaencoding]],
[573, [revert_ruby_module, revert_utf8_japanese]],
[572, [revert_inputencoding_namechange]],
[571, [revert_notoFonts]],