]> git.lyx.org Git - lyx.git/blobdiff - lib/lyx2lyx/lyx_2_4.py
Whitespace cleanup
[lyx.git] / lib / lyx2lyx / lyx_2_4.py
index bbc184e5bedc43c28bd7919db6c59822ea6c3def..3fd876c826797d3c3f906e59117fba09c8104f59 100644 (file)
@@ -26,33 +26,35 @@ from datetime import (datetime, date, time)
 
 # Uncomment only what you need to import, please.
 
-from parser_tools import (count_pars_in_inset, find_end_of_inset, find_end_of_layout,
-                          find_token, find_re, get_bool_value, get_containing_layout,
-                          get_option_value, get_value, get_quoted_value)
-#    del_token, del_value, del_complete_lines,
+from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
+    find_end_of_layout, find_token, find_token_backwards, find_token_exact,
+    find_re, get_bool_value,
+    get_containing_layout, get_option_value, get_value, get_quoted_value)
+#    del_value, del_complete_lines,
 #    find_complete_lines, find_end_of,
-#    find_re, find_substring, find_token_backwards,
+#    find_re, find_substring,
 #    get_containing_inset,
 #    is_in_inset, set_bool_value
-#    find_tokens, find_token_exact, check_token
+#    find_tokens, check_token
 
-from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble)
-#  revert_font_attrs, insert_to_preamble, latex_length
-#  get_ert, lyx2latex, lyx2verbatim, length_in_bp, convert_info_insets
-#  revert_flex_inset, hex2ratio, str2bool
+from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
+                           revert_language, revert_flex_inset, str2bool)
+#  revert_font_attrs, latex_length
+#  get_ert, lyx2verbatim, length_in_bp, convert_info_insets
+#  revert_flex_inset, hex2ratio
 
 ####################################################################
 # Private helper functions
 
 def add_preamble_fonts(document, fontmap):
-    " Add collected font-packages with their option to user-preamble"
+    """Add collected font-packages with their option to user-preamble"""
 
     for pkg in fontmap:
         if len(fontmap[pkg]) > 0:
             xoption = "[" + ",".join(fontmap[pkg]) + "]"
         else:
             xoption = ""
-        preamble = "\\usepackage" + xoption + "{%s}" % pkg
+        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
         add_to_preamble(document, [preamble])
 
 
@@ -70,6 +72,8 @@ class fontinfo:
         self.package = None
         self.options = []
         self.pkgkey = None      # key into pkg2fontmap
+        self.osfopt = None      # None, string
+        self.osfdef = "false"   # "false" or "true"
 
     def addkey(self):
         self.pkgkey = createkey(self.package, self.options)
@@ -80,8 +84,8 @@ class fontmapping:
         self.pkg2fontmap = dict()
         self.pkginmap = dict()  # defines, if a map for package exists
 
-    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None):
-        " Expand fontinfo mapping"
+    def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
+        """Expand fontinfo mapping"""
         #
         # fontlist:    list of fontnames, each element
         #              may contain a ','-separated list of needed options
@@ -91,6 +95,8 @@ class fontmapping:
         # pkg:         package defining the font. Defaults to fontname if None
         # scaleopt:    one of None, 'scale', 'scaled', or some other string
         #              to be used in scale option (e.g. scaled=0.7)
+        # osfopt:      None or some other string to be used in osf option
+        # osfdef:      "true" if osf is default
         for fl in font_list:
             fe = fontinfo()
             fe.fonttype = font_type
@@ -100,6 +106,8 @@ class fontmapping:
             fe.fontname = font_name
             fe.options = flt[1:]
             fe.scaleopt = scaleopt
+            fe.osfopt = osfopt
+            fe.osfdef = osfdef
             if pkg == None:
                 fe.package = font_name
             else:
@@ -126,7 +134,7 @@ class fontmapping:
             return fontname
         return None
 
-def createFontMapping():
+def createFontMapping(fontlist):
     # Create info for known fonts for the use in
     #   convert_latexFonts() and
     #   revert_latexFonts()
@@ -136,35 +144,72 @@ def createFontMapping():
     # * For now, add DejaVu and IBMPlex only.
     # * Expand, if desired
     fm = fontmapping()
-    fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
-    fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
-    fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
-    fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
-                          'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
-                          'IBMPlexSerifSemibold,semibold'],
-                         "roman", None, "plex-serif")
-    fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
-                          'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
-                          'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
-                         "sans", "sf", "plex-sans", "scale")
-    fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
-                          'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
-                          'IBMPlexMonoSemibold,semibold'],
-                         "typewriter", "tt", "plex-mono", "scale")
-    fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro")
-    fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled")
-    fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled")
+    for font in fontlist:
+        if font == 'DejaVu':
+            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
+            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
+            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
+        elif font == 'IBM':
+            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
+                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
+                                  'IBMPlexSerifSemibold,semibold'],
+                                 "roman", None, "plex-serif")
+            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
+                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
+                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
+                                 "sans", "sf", "plex-sans", "scale")
+            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
+                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
+                                  'IBMPlexMonoSemibold,semibold'],
+                                 "typewriter", "tt", "plex-mono", "scale")
+        elif font == 'Adobe':
+            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
+            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
+            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
+        elif font == 'Noto':
+            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
+                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
+                                  'NotoSerifExtralight,extralight'],
+                                  "roman", None, "noto-serif", None, "osf")
+            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
+                                  'NotoSansThin,thin', 'NotoSansLight,light',
+                                  'NotoSansExtralight,extralight'],
+                                  "sans", "sf", "noto-sans", "scaled")
+            fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
+        elif font == 'Cantarell':
+            fm.expandFontMapping(['cantarell,defaultsans'],
+                                  "sans", "sf", "cantarell", "scaled", "oldstyle")
+        elif font == 'Chivo':
+            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
+                                  'Chivo,regular', 'ChivoMedium,medium'],
+                                  "sans", "sf", "Chivo", "scale", "oldstyle")
+        elif font == 'CrimsonPro':
+            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
+                                  'CrimsonProMedium,medium'],
+                                  "roman", None, "CrimsonPro", None, "lf", "true")
+        elif font == 'Fira':
+            fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
+                                  'FiraSansThin,thin', 'FiraSansLight,light',
+                                  'FiraSansExtralight,extralight',
+                                  'FiraSansUltralight,ultralight'],
+                                  "sans", "sf", "FiraSans", "scaled", "lf", "true")
+            fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
+        elif font == 'libertinus':
+            fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
+            fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
     return fm
 
-def convert_fonts(document, fm):
-    " Handle font definition to LaTeX "
-
+def convert_fonts(document, fm, osfoption = "osf"):
+    """Handle font definition (LaTeX preamble -> native)"""
     rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
     rscaleopt = re.compile(r'^scaled?=(.*)')
 
+    # Check whether we go beyond font option feature introduction
+    haveFontOpts = document.end_format > 580
+
     i = 0
-    while i < len(document.preamble):
-        i = find_re(document.preamble, rpkg, i)
+    while True:
+        i = find_re(document.preamble, rpkg, i+1)
         if i == -1:
             return
         mo = rpkg.search(document.preamble[i])
@@ -175,22 +220,41 @@ def convert_fonts(document, fm):
         pkg = mo.group(3)
         o = 0
         oscale = 1
+        has_osf = False
         while o < len(options):
+            if options[o] == osfoption:
+                has_osf = True
+                del options[o]
+                continue
             mo = rscaleopt.search(options[o])
             if mo == None:
                 o += 1
                 continue
             oscale = mo.group(1)
             del options[o]
-            break
+            continue
 
         if not pkg in fm.pkginmap:
-            i += 1
             continue
         # determine fontname
-        fn = fm.getfontname(pkg, options)
+        fn = None
+        if haveFontOpts:
+            # Try with name-option combination first
+            # (only one default option supported currently)
+            o = 0
+            while o < len(options):
+                opt = options[o]
+                fn = fm.getfontname(pkg, [opt])
+                if fn != None:
+                    del options[o]
+                    break
+                o += 1
+                continue
+            if fn == None:
+                fn = fm.getfontname(pkg, [])
+        else:
+            fn = fm.getfontname(pkg, options)
         if fn == None:
-            i += 1
             continue
         del document.preamble[i]
         fontinfo = fm.font2pkgmap[fn]
@@ -199,9 +263,21 @@ def convert_fonts(document, fm):
         else:
             fontscale = "\\font_" + fontinfo.scaletype + "_scale"
             fontinfo.scaleval = oscale
-
+        if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
+            if fontinfo.osfopt == None:
+                options.extend([osfoption])
+                continue
+            osf = find_token(document.header, "\\font_osf false")
+            osftag = "\\font_osf"
+            if osf == -1 and fontinfo.fonttype != "math":
+                # Try with newer format
+                osftag = "\\font_" + fontinfo.fonttype + "_osf"
+                osf = find_token(document.header, osftag + " false")
+            if osf != -1:
+                document.header[osf] = osftag + " true"
         if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
             del document.preamble[i-1]
+            i -= 1
         if fontscale != None:
             j = find_token(document.header, fontscale, 0)
             if j != -1:
@@ -215,11 +291,25 @@ def convert_fonts(document, fm):
         j = find_token(document.header, ft, 0)
         if j != -1:
             val = get_value(document.header, ft, j)
-            vals = val.split()
-            document.header[j] = ft + ' "' + fn + '" ' + vals[1]
+            words = val.split() # ! splits also values like '"DejaVu Sans"'
+            words[0] = '"' + fn + '"'
+            document.header[j] = ft + ' ' + ' '.join(words)
+        if haveFontOpts and fontinfo.fonttype != "math":
+            fotag = "\\font_" + fontinfo.fonttype + "_opts"
+            fo = find_token(document.header, fotag)
+            if fo != -1:
+                document.header[fo] = fotag + " \"" + ",".join(options) + "\""
+            else:
+                # Sensible place to insert tag
+                fo = find_token(document.header, "\\font_sf_scale")
+                if fo == -1:
+                    document.warning("Malformed LyX document! Missing \\font_sf_scale")
+                else:
+                    document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
 
-def revert_fonts(document, fm, fontmap):
-    " Revert native font definition to LaTeX "
+
+def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
+    """Revert native font definition to LaTeX"""
     # fonlist := list of fonts created from the same package
     # Empty package means that the font-name is the same as the package-name
     # fontmap (key = package, val += found options) will be filled
@@ -229,25 +319,43 @@ def revert_fonts(document, fm, fontmap):
     rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
     i = 0
     while i < len(document.header):
-        i = find_re(document.header, rfontscale, i)
+        i = find_re(document.header, rfontscale, i+1)
         if (i == -1):
-            break
+            return True
         mo = rfontscale.search(document.header[i])
         if mo == None:
-            i += 1
             continue
         ft = mo.group(1)    # 'roman', 'sans', 'typewriter', 'math'
         val = get_value(document.header, ft, i)
-        words = val.split()
-        font = words[0].replace('"', '')
+        words = val.split(' ')     # ! splits also values like '"DejaVu Sans"'
+        font = words[0].strip('"') # TeX font name has no whitespace
         if not font in fm.font2pkgmap:
-            i += 1
             continue
         fontinfo = fm.font2pkgmap[font]
         val = fontinfo.package
         if not val in fontmap:
             fontmap[val] = []
-        document.header[i] = ft + ' "default" ' + words[1]
+        x = -1
+        if OnlyWithXOpts or WithXOpts:
+            if ft == "\\font_math":
+                return False
+            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
+            if ft == "\\font_sans":
+                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
+            elif ft == "\\font_typewriter":
+                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
+            x = find_re(document.header, regexp, 0)
+            if x == -1 and OnlyWithXOpts:
+                return False
+
+            if x != -1:
+                # We need to use this regex since split() does not handle quote protection
+                xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
+                opts = xopts[1].strip('"').split(",")
+                fontmap[val].extend(opts)
+                del document.header[x]
+        words[0] = '"default"'
+        document.header[i] = ft + ' ' + ' '.join(words)
         if fontinfo.scaleopt != None:
             xval =  get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
             mo = rscales.search(xval)
@@ -257,9 +365,24 @@ def revert_fonts(document, fm, fontmap):
                 if xval1 != "100":
                     # set correct scale option
                     fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
+        if fontinfo.osfopt != None:
+            oldval = "true"
+            if fontinfo.osfdef == "true":
+                oldval = "false"
+            osf = find_token(document.header, "\\font_osf " + oldval)
+            if osf == -1 and ft != "\\font_math":
+                # Try with newer format
+                osftag = "\\font_roman_osf " + oldval
+                if ft == "\\font_sans":
+                    osftag = "\\font_sans_osf " + oldval
+                elif ft == "\\font_typewriter":
+                    osftag = "\\font_typewriter_osf " + oldval
+                osf = find_token(document.header, osftag)
+            if osf != -1:
+                fontmap[val].extend([fontinfo.osfopt])
         if len(fontinfo.options) > 0:
             fontmap[val].extend(fontinfo.options)
-        i += 1
+    return True
 
 ###############################################################################
 ###
@@ -267,45 +390,96 @@ def revert_fonts(document, fm, fontmap):
 ###
 ###############################################################################
 
+def convert_inputencoding_namechange(document):
+    """Rename inputencoding settings."""
+    i = find_token(document.header, "\\inputencoding", 0)
+    if i == -1:
+        return
+    s = document.header[i].replace("auto", "auto-legacy")
+    document.header[i] = s.replace("default", "auto-legacy-plain")
+
+def revert_inputencoding_namechange(document):
+    """Rename inputencoding settings."""
+    i = find_token(document.header, "\\inputencoding", 0)
+    if i == -1:
+        return
+    s = document.header[i].replace("auto-legacy-plain", "default")
+    document.header[i] = s.replace("auto-legacy", "auto")
+
+def convert_notoFonts(document):
+    """Handle Noto fonts definition to LaTeX"""
+
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+        fm = createFontMapping(['Noto'])
+        convert_fonts(document, fm)
+
+def revert_notoFonts(document):
+    """Revert native Noto font definition to LaTeX"""
+
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+        fontmap = dict()
+        fm = createFontMapping(['Noto'])
+        if revert_fonts(document, fm, fontmap):
+            add_preamble_fonts(document, fontmap)
+
 def convert_latexFonts(document):
-    " Handle DejaVu and IBMPlex fonts definition to LaTeX "
+    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
 
-    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
-        fm = createFontMapping()
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+        fm = createFontMapping(['DejaVu', 'IBM'])
         convert_fonts(document, fm)
 
 def revert_latexFonts(document):
-    " Revert native DejaVu font definition to LaTeX "
+    """Revert native DejaVu font definition to LaTeX"""
 
-    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
         fontmap = dict()
-        fm = createFontMapping()
-        revert_fonts(document, fm, fontmap)
-        add_preamble_fonts(document, fontmap)
+        fm = createFontMapping(['DejaVu', 'IBM'])
+        if revert_fonts(document, fm, fontmap):
+            add_preamble_fonts(document, fontmap)
+
+def convert_AdobeFonts(document):
+    """Handle Adobe Source fonts definition to LaTeX"""
+
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+        fm = createFontMapping(['Adobe'])
+        convert_fonts(document, fm)
+
+def revert_AdobeFonts(document):
+    """Revert Adobe Source font definition to LaTeX"""
+
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
+        fontmap = dict()
+        fm = createFontMapping(['Adobe'])
+        if revert_fonts(document, fm, fontmap):
+            add_preamble_fonts(document, fontmap)
 
 def removeFrontMatterStyles(document):
-    " Remove styles Begin/EndFrontmatter"
+    """Remove styles Begin/EndFrontmatter"""
 
     layouts = ['BeginFrontmatter', 'EndFrontmatter']
-    for layout in layouts:
-        i = 0
-        while True:
-            i = find_token(document.body, '\\begin_layout ' + layout, i)
-            if i == -1:
-                break
-            j = find_end_of_layout(document.body, i)
-            if j == -1:
-                document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
-                i += 1
-                continue
-            while i > 0 and document.body[i-1].strip() == '':
-                i -= 1
-            while document.body[j+1].strip() == '':
-                j = j + 1
-            document.body[i:j+1] = ['']
+    tokenend = len('\\begin_layout ')
+    i = 0
+    while True:
+        i = find_token_exact(document.body, '\\begin_layout ', i+1)
+        if i == -1:
+            return
+        layout = document.body[i][tokenend:].strip()
+        if layout not in layouts:
+            continue
+        j = find_end_of_layout(document.body, i)
+        if j == -1:
+            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
+            continue
+        while document.body[j+1].strip() == '':
+            j += 1
+        document.body[i:j+1] = []
 
 def addFrontMatterStyles(document):
-    " Use styles Begin/EndFrontmatter for elsarticle"
+    """Use styles Begin/EndFrontmatter for elsarticle"""
+
+    if document.textclass != "elsarticle":
+        return
 
     def insertFrontmatter(prefix, line):
         above = line
@@ -323,85 +497,91 @@ def addFrontMatterStyles(document):
                                     '\\end_inset', '', '',
                                     '\\end_layout', '']
 
-    if document.textclass == "elsarticle":
-        layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
-                   'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
-        first = -1
-        last = -1
-        for layout in layouts:
-            i = 0
-            while True:
-                i = find_token(document.body, '\\begin_layout ' + layout, i)
-                if i == -1:
-                    break
-                k = find_end_of_layout(document.body, i)
-                if k == -1:
-                    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
-                    i += 1;
-                    continue
-                if first == -1 or i < first:
-                    first = i
-                if last == -1 or last <= k:
-                    last = k+1
-                i = k+1
+    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
+                'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
+    tokenend = len('\\begin_layout ')
+    first = -1
+    i = 0
+    while True:
+        i = find_token_exact(document.body, '\\begin_layout ', i+1)
+        if i == -1:
+            break
+        layout = document.body[i][tokenend:].strip()
+        if layout not in layouts:
+            continue
+        k = find_end_of_layout(document.body, i)
+        if k == -1:
+            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
+            continue
         if first == -1:
-            return
-        insertFrontmatter('End', last)
-        insertFrontmatter('Begin', first)
+            first = i
+        i = k
+    if first == -1:
+        return
+    insertFrontmatter('End', k+1)
+    insertFrontmatter('Begin', first)
+
 
 def convert_lst_literalparam(document):
-    " Add param literal to include inset "
+    """Add param literal to include inset"""
 
     i = 0
     while True:
-        i = find_token(document.body, '\\begin_inset CommandInset include', i)
+        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
-            i += 1
             continue
         while i < j and document.body[i].strip() != '':
             i += 1
-        document.body.insert(i, "literal \"true\"")
+        document.body.insert(i, 'literal "true"')
 
 
 def revert_lst_literalparam(document):
-    " Remove param literal from include inset "
+    """Remove param literal from include inset"""
 
     i = 0
     while True:
-        i = find_token(document.body, '\\begin_inset CommandInset include', i)
+        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
-            i += 1
             continue
-        k = find_token(document.body, 'literal', i, j)
-        if k == -1:
-            i += 1
-            continue
-        del document.body[k]
+        del_token(document.body, 'literal', i, j)
 
 
 def revert_paratype(document):
-    " Revert ParaType font definitions to LaTeX "
+    """Revert ParaType font definitions to LaTeX"""
 
-    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
         preamble = ""
         i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
         i2 = find_token(document.header, "\\font_sans \"default\"", 0)
         i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
         j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
-        sfval = get_value(document.header, "\\font_sf_scale", 0)
-        # cutoff " 100"
-        sfval = sfval[:-4]
+
+        sf_scale = 100.0
+        sfval = find_token(document.header, "\\font_sf_scale", 0)
+        if sfval == -1:
+            document.warning("Malformed LyX document: Missing \\font_sf_scale.")
+        else:
+            sfscale = document.header[sfval].split()
+            val = sfscale[1]
+            sfscale[1] = "100"
+            document.header[sfval] = " ".join(sfscale)
+            try:
+                # float() can throw
+                sf_scale = float(val)
+            except:
+                document.warning("Invalid font_sf_scale value: " + val)
+
         sfoption = ""
-        if sfval != "100":
-            sfoption = "scaled=" + format(float(sfval) / 100, '.2f')
+        if sf_scale != 100.0:
+            sfoption = "scaled=" + str(sf_scale / 100.0)
         k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
         ttval = get_value(document.header, "\\font_tt_scale", 0)
         # cutoff " 100"
@@ -430,7 +610,7 @@ def revert_paratype(document):
 
 
 def revert_xcharter(document):
-    " Revert XCharter font definitions to LaTeX "
+    """Revert XCharter font definitions to LaTeX"""
 
     i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
     if i == -1:
@@ -454,20 +634,19 @@ def revert_xcharter(document):
 
 
 def revert_lscape(document):
-    " Reverts the landscape environment (Landscape module) to TeX-code "
+    """Reverts the landscape environment (Landscape module) to TeX-code"""
 
     if not "landscape" in document.get_module_list():
         return
 
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Flex Landscape", i)
+        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
         if i == -1:
-            return
+            break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Malformed LyX document: Can't find end of Landscape inset")
-            i += 1
             continue
 
         if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
@@ -479,11 +658,11 @@ def revert_lscape(document):
             document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
 
         add_to_preamble(document, ["\\usepackage{pdflscape}"])
-        # no need to reset i
+    document.del_module("landscape")
 
 
 def convert_fontenc(document):
-    " Convert default fontenc setting "
+    """Convert default fontenc setting"""
 
     i = find_token(document.header, "\\fontencoding global", 0)
     if i == -1:
@@ -493,7 +672,7 @@ def convert_fontenc(document):
 
 
 def revert_fontenc(document):
-    " Revert default fontenc setting "
+    """Revert default fontenc setting"""
 
     i = find_token(document.header, "\\fontencoding auto", 0)
     if i == -1:
@@ -503,7 +682,7 @@ def revert_fontenc(document):
 
 
 def revert_nospellcheck(document):
-    " Remove nospellcheck font info param "
+    """Remove nospellcheck font info param"""
 
     i = 0
     while True:
@@ -514,49 +693,40 @@ def revert_nospellcheck(document):
 
 
 def revert_floatpclass(document):
-    " Remove float placement params 'document' and 'class' "
+    """Remove float placement params 'document' and 'class'"""
 
-    i = 0
-    i = find_token(document.header, "\\float_placement class", 0)
-    if i != -1:
-        del document.header[i]
+    del_token(document.header, "\\float_placement class")
 
     i = 0
     while True:
-        i = find_token(document.body, '\\begin_inset Float', i)
+        i = find_token(document.body, '\\begin_inset Float', i + 1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
-        k = find_token(document.body, 'placement class', i, i + 2)
+        k = find_token(document.body, 'placement class', i, j)
         if k == -1:
-            k = find_token(document.body, 'placement document', i, i + 2)
+            k = find_token(document.body, 'placement document', i, j)
             if k != -1:
                 del document.body[k]
-            i = j
             continue
         del document.body[k]
 
 
 def revert_floatalignment(document):
-    " Remove float alignment params "
+    """Remove float alignment params"""
 
-    i = 0
-    i = find_token(document.header, "\\float_alignment", 0)
-    galignment = ""
-    if i != -1:
-        galignment = get_value(document.header, "\\float_alignment", i)
-        del document.header[i]
+    galignment = get_value(document.header, "\\float_alignment", delete=True)
 
     i = 0
     while True:
-        i = find_token(document.body, '\\begin_inset Float', i)
+        i = find_token(document.body, '\\begin_inset Float', i + 1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
-            i += 1
-        k = find_token(document.body, 'alignment', i, i + 4)
+            continue
+        k = find_token(document.body, 'alignment', i, j)
         if k == -1:
             i = j
             continue
@@ -567,7 +737,6 @@ def revert_floatalignment(document):
         l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
         if l == -1:
             document.warning("Can't find float layout!")
-            i = j
             continue
         alcmd = []
         if alignment == "left":
@@ -578,11 +747,12 @@ def revert_floatalignment(document):
             alcmd = put_cmd_in_ert("\\raggedleft{}")
         if len(alcmd) > 0:
             document.body[l+1:l+1] = alcmd
-        i = j 
-
+        # There might be subfloats, so we do not want to move past
+        # the end of the inset.
+        i += 1
 
 def revert_tuftecite(document):
-    " Revert \cite commands in tufte classes "
+    """Revert \cite commands in tufte classes"""
 
     tufte = ["tufte-book", "tufte-handout"]
     if document.textclass not in tufte:
@@ -590,22 +760,21 @@ def revert_tuftecite(document):
 
     i = 0
     while (True):
-        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
+        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Can't find end of citation inset at line %d!!" %(i))
-            i += 1
             continue
         k = find_token(document.body, "LatexCommand", i, j)
         if k == -1:
             document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
-            i = j + 1
+            i = j
             continue
         cmd = get_value(document.body, "LatexCommand", k)
         if cmd != "cite":
-            i = j + 1
+            i = j
             continue
         pre = get_quoted_value(document.body, "before", i, j)
         post = get_quoted_value(document.body, "after", i, j)
@@ -623,17 +792,18 @@ def revert_tuftecite(document):
             res += "[]"
         res += "{" + key + "}"
         document.body[i:j+1] = put_cmd_in_ert([res])
-        i = j + 1
+        i = j
+
 
 
 def revert_stretchcolumn(document):
-    " We remove the column varwidth flags or everything else will become a mess. "
+    """We remove the column varwidth flags or everything else will become a mess."""
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Tabular", i)
+        i = find_token(document.body, "\\begin_inset Tabular", i+1)
         if i == -1:
             return
-        j = find_end_of_inset(document.body, i + 1)
+        j = find_end_of_inset(document.body, i+1)
         if j == -1:
             document.warning("Malformed LyX document: Could not find end of tabular.")
             continue
@@ -641,23 +811,21 @@ def revert_stretchcolumn(document):
             if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                 document.body[k] = document.body[k].replace(' varwidth="true"', '')
-        i = i + 1
 
 
 def revert_vcolumns(document):
-    " Revert standard columns with line breaks etc. "
+    """Revert standard columns with line breaks etc."""
     i = 0
     needvarwidth = False
     needarray = False
     try:
         while True:
-            i = find_token(document.body, "\\begin_inset Tabular", i)
+            i = find_token(document.body, "\\begin_inset Tabular", i+1)
             if i == -1:
                 return
             j = find_end_of_inset(document.body, i)
             if j == -1:
                 document.warning("Malformed LyX document: Could not find end of tabular.")
-                i += 1
                 continue
 
             # Collect necessary column information
@@ -708,7 +876,7 @@ def revert_vcolumns(document):
                             if vval != "":
                                 needarray = True
                             vval += "V{\\linewidth}"
-                
+
                             document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                             # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                             # with newlines, and we do not want that)
@@ -729,7 +897,7 @@ def revert_vcolumns(document):
                                     document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
                     m += 1
 
-            i = j + 1
+            i = j
 
     finally:
         if needarray == True:
@@ -739,7 +907,7 @@ def revert_vcolumns(document):
 
 
 def revert_bibencoding(document):
-    " Revert bibliography encoding "
+    """Revert bibliography encoding"""
 
     # Get cite engine
     engine = "basic"
@@ -754,7 +922,7 @@ def revert_bibencoding(document):
     if engine in ["biblatex", "biblatex-natbib"]:
         biblatex = True
 
-    # Map lyx to latex encoding names 
+    # Map lyx to latex encoding names
     encodings = {
         "utf8" : "utf8",
         "utf8x" : "utf8x",
@@ -797,22 +965,22 @@ def revert_bibencoding(document):
     i = 0
     bibresources = []
     while (True):
-        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
+        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
         if i == -1:
             break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Can't find end of bibtex inset at line %d!!" %(i))
-            i += 1
             continue
         encoding = get_quoted_value(document.body, "encoding", i, j)
         if not encoding:
-            i += 1
             continue
         # remove encoding line
         k = find_token(document.body, "encoding", i, j)
         if k != -1:
             del document.body[k]
+        if encoding == "default":
+            continue
         # Re-find inset end line
         j = find_end_of_inset(document.body, i)
         if biblatex:
@@ -833,12 +1001,12 @@ def revert_bibencoding(document):
             document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
             document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
 
-        i = j + 1
+        i = j
 
 
 
 def convert_vcsinfo(document):
-    " Separate vcs Info inset from buffer Info inset. "
+    """Separate vcs Info inset from buffer Info inset."""
 
     types = {
         "vcs-revision" : "revision",
@@ -849,61 +1017,73 @@ def convert_vcsinfo(document):
     }
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Info", i)
+        i = find_token(document.body, "\\begin_inset Info", i+1)
         if i == -1:
             return
-        j = find_end_of_inset(document.body, i + 1)
+        j = find_end_of_inset(document.body, i+1)
         if j == -1:
             document.warning("Malformed LyX document: Could not find end of Info inset.")
-            i = i + 1
             continue
         tp = find_token(document.body, 'type', i, j)
         tpv = get_quoted_value(document.body, "type", tp)
         if tpv != "buffer":
-            i = i + 1
             continue
         arg = find_token(document.body, 'arg', i, j)
         argv = get_quoted_value(document.body, "arg", arg)
         if argv not in list(types.keys()):
-            i = i + 1
             continue
         document.body[tp] = "type \"vcs\""
         document.body[arg] = "arg \"" + types[argv] + "\""
-        i = i + 1
 
 
def revert_vcsinfo(document):
    """Merge vcs Info inset to buffer Info inset."""

    valid_args = ("revision", "tree-revision", "author", "time", "date")
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        typeline = find_token(document.body, 'type', pos, end)
        if get_quoted_value(document.body, "type", typeline) != "vcs":
            continue
        argline = find_token(document.body, 'arg', pos, end)
        argval = get_quoted_value(document.body, "arg", argline)
        if argval not in valid_args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
            continue
        # Re-tag the inset as a buffer Info inset with a vcs-* argument.
        document.body[typeline] = 'type "buffer"'
        document.body[argline] = 'arg "vcs-%s"' % argval
 
def revert_vcsinfo_rev_abbrev(document):
    """Convert abbreviated revisions to regular revisions."""

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        typeline = find_token(document.body, 'type', pos, end)
        if get_quoted_value(document.body, "type", typeline) != "vcs":
            continue
        argline = find_token(document.body, 'arg', pos, end)
        # "revision-abbrev" is not known to older LyX; fall back to "revision".
        if get_quoted_value(document.body, "arg", argline) == "revision-abbrev":
            document.body[argline] = 'arg "revision"'
 
 def revert_dateinfo(document):
-    " Revert date info insets to static text. "
+    """Revert date info insets to static text."""
 
 # FIXME This currently only considers the main language and uses the system locale
 # Ideally, it should honor context languages and switch the locale accordingly.
@@ -975,6 +1155,7 @@ def revert_dateinfo(document):
         "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
         "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
         "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
+        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
         "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
         "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
         "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
@@ -1015,28 +1196,23 @@ def revert_dateinfo(document):
     }
 
     types = ["date", "fixdate", "moddate" ]
-    i = 0
-    i = find_token(document.header, "\\language", 0)
-    if i == -1:
-        # this should not happen
+    lang = get_value(document.header, "\\language")
+    if lang == "":
         document.warning("Malformed LyX document! No \\language header found!")
         return
-    lang = get_value(document.header, "\\language", i)
 
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Info", i)
+        i = find_token(document.body, "\\begin_inset Info", i+1)
         if i == -1:
             return
-        j = find_end_of_inset(document.body, i + 1)
+        j = find_end_of_inset(document.body, i+1)
         if j == -1:
             document.warning("Malformed LyX document: Could not find end of Info inset.")
-            i = i + 1
             continue
         tp = find_token(document.body, 'type', i, j)
         tpv = get_quoted_value(document.body, "type", tp)
         if tpv not in types:
-            i = i + 1
             continue
         arg = find_token(document.body, 'arg', i, j)
         argv = get_quoted_value(document.body, "arg", arg)
@@ -1074,12 +1250,15 @@ def revert_dateinfo(document):
             fmt = re.sub('[^\'%]d', '%d', fmt)
             fmt = fmt.replace("'", "")
             result = dte.strftime(fmt)
-        document.body[i : j+1] = result
-        i = i + 1
+        if sys.version_info < (3,0):
+            # In Python 2, datetime module works with binary strings,
+            # our dateformat strings are utf8-encoded:
+            result = result.decode('utf-8')
+        document.body[i : j+1] = [result]
 
 
 def revert_timeinfo(document):
-    " Revert time info insets to static text. "
+    """Revert time info insets to static text."""
 
 # FIXME This currently only considers the main language and uses the system locale
 # Ideally, it should honor context languages and switch the locale accordingly.
@@ -1153,6 +1332,7 @@ def revert_timeinfo(document):
         "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
         "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
         "magyar" : ["%H:%M:%S %Z", "%H:%M"],
+        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
         "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
         "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
         "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
@@ -1193,7 +1373,6 @@ def revert_timeinfo(document):
     }
 
     types = ["time", "fixtime", "modtime" ]
-    i = 0
     i = find_token(document.header, "\\language", 0)
     if i == -1:
         # this should not happen
@@ -1203,18 +1382,16 @@ def revert_timeinfo(document):
 
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Info", i)
+        i = find_token(document.body, "\\begin_inset Info", i+1)
         if i == -1:
             return
-        j = find_end_of_inset(document.body, i + 1)
+        j = find_end_of_inset(document.body, i+1)
         if j == -1:
             document.warning("Malformed LyX document: Could not find end of Info inset.")
-            i = i + 1
             continue
         tp = find_token(document.body, 'type', i, j)
         tpv = get_quoted_value(document.body, "type", tp)
         if tpv not in types:
-            i = i + 1
             continue
         arg = find_token(document.body, 'arg', i, j)
         argv = get_quoted_value(document.body, "arg", arg)
@@ -1252,67 +1429,59 @@ def revert_timeinfo(document):
             fmt = fmt.replace("'", "")
             result = dte.strftime(fmt)
         document.body[i : j+1] = result
-        i = i + 1
 
 
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name."""

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset Info", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos + 1)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        typeline = find_token(document.body, 'type', pos, end)
        if get_quoted_value(document.body, "type", typeline) != "buffer":
            continue
        argline = find_token(document.body, 'arg', pos, end)
        # "name-noext" is unknown to older LyX; downgrade it to plain "name".
        if get_quoted_value(document.body, "arg", argline) == "name-noext":
            document.body[argline] = 'arg "name"'
 
 
def revert_l7ninfo(document):
    """Revert l7n Info inset to text.

    The localized string is inlined into the paragraph after stripping
    UI decorations (trailing colons, menu and Qt accelerators).
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&),
        # while keeping literal " & " (protected via a placeholder during the swap)
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # Wrap in a list: assigning a bare string to a list slice splices it
        # in character by character, producing one body line per character.
        document.body[i : j+1] = [argv]
 
 
 def revert_listpargs(document):
-    " Reverts listpreamble arguments to TeX-code "
+    """Reverts listpreamble arguments to TeX-code"""
     i = 0
     while True:
-        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i)
+        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
         if i == -1:
             return
         j = find_end_of_inset(document.body, i)
@@ -1320,7 +1489,6 @@ def revert_listpargs(document):
         parent = get_containing_layout(document.body, i)
         if parent == False:
             document.warning("Malformed LyX document: Can't find parent paragraph layout")
-            i += 1
             continue
         parbeg = parent[3]
         beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
@@ -1330,35 +1498,2467 @@ def revert_listpargs(document):
         subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                  "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
         document.body[parbeg : parbeg] = subst
-        i += 1
 
 
def revert_lformatinfo(document):
    """Revert layout format Info inset to static text."""

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded for now
        # Wrap in a list: assigning the bare string "69" to a list slice
        # would splice it in as two separate lines "6" and "9".
        document.body[i : j+1] = ["69"]
+
+
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Stack of active languages; nested \begin_layout scopes inherit the
    # language of the enclosing scope.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # Use slicing, not lstrip: lstrip('\\lang ') strips any of the
            # characters \, l, a, n, g and space, mangling language names
            # (e.g. "galician" -> "ician").
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' via a NUL placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
+
+
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed."""
    # Swapping '(' and ')' is its own inverse, so reverting is the same
    # operation; this wrapper only keeps the convert/revert naming scheme.
    convert_hebrew_parentheses(document)
+
+
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    # No babel name is available; the polyglossia name is "malayalam".
    revert_language(document, "malayalam", "", "malayalam")
+
+
def revert_soul(document):
    """Revert soul module flex insets to ERT."""

    # (inset name, soul command) pairs handled by this module.
    insets = [("Spaceletters", "\\so"),
              ("Strikethrough", "\\st"),
              ("Underline", "\\ul"),
              ("Highlight", "\\hl"),
              ("Capitalize", "\\caps")]

    # Load the soul package if any of the insets is actually used.
    for name, dummy in insets:
        if find_token(document.body, "\\begin_inset Flex %s" % name, 0) != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # \hl additionally requires the color package.
    if find_token(document.body, "\\begin_inset Flex Highlight", 0) != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    for name, command in insets:
        revert_flex_inset(document.body, name, command)
+
+
def revert_tablestyle(document):
    """Remove tablestyle params."""

    pos = find_token(document.header, "\\tablestyle")
    if pos != -1:
        document.header.pop(pos)
+
+
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings.

    Per-file encodings are removed from the bibtex inset.  With biblatex,
    the bibliography is re-created in the preamble via \\addbibresource
    (with bibencoding options), the inset is wrapped in a note, and an ERT
    \\printbibliography is inserted in its place.
    """

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Check if biblatex
    biblatex = engine in ["biblatex", "biblatex-natbib"]

    # NOTE: a large lyx->latex encoding-name map and an unused "bibresources"
    # list used to live here; both were dead code (the map's name was
    # immediately shadowed by the loop variable below) and have been removed.

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            continue
        file_encodings = get_quoted_value(document.body, "file_encodings", i, j)
        if not file_encodings:
            # Nothing to revert in this inset.
            i = j
            continue
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        if biblatex:
            # file_encodings is a tab-separated list of "<bibfile> <encoding>".
            enclist = file_encodings.split("\t")
            encmap = dict()
            for pp in enclist:
                ppp = pp.split(" ", 1)
                encmap[ppp[0]] = ppp[1]
            for bib in bibfiles:
                pr = "\\addbibresource"
                if bib in encmap.keys():
                    pr += "[bibencoding=" + encmap[bib] + "]"
                pr += "{" + bib + "}"
                add_to_preamble(document, [pr])
            # Insert ERT \\printbibliography and wrap bibtex inset to a Note
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # Skip over the inserted material (20 lines before the old inset,
            # 6 after; presumably 27 to land past the wrapping note -- verify).
            j += 27

        i = j
+
+
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming attributes from table cells."""

    # FIXME: Revert to TeX code?
    # Compile once: the pattern is loop-invariant.
    trim_re = re.compile(r' (bottom|top)line[lr]trim="true"')
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        if document.body[i].find('trim="') == -1:
            continue
        # remove trim option
        document.body[i] = trim_re.sub('', document.body[i])
+
+
# Local layout definition for the Flex:Ruby inset as it was embedded in
# documents before the "ruby" module existed.  convert_ruby_module() removes
# this local layout and loads the module instead; revert_ruby_module() puts
# it back.  NOTE(review): presumably these lines must match the in-document
# layout verbatim for del_local_layout() to find it -- verify.
ruby_inset_def = [
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r'  LyxType       charstyle',
    r'  LatexType     command',
    r'  LatexName     ruby',
    r'  HTMLTag       ruby',
    r'  HTMLAttr      ""',
    r'  HTMLInnerTag  rb',
    r'  HTMLInnerAttr ""',
    r'  BgColor       none',
    r'  LabelString   "Ruby"',
    r'  Decoration    Conglomerate',
    r'  Preamble',
    r'    \ifdefined\kanjiskip',
    r'      \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r'    \else \ifdefined\luatexversion',
    r'      \usepackage{luatexja-ruby}',
    r'    \else \ifdefined\XeTeXversion',
    r'      \usepackage{ruby}%',
    r'    \fi\fi\fi',
    r'    \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r'  EndPreamble',
    r'  Argument  post:1',
    r'    LabelString  "ruby text"',
    r'    MenuString  "Ruby Text|R"',
    r'    Tooltip    "Reading aid (ruby, furigana) for Chinese characters."',
    r'    Decoration  Conglomerate',
    r'    Font',
    r'      Size    tiny',
    r'    EndFont',
    r'    LabelFont',
    r'      Size    tiny',
    r'    EndFont',
    r'    Mandatory  1',
    r'  EndArgument',
    r'End',
]
+
+
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    removed = document.del_local_layout(ruby_inset_def)
    if removed:
        document.add_module("ruby")
+
+
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    removed = document.del_module("ruby")
    if removed:
        document.append_local_layout(ruby_inset_def)
+
+
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents."""
    language = get_value(document.header, "\\language")
    if not language.startswith("japanese"):
        return
    # Japanese utf8 variants that are folded into plain utf8, per language.
    variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    if variants.get(language) == get_value(document.header, "\\inputencoding"):
        document.set_parameter("inputencoding", "utf8")
+
+
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents."""
    if get_value(document.header, "\\inputencoding") != "utf8":
        return
    # Map each Japanese language back to its dedicated utf8 variant.
    variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    language = get_value(document.header, "\\language")
    if language in variants:
        document.set_parameter("inputencoding", variants[language])
+
+
def revert_lineno(document):
    """Replace lineno setting with user-preamble code."""

    # Read (and drop) the options header first so it is removed even when
    # line numbering is disabled.
    opts = get_quoted_value(document.header, "\\lineno_options", delete=True)
    enabled = get_bool_value(document.header, "\\use_lineno", delete=True)
    if not enabled:
        return
    pkgopts = "[" + opts + "]" if opts else ""
    add_to_preamble(document,
                    ["\\usepackage%s{lineno}" % pkgopts, "\\linenumbers"])
+
def convert_lineno(document):
    """Replace user-preamble code with native lineno support."""
    use_lineno = 0
    options = ""
    # Start the search at 1: the \usepackage line is expected on the
    # preceding preamble line, which must exist.
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del document.preamble[i-1:i+1]
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    header_lines = ["\\use_lineno %d" % use_lineno]
    if options:
        header_lines.append("\\lineno_options %s" % options)
    k = find_token(document.header, "\\index ")
    document.header[k:k] = header_lines
+
+
def convert_aaencoding(document):
    """Convert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    encoding = get_value(document.header, "\\inputencoding")
    if not encoding:
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
        return
    if encoding not in ("auto-legacy", "latin9"):
        return
    # Make the previously implicit latin9 option explicit.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options")
    if k == -1:
        document.header.insert(i, "\\options latin9")
    else:
        document.header[k] += ",latin9"
+
+
def revert_aaencoding(document):
    """Revert default document option due to encoding change in aa class."""

    if document.textclass != "aa":
        return

    i = find_token(document.header, "\\use_default_options true")
    if i == -1:
        return
    encoding = get_value(document.header, "\\inputencoding")
    if not encoding:
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
        return
    if encoding != "utf8":
        return
    # Make the previously implicit utf8 option explicit.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
    if k == -1:
        document.header.insert(i, "\\options utf8")
    else:
        document.header[k] += ",utf8"
+
+
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""

    # lyxname -> (babelname, polyglossianame)
    emulated = {"azerbaijani":    ("azerbaijani", ""),
                "bengali":        ("", "bengali"),
                "churchslavonic": ("", "churchslavonic"),
                "oldrussian":     ("", "russian"),
                "korean":         ("", "korean"),
               }

    found = set()
    if document.language in emulated:
        found.add(document.language)
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang", pos + 1)
        if pos == -1:
            break
        lyxname = get_value(document.body, "\\lang", pos)
        if lyxname in emulated:
            found.add(lyxname)

    # Korean is already supported via CJK, so leave as-is for Babel
    if "korean" in found:
        with_babel = (not get_bool_value(document.header, "\\use_non_tex_fonts")
                      or get_value(document.header, "\\language_package") == "babel")
        if with_babel:
            found.discard("korean")

    for lyxname in found:
        babelname, polyglossianame = emulated[lyxname]
        revert_language(document, lyxname, babelname, polyglossianame)
+
+
# Local layout definition for the deprecated Flex:Glosse linguistics inset
# (covington-based interlinear gloss, old single-translation version).
# NOTE(review): presumably appended as a local layout when reverting
# documents that use the newer gloss insets -- the revert routine is not
# visible in this chunk; verify against the rest of the file.
gloss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r'  LyXType               custom',
    r'  LabelString           "Gloss (old version)"',
    r'  MenuString            "Gloss (old version)"',
    r'  LatexType             environment',
    r'  LatexName             linggloss',
    r'  Decoration            minimalistic',
    r'  LabelFont',
    r'    Size                Small',
    r'  EndFont',
    r'  MultiPar              true',
    r'  CustomPars            false',
    r'  ForcePlain            true',
    r'  ParbreakIsNewline     true',
    r'  FreeSpacing           true',
    r'  Requires              covington',
    r'  Preamble',
    r'          \def\glosstr{}',
    r'          \@ifundefined{linggloss}{%',
    r'          \newenvironment{linggloss}[2][]{',
    r'             \def\glosstr{\glt #1}%',
    r'             \gll #2}',
    r'          {\glosstr\glend}}{}',
    r'  EndPreamble',
    r'  InToc                 true',
    r'  ResetsFont            true',
    r'  Argument 1',
    r'          Decoration    conglomerate',
    r'          LabelString   "Translation"',
    r'          MenuString    "Glosse Translation|s"',
    r'          Tooltip       "Add a translation for the glosse"',
    r'  EndArgument',
    r'End'
]
+
# Local InsetLayout emulating the deprecated three-line interlinear gloss
# inset (Flex Tri-Glosse) with covington's \glll ... \glt ... \glend
# commands.  Appended by convert_linggloss, removed by revert_linggloss.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r'  LyXType               custom',
    r'  LabelString           "Tri-Gloss (old version)"',
    r'  MenuString            "Tri-Gloss (old version)"',
    r'  LatexType             environment',
    r'  LatexName             lingglosss',
    r'  Decoration            minimalistic',
    r'  LabelFont',
    r'    Size                Small',
    r'  EndFont',
    r'  MultiPar              true',
    r'  CustomPars            false',
    r'  ForcePlain            true',
    r'  ParbreakIsNewline     true',
    r'  FreeSpacing           true',
    r'  InToc                 true',
    r'  Requires              covington',
    r'  Preamble',
    r'          \def\glosstr{}',
    r'          \@ifundefined{lingglosss}{%',
    r'          \newenvironment{lingglosss}[2][]{',
    r'              \def\glosstr{\glt #1}%',
    r'              \glll #2}',
    r'          {\glosstr\glend}}{}',
    r'  EndPreamble',
    r'  ResetsFont            true',
    r'  Argument 1',
    r'          Decoration    conglomerate',
    r'          LabelString   "Translation"',
    r'          MenuString    "Glosse Translation|s"',
    r'          Tooltip       "Add a translation for the glosse"',
    r'  EndArgument',
    r'End'
]
+
def convert_linggloss(document):
    """Move old ling glosses to local layout.

    Documents still containing the deprecated Flex Glosse / Flex
    Tri-Glosse insets get the corresponding InsetLayout definitions
    appended to their local layout so they keep compiling.
    """
    for token, layout in (('\\begin_inset Flex Glosse', gloss_inset_def),
                          ('\\begin_inset Flex Tri-Glosse', glosss_inset_def)):
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(layout)
+
def revert_linggloss(document):
    """Revert the new interlinear gloss insets (linguistics module) to ERT
    calling covington's \\digloss/\\trigloss commands, and drop the local
    layout definitions of the deprecated old-style gloss insets."""
    if not "linguistics" in document.get_module_list():
        return
    # No-ops if convert_linggloss never inserted these definitions.
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    # "Requires covington" is added to the local layout at most once.
    cov_req = False
    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i+1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue

            # Optional argument: the translation line.
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            optargcontent = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find optarg plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                # Content lines between the Plain Layout opener and the
                # inset end (excluding the closing \end_layout pair).
                optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # Mandatory argument post:1: first gloss line.
            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg1content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # Mandatory argument post:2: second gloss line.
            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg2content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # Mandatory argument post:3: third gloss line (tri-gloss only).
            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            marg3content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            cmd = "\\digloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"

            # Re-locate the (shifted) inset boundaries after the deletions
            # above and wrap the remaining content in ERT.
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_end_of_layout(document.body, beginPlain)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")

            # Replace the inset tail first so earlier indices stay valid.
            document.body[endPlain:endInset + 1] = postcontent
            # start > stop: empty-slice assignment *inserts* precontent
            # right after the Plain Layout line without removing anything.
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
            i = beginPlain
+
+
def revert_subexarg(document):
    """Revert linguistic subexamples with an optional argument to ERT.

    A run of consecutive Subexample layouts becomes a covington
    subexamples environment: the optional argument is converted to LaTeX
    and put in the \\begin{subexamples} line, each paragraph becomes a
    Standard layout prefixed with an ERT \\item.
    """

    if not "linguistics" in document.get_module_list():
        return

    # "Requires covington" is added to the local layout at most once.
    cov_req = False
    i = 0
    while True:
        i = find_token(document.body, "\\begin_layout Subexample", i+1)
        if i == -1:
            break
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Extend j over all directly following Subexample layouts.
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            j = find_end_of_layout(document.body, k)
            if j == -1:
                 # NOTE(review): this `continue` re-enters the inner loop
                 # with j == -1, which looks like a potential endless loop
                 # on malformed input — verify against parser_tools.
                 document.warning("Malformed LyX document: Can't find end of Subexample layout")
                 continue

        arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
        if arg == -1:
            continue

        endarg = find_end_of_inset(document.body, arg)
        optargcontent = ""
        argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
        if argbeginPlain == -1:
            document.warning("Malformed LyX document: Can't find optarg plain Layout")
            continue
        argendPlain = find_end_of_inset(document.body, argbeginPlain)
        # The argument may contain formatting, so convert it to LaTeX.
        optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])

        # remove Arg insets and paragraph, if it only contains this inset
        if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
            del document.body[arg - 1 : endarg + 4]
        else:
            del document.body[arg : endarg + 1]

        cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")

        # re-find end of layout (indices shifted by the deletion above)
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Subexample layout")
            continue
        # Convert all following Subexample paragraphs to Standard + \item.
        while True:
            # check for consecutive layouts
            k = find_token(document.body, "\\begin_layout", j)
            if k == -1 or document.body[k] != "\\begin_layout Subexample":
                break
            document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
            j = find_end_of_layout(document.body, k)
            if j == -1:
                 # NOTE(review): same suspicious continue-with-j==-1 as above.
                 document.warning("Malformed LyX document: Can't find end of Subexample layout")
                 continue

        endev = put_cmd_in_ert("\\end{subexamples}")

        # Close the environment after the last paragraph, then rewrite the
        # first Subexample layout as Standard + \begin{subexamples} + \item.
        document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
        document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
                + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
        if not cov_req:
            document.append_local_layout("Requires covington")
            cov_req = True
+
+
def revert_drs(document):
    """Revert DRS insets (linguistics) to ERT.

    Each DRS flex inset variant maps to a drs.sty command (\\drs, \\drs*,
    \\ifdrs, \\condrs, \\qdrs, \\negdrs, \\sdrs).  Pre-arguments become
    leading brace groups, post-arguments become trailing ones, and the
    inset body is wrapped in ERT braces.
    """

    if not "linguistics" in document.get_module_list():
        return

    # Preamble/layout requirements are added at most once.
    cov_req = False
    drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
             "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
             "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
             "\\begin_inset Flex SDRS"]
    for drs in drses:
        i = 0
        while True:
            i = find_token(document.body, drs, i+1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            # Check for arguments
            arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg1content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                # Content between the Plain Layout opener and the inset end
                # (excluding the closing \end_layout pair).
                prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end (the deletion above shifted the indices)
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            prearg2content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg1content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg2content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg3content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # re-find inset end
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of DRS inset")
                continue

            arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
            endarg = find_end_of_inset(document.body, arg)
            postarg4content = []
            if arg != -1:
                argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
                if argbeginPlain == -1:
                    document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
                    continue
                argendPlain = find_end_of_inset(document.body, argbeginPlain)
                postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]

                # remove Arg insets and paragraph, if it only contains this inset
                if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
                    del document.body[arg - 1 : endarg + 4]
                else:
                    del document.body[arg : endarg + 1]

            # The respective LaTeX command
            cmd = "\\drs"
            if drs == "\\begin_inset Flex DRS*":
                cmd = "\\drs*"
            elif drs == "\\begin_inset Flex IfThen-DRS":
                cmd = "\\ifdrs"
            elif drs == "\\begin_inset Flex Cond-DRS":
                cmd = "\\condrs"
            elif drs == "\\begin_inset Flex QDRS":
                cmd = "\\qdrs"
            elif drs == "\\begin_inset Flex NegDRS":
                cmd = "\\negdrs"
            elif drs == "\\begin_inset Flex SDRS":
                cmd = "\\sdrs"

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            precontent = put_cmd_in_ert(cmd)
            # First brace group is Argument 1; \sdrs takes a second one.
            precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
            if drs == "\\begin_inset Flex SDRS":
                precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
            precontent += put_cmd_in_ert("{")

            # Number of trailing groups depends on the command:
            # \ifdrs/\condrs take 2 extra, \condrs/\qdrs a 3rd, \qdrs a 4th.
            postcontent = []
            if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
                postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
                if cmd == "\\condrs" or cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
                if cmd == "\\qdrs":
                    postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
            else:
                postcontent = put_cmd_in_ert("}")

            # Replace the inset tail first so earlier indices stay valid.
            document.body[endPlain:endInset + 1] = postcontent
            # start > stop: empty-slice assignment *inserts* precontent
            # right after the Plain Layout line without removing anything.
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Provides covington 1")
                add_to_preamble(document, ["\\usepackage{drs,covington}"])
                cov_req = True
            i = beginPlain
+
+
def revert_babelfont(document):
    """Revert the use of \\babelfont to user preamble.

    With non-TeX fonts and the babel language package, font selection is
    done via \\babelfont.  Move the roman/sans/typewriter font choices
    (plus OSF and scaling) from the document header into equivalent
    \\babelfont preamble code and reset the header values to defaults.
    """

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    i = find_token(document.header, '\\language_package', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\language_package.")
        return
    if get_value(document.header, "\\language_package", 0) != "babel":
        return

    # check font settings
    # defaults
    roman = sans = typew = "default"
    osf = False
    sf_scale = tt_scale = 100.0

    j = find_token(document.header, "\\font_roman", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        roman = romanfont[2].strip('"')
        romanfont[2] = '"default"'
        document.header[j] = " ".join(romanfont)

    j = find_token(document.header, "\\font_sans", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        sans = sansfont[2].strip('"')
        sansfont[2] = '"default"'
        document.header[j] = " ".join(sansfont)

    j = find_token(document.header, "\\font_typewriter", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        typew = ttfont[2].strip('"')
        ttfont[2] = '"default"'
        document.header[j] = " ".join(ttfont)

    i = find_token(document.header, "\\font_osf", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
    else:
        osf = str2bool(get_value(document.header, "\\font_osf", i))

    j = find_token(document.header, "\\font_sf_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
    else:
        sfscale = document.header[j].split()
        val = sfscale[2]
        sfscale[2] = "100"
        document.header[j] = " ".join(sfscale)
        try:
            sf_scale = float(val)
        except ValueError:
            # val is always a str, so float() can only raise ValueError
            # (was a bare "except:" that swallowed everything).
            document.warning("Invalid font_sf_scale value: " + val)

    j = find_token(document.header, "\\font_tt_scale", 0)
    if j == -1:
        document.warning("Malformed LyX document: Missing \\font_tt_scale.")
    else:
        ttscale = document.header[j].split()
        val = ttscale[2]
        ttscale[2] = "100"
        document.header[j] = " ".join(ttscale)
        try:
            tt_scale = float(val)
        except ValueError:
            # see above: narrowed from a bare "except:"
            document.warning("Invalid font_tt_scale value: " + val)

    # set preamble stuff
    pretext = ['%% This document must be processed with xelatex or lualatex!']
    pretext.append('\\AtBeginDocument{%')
    if roman != "default":
        pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
    if sans != "default":
        sf = '\\babelfont{sf}['
        if sf_scale != 100.0:
            sf += 'Scale=' + str(sf_scale / 100.0) + ','
        sf += 'Mapping=tex-text]{' + sans + '}'
        pretext.append(sf)
    if typew != "default":
        tw = '\\babelfont{tt}'
        if tt_scale != 100.0:
            tw += '[Scale=' + str(tt_scale / 100.0) + ']'
        tw += '{' + typew + '}'
        pretext.append(tw)
    if osf:
        pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
    pretext.append('}')
    insert_to_preamble(document, pretext)
+
+
def revert_minionpro(document):
    """Revert native MinionPro font definition (with extra options) to LaTeX.

    If the roman font is minionpro and \\font_roman_opts is present,
    reset the header to the default font and load MinionPro (with the
    stored options, and "lf" unless OSF is requested) in the preamble.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    optsline = find_re(document.header, re.compile(r'(\\font_roman_opts)'), 0)
    if optsline == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    opts = re.findall(r'[^"\s]\S*|".+?"', document.header[optsline])[1].strip('"')

    romanline = find_token(document.header, "\\font_roman", 0)
    if romanline == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return

    # We need to use this regex since split() does not handle quote protection
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[romanline])
    if romanfont[1].strip('"') != "minionpro":
        return

    romanfont[1] = '"default"'
    document.header[romanline] = " ".join(romanfont)

    # OSF is MinionPro's default; request lining figures ("lf") otherwise.
    osfline = find_token(document.header, "\\font_osf true", 0)
    preamble = "\\usepackage["
    if osfline != -1:
        document.header[osfline] = "\\font_osf false"
    else:
        preamble += "lf,"
    preamble += opts + "]{MinionPro}"
    add_to_preamble(document, [preamble])
    del document.header[optsline]
+
+
def revert_font_opts(document):
    """Revert font options by outputting \\setxxxfont or \\babelfont to the preamble.

    For each of \\font_roman_opts, \\font_sans_opts and
    \\font_typewriter_opts: delete the header line and, with non-TeX
    fonts, reset the font to "default" and emit the equivalent fontspec
    (\\setmainfont/\\setsansfont/\\setmonofont) or babel (\\babelfont)
    preamble code, including the sans/typewriter scale factor.
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
    Babel = (get_value(document.header, "\\language_package") == "babel")

    # 1. Roman
    regexp = re.compile(r'(\\font_roman_opts)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # We need to use this regex since split() does not handle quote protection
        romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = romanopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r'(\\font_roman)')
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = romanfont[2].strip('"')
                romanfont[2] = '"default"'
                document.header[i] = " ".join(romanfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{rm}["
                    else:
                        preamble = "\\setmainfont["
                    preamble += opts
                    preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 2. Sans
    regexp = re.compile(r'(\\font_sans_opts)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # Keep the scale as a string: it is spliced into "Scale=0.<val>".
        scaleval = "100"
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = sfopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r'(\\font_sf_scale)')
            i = find_re(document.header, regexp, 0)
            if i != -1:
                scaleval = get_value(document.header, "\\font_sf_scale", i).split()[1]
            regexp = re.compile(r'(\\font_sans)')
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = sffont[2].strip('"')
                sffont[2] = '"default"'
                document.header[i] = " ".join(sffont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{sf}["
                    else:
                        preamble = "\\setsansfont["
                    preamble += opts
                    preamble += ","
                    # Fix: scaleval is a str; the old comparison with the
                    # int 100 was always true, wrongly emitting
                    # "Scale=0.100" (i.e. 0.1) for an unscaled font.
                    if scaleval != "100":
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])

    # 3. Typewriter
    regexp = re.compile(r'(\\font_typewriter_opts)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # Keep the scale as a string: it is spliced into "Scale=0.<val>".
        scaleval = "100"
        # We need to use this regex since split() does not handle quote protection
        ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        opts = ttopts[1].strip('"')
        del document.header[i]
        if NonTeXFonts:
            regexp = re.compile(r'(\\font_tt_scale)')
            i = find_re(document.header, regexp, 0)
            if i != -1:
                scaleval = get_value(document.header, "\\font_tt_scale", i).split()[1]
            regexp = re.compile(r'(\\font_typewriter)')
            i = find_re(document.header, regexp, 0)
            if i != -1:
                # We need to use this regex since split() does not handle quote protection
                ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
                font = ttfont[2].strip('"')
                ttfont[2] = '"default"'
                document.header[i] = " ".join(ttfont)
                if font != "default":
                    if Babel:
                        preamble = "\\babelfont{tt}["
                    else:
                        preamble = "\\setmonofont["
                    preamble += opts
                    preamble += ","
                    # Fix: same str-vs-int comparison bug as for sans.
                    if scaleval != "100":
                        preamble += "Scale=0."
                        preamble += scaleval
                        preamble += ","
                    preamble += "Mapping=tex-text]{"
                    preamble += font
                    preamble += "}"
                    add_to_preamble(document, [preamble])
+
+
def revert_plainNotoFonts_xopts(document):
    """Revert a plain Noto "complete font" setup (with extra options) to LaTeX.

    Only applies when TeX fonts are in use, roman is NotoSerif-TLF and
    both sans and typewriter are still at their defaults; the fonts are
    then provided by \\usepackage{noto} in the user preamble.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    y = find_token(document.header, "\\font_osf true", 0)
    osf = (y != -1)

    x = find_re(document.header, re.compile(r'(\\font_roman_opts)'), 0)
    if x == -1 and not osf:
        return

    opts = ""
    if x != -1:
        # split() does not respect quote protection, hence this regex
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])[1].strip('"')
    if osf:
        # old style figures become a package option
        opts = opts + ", osf" if opts != "" else "osf"

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        return

    # split() does not respect quote protection, hence this regex
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if romanfont[1].strip('"') != "NotoSerif-TLF":
        return

    # sans and typewriter must be untouched, otherwise this is not
    # the "complete font" case
    for token in ["\\font_sans", "\\font_typewriter"]:
        j = find_token(document.header, token, 0)
        if j == -1:
            return
        # split() does not respect quote protection, hence this regex
        spec = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
        if spec[1].strip('"') != "default":
            return

    # So we have noto as "complete font"
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)

    add_to_preamble(document, ["\\usepackage[" + opts + "]{noto}"])
    if osf:
        document.header[y] = "\\font_osf false"
    if x != -1:
        del document.header[x]
+
+
def revert_notoFonts_xopts(document):
    """Revert native (extended) Noto font definition (with extra options) to LaTeX.

    TeX fonts only; collected packages go to the user preamble.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = {}
    if revert_fonts(document, createFontMapping(['Noto']), fontmap, True):
        add_preamble_fonts(document, fontmap)
+
+
def revert_IBMFonts_xopts(document):
    """Revert native IBM font definition (with extra options) to LaTeX.

    TeX fonts only; collected packages go to the user preamble.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = dict()
    fm = createFontMapping(['IBM'])
    # (removed the unused local 'ft')
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
+
+
def revert_AdobeFonts_xopts(document):
    """Revert native Adobe font definition (with extra options) to LaTeX.

    TeX fonts only; collected packages go to the user preamble.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    fontmap = dict()
    fm = createFontMapping(['Adobe'])
    # (removed the unused local 'ft')
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
+
+
def convert_osf(document):
    """Convert the single \\font_osf param to the new per-family format.

    \\font_osf is renamed to \\font_roman_osf, and \\font_sans_osf /
    \\font_typewriter_osf are inserted next to it.  With TeX fonts, the
    sans/typewriter values are set to true only if the selected font is
    known to provide old style figures.
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    i = find_token(document.header, '\\font_osf', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_osf.")
        return

    # TeX sans/typewriter fonts that actually provide old style figures
    osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
    osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]

    osfval = str2bool(get_value(document.header, "\\font_osf", i))
    # rename in place so the roman value is carried over unchanged
    document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")

    if NonTeXFonts:
        # with non-TeX fonts, OSF is a feature of the roman font only
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
        return

    if osfval:
        # enable sans/typewriter OSF only for fonts that support it
        x = find_token(document.header, "\\font_sans", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            sf = sffont[1].strip('"')
            if sf in osfsf:
                document.header.insert(i, "\\font_sans_osf true")
            else:
                document.header.insert(i, "\\font_sans_osf false")

        x = find_token(document.header, "\\font_typewriter", 0)
        if x == -1:
            document.warning("Malformed LyX document: Missing \\font_typewriter.")
        else:
            # We need to use this regex since split() does not handle quote protection
            ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            tt = ttfont[1].strip('"')
            if tt in osftt:
                document.header.insert(i + 1, "\\font_typewriter_osf true")
            else:
                document.header.insert(i + 1, "\\font_typewriter_osf false")

    else:
        document.header.insert(i, "\\font_sans_osf false")
        document.header.insert(i + 1, "\\font_typewriter_osf false")
+
+
def revert_osf(document):
    """Revert the \\font_*_osf params to the single \\font_osf param.

    \\font_roman_osf is renamed back to \\font_osf in place; the sans and
    typewriter params are removed, and \\font_osf is forced to true if
    any of the three was true.
    """

    # (removed the unused 'NonTeXFonts' lookup)

    i = find_token(document.header, '\\font_roman_osf', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman_osf.")
        return

    osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
    document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")

    i = find_token(document.header, '\\font_sans_osf', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans_osf.")
        return

    # accumulate (the former plain assignment dropped the roman value;
    # the outcome was nevertheless identical, because the renamed
    # \font_osf line above already carries the roman value)
    osfval |= str2bool(get_value(document.header, "\\font_sans_osf", i))
    del document.header[i]

    i = find_token(document.header, '\\font_typewriter_osf', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
        return

    osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
    del document.header[i]

    if osfval:
        i = find_token(document.header, '\\font_osf', 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_osf.")
            return
        document.header[i] = "\\font_osf true"
+
+
def revert_texfontopts(document):
    """Revert native TeX font definitions (with extra options) to LaTeX.

    Handles biolinum (sans) and a fixed list of roman fonts: the font is
    reset to "default" in the header and the corresponding LaTeX package
    is loaded via the user preamble, with OSF / small caps / scaling
    translated to package options.
    """

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # Roman fonts that are selected through a plain LaTeX package
    rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]

    # First the sf (biolinum only)
    regexp = re.compile(r'(\\font_sans_opts)')
    x = find_re(document.header, regexp, 0)
    if x != -1:
        # We need to use this regex since split() does not handle quote protection
        sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        opts = sfopts[1].strip('"')
        i = find_token(document.header, "\\font_sans", 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing \\font_sans.")
        else:
            # We need to use this regex since split() does not handle quote protection
            sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
            sans = sffont[1].strip('"')
            if sans == "biolinum":
                sf_scale = 100.0
                sffont[1] = '"default"'
                document.header[i] = " ".join(sffont)
                osf = False
                j = find_token(document.header, "\\font_sans_osf true", 0)
                if j != -1:
                    osf = True
                k = find_token(document.header, "\\font_sf_scale", 0)
                if k == -1:
                    document.warning("Malformed LyX document: Missing \\font_sf_scale.")
                else:
                    # reset the scale in the header, keep the old value
                    sfscale = document.header[k].split()
                    val = sfscale[1]
                    sfscale[1] = "100"
                    document.header[k] = " ".join(sfscale)
                    try:
                        # float() can throw on malformed input
                        sf_scale = float(val)
                    # was a bare 'except:', which also swallowed
                    # KeyboardInterrupt/SystemExit
                    except ValueError:
                        document.warning("Invalid font_sf_scale value: " + val)
                preamble = "\\usepackage["
                if osf:
                    document.header[j] = "\\font_sans_osf false"
                    preamble += "osf,"
                if sf_scale != 100.0:
                    preamble += 'scaled=' + str(sf_scale / 100.0) + ','
                preamble += opts
                preamble += "]{biolinum}"
                add_to_preamble(document, [preamble])
                del document.header[x]

    # Now the roman fonts
    regexp = re.compile(r'(\\font_roman_opts)')
    x = find_re(document.header, regexp, 0)
    if x == -1:
        return

    # We need to use this regex since split() does not handle quote protection
    romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
    opts = romanopts[1].strip('"')

    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
        return
    # We need to use this regex since split() does not handle quote protection
    romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    roman = romanfont[1].strip('"')
    if roman not in rmfonts:
        return
    romanfont[1] = '"default"'
    document.header[i] = " ".join(romanfont)
    # some fonts are loaded through a differently named package
    package = roman
    if roman == "utopia":
        package = "fourier"
    elif roman == "palatino":
        package = "mathpazo"
    elif roman == "times":
        package = "mathptmx"
    elif roman == "xcharter":
        package = "XCharter"
    # translate old style figures to the respective package option
    osf = ""
    j = find_token(document.header, "\\font_roman_osf true", 0)
    if j != -1:
        if roman == "cochineal":
            osf = "proportional,osf,"
        elif roman == "utopia":
            osf = "oldstyle,"
        elif roman == "garamondx":
            osf = "osfI,"
        elif roman == "libertine":
            osf = "osf,"
        elif roman == "palatino":
            osf = "osf,"
        elif roman == "xcharter":
            osf = "osf,"
        document.header[j] = "\\font_roman_osf false"
    # small caps variant
    k = find_token(document.header, "\\font_sc true", 0)
    if k != -1:
        if roman == "utopia":
            osf += "expert,"
        if roman == "palatino" and osf == "":
            osf = "sc,"
        document.header[k] = "\\font_sc false"
    preamble = "\\usepackage["
    preamble += osf
    preamble += opts
    preamble += "]{" + package + "}"
    add_to_preamble(document, [preamble])
    del document.header[x]
+
+
def convert_CantarellFont(document):
    """Handle Cantarell font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Cantarell']), "oldstyle")
+
def revert_CantarellFont(document):
    """Revert native Cantarell font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(['Cantarell']), fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
def convert_ChivoFont(document):
    """Handle Chivo font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Chivo']), "oldstyle")
+
def revert_ChivoFont(document):
    """Revert native Chivo font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(['Chivo']), fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def convert_FiraFont(document):
    """Handle Fira font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Fira']), "lf")
+
def revert_FiraFont(document):
    """Revert native Fira font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(['Fira']), fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def convert_Semibolds(document):
    """Move semibold options to extraopts.

    The IBMPlex*Semibold pseudo-fonts are replaced by the respective base
    font plus a "semibold" entry in \\font_*_opts (the latter with TeX
    fonts only).  The three font slots share one helper; this also fixes
    the former copy/paste error that wrote the *sans* options (sfopts)
    into \\font_typewriter_opts.
    """

    NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")

    def _demote_semibold(fonttoken, sbfont, plainfont, optstoken, scaletoken):
        """Replace one semibold font; keep the weight in the opts param."""
        i = find_token(document.header, fonttoken, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % fonttoken)
            return
        # We need to use this regex since split() does not handle quote protection
        fontspec = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if fontspec[1].strip('"') != sbfont:
            return
        fontspec[1] = '"%s"' % plainfont
        document.header[i] = " ".join(fontspec)

        if NonTeXFonts == False:
            x = find_re(document.header, re.compile("(%s)" % re.escape(optstoken)), 0)
            if x == -1:
                # Sensible place to insert the new tag
                fo = find_token(document.header, scaletoken)
                if fo == -1:
                    document.warning("Malformed LyX document! Missing %s" % scaletoken)
                else:
                    document.header.insert(fo, optstoken + " \"semibold\"")
            else:
                # We need to use this regex since split() does not handle quote protection
                oldopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
                document.header[x] = optstoken + " \"semibold, " + oldopts[1].strip('"') + "\""

    _demote_semibold("\\font_roman", "IBMPlexSerifSemibold", "IBMPlexSerif",
                     "\\font_roman_opts", "\\font_sf_scale")
    _demote_semibold("\\font_sans", "IBMPlexSansSemibold", "IBMPlexSans",
                     "\\font_sans_opts", "\\font_sf_scale")
    _demote_semibold("\\font_typewriter", "IBMPlexMonoSemibold", "IBMPlexMono",
                     "\\font_typewriter_opts", "\\font_tt_scale")
+
+
def convert_NotoRegulars(document):
    """Merge diverse noto regular fonts."""

    # Map the obsolete plain TLF variants to the respective Regular fonts
    conversions = [
        ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
        ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
        ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular"),
    ]
    for token, oldfont, newfont in conversions:
        i = find_token(document.header, token, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % token)
            continue
        # We need to use this regex since split() does not handle quote protection
        fontspec = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if fontspec[1].strip('"') == oldfont:
            fontspec[1] = '"%s"' % newfont
            document.header[i] = " ".join(fontspec)
+
+
def convert_CrimsonProFont(document):
    """Handle CrimsonPro font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['CrimsonPro']), "lf")
+
def revert_CrimsonProFont(document):
    """Revert native CrimsonPro font definition to LaTeX (TeX fonts only)."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    if revert_fonts(document, createFontMapping(['CrimsonPro']), fontmap, False, True):
        add_preamble_fonts(document, fontmap)
+
+
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options.

    Non-standard paper sizes are moved from the \\papersize header to
    the document class options (unless geometry is in use).
    """

    # Fix: the original tested document.textclass[:2] != "scr" — a
    # two-character slice can never equal "scr", so KOMA classes were
    # always skipped.  The sibling convert_pagesizes uses [:3].
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry handles the page size itself
        return

    # Paper sizes the classes support natively
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
+
+
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA to options.

    For class-specific (non-standard) paper sizes, switch geometry on so
    the requested size is actually honored.
    """

    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry already in use, nothing to convert
        return

    # Paper sizes the classes support natively
    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry
        # Fix: the original wrote to document.header[1], clobbering an
        # unrelated header line instead of the \use_geometry line found.
        document.header[i] = "\\use_geometry true"
+
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA to class options."""

    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    fontsize = get_value(document.header, "\\paperfontsize", i)
    if fontsize in ["default", "10", "11", "12"]:
        # supported natively, nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    # pass the size on as a class option
    opt = "fontsize=" + fontsize
    j = find_token(document.header, "\\options", 0)
    if j != -1:
        document.header[j] = document.header[j] + "," + opt
        return
    j = find_token(document.header, "\\textclass", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\textclass header.")
        return
    document.header.insert(j, "\\options " + opt)
+
+
def revert_dupqualicites(document):
    """Revert qualified citation list commands with duplicate keys to ERT.

    LyX 2.3 only supports qualified citation lists with unique keys, so
    insets that cite the same key more than once are rewritten as raw
    biblatex multicite commands (e.g. \\cites(pre)(post)[a][b]{key}...).
    """

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # only the biblatex engines support qualified lists
    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        # only commands that support qualified lists are affected
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            i = j + 1
            continue

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicates.
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        # The pretextlist is tab-separated; each entry is "<key> <text>".
        # Build a map key -> tab-joined texts (one per occurrence of key).
        prelist = pretexts.split("\t")
        premap = dict()
        for pp in prelist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in premap:
                premap[ppp[0]] = premap[ppp[0]] + "\t" + val
            else:
                premap[ppp[0]] = val
        # Same for the posttextlist.
        postlist = posttexts.split("\t")
        postmap = dict()
        num = 1
        for pp in postlist:
            ppp = pp.split(" ", 1)
            val = ""
            if len(ppp) > 1:
                val = ppp[1]
            else:
                val = ""
            if ppp[0] in postmap:
                postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
            else:
                postmap[ppp[0]] = val
        # Replace known new commands with ERT
        # parentheses in the global pre/post notes must be braced, since
        # they would otherwise terminate the (pre)(post) arguments
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # a pre note alone still needs the empty post argument
            res += "()"
        # emit [prenote][postnote]{key} per key, consuming one stored
        # text per occurrence of a duplicated key
        for kk in keys:
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # a pre note alone still needs the empty post argument
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
+
+
def convert_pagesizenames(document):
    """Convert LyX page size names (strip the trailing "paper")."""

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    oldnames = ["letterpaper", "legalpaper", "executivepaper"]
    oldnames += ["%s%dpaper" % (series, size)
                 for series in ("a", "b", "c") for size in range(7)]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        document.header[i] = "\\papersize " + val.replace("paper", "")
+
def revert_pagesizenames(document):
    """Revert LyX page size names (re-append the trailing "paper")."""

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    newnames = ["letter", "legal", "executive"]
    newnames += ["%s%d" % (series, size)
                 for series in ("a", "b", "c") for size in range(7)]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        document.header[i] = "\\papersize " + val + "paper"
+
+
def revert_theendnotes(document):
    """Revert native support of \\theendnotes to TeX-code."""

    modules = document.get_module_list()
    if "endnotes" not in modules and "foottoend" not in modules:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        # replace the list inset with the raw LaTeX command
        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
+
+
def revert_enotez(document):
    """Revert native support of the enotez package to TeX-code."""

    modules = document.get_module_list()
    if "enotez" not in modules and "foottoenotez" not in modules:
        return

    # track whether we need the package in the preamble
    used = find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        used = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if used:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
+
+
def revert_memoir_endnotes(document):
    """Revert native support of memoir endnotes to TeX-code."""

    if document.textclass != "memoir":
        return

    # with any of the endnotes modules loaded, the insets map to
    # \endnote rather than memoir's own \pagenote
    modules = document.get_module_list()
    if ("enotez" in modules or "foottoenotez" in modules
            or "endnotes" in modules or "foottoend" in modules):
        encommand = "\\endnote"
    else:
        encommand = "\\pagenote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            ert = put_cmd_in_ert("\\printpagenotes*")
        else:
            ert = put_cmd_in_ert("\\printpagenotes")
        document.body[i : j + 1] = ert
        add_to_preamble(document, ["\\makepagenote"])
+
+
def revert_totalheight(document):
    """Revert the graphics "height" parameter to its pre-2.4 meaning.

    In LyX 2.4 "height" denotes the LaTeX totalheight (height + depth).
    For older formats, rewrite the height parameter as a
    "special totalheight=..." option and restore a plain height from a
    pre-existing "height=" special entry, converting relative LaTeX
    units back to LyX's percent notation.
    """

    relative_heights = {
        "\\textwidth" : "text%",
        "\\columnwidth" : "col%",
        "\\paperwidth" : "page%",
        "\\linewidth" : "line%",
        "\\textheight" : "theight%",
        "\\paperheight" : "pheight%",
        "\\baselineskip " : "baselineskip%"
    }
    # Hoisted out of the loop: the patterns are loop-invariant.
    rx_special = re.compile(r'\s*special\s*(\S+)$')
    rx_value = re.compile(r'(\d*\.*\d+)(\S+)$')
    rx_height = re.compile(r'(\s*height\s*)(\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract a "height=" entry from the special options, if any.
        k = find_re(document.body, rx_special, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx_special.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:7] == "height=":
                    oldheight = spc.split('=')[1]
                    ms = rx_value.search(oldheight)
                    if ms:
                        oldval = ms.group(1)
                        oldunit = ms.group(2)
                        # Normalize a leading-dot value (".5" -> "0.5").
                        # The original tested oldval[1], which mangled
                        # "0.5" into "00.5" and raised IndexError for
                        # single-digit values like "5".
                        if oldval[0] == ".":
                            oldval = "0" + oldval
                        if oldunit in relative_heights:
                            oldval = str(float(oldval) * 100)
                            oldunit = relative_heights[oldunit]
                            oldheight = oldval + oldunit
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        kk = find_re(document.body, rx_height, i, j)
        if kk != -1:
            m = rx_height.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    # Fold the height value into the existing special line.
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    # Insert *after* the height line so kk still points at
                    # the height line below; the original inserted at kk,
                    # so the subsequent del removed the new special line
                    # again (a net no-op).
                    document.body.insert(kk + 1, "\tspecial totalheight=" + val)
                if oldheight != "":
                    document.body[kk] = m.group(1) + oldheight
                else:
                    del document.body[kk]
        elif oldheight != "":
            # No height line: turn the extracted special height into one.
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + oldheight)
            else:
                document.body[k] = "\theight " + oldheight
        i = j + 1
+
+
def convert_totalheight(document):
    """Convert the graphics "height" parameter to the new totalheight meaning.

    From LyX 2.4 on, "height" denotes the LaTeX totalheight.  Preserve
    the old output by moving the height value into a
    "special height=..." option and promoting any "totalheight=" special
    entry to the height parameter, converting LyX percent units to
    LaTeX lengths.
    """

    relative_heights = {
        "text%" : "\\textwidth",
        "col%"  : "\\columnwidth",
        "page%" : "\\paperwidth",
        "line%" : "\\linewidth",
        "theight%" : "\\textheight",
        "pheight%" : "\\paperheight",
        "baselineskip%" : "\\baselineskip"
    }
    # Hoisted out of the loop: the patterns are loop-invariant.
    rx_special = re.compile(r'\s*special\s*(\S+)$')
    rx_height = re.compile(r'(\s*height\s*)(\d+)(\S+)$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract a "totalheight=" entry from the special options, if any.
        k = find_re(document.body, rx_special, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx_special.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        kk = find_re(document.body, rx_height, i, j)
        if kk != -1:
            m = rx_height.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                unit = m.group(3)
                if unit in relative_heights:
                    val = str(float(val) / 100)
                    unit = relative_heights[unit]
                if k != -1:
                    # Fold the old height into the existing special line.
                    # Keep the unit in all cases; the original dropped it
                    # when no other special options remained.
                    val = val + unit
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    document.body.insert(kk + 1, "\tspecial height=" + val + unit)
                if newheight != "":
                    document.body[kk] = m.group(1) + newheight
                else:
                    del document.body[kk]
        elif newheight != "":
            # No height line: promote the totalheight special to a height
            # parameter and drop it from the special line.  The original
            # only inserted the height line and left the stale
            # "totalheight=" entry in the special options.
            if special != "":
                document.body[k] = "\tspecial " + special
                document.body.insert(k, "\theight " + newheight)
            else:
                document.body[k] = "\theight " + newheight
        i = j + 1
+
+
def convert_changebars(document):
    """Replace the changebars module with the native \\change_bars param."""

    if "changebars" not in document.get_module_list():
        return

    pos = find_token(document.header, "\\output_changes", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        document.del_module("changebars")
        return

    document.header.insert(pos, "\\change_bars true")
    document.del_module("changebars")
+
+
def revert_changebars(document):
    """Turn the native \\change_bars param back into the changebars module."""

    pos = find_token(document.header, "\\change_bars", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    if get_value(document.header, "\\change_bars", pos) == "true":
        document.add_module("changebars")

    del document.header[pos]
+
+
def convert_postpone_fragile(document):
    """Add a false \\postpone_fragile_content buffer param."""

    pos = find_token(document.header, "\\output_changes", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return
    # Old documents keep the previous behavior, hence "false" (see #2154).
    document.header.insert(pos, "\\postpone_fragile_content false")
+
+
def revert_postpone_fragile(document):
    """Remove the \\postpone_fragile_content buffer param."""

    pos = find_token(document.header, "\\postpone_fragile_content", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[pos]
+
+
def revert_colrow_tracking(document):
    """Strip change-tracking attributes from tabular columns and rows."""
    # Compile once; the same patterns are applied to every tabular line.
    col_re = re.compile('^<column.*change="([^"]+)".*>$')
    row_re = re.compile('^<row.*change="([^"]+)".*>$')
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            line = document.body[k]
            for pattern in (col_re, row_re):
                m = pattern.search(line)
                if m:
                    line = line.replace(' change="' + m.group(1) + '"', '')
            document.body[k] = line
+
+
def convert_counter_maintenance(document):
    """Convert \\maintain_unincluded_children from a boolean to a tristate."""

    pos = find_token(document.header, "\\maintain_unincluded_children", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    old = get_value(document.header, "\\maintain_unincluded_children", pos)
    # "true" maps to the new "strict" mode, anything else to "no".
    new = "strict" if old == "true" else "no"
    document.header[pos] = "\\maintain_unincluded_children " + new
+
+
def revert_counter_maintenance(document):
    """Revert \\maintain_unincluded_children from a tristate to a boolean."""

    pos = find_token(document.header, "\\maintain_unincluded_children", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    old = get_value(document.header, "\\maintain_unincluded_children", pos)
    # Only "no" becomes "false"; any other value used to mean "true".
    new = "false" if old == "no" else "true"
    document.header[pos] = "\\maintain_unincluded_children " + new
+
+
def revert_counter_inset(document):
    """Revert counter insets to ERT, where possible.

    Counter commands become the corresponding LaTeX
    \\setcounter/\\addtocounter calls; save/restore use auxiliary
    LyXSave<counter> counters, which are declared in the preamble.
    """
    i = 0
    needed_counters = {}
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            # The inset was deleted, so the next candidate may already sit
            # at index i; the original jumped to the stale j + 1 and could
            # skip over following insets.
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        # (A stray debug document.warning(cmd) was removed here.)
        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        ert = ""
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
            else:
                ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
        else:
            # Report the unknown *command*; the original printed the
            # counter name (cnt) here instead of cmd.
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cmd, i))

        if ert:
            document.body[i : j + 1] = ert
        i += 1

    # Declare the auxiliary save counters in the preamble.
    pretext = ["\\newcounter{LyXSave%s}" % cnt for cnt in needed_counters]
    if pretext:
        add_to_preamble(document, pretext)
+
+
def revert_ams_spaces(document):
    """Revert medspace and thickspace InsetSpace to their TeX-code forms."""
    found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        # Loop over all occurrences; the original only reverted the first
        # inset of each kind and left any further ones behind.
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of space inset")
                i += 1
                continue
            document.body[i : end + 1] = put_cmd_in_ert(inset)
            found = True

    if found:
        # Load amsmath in the preamble if not already loaded.
        if find_token(document.header, "\\use_package amsmath 2", 0) == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
+
+
def convert_parskip(document):
    """Move old parskip settings to the preamble."""

    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return

    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", skip)
    # Named skips map to their LaTeX length; anything else is a literal length.
    if val in ("smallskip", "medskip", "bigskip"):
        skipval = "\\" + val + "amount"
    else:
        skipval = val

    add_to_preamble(document, ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"])

    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
+
+
def revert_parskip(document):
    """Revert the new parskip settings to a preamble parskip package load."""

    sep = find_token(document.header, "\\paragraph_separation skip", 0)
    if sep == -1:
        return

    skip = find_token(document.header, "\\defskip", 0)
    if skip == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", skip)
    if val in ("smallskip", "medskip", "bigskip"):
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val == "halfline":
        # halfline is the parskip package default: no option needed.
        skipval = ""
    else:
        skipval = "[skip={" + val + "}]"

    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])

    document.header[sep] = "\\paragraph_separation indent"
    document.header[skip] = "\\paragraph_indentation default"
+
+
def revert_line_vspaces(document):
    """Revert fullline and halfline vspaces to TeX code.

    Fix: process *all* occurrences of each vspace kind; the original
    only reverted the first one of each.
    """
    # Ordered so the starred variants are matched before their prefixes
    # (find_token matches the token as a line prefix).
    insets = [
        ("fullline*", "\\vspace*{\\baselineskip}"),
        ("fullline", "\\vspace{\\baselineskip}"),
        ("halfline*", "\\vspace*{0.5\\baselineskip}"),
        ("halfline", "\\vspace{0.5\\baselineskip}"),
        ]
    for inset, cmd in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of VSpace inset")
                i += 1
                continue
            document.body[i : end + 1] = put_cmd_in_ert(cmd)
+
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    # NOTE(review): the mapping key case differs from
    # revert_libertinus_rm_fonts ('Libertinus' vs 'libertinus') — confirm
    # which spelling createFontMapping expects.
    convert_fonts(document, createFontMapping(['Libertinus']))
+
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX."""

    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fontmap = {}
    mapping = createFontMapping(['libertinus'])
    if revert_fonts(document, mapping, fontmap):
        add_preamble_fonts(document, fontmap)
+
def revert_libertinus_sftt_fonts(document):
    """Revert Libertinus sans and typewriter font definitions to LaTeX.

    Fix: the scale is compared against the float 100.0; the original
    compared against the *string* "100.0", which is never equal to a
    float, so the \\*@scale command was emitted even for 100%.
    """

    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return

    def read_and_reset_scale(token):
        """Return the scale value of `token` as a float and reset it to 100."""
        scale = 100.0
        idx = find_token(document.header, token, 0)
        if idx == -1:
            document.warning("Malformed LyX document: Missing %s." % token)
        else:
            parts = document.header[idx].split()
            val = parts[1]
            parts[1] = "100"
            document.header[idx] = " ".join(parts)
            try:
                # float() can throw
                scale = float(val)
            except ValueError:
                document.warning("Invalid %s value: %s" % (token[1:], val))
        return scale

    # first sf font
    i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
    if i != -1:
        j = find_token(document.header, "\\font_sans_osf true", 0)
        if j != -1:
            add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
            document.header[j] = "\\font_sans_osf false"
        else:
            add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
        document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
        sf_scale = read_and_reset_scale("\\font_sf_scale")
        if sf_scale != 100.0:
            add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])

    # now tt font
    i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
    if i != -1:
        add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
        document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
        tt_scale = read_and_reset_scale("\\font_tt_scale")
        if tt_scale != 100.0:
            add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
+
+
def revert_docbook_table_output(document):
    """Drop the \\docbook_table_output header param, if present."""
    pos = find_token(document.header, '\\docbook_table_output')
    if pos == -1:
        return
    del document.header[pos]
 
 
 ##
@@ -1383,17 +3983,84 @@ convert = [
            [558, [removeFrontMatterStyles]],
            [559, []],
            [560, []],
-           [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
+           [561, [convert_latexFonts]],  # Handle dejavu, ibmplex fonts in GUI
            [562, []],
            [563, []],
-           [564, []]
+           [564, []],
+           [565, [convert_AdobeFonts]],  # Handle adobe fonts in GUI
+           [566, [convert_hebrew_parentheses]],
+           [567, []],
+           [568, []],
+           [569, []],
+           [570, []],
+           [571, []],
+           [572, [convert_notoFonts]],  # Added options thin, light, extralight for Noto
+           [573, [convert_inputencoding_namechange]],
+           [574, [convert_ruby_module, convert_utf8_japanese]],
+           [575, [convert_lineno, convert_aaencoding]],
+           [576, []],
+           [577, [convert_linggloss]],
+           [578, []],
+           [579, []],
+           [580, []],
+           [581, [convert_osf]],
+           [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
+           [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
+           [584, []],
+           [585, [convert_pagesizes]],
+           [586, []],
+           [587, [convert_pagesizenames]],
+           [588, []],
+           [589, [convert_totalheight]],
+           [590, [convert_changebars]],
+           [591, [convert_postpone_fragile]],
+           [592, []],
+           [593, [convert_counter_maintenance]],
+           [594, []],
+           [595, []],
+           [596, [convert_parskip]],
+           [597, [convert_libertinus_rm_fonts]],
+           [598, []]
           ]
 
-revert =  [
+revert =  [[597, [revert_docbook_table_output]],
+           [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
+           [595, [revert_parskip,revert_line_vspaces]],
+           [594, [revert_ams_spaces]],
+           [593, [revert_counter_inset]],
+           [592, [revert_counter_maintenance]],
+           [591, [revert_colrow_tracking]],
+           [590, [revert_postpone_fragile]],
+           [589, [revert_changebars]],
+           [588, [revert_totalheight]],
+           [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
+           [586, [revert_pagesizenames]],
+           [585, [revert_dupqualicites]],
+           [584, [revert_pagesizes,revert_komafontsizes]],
+           [583, [revert_vcsinfo_rev_abbrev]],
+           [582, [revert_ChivoFont,revert_CrimsonProFont]],
+           [581, [revert_CantarellFont,revert_FiraFont]],
+           [580, [revert_texfontopts,revert_osf]],
+           [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
+           [578, [revert_babelfont]],
+           [577, [revert_drs]],
+           [576, [revert_linggloss, revert_subexarg]],
+           [575, [revert_new_languages]],
+           [574, [revert_lineno, revert_aaencoding]],
+           [573, [revert_ruby_module, revert_utf8_japanese]],
+           [572, [revert_inputencoding_namechange]],
+           [571, [revert_notoFonts]],
+           [570, [revert_cmidruletrimming]],
+           [569, [revert_bibfileencodings]],
+           [568, [revert_tablestyle]],
+           [567, [revert_soul]],
+           [566, [revert_malayalam]],
+           [565, [revert_hebrew_parentheses]],
+           [564, [revert_AdobeFonts]],
            [563, [revert_lformatinfo]],
            [562, [revert_listpargs]],
            [561, [revert_l7ninfo]],
-           [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
+           [560, [revert_latexFonts]],  # Handle dejavu, ibmplex fonts in user preamble
            [559, [revert_timeinfo, revert_namenoextinfo]],
            [558, [revert_dateinfo]],
            [557, [addFrontMatterStyles]],
@@ -1405,7 +4072,7 @@ revert =  [
            [551, [revert_floatpclass, revert_floatalignment]],
            [550, [revert_nospellcheck]],
            [549, [revert_fontenc]],
-           [548, []],# dummy format change
+           [548, []],  # dummy format change
            [547, [revert_lscape]],
            [546, [revert_xcharter]],
            [545, [revert_paratype]],