]> git.lyx.org Git - lyx.git/blobdiff - lib/lyx2lyx/lyx_2_4.py
lyx2lyx: correct placement of (new) local layout
[lyx.git] / lib / lyx2lyx / lyx_2_4.py
index e7962c64b4ac264e0f784784d2471ae747adf1d0..8436d9a84c1e57e3d5de5814b4c4a94f68101405 100644 (file)
@@ -642,7 +642,7 @@ def revert_lscape(document):
     while True:
         i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
         if i == -1:
-            return
+            break
         j = find_end_of_inset(document.body, i)
         if j == -1:
             document.warning("Malformed LyX document: Can't find end of Landscape inset")
@@ -657,6 +657,7 @@ def revert_lscape(document):
             document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
 
         add_to_preamble(document, ["\\usepackage{pdflscape}"])
+    document.del_module("landscape")
 
 
 def convert_fontenc(document):
@@ -1056,6 +1057,26 @@ def revert_vcsinfo(document):
         document.body[tp] = "type \"buffer\""
         document.body[arg] = "arg \"vcs-" + argv + "\""
 
def revert_vcsinfo_rev_abbrev(document):
    " Convert abbreviated revisions to regular revisions. "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        # find_end_of_inset expects the line of the \begin_inset itself
        # (the original passed i+1, which can mis-detect nested insets).
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        if tp == -1:
            # not a well-formed Info inset; skip it
            continue
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, 'arg', i, j)
        if arg == -1:
            continue
        argv = get_quoted_value(document.body, "arg", arg)
        if argv == "revision-abbrev":
            document.body[arg] = "arg \"revision\""
 
 def revert_dateinfo(document):
     " Revert date info insets to static text. "
@@ -2001,7 +2022,7 @@ def revert_linggloss(document):
 
             beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
             endInset = find_end_of_inset(document.body, i)
-            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
+            endPlain = find_end_of_layout(document.body, beginPlain)
             precontent = put_cmd_in_ert(cmd)
             if len(optargcontent) > 0:
                 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
@@ -3102,6 +3123,464 @@ def revert_CrimsonProFont(document):
         if revert_fonts(document, fm, fontmap, False, True):
             add_preamble_fonts(document, fontmap)
 
+
def revert_pagesizes(document):
    " Revert new page sizes in memoir and KOMA to options "

    # Only memoir and the KOMA-script classes ("scr*") know these sizes.
    # NOTE: the original compared textclass[:2] (two chars) against the
    # three-char string "scr", which can never match, so KOMA documents
    # were never reverted. Use [:3] as in convert_pagesizes.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry overrides the class paper size; nothing to revert
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    # Pass the paper size as a document class option instead.
    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
+
+
def convert_pagesizes(document):
    " Convert to new page sizes in memoir and KOMA to options "

    # Only memoir and the KOMA-script classes ("scr*") are affected.
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        # geometry already in use; nothing to convert
        return

    defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry.
        # NOTE: the original wrote to document.header[1] (a fixed index),
        # clobbering an unrelated header line instead of the located one.
        document.header[i] = "\\use_geometry true"
+
def revert_komafontsizes(document):
    " Revert new font sizes in KOMA to options "

    # Only the KOMA-script classes ("scr*") support these sizes.
    if document.textclass[:3] != "scr":
        return

    pos = find_token(document.header, "\\paperfontsize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    size = get_value(document.header, "\\paperfontsize", pos)
    if size in ["default", "10", "11", "12"]:
        # these sizes were already supported; nothing to revert
        return

    document.header[pos] = "\\paperfontsize default"

    # Pass the size as a class option instead.
    option = "fontsize=" + size

    opos = find_token(document.header, "\\options", 0)
    if opos != -1:
        document.header[opos] = document.header[opos] + "," + option
        return
    tpos = find_token(document.header, "\\textclass", 0)
    if tpos == -1:
        document.warning("Malformed LyX document! Missing \\textclass header.")
        return
    document.header.insert(tpos, "\\options " + option)
+
+
def revert_dupqualicites(document):
    " Revert qualified citation list commands with duplicate keys to ERT "

    # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
    # we need to revert those with multiple uses of the same key.

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Qualified citation lists only exist with biblatex.
    if not engine in ["biblatex", "biblatex-natbib"]:
        return

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    def text_map(texts):
        # Build a map of citation key -> tab-separated notes from the
        # "key note" entries of a pretextlist/posttextlist value.
        # Repeated keys accumulate their notes in order of appearance.
        tmap = dict()
        for entry in texts.split("\t"):
            parts = entry.split(" ", 1)
            val = parts[1] if len(parts) > 1 else ""
            if parts[0] in tmap:
                tmap[parts[0]] = tmap[parts[0]] + "\t" + val
            else:
                tmap[parts[0]] = val
        return tmap

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue

        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue

        cmd = get_value(document.body, "LatexCommand", k)
        if not cmd in list(ql_citations.keys()):
            i = j + 1
            continue

        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            i = j + 1
            continue

        keys = key.split(",")
        ukeys = list(set(keys))
        if len(keys) == len(ukeys):
            # no duplicates; LyX 2.3 handles this natively.
            i = j + 1
            continue

        pretexts = get_quoted_value(document.body, "pretextlist", pres)
        posttexts = get_quoted_value(document.body, "posttextlist", posts)

        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        premap = text_map(pretexts)
        postmap = text_map(posttexts)

        # Replace known new commands with ERT.
        # Parentheses in the global pre-/postnote must be brace-protected.
        if "(" in pre or ")" in pre:
            pre = "{" + pre + "}"
        if "(" in post or ")" in post:
            post = "{" + post + "}"
        res = "\\" + ql_citations[cmd]
        if pre:
            res += "(" + pre + ")"
        if post:
            res += "(" + post + ")"
        elif pre:
            # a single (...) argument would be taken as the postnote
            res += "()"
        for kk in keys:
            # Duplicate keys consume their stored notes one at a time,
            # in order of appearance.
            if premap.get(kk, "") != "":
                akeys = premap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    premap[kk] = "\t".join(akeys[1:])
                else:
                    premap[kk] = ""
            if postmap.get(kk, "") != "":
                akeys = postmap[kk].split("\t", 1)
                akey = akeys[0]
                if akey != "":
                    res += "[" + akey + "]"
                if len(akeys) > 1:
                    postmap[kk] = "\t".join(akeys[1:])
                else:
                    postmap[kk] = ""
            elif premap.get(kk, "") != "":
                # a prenote without a postnote needs an empty [] placeholder
                res += "[]"
            res += "{" + kk + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
+
+
def convert_pagesizenames(document):
    " Convert LyX page sizes names "

    # Strip the trailing "paper" from the classic LaTeX paper-size names
    # (e.g. "a4paper" -> "a4"); any other value is left untouched.
    pos = find_token(document.header, "\\papersize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    legacy = {"letterpaper", "legalpaper", "executivepaper"}
    legacy.update(series + str(num) + "paper"
                  for series in "abc" for num in range(7))
    size = get_value(document.header, "\\papersize", pos)
    if size in legacy:
        document.header[pos] = "\\papersize " + size.replace("paper", "")
+
def revert_pagesizenames(document):
    " Revert LyX page size names to their LaTeX-style names "

    # Re-append "paper" to the shortened size names introduced by
    # convert_pagesizenames (e.g. "a4" -> "a4paper").
    pos = find_token(document.header, "\\papersize", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    short = {"letter", "legal", "executive"}
    short.update(series + str(num) for series in "abc" for num in range(7))
    size = get_value(document.header, "\\papersize", pos)
    if size in short:
        document.header[pos] = "\\papersize " + size + "paper"
+
+
def revert_theendnotes(document):
    " Reverts native support of \\theendnotes to TeX-code "

    modules = document.get_module_list()
    if "endnotes" not in modules and "foottoend" not in modules:
        return

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        # Replace the whole inset by the raw LaTeX command.
        document.body[pos : end + 1] = put_cmd_in_ert("\\theendnotes")
+
+
def revert_enotez(document):
    " Reverts native support of enotez package to TeX-code "

    modules = document.get_module_list()
    if "enotez" not in modules and "foottoenotez" not in modules:
        return

    # Track whether any endnote construct is actually present, so the
    # enotez package is only added to the preamble when needed.
    used = find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList endnote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        used = True
        document.body[pos : end + 1] = put_cmd_in_ert("\\printendnotes")

    if used:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
+
+
def revert_memoir_endnotes(document):
    " Reverts native support of memoir endnotes to TeX-code "

    if document.textclass != "memoir":
        return

    # With one of the endnote modules loaded, \endnote is provided by the
    # module's package; otherwise fall back to memoir's own \pagenote.
    modules = document.get_module_list()
    if any(m in modules for m in ("enotez", "foottoenotez", "endnotes", "foottoend")):
        encommand = "\\endnote"
    else:
        encommand = "\\pagenote"

    revert_flex_inset(document.body, "Endnote", encommand)

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset FloatList pagenote", pos + 1)
        if pos == -1:
            break
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        starred = document.body[pos] == "\\begin_inset FloatList pagenote*"
        if starred:
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[pos : end + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
+
+
def revert_totalheight(document):
    " Reverts graphics height parameter from totalheight to height "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Extract an existing "height=..." value from the special parameter;
        # it is re-instated as the plain height parameter below.
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        oldheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:7] == "height=":
                    oldheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            # ",".join([]) is "" — covers the "nothing left" case too
            special = ",".join(mspecial)

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    # Existing special line: move the height value there
                    # as totalheight, merged with the remaining options.
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "totalheight=" + val
                else:
                    # No special line yet: insert one *after* the height line
                    # so that document.body[kk] below still addresses the
                    # original height parameter. (The original inserted at kk,
                    # making the del below remove the just-inserted line.)
                    document.body.insert(kk + 1, "\tspecial totalheight=" + val)
                if oldheight != "":
                    # height found inside special: it becomes the new height
                    document.body[kk] = m.group(1) + oldheight
                else:
                    # no old height: drop the height parameter entirely
                    del document.body[kk]
        elif oldheight != "":
            # no height parameter at all: re-instate the plain height
            document.body.insert(k, "\theight " + oldheight)
        i = j + 1
+
+
def convert_totalheight(document):
    " Converts graphics height parameter from totalheight to height "

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" %(i))
            i += 1
            continue

        # Look for a "totalheight=..." entry inside the "special" parameter;
        # if present, remove it there (it becomes the plain height below).
        rx = re.compile(r'\s*special\s*(\S+)$')
        k = find_re(document.body, rx, i, j)
        special = ""
        newheight = ""
        if k != -1:
            m = rx.match(document.body[k])
            if m:
                special = m.group(1)
            mspecial = special.split(',')
            for spc in mspecial:
                if spc[:12] == "totalheight=":
                    newheight = spc.split('=')[1]
                    mspecial.remove(spc)
                    break
            if len(mspecial) > 0:
                special = ",".join(mspecial)
            else:
                special = ""

        rx = re.compile(r'(\s*height\s*)(\S+)$')
        kk = find_re(document.body, rx, i, j)
        if kk != -1:
            m = rx.match(document.body[kk])
            val = ""
            if m:
                val = m.group(2)
                if k != -1:
                    # Existing special line: the old height value moves there
                    # as "height=...", merged with the remaining options.
                    if special != "":
                        val = val + "," + special
                    document.body[k] = "\tspecial " + "height=" + val
                else:
                    # No special line yet: insert one after the height line,
                    # so document.body[kk] below still refers to the height
                    # parameter itself.
                    document.body.insert(kk + 1, "\tspecial height=" + val)
                if newheight != "":
                    # totalheight found in special: it becomes the new height
                    document.body[kk] = m.group(1) + newheight
                else:
                    # no totalheight: drop the height parameter entirely
                    del document.body[kk]
        elif newheight != "":
            # no height parameter at all: turn totalheight into one
            document.body.insert(k, "\theight " + newheight)
        i = j + 1
+
 ##
 # Conversion hub
 #
@@ -3147,9 +3626,21 @@ convert = [
            [581, [convert_osf]],
            [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
            [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
+           [584, []],
+           [585, [convert_pagesizes]],
+           [586, []],
+           [587, [convert_pagesizenames]],
+           [588, []],
+           [589, [convert_totalheight]]
           ]
 
-revert =  [[582, [revert_ChivoFont,revert_CrimsonProFont]],
+revert =  [[588, [revert_totalheight]],
+           [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
+           [586, [revert_pagesizenames]],
+           [585, [revert_dupqualicites]],
+           [584, [revert_pagesizes,revert_komafontsizes]],
+           [583, [revert_vcsinfo_rev_abbrev]],
+           [582, [revert_ChivoFont,revert_CrimsonProFont]],
            [581, [revert_CantarellFont,revert_FiraFont]],
            [580, [revert_texfontopts,revert_osf]],
            [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!