#include "insets/Inset.h"
-#include "support/lassert.h"
#include "support/debug.h"
+#include "support/gettext.h"
+#include "support/lassert.h"
#include <algorithm>
#include <deque>
Obviously, the stored range should be as small as possible. However,
there is a lower limit: The StableDocIterator stored in the undo class
must be valid after the changes, too, as it will used as a pointer
-where to insert the stored bits when performining undo.
+where to insert the stored bits when performing undo.
*/
struct UndoElement
{
///
- UndoElement(UndoKind kin, CursorData const & cb,
+ UndoElement(UndoKind kin, CursorData const & cb,
StableDocIterator const & cel,
- pit_type fro, pit_type en, ParagraphList * pl,
- MathData * ar, BufferParams const & bp,
+ pit_type fro, pit_type en, ParagraphList * pl,
+ MathData * ar, BufferParams const & bp,
bool ifb, bool lc, size_t gid) :
kind(kin), cur_before(cb), cell(cel), from(fro), end(en),
pars(pl), array(ar), bparams(0), isFullBuffer(ifb),
size_t group_id;
private:
/// Protect construction
- UndoElement();
+ UndoElement();
};
-class UndoElementStack
+class UndoElementStack
{
public:
/// limit is the maximum size of the stack
/// Push an item on to the stack, deleting the bottom group on
/// overflow.
void push(UndoElement const & v) {
- c_.push_front(v);
- if (c_.size() > limit_) {
+ // Remove some entries if the limit has been reached.
+ // However, if the only group on the stack is the one
+ // we are currently populating, do nothing.
+ if (c_.size() >= limit_
+ && c_.front().group_id != v.group_id) {
// remove a whole group at once.
const size_t gid = c_.back().group_id;
while (!c_.empty() && c_.back().group_id == gid)
c_.pop_back();
}
+ c_.push_front(v);
}
/// Mark all the elements of the stack as dirty
void markDirty() {
for (size_t i = 0; i != c_.size(); ++i)
c_[i].lyx_clean = false;
- }
+ }
private:
/// Internal contents.
struct Undo::Private
{
- Private(Buffer & buffer) : buffer_(buffer), undo_finished_(true),
+ Private(Buffer & buffer) : buffer_(buffer), undo_finished_(true),
group_id(0), group_level(0) {}
-
+
// Do one undo/redo step
- void doTextUndoOrRedo(CursorData & cur, UndoElementStack & stack,
+ void doTextUndoOrRedo(CursorData & cur, UndoElementStack & stack,
UndoElementStack & otherStack);
// Apply one undo/redo group. Returns false if no undo possible.
bool textUndoOrRedo(CursorData & cur, bool isUndoOperation);
d->undostack_.clear();
d->redostack_.clear();
d->undo_finished_ = true;
- d->group_id = 0;
- d->group_level = 0;
+ // We used to do that, but I believe it is better to keep
+	// groups (only used in Buffer::reload for now) (JMarc)
+ //d->group_id = 0;
+ //d->group_level = 0;
}
{
d->undo_finished_ = true;
d->undostack_.markDirty();
- d->redostack_.markDirty();
+ d->redostack_.markDirty();
}
else
LYXERR(Debug::UNDO, "Create undo element of group " << group_id);
// create the position information of the Undo entry
- UndoElement undo(kind, cur_before, cell, from, end, 0, 0,
+ UndoElement undo(kind, cur_before, cell, from, end, 0, 0,
buffer_.params(), isFullBuffer, buffer_.isClean(), group_id);
// fill in the real data to be saved
// main Text _is_ the whole document.
// record the relevant paragraphs
Text const * text = cell.text();
- LASSERT(text, /**/);
+ LBUFERR(text);
ParagraphList const & plist = text->paragraphs();
ParagraphList::const_iterator first = plist.begin();
advance(first, first_pit);
CursorData const & cur,
bool isFullBuffer)
{
- LASSERT(first_pit <= cell.lastpit(), /**/);
- LASSERT(last_pit <= cell.lastpit(), /**/);
+ LASSERT(first_pit <= cell.lastpit(), return);
+ LASSERT(last_pit <= cell.lastpit(), return);
doRecordUndo(kind, cell, first_pit, last_pit, cur,
isFullBuffer, undostack_);
//LYXERR0("undo, performing: " << undo);
DocIterator dit = undo.cell.asDocIterator(&buffer_);
if (undo.isFullBuffer) {
- LASSERT(undo.pars, /**/);
+ LBUFERR(undo.pars);
// This is a full document
delete otherstack.top().bparams;
otherstack.top().bparams = new BufferParams(buffer_.params());
// gained by storing just 'a few' paragraphs (most if not
// all math inset cells have just one paragraph!)
//LYXERR0("undo.array: " << *undo.array);
- LASSERT(undo.array, /**/);
+ LBUFERR(undo.array);
dit.cell().swap(*undo.array);
delete undo.array;
undo.array = 0;
} else {
// Some finer machinery is needed here.
Text * text = dit.text();
- LASSERT(text, /**/);
- LASSERT(undo.pars, /**/);
+ LBUFERR(text);
+ LBUFERR(undo.pars);
ParagraphList & plist = text->paragraphs();
// remove new stuff between first and last
delete undo.pars;
undo.pars = 0;
}
- LASSERT(undo.pars == 0, /**/);
- LASSERT(undo.array == 0, /**/);
+
+ // We'll clean up in release mode.
+ LASSERT(undo.pars == 0, undo.pars = 0);
+ LASSERT(undo.array == 0, undo.array = 0);
if (!undo.cur_before.empty())
cur = undo.cur_before;
void Undo::endUndoGroup()
{
- if (d->group_level == 0)
+ if (d->group_level == 0) {
LYXERR0("There is no undo group to end here");
+ return;
+ }
--d->group_level;
if (d->group_level == 0) {
// real end of the group
// This one may happen outside of the main undo group, so we
// put it in its own subgroup to avoid complaints.
beginUndoGroup();
- d->recordUndo(ATOMIC_UNDO, doc_iterator_begin(&d->buffer_),
+ d->recordUndo(ATOMIC_UNDO, doc_iterator_begin(&d->buffer_),
0, d->buffer_.paragraphs().size() - 1, cur, true);
endUndoGroup();
}