Stroika Library 3.0d16
 
Loading...
Searching...
No Matches
ChunkedArrayTextStore.cpp
1/*
2 * Copyright(c) Sophist Solutions, Inc. 1990-2025. All rights reserved
3 */
#include "Stroika/Frameworks/StroikaPreComp.h"

#include <memory>

#include "ChunkedArrayTextStore.h"
7
8using namespace Stroika::Foundation;
9using namespace Stroika::Foundation::Memory;
10
11using namespace Stroika::Frameworks;
12using namespace Stroika::Frameworks::Led;
13
// Debug later why this doesn't work. Actually - I think I'm still going to do a lot more
// on the invariants with hack markers, so I can more efficiently add and remove them!
// This should be fine as-is for the 1.0 release however - LGP950527
17#if qStroika_Foundation_Debug_AssertionsChecked
18//#define qHeavyMarkerDebugging 1
19#endif
20
/*
 *  One fixed-capacity chunk of stored text. The text store keeps a vector of these;
 *  each holds up to kTextChunkSize Led_tChars, with only the first fTotalTcharsUsed
 *  slots meaningful.
 */
class ChunkedArrayTextStore::TextChunk {
public:
    TextChunk () = default;
    // NOTE(review): despite the name, 'bytesToCopy' is counted in Led_tChars, not
    // bytes (it is multiplied by sizeof (Led_tChar) at the memcpy) - confirm/rename.
    TextChunk (const Led_tChar* copyFrom, size_t bytesToCopy);

public:
    enum {
        kTextChunkSize = (4096 - sizeof (size_t) - 8) / sizeof (Led_tChar)
    }; // good guess as to how big we can allocate and still have stdlib allocator
       // not waste any space for rounding up

    /*
     * All indexes are relative to this TextChunk - and it is a programming error to
     * insert past end of buffer - or to delete too many characters.
     */
public:
    nonvirtual size_t GetLength () const noexcept;              // Led_tChars currently stored
    nonvirtual size_t GetBytesCanAccommodate () const noexcept; // remaining free capacity (in Led_tChars, despite the name)
    nonvirtual void CopyOut (size_t from, size_t count, Led_tChar* buffer) const noexcept;
    nonvirtual const Led_tChar* PeekAfter (size_t charPos) const noexcept; // direct pointer into fData - valid only until next edit
    nonvirtual void InsertAfter (const Led_tChar* what, size_t howMany, size_t after) noexcept;
    nonvirtual void DeleteAfter (size_t howMany, size_t after) noexcept;

private:
    size_t fTotalTcharsUsed{0};     // how many slots of fData are in use
    Led_tChar fData[kTextChunkSize]; // deliberately uninitialized raw storage
};
48
49// class ChunkedArrayTextStore::TextChunk
50inline ChunkedArrayTextStore::TextChunk::TextChunk (const Led_tChar* copyFrom, size_t bytesToCopy)
51 : fTotalTcharsUsed{bytesToCopy}
52{
53 Assert (bytesToCopy <= kTextChunkSize);
54 AssertNotNull (copyFrom);
55 (void)::memcpy (fData, copyFrom, bytesToCopy * sizeof (Led_tChar));
56}
57inline size_t ChunkedArrayTextStore::TextChunk::GetLength () const noexcept
58{
59 return (fTotalTcharsUsed);
60}
61inline size_t ChunkedArrayTextStore::TextChunk::GetBytesCanAccommodate () const noexcept
62{
63 return (kTextChunkSize - fTotalTcharsUsed);
64}
65inline void ChunkedArrayTextStore::TextChunk::CopyOut (size_t from, size_t count, Led_tChar* buffer) const noexcept
66{
67 AssertNotNull (buffer);
68 Assert (from + count <= fTotalTcharsUsed);
69 (void)::memcpy (buffer, &fData[from], count * sizeof (Led_tChar));
70}
71inline const Led_tChar* ChunkedArrayTextStore::TextChunk::PeekAfter (size_t charPos) const noexcept
72{
73 Assert (charPos >= 0);
74 Assert (charPos < fTotalTcharsUsed);
75 return (&fData[charPos]);
76}
77inline void ChunkedArrayTextStore::TextChunk::InsertAfter (const Led_tChar* what, size_t howMany, size_t after) noexcept
78{
79 Assert (what != 0 or howMany == 0);
80 Assert (after >= 0);
81 Assert (after <= fTotalTcharsUsed); // cannot insert past end (other than appending)
82 Assert (after + howMany <= kTextChunkSize);
83 /*
84 * If we are overwriting existing bytes- shovel them off to the right first.
85 * Then copyin the new data.
86 */
87 Assert (fTotalTcharsUsed >= after);
88 size_t bytesToMove = fTotalTcharsUsed - after;
89 if (bytesToMove != 0) {
90 (void)memmove (&fData[after + howMany], &fData[after], bytesToMove * sizeof (Led_tChar));
91 }
92 (void)::memcpy (&fData[after], what, howMany * sizeof (Led_tChar));
93 fTotalTcharsUsed += howMany;
94}
95inline void ChunkedArrayTextStore::TextChunk::DeleteAfter (size_t howMany, size_t after) noexcept
96{
97 Require (after + howMany <= fTotalTcharsUsed);
98 size_t bytesToMove = fTotalTcharsUsed - (after + howMany);
99 if (bytesToMove != 0) {
100 (void)::memmove (&fData[after], &fData[after + howMany], bytesToMove * sizeof (Led_tChar));
101 }
102 fTotalTcharsUsed -= howMany;
103}
104
/*
 * Tried a bunch of different sizes. Smaller sizes have produced no perceptible speed difference,
 * and they DO require more memory (more hack markers), so I pick this size. Any larger and I CAN
 * see some slowdown. -- LGP 950528
 *
 * Played around with this again briefly, and 50 still appears to be about the best
 * value - LGP 960515.
 */
// Threshold child-count above which hack markers get introduced (compile-time constant).
constexpr size_t kEnufChildrenToApplyHackMarkers = 50;
114
115// This is what goes in a fTextStoreHook of a Marker* when we add it...
116class ChunkedArrayMarkerHook : public Marker::HookData, public Memory::UseBlockAllocationIfAppropriate<ChunkedArrayMarkerHook> {
117public:
118 ChunkedArrayMarkerHook () = default;
119
120public:
121 virtual MarkerOwner* GetOwner () const override;
122 virtual size_t GetStart () const override;
123 virtual size_t GetEnd () const override;
124 virtual size_t GetLength () const override;
125 virtual void GetStartEnd (size_t* start, size_t* end) const override;
126
127 nonvirtual void AddToChildList (Marker* marker);
128 nonvirtual size_t CountChildren () const;
129 nonvirtual bool CountChildrenMoreThan (size_t n) const; // return true iff at least n children
130
131 // NB: As far as I know - ordering in this linked list doesn't matter.
132 // Consider replacing it with a lhs/rhs child pointers and stricly having binary tree?
133 // Maybe not cuz then we lose our contains property for subtrees
134 Marker* fFirstSubMarker; // singly linked list of submarkers
135 Marker* fNextSubMarker; // (next link with same parent)
136
137 Marker* fParent{nullptr};
138 bool fIsHackMarker : 1 {false};
139 bool fIsDeletedMarker : 1 {false}; // true only for REAL markers who are deleted
140 bool fIsPreRemoved : 1 {false};
141 size_t fStart{0};
142 size_t fLength{0};
143 MarkerOwner* fOwner{nullptr};
144};
145
// Subclass only to do block allocation
// (hack markers are internal plain Markers - behaviorally identical to Marker,
// this subclass exists purely so they can be block-allocated)
class HackMarker : public Marker, public Memory::UseBlockAllocationIfAppropriate<HackMarker> {
public:
    HackMarker () = default;
};
151
152MarkerOwner* ChunkedArrayMarkerHook::GetOwner () const
153{
154 //OLD COMMENT OBSOLETE - LGP 2000-07-25 - // NOT CLEAR IF WE SHOULD ALLOW NULL RETURN????
155 EnsureNotNull (fOwner); // LGP 2000-07-25 - this can never be NULL - cuz we always require a non-NULL marker owner when adding!
156 return fOwner;
157}
158
159size_t ChunkedArrayMarkerHook::GetStart () const
160{
161 Assert (fStart < 0x8000000); // a really big number - we don't have enough memory to trigger
162 // this - only point is to test of accidental cast of negnum to
163 // size_t.
164 return fStart;
165}
166
167size_t ChunkedArrayMarkerHook::GetEnd () const
168{
169 Assert (fStart < 0x8000000); // See GetStart
170 Assert (fLength < 0x8000000); // ''
171 return fStart + fLength;
172}
173
174size_t ChunkedArrayMarkerHook::GetLength () const
175{
176 Assert (fLength < 0x8000000); // See GetStart
177 return fLength;
178}
179
180void ChunkedArrayMarkerHook::GetStartEnd (size_t* start, size_t* end) const
181{
182 Assert (fStart < 0x8000000); // See GetStart
183 Assert (fLength < 0x8000000); // ''
184 RequireNotNull (start);
185 RequireNotNull (end);
186 *start = fStart;
187 *end = fStart + fLength;
188}
189
190/*
191 ********************************************************************************
192 ********************** Some private inline function definitions ****************
193 ********************************************************************************
194 */
195static inline ChunkedArrayMarkerHook* OurStuff (const Marker* marker)
196{
197 RequireNotNull (marker);
198 RequireNotNull ((ChunkedArrayMarkerHook*)marker->fTextStoreHook);
199 AssertMember (marker->fTextStoreHook, ChunkedArrayMarkerHook);
200 EnsureNotNull (marker->fTextStoreHook);
201 return (ChunkedArrayMarkerHook*)marker->fTextStoreHook;
202}
203
204inline void ChunkedArrayMarkerHook::AddToChildList (Marker* marker)
205{
206 AssertNotNull (marker);
207 OurStuff (marker)->fNextSubMarker = fFirstSubMarker;
208 fFirstSubMarker = marker;
209}
210inline size_t ChunkedArrayMarkerHook::CountChildren () const
211{
212 size_t nChildren = 0;
213 for (auto curChild = fFirstSubMarker; curChild != NULL; curChild = OurStuff (curChild)->fNextSubMarker) {
214 ++nChildren;
215 }
216 return nChildren;
217}
218inline bool ChunkedArrayMarkerHook::CountChildrenMoreThan (size_t n) const // return true iff at least n children
219{
220 size_t nChildren = 0;
221 for (auto curChild = fFirstSubMarker; curChild != NULL; curChild = OurStuff (curChild)->fNextSubMarker) {
222 ++nChildren;
223 if (nChildren >= n) {
224 return true;
225 }
226 }
227 return nChildren >= n;
228}
// True iff 'm' is a hack marker and (recursively) so is everything beneath it.
// NOTE(review): AllSubMarkersAreHackMarkerTrees is called unqualified here -
// presumably a ChunkedArrayTextStore member declared in the header; confirm.
inline bool ChunkedArrayTextStore::AllHackMarkers (const Marker* m)
{
    AssertNotNull (m);
    return (OurStuff (m)->fIsHackMarker and (OurStuff (m)->fFirstSubMarker == NULL or AllSubMarkersAreHackMarkerTrees (m)));
}
234inline ChunkedArrayTextStore::TextChunk* ChunkedArrayTextStore::AtomicAddChunk (size_t atArrayPos)
235{
236 TextChunk* t = new TextChunk ();
237 AssertNotNull (t);
238 try {
239 // fTextChunks.InsertAt (t, atArrayPos);
240 fTextChunks.insert (fTextChunks.begin () + atArrayPos, t);
241 }
242 catch (...) {
243 delete t;
244 throw;
245 }
246 AssertNotNull (t);
247 return (t);
248}
249
// Tiny accessors for the start/length/owner values cached in a marker's hook.
// The Ensure()s cross-check the cached value against the (virtual) Marker getters.
static inline size_t GetMarkerStart_ (Marker* m)
{
    RequireNotNull (m);
    Ensure (OurStuff (m)->fStart == m->GetStart ());
    return OurStuff (m)->fStart;
}
static inline void SetMarkerStart_ (Marker* m, size_t s)
{
    OurStuff (m)->fStart = s;
}
static inline size_t GetMarkerLength_ (Marker* m)
{
    RequireNotNull (m);
    Ensure (OurStuff (m)->fLength == m->GetLength ());
    return OurStuff (m)->fLength;
}
static inline void SetMarkerLength_ (Marker* m, size_t s)
{
    OurStuff (m)->fLength = s;
}
static inline void SetMarkerOwner_ (Marker* m, MarkerOwner* o)
{
    OurStuff (m)->fOwner = o;
}
274
// Overlap test between marker-hook 'h's cached range and [from, to], careful about
// the nuanced zero-sized cases (an empty marker 'touching' the range counts; a
// zero-sized intersection with a non-empty marker does not).
static inline bool QUICK_Overlap (ChunkedArrayMarkerHook* h, size_t from, size_t to)
{
    RequireNotNull (h);
    /*
     * Avoid virtual function call on m to get its range, for speed.
     */
    size_t start = h->fStart;
    if (start > to) {
        return false; // marker begins wholly after the query range
    }
    size_t end = start + h->fLength;

    if ((from <= end) and (start <= to)) {
        // Maybe overlap - handle nuanced cases of zero-sized overlaps
        size_t overlapSize;
        if (to >= end) {
            size_t a = end - from; // no unsigned wrap: from <= end checked above
            size_t b = end - start;
            overlapSize = min (a, b);
        }
        else {
            size_t a = to - from; // assumes from <= to - TODO confirm all callers guarantee this
            size_t b = to - start;
            overlapSize = min (a, b);
        }
        if (overlapSize == 0) {
            // zero-sized intersection only counts when the marker itself is empty
            return end == start;
        }
        else {
            return true;
        }
    }
    else {
        return false;
    }
}
// Fast-path overlap test via the cached hook data; debug builds verify it agrees
// with the canonical TextStore::Overlap.
static inline bool QUICK_Overlap (const Marker& m, size_t from, size_t to)
{
    bool result = QUICK_Overlap (OurStuff (&m), from, to);
    Assert (result == TextStore::Overlap (m, from, to));
    return result;
}
static inline bool QUICK_Contains (size_t containedStart, size_t containedEnd, size_t containerStart, size_t containerEnd)
{
    // True iff [containedStart, containedEnd] lies wholly within [containerStart, containerEnd].
    bool startsInside = containerStart <= containedStart;
    bool endsInside = containedEnd <= containerEnd;
    return startsInside and endsInside;
}
321static inline bool QUICK_Contains (const Marker& containedMarker, const Marker& containerMarker)
322{
323 ChunkedArrayMarkerHook* h = OurStuff (&containerMarker);
324 size_t containerStart = h->fStart;
325 size_t containerEnd = containerStart + h->fLength;
326
327 ChunkedArrayMarkerHook* h1 = OurStuff (&containedMarker);
328 size_t containedStart = h1->fStart;
329 size_t containedEnd = containedStart + h1->fLength;
330 ;
331 bool result = QUICK_Contains (containedStart, containedEnd, containerStart, containerEnd);
332 Assert (result == Contains (containedMarker, containerMarker));
333 return result;
334}
335
336#if qUseLRUCacheForRecentlyLookedUpMarkers
// One LRU-cache entry: the markers produced by a range lookup, keyed by the
// queried [fFrom .. fTo] range.
struct ChunkedArrayTextStore::CollectLookupCacheElt {
    vector<Marker*> fMarkers;
    size_t fFrom;
    size_t fTo;

    // Lightweight lookup key - lets the cache be probed without building a full element.
    struct COMPARE_ITEM {
        COMPARE_ITEM (size_t from, size_t to)
            : fFrom (from)
            , fTo (to)
        {
        }

        size_t fFrom;
        size_t fTo;
    };

    nonvirtual void Clear ()
    {
        // size_t(-1) acts as the 'invalid' sentinel, so Equal () can (presumably)
        // never match a cleared entry; fTo is deliberately left stale.
        fFrom = static_cast<size_t> (-1);
    }
    static bool Equal (const CollectLookupCacheElt& lhs, const COMPARE_ITEM& rhs)
    {
        return lhs.fFrom == rhs.fFrom and lhs.fTo == rhs.fTo;
    }
};
362#endif
363
/*
 *  Per-MarkerOwner bookkeeping this text store hangs off MarkerOwner::fTextStoreHook:
 *  the owner's root marker (spanning all the text) at the top of that owner's marker
 *  tree, an optional LRU cache of recent range->marker lookups, and (optionally)
 *  statistics counters.
 *
 *  NOTE(review): this listing appears to have dropped a few source lines - the
 *  CACHE_TYPE alias is truncated and FreeHackTree1's braces are garbled (see the
 *  inline notes). Restore those regions from version control before editing.
 */
class ChunkedArrayMarkerOwnerHook : public MarkerOwner::HookData {
private:
    using inherited = MarkerOwner::HookData;

public:
    // Construct the hook for owner 'mo'; its root marker spans [0 .. len)
    ChunkedArrayMarkerOwnerHook (MarkerOwner* mo, size_t len)
        :
#if qUseLRUCacheForRecentlyLookedUpMarkers
        fCache{2}
        ,
#endif
        fRootMarker{}
#if qKeepChunkedArrayStatistics
        , fTotalMarkersPresent{0}
        , fTotalHackMarkersPresent{0}
        , fPeakTotalMarkersPresent{0}
        , fPeakHackMarkersPresent{0}
        , fTotalHackMarkersAlloced{0}
#endif
    {
        // Give the root marker its own hook (owned/deleted by this object).
        Assert (fRootMarker.fTextStoreHook == NULL);
        fRootMarker.fTextStoreHook = new ChunkedArrayMarkerHook ();
        SetMarkerOwner_ (&fRootMarker, mo);
        SetMarkerStart_ (&fRootMarker, 0);
        SetMarkerLength_ (&fRootMarker, len);
    }
    ~ChunkedArrayMarkerOwnerHook ()
    {
#if qKeepChunkedArrayStatistics
        Require (fTotalMarkersPresent == 0); // ALL MARKERS MUST BE REMOVED BEFORE EDITOR DESTROYED!!!!
                                             // If this assertion ever triggers, then it may well be a Led client
                                             // bug and not a Led-bug per-se. Check that all your markers have been
                                             // removed!
                                             //
                                             // See Led's FAQ#27 - http://www.sophists.com/Led/LedClassLib/ClassLibDocs/Recipes.html#27
#endif

//----Disabled this BUG WORKAROUND cleanup code again - now that its localized per-MarkerOwner -
// maybe easier to debug if it ever comes up again - LGP 2000-07-26. NOTE - this code should NOT be enabled! except to
// work around a Led ChunkedArrayTextStore bug which doesn't appear to exist anymore... LGP 2001-08-28 (3.0c1x).
//
//----Enabled this cleanup code again to avoid needless asserts in LEC's 5.0 product. Debug this later... Again...LGP980629
//---- THIS CLEANUP CODE STILL BROKEN - SEEN ERROR SPORADICALLY, BUT STILL NO REPRODUCIBLE CASE - Harmless anyhow - leave this as is - LGP 980406
//---- tmp re-enable - and see if this is fixed, or perhaps debug it... - LGP 980316
// Should not be needed anymore!!!
// And yet it is!!! - try file natfta.txt!!! - LGP 950527 - See SPR 0300
#if 0
        // Walk children of root marker and see if any hack markers left... Any children left at this point MUST be
        // hack markers.
        {
            // Note that FreeHackTree will remove the argument marker from the parents list, so we can just keep using
            // the first child of fRootMarker til there are none...
            for (Marker* mi = OurStuff (&fRootMarker)->fFirstSubMarker; mi != NULL; mi = OurStuff (&fRootMarker)->fFirstSubMarker) {
                Assert (AllHackMarkers (mi));
                FreeHackTree (mi);
            }
        }
#endif

#if qKeepChunkedArrayStatistics
        Assert (fTotalHackMarkersPresent == 0); // If the fTotalMarkersPresent==0 and fTotalHackMarkersPresent != 0, then
                                                // this is a Led bug (see walk children/FreeHackTree code above for workaround)
#endif

        Assert (OurStuff (&fRootMarker)->fFirstSubMarker == NULL); // Everything should be deleted by now

        if (fRootMarker.fTextStoreHook != NULL) {
            SetMarkerOwner_ (&fRootMarker, NULL);
        }
        delete ((ChunkedArrayMarkerHook*)fRootMarker.fTextStoreHook);
    }

#if qUseLRUCacheForRecentlyLookedUpMarkers
public:
    using CollectLookupCacheElt = ChunkedArrayTextStore::CollectLookupCacheElt;
    // NOTE(review): the right-hand side of this alias is missing from this listing
    // (presumably an LRUCache instantiation over CollectLookupCacheElt) - restore
    // from version control.
    using CACHE_TYPE =

public:
    // Invalidate all cached lookups (called on every text edit).
    nonvirtual void ClearCache ()
    {
        fCache.ClearCache ();
    }
    nonvirtual CollectLookupCacheElt* LookupElement (const CollectLookupCacheElt::COMPARE_ITEM& item)
    {
        return fCache.LookupElement (item);
    }
    nonvirtual CollectLookupCacheElt* AddNew (const CollectLookupCacheElt::COMPARE_ITEM& item)
    {
        return fCache.AddNew (item);
    }

private:
    CACHE_TYPE fCache;
#endif

public:
    // True iff 'm' is a hack marker and (recursively) so is everything beneath it.
    inline bool AllHackMarkers (const Marker* m)
    {
        AssertNotNull (m);
        return (OurStuff (m)->fIsHackMarker and (OurStuff (m)->fFirstSubMarker == NULL or AllSubMarkersAreHackMarkerTrees (m)));
    }

public:
    // Depth check that every subtree under 'm' consists solely of hack markers,
    // with the recursion hand-unwound onto an explicit stack (goto RoutineTop acts
    // as the 'call'; AfterCallPoint is the 'return' point).
    nonvirtual bool AllSubMarkersAreHackMarkerTrees (const Marker* m)
    {
        AssertNotNull (m);
        Assert (OurStuff (m)->fIsHackMarker);

        // Try unwinding tail recursion using explicit stack
        size_t stackDepth = 0;
        StackBuffer<Marker*> stack{0};

    RoutineTop:
        // Do in TWO loops rather than one simply because it is much faster to check the tree
        // breadth-wise than depth-wise (no recursion/function call) so probably faster to check
        // this way (assuming we get negative answers) even though we visit each node twice (assuming
        // we end up with a yes)
        for (auto mi = OurStuff (m)->fFirstSubMarker; mi != NULL;) {
            ChunkedArrayMarkerHook* misStuff = OurStuff (mi);
            Assert (misStuff->fParent == m);
            if (not misStuff->fIsHackMarker) {
                return false;
            }
            mi = misStuff->fNextSubMarker;
        }

        auto mi = OurStuff (m)->fFirstSubMarker;
        for (; mi != NULL; mi = OurStuff (mi)->fNextSubMarker) {
            Assert (OurStuff (mi)->fIsHackMarker);
            // Save args, then push new args
            stack.GrowToSize (stackDepth + 1);
            stack[stackDepth] = mi;
            ++stackDepth;
            m = mi; // bind formal arg
            goto RoutineTop;
        AfterCallPoint:
            Assert (stackDepth > 0);
            --stackDepth;
            mi = stack[stackDepth];
        }

        if (stackDepth != 0) {
            // pop the stack and continue in the loop
            goto AfterCallPoint;
        }
        return (true);
    }

public:
    // Unlink hack marker 'm' from its parent's child list, then free its whole subtree.
    nonvirtual void FreeHackTree (Marker* m)
    {
        AssertNotNull (m);
        Assert (OurStuff (m)->fIsHackMarker);

        // remove self from parent first, then blindly do tree delete...
        Marker* parent = OurStuff (m)->fParent;
        AssertNotNull (parent);
        Marker* prevMarker = NULL;
        for (auto mi = OurStuff (parent)->fFirstSubMarker; mi != NULL; (prevMarker = mi), (mi = OurStuff (mi)->fNextSubMarker)) {
            Assert (OurStuff (mi)->fParent == parent);
            if (mi == m) {
                if (prevMarker == NULL) {
                    Assert (OurStuff (parent)->fFirstSubMarker == mi);
                    OurStuff (parent)->fFirstSubMarker = OurStuff (mi)->fNextSubMarker;
                }
                else {
                    Assert (OurStuff (prevMarker)->fNextSubMarker == mi);
                    OurStuff (prevMarker)->fNextSubMarker = OurStuff (mi)->fNextSubMarker;
                }
                OurStuff (mi)->fNextSubMarker = NULL;
                FreeHackTree1 (m);
                return;
            }
        }
        Assert (false); // if we get here then we weren't in our parents linked list of children !!! BAD!!!
    }

    // Recursively free subtree 'm' (assumed already unlinked from its parent).
    // NOTE(review): source lines were dropped from this listing here - the brace
    // structure below is garbled scrape damage (the early '}' after the first
    // Assert, and the unbalanced closers at the bottom). Restore this function's
    // body from version control before editing.
    nonvirtual void FreeHackTree1 (Marker* m)
    {
        Assert (OurStuff (m)->fIsHackMarker);
    }
    for (Marker* mi = OurStuff (m)->fFirstSubMarker; mi != NULL;) {
        Marker* nextMI = OurStuff (mi)->fNextSubMarker;
        FreeHackTree1 (mi);
        mi = nextMI;
    }
    AssertNotNull (m->fTextStoreHook);
    bool isDeletedMarker = OurStuff (m)->fIsDeletedMarker;
    delete ((ChunkedArrayMarkerHook*)m->fTextStoreHook);

    if (isDeletedMarker) {
        m->fTextStoreHook = NULL; // so not deleted twice by RemoveMarkers()
    }
    else {
        m->fTextStoreHook = (ChunkedArrayMarkerHook*)666; // magic # so we know these are bad... Should never be referenced after this
    }
    delete m;
#if qKeepChunkedArrayStatistics
    Assert (fTotalHackMarkersPresent > 0);
    --fTotalHackMarkersPresent;
#endif
    }
    }

public:
    Marker fRootMarker;

#if qKeepChunkedArrayStatistics
public:
    unsigned long fTotalMarkersPresent; // real markers - not including hack and root
    unsigned long fTotalHackMarkersPresent;
    unsigned long fPeakTotalMarkersPresent;
    unsigned long fPeakHackMarkersPresent;
    unsigned long fTotalHackMarkersAlloced; // total freed = fTotalHackMarkersAlloced - fTotalHackMarkersPresent
#endif
};
583
584inline ChunkedArrayMarkerOwnerHook* GetAMOH (const MarkerOwner* mo)
585{
586 RequireNotNull (mo);
587 RequireNotNull (mo->fTextStoreHook);
588 RequireNotNull (dynamic_cast<ChunkedArrayMarkerOwnerHook*> (mo->fTextStoreHook));
589 return dynamic_cast<ChunkedArrayMarkerOwnerHook*> (mo->fTextStoreHook);
590}
591inline ChunkedArrayMarkerOwnerHook* GetAMOH (ChunkedArrayMarkerHook* moh)
592{
593 RequireNotNull (moh);
594 return GetAMOH (moh->fOwner);
595}
// Convenience: marker -> its hook -> that hook's owner's hook.
inline ChunkedArrayMarkerOwnerHook* GetAMOH (Marker* m)
{
    return GetAMOH (OurStuff (m));
}
600
601/*
602 ********************************************************************************
603 ******************************** ChunkedArrayTextStore *************************
604 ********************************************************************************
605 */
606
607/*
608 * Preliminary Design ideas:
609 *
610 * Try to keep all chunks at least 1/2 full - except the last. Do invariant to assure
611 * this. Reason is to avoid memory waste - otherwise we might end up with a 4096byte block
612 * for each character!!!!
613 *
614 * On second thought - this isn't such a great idea. Might be better to have a separate
615 * coalesceall routine. Wind thru the list of chunks and if it even finds two adjacent that
616 * can be joined - then do so.
617 */
ChunkedArrayTextStore::ChunkedArrayTextStore ()
{
    // Install our own per-owner hook for the text store itself. The length of 2
    // apparently mirrors the 'GetLength () + 2' slop used in AddMarkerOwner (the
    // store is empty at construction) - see the comment there; confirm if changing.
    fTextStoreHook = new ChunkedArrayMarkerOwnerHook (this, 2);
}
622
623ChunkedArrayTextStore::~ChunkedArrayTextStore ()
624{
625 Require (GetMarkerOwners ().size () == 1); // Really this should properly be checked in the TextStore::DTOR - and it is.
626 // But if this test fails, other tests within THIS DTOR will likely also fail. And
627 // those can be confusing. This diagnostic should clearly indicate to users that they've
628 // forgotten to remove some MarkerOwners - like Views or MarkerCovers, or ParagraphDatabases,
629 // etc.
630 //
631 // Note also - that since all markers must be removed for a given MarkerOwner before IT can be removed
632 // this constraint also implies we contain no markers!
633
634 delete fTextStoreHook;
635 fTextStoreHook = NULL;
636
637 // NB: We don't consider it an error to not remove all the text from the text editor.
638 for (size_t i = 0; i < fTextChunks.size (); i++) {
639 delete fTextChunks[i];
640 }
641}
642
/*
@METHOD: ChunkedArrayTextStore::ConstructNewTextStore
@DESCRIPTION: <p>See @'TextStore::ConstructNewTextStore' ().</p>
*/
TextStore* ChunkedArrayTextStore::ConstructNewTextStore () const
{
    // Factory: a fresh, empty store of the same concrete type (caller owns it).
    return new ChunkedArrayTextStore ();
}
651
void ChunkedArrayTextStore::AddMarkerOwner (MarkerOwner* owner)
{
    // Register 'owner' and hang our per-owner bookkeeping hook off it. If the hook
    // allocation throws, the registration is rolled back so the store stays consistent.
    RequireNotNull (owner);
    Require (owner->fTextStoreHook == NULL);
    inherited::AddMarkerOwner (owner);
    try {
        owner->fTextStoreHook = new ChunkedArrayMarkerOwnerHook (owner, GetLength () + 2); // include ALL text, and ALL other markers - one more than
                                                                                           // the text imager does - not sure if any of that needed
                                                                                           // anymore??? LGP 941006 -yes probably is cuz we do >1
                                                                                           // length there - not so we get notified - that is
                                                                                           // already handled -but so we get autogrowing...
    }
    catch (...) {
        inherited::RemoveMarkerOwner (owner);
        throw;
    }
}
669
void ChunkedArrayTextStore::RemoveMarkerOwner (MarkerOwner* owner)
{
    // Tear down the per-owner hook installed by AddMarkerOwner, then unregister
    // 'owner'. All of the owner's markers must already have been removed.
    RequireNotNull (owner);
    RequireNotNull (owner->fTextStoreHook);
    RequireNotNull (dynamic_cast<ChunkedArrayMarkerOwnerHook*> (owner->fTextStoreHook));
#if qKeepChunkedArrayStatistics
    // NOTE(review): braces in this statistics block are unbalanced in this listing
    // (a source line appears to have been dropped by the scrape) - restore this
    // region from version control before editing.
    {
        ChunkedArrayMarkerOwnerHook* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> (owner->fTextStoreHook);
        Require (camoh->fTotalMarkersPresent == 0); // ALL MARKERS MUST BE REMOVED BEFORE EDITOR DESTROYED!!!!
                                                    // If this assertion ever triggers, then it may well be a Led client
                                                    // bug and not a Led-bug per-se. Check that all your markers have been
                                                    // removed!
                                                    //
                                                    // Look just below at 'markersWhichShouldHaveBeenDeleted' to see just what markers are left.
                                                    //
                                                    // See Led's FAQ#27 - http://www.sophists.com/Led/LedClassLib/ClassLibDocs/Recipes.html#27
        if (camoh->fTotalMarkersPresent != 0) {
            vector<Marker*> markersWhichShouldHaveBeenDeleted;
            VectorMarkerSink tmp{&markersWhichShouldHaveBeenDeleted};
            CollectAllMarkersInRangeInto (0, GetEnd () + 2, owner, tmp);
        }
    }
    }
#endif
    delete owner->fTextStoreHook;
    owner->fTextStoreHook = NULL;
    inherited::RemoveMarkerOwner (owner);
}
699
700void ChunkedArrayTextStore::CopyOut (size_t from, size_t count, Led_tChar* buffer) const noexcept
701{
702 // Note that it IS NOT an error to call CopyOut for multibyte characters and split them. This is one of the few
703 // API routines where that is so...
704 RequireNotNull (buffer);
705 Require (from >= 0);
706 Require (from + count <= GetEnd ()); // Be sure all Led_tChars requested fall in range
707
708 ChunkAndOffset chunkIdx = FindChunkIndex (from);
709 for (size_t bytesToGo = count; bytesToGo != 0;) {
710 Assert (chunkIdx.fChunk >= 0);
711 Assert (chunkIdx.fChunk < fTextChunks.size ()); // can call copyOut where this isn't true - but
712 // better not be copying any bytes then!!!
713 TextChunk* t = fTextChunks[chunkIdx.fChunk];
714 AssertNotNull (t);
715 size_t copyFromThisGuy = min (bytesToGo, t->GetLength () - (chunkIdx.fOffset));
716 t->CopyOut (chunkIdx.fOffset, copyFromThisGuy, &buffer[count - bytesToGo]);
717
718 // For next time through the loop
719 chunkIdx.fOffset = 0;
720 ++chunkIdx.fChunk;
721 bytesToGo -= copyFromThisGuy;
722 }
723}
724
void ChunkedArrayTextStore::ReplaceWithoutUpdate (size_t from, size_t to, const Led_tChar* withWhat, size_t withWhatCount)
{
    // Replace the text in [from, to) with withWhat[0..withWhatCount) WITHOUT any
    // change notifications - implemented as a delete followed by an insert (see
    // the marker-update caveats below).
#if qUseLRUCacheForRecentlyLookedUpMarkers
    // Any edit invalidates every owner's cached range->marker lookups.
    for (vector<MarkerOwner*>::const_iterator i = GetMarkerOwners ().begin (); i != GetMarkerOwners ().end (); ++i) {
        GetAMOH (*i)->ClearCache ();
    }
#endif
    Assert (from <= to);
    if (from != to or withWhatCount != 0) {
        // THIS ISN't QUITE RIGHT - A GOOD APPROX HOWEVER...
        // cuz we don't update markers properly yet... Close - but not quite, a replace
        // is treated as a delete/insert - which isn't quite what we want...
        /*
         * Though the implication for updating markers is slightly different, for updating just
         * the text, we can treat this as a delete, followed by an insert.
         */

        /*
         * We could either do insert first or delete first.
         *
         * To assure the proper semantics for replace inside of markers however,
         * we must at least update the markers for INSERTION before updating them
         * for deletion. So this argues to do insert then delete.
         *
         * But - only paying attention to the speed of inserting the text,
         * it is probably (maybe) faster to delete first and then insert.
         * (actually if the delete causes a coalesce this may not be true).
         *
         * We could have both, by splitting the marker update code out from
         * the text insert/delete routines. But right now I think the simplest
         * (and most correct) thing todo is just do the insert first.
         *
         * <<
         * WHOOPS! This rationale is WRONG - even this way, we would not get
         * stuff inserted. Perhaps we really shouldn't. Unclear. See SPR#0212.
         * For now, leave things as they were!!!
         * >>
         */
        if (from != to) {
            DeleteAfter_ (to - from, from);
        }
        if (withWhatCount != 0) {
            InsertAfter_ (withWhat, withWhatCount, from);
        }
    }
}
771
void ChunkedArrayTextStore::InsertAfter_ (const Led_tChar* what, size_t howMany, size_t after)
{
    /*
     *  Insert 'howMany' Led_tChars from 'what' into the chunk list at text position
     *  'after' - splitting the target chunk and allocating new chunks as needed -
     *  then update fLength and shift the affected markers.
     */
    Assert (howMany > 0);
    Assert (what != NULL);

    // NOTE(review): the two asserts below are exact duplicates of the two above - harmless.
    Assert (howMany > 0);
    Assert (what != NULL);

    ChunkAndOffset chunkIdx = FindChunkIndex (after);

    /*
     * Make sure we have a good chunk to insert into - at least to begin with. This simplifies
     * the code below which just tries to insert into the current chunk pointed to, and then
     * inserts extra chunks as needed. This just removes a special case down there.
     */
    if (chunkIdx.fChunk >= fTextChunks.size ()) {
        if (fTextChunks.size () == 0) {
            (void)AtomicAddChunk (0);
        }
        else {
            // If we are pointing past the end of a chunk - maybe we can just
            // go back and insert inside the previous chunk. If not - THEN
            // append a new one...
            Assert (chunkIdx.fChunk >= 1);
            TextChunk* preChunk = fTextChunks[chunkIdx.fChunk - 1];
            if (preChunk->GetBytesCanAccommodate () == 0) {
                (void)AtomicAddChunk (fTextChunks.size ());
            }
            else {
                --chunkIdx.fChunk;
                chunkIdx.fOffset = preChunk->GetLength ();
            }
        }
    }
    TextChunk* t = fTextChunks[chunkIdx.fChunk];
    AssertNotNull (t);

    /*
     * Now add text and update bytesXFered as we go. On exceptions, we update fLength with ONLY
     * AS MUCH AS WE ADDED. Similarly, we update the extents by only as much as we added.
     */
    size_t bytesXfered = 0;
    try {
        if (t->GetBytesCanAccommodate () >= howMany) {
            /*
             * We're happy - we can just insert into this chunk.
             */
            t->InsertAfter (what, howMany, chunkIdx.fOffset);
            bytesXfered += howMany;
        }
        else {
            /*
             * Now things are MUCH more complicated. The stuff at the end of this
             * chunk must be pushed forward into the following chunk - if
             * it fits - and otherwise - we must build a new - following -
             * chunk to accommodate it.
             *
             * Then - once we are truly appending to this chunk - we are
             * all set.
             */
            {
                if (chunkIdx.fChunk == fTextChunks.size () - 1) {
                    (void)AtomicAddChunk (chunkIdx.fChunk + 1);
                }
                TextChunk* followingChunk = fTextChunks[chunkIdx.fChunk + 1];
                AssertNotNull (followingChunk);
                size_t splitPoint = chunkIdx.fOffset;
                size_t bytesToShift = t->GetLength () - (splitPoint);
                const Led_tChar* peekAfter = (bytesToShift == 0) ? NULL : t->PeekAfter (splitPoint);
                if (followingChunk->GetBytesCanAccommodate () >= bytesToShift) {
                    followingChunk->InsertAfter (peekAfter, bytesToShift, 0); // MUST PREPEND these chars
                }
                else {
                    // complexities within the complexities! If there isn't room in the following chunk
                    // then we do something to make room. A simple approach is to just cons up a new
                    // chunk and stick it between. This may be less than ideal - but it will do for now...
                    // LGP 941022
                    followingChunk = AtomicAddChunk (chunkIdx.fChunk + 1); // insert AFTER current item (before old following)...
                    followingChunk->InsertAfter (peekAfter, bytesToShift, 0);
                }
                t->DeleteAfter (bytesToShift, splitPoint); // shouldn't shuffle anything - just update length count
            }

            /*
             * Ah - finally. Now that we've dispensed with the preliminaries - we can get on with the
             * crux of our work. This is to append text to this chunk. And then keep looping. And
             * as we loop - prepend to the following chunk ONLY IF IT COMPLETES THE TRANSFER,
             * and otherwise - keep building new chunks, and inserting them.
             */
            size_t bytesToGo = howMany;
            size_t copyFromThisGuy = min (bytesToGo, t->GetBytesCanAccommodate ());
            t->InsertAfter (&what[bytesXfered], copyFromThisGuy, chunkIdx.fOffset);

            bytesToGo -= copyFromThisGuy;
            bytesXfered += copyFromThisGuy;

            for (; bytesToGo != 0;) {
                ++chunkIdx.fChunk;
                if (chunkIdx.fChunk <= fTextChunks.size () - 1 and fTextChunks[chunkIdx.fChunk]->GetBytesCanAccommodate () >= bytesToGo) {
                    // Yippie - we're done. Just prepend here, and lets go...
                    t = fTextChunks[chunkIdx.fChunk];
                    t->InsertAfter (&what[bytesXfered], bytesToGo, 0);
                    bytesXfered += bytesToGo;
                    bytesToGo -= bytesToGo;
                    Assert (bytesToGo == 0);
                    Assert (bytesXfered == howMany);
                }
                else {
                    // Since we've filled the previous - and cannot fit in the following,
                    // we'll have to create another...
                    t = AtomicAddChunk (chunkIdx.fChunk);
                    AssertNotNull (t);
                    copyFromThisGuy = min (bytesToGo, t->GetBytesCanAccommodate ());
                    t->InsertAfter (&what[bytesXfered], copyFromThisGuy, 0);
                    bytesToGo -= copyFromThisGuy;
                    bytesXfered += copyFromThisGuy;
                }
            }
        }
    }
    catch (...) {
        // Partial-failure accounting: record however much text DID make it in, so
        // fLength and the markers stay consistent with the chunk contents.
        fLength += bytesXfered;
        AdjustMarkersForInsertAfter (after, bytesXfered);
        throw;
    }

    Assert (howMany == bytesXfered);
    fLength += bytesXfered;
    AdjustMarkersForInsertAfter (after, bytesXfered);
}
902
/*
 * Delete 'howMany' Led_tChars starting just after position 'after', walking forward chunk
 * by chunk. Empty chunks are erased as we go (SPR#0012); markers are adjusted at the end.
 */
void ChunkedArrayTextStore::DeleteAfter_ (size_t howMany, size_t after)
{
    Assert (after >= 0); // trivially true for size_t - kept as documentation of the precondition
    Assert ((after) + howMany <= GetLength ());
    Assert (howMany > 0);

    ChunkAndOffset chunkIdx = FindChunkIndex (after);
    for (size_t bytesToGo = howMany; bytesToGo != 0;) {

        TextChunk* t = fTextChunks[chunkIdx.fChunk];
        AssertNotNull (t);

        // Now try to insert what we can into this section...
        // Delete as much as this chunk holds past fOffset (or all that remains, if less).
        size_t deleteFromThisGuy = min (bytesToGo, t->GetLength () - (chunkIdx.fOffset));
        Assert (deleteFromThisGuy != 0);
        t->DeleteAfter (deleteFromThisGuy, chunkIdx.fOffset);
        bytesToGo -= deleteFromThisGuy;
        fLength -= deleteFromThisGuy;

        // For next time through the loop: subsequent deletions start at the front of the next chunk
        chunkIdx.fOffset = 0;
        chunkIdx.fChunk += 1;

        // MUST DO SOMETHING TO COALECE BLOCKS
        // Not a solution - but at LEAST we can eliminate ZERO LENGTH BLOCKS!!!
        // SPR#0012

        if (t->GetLength () == 0) {
            chunkIdx.fChunk -= 1; // so we refer to same block after removeat
            fTextChunks.erase (fTextChunks.begin () + chunkIdx.fChunk);
            delete t;
        }
    }
    AdjustMarkersForDeleteAfter (after, howMany);
}
938
/*
 * Register 'marker' (owned by 'owner') covering [lhs, lhs+length), attaching the
 * per-marker hook and inserting it into the owner's containment tree via AddMarker1.
 */
void ChunkedArrayTextStore::AddMarker (Marker* marker, size_t lhs, size_t length, MarkerOwner* owner)
{
    RequireNotNull (marker);
    RequireNotNull (owner);
#if !qVirtualBaseMixinCallDuringCTORBug
    Require (owner->PeekAtTextStore () == this);
#endif
    Require (owner == this or IndexOf (GetMarkerOwners (), owner) != kBadIndex); // new Led 2.3 requirement - not strictly required internally yet - but it will be - LGP 980416
    Require (marker->fTextStoreHook == NULL); // must not already be registered in a text store
    Require (lhs < 0x80000000);    // not real test, just sanity check
    Require (length < 0x80000000); // not real test, just sanity check
    Require (lhs + length <= fLength + 1); // perhaps shouldn't require, but current implementation DOES require this
    Invariant ();

    // The owner's hook holds the root of the containment tree (and the lookup cache)
    ChunkedArrayMarkerOwnerHook* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> (owner->fTextStoreHook);
    AssertNotNull (camoh);

#if qUseLRUCacheForRecentlyLookedUpMarkers
    camoh->ClearCache (); // new marker could invalidate cached range-lookups
#endif

    marker->fTextStoreHook = new ChunkedArrayMarkerHook ();

    //DON'T call - cuz asserts owner NON-NULL Assert (marker->GetOwner () == NULL);
    SetMarkerOwner_ (marker, owner);

    SetMarkerStart_ (marker, lhs);
    SetMarkerLength_ (marker, length);

    // true => AddMarker1 may introduce hack markers to keep the tree from getting too wide
    AddMarker1 (marker, &camoh->fRootMarker, true);

#if qKeepChunkedArrayStatistics
    ++camoh->fTotalMarkersPresent;
    camoh->fPeakTotalMarkersPresent = max (camoh->fPeakTotalMarkersPresent, camoh->fTotalMarkersPresent);
#endif
    Invariant ();
}
976
977#ifndef qKeepTrackOfChildCountAndAvoidSomePossiblyAdds
978#define qKeepTrackOfChildCountAndAvoidSomePossiblyAdds 1
979#endif
980
/*
 * Insert 'marker' into the containment tree rooted at 'insideMarker': descend into any child
 * that (strictly) Contains the new marker; if an exactly-equal-extent leaf-position is found,
 * splice 'marker' into that child's place and demote the child underneath it; otherwise append
 * to the deepest containing node's child list. 'canAddHackMarkers' permits adding synthetic
 * splitter markers afterwards to keep the child lists from growing too wide.
 */
void ChunkedArrayTextStore::AddMarker1 (Marker* marker, Marker* insideMarker, bool canAddHackMarkers)
{
    AssertNotNull (marker);
    AssertNotNull (insideMarker);
    Assert (marker != insideMarker);
    AssertNotNull (marker->GetOwner ());
    AssertNotNull (insideMarker->GetOwner ());
    Assert (QUICK_Contains (*marker, *insideMarker));
    Assert (OurStuff (marker)->fParent == NULL);
    Assert (OurStuff (marker)->fNextSubMarker == NULL); // should be properly removed from sibling list - OK if we keep SUBMARKER list!

    // NB: These are used throughout the loop, but are invariant throughout this function call
    size_t markerStart = GetMarkerStart_ (marker);
    size_t markerEnd = markerStart + GetMarkerLength_ (marker);

    /*
     * Somewhat tricky.
     *
     * The basic idea here is to build a tree of submarkers corresponding to
     * the already existent Contains() relationship. The KEY FEATURE of the
     * Contains() relationship - is that it remains INVARIANT under text insertions/
     * deletions (and the marker updates they cause). This means the tree structure
     * we build - remains valid with respect to that relationship. So - we can use
     * the tree structure to find all items in a particular range without scanning
     * all markers - just apx Log n of them (assuming we can keep the tree balanced,
     * and all notes at most k-ary where k is constant).
     */
    Marker* specificInsideMarker = insideMarker;
Again:
#if qKeepTrackOfChildCountAndAvoidSomePossiblyAdds
    size_t specificInsideMarkerChildCount = 0; // use in PossiblyAddHackMarkers optimization below
#endif
    Marker* prevMarker = NULL;
    for (Marker* curChild = OurStuff (specificInsideMarker)->fFirstSubMarker; curChild != NULL;
         (prevMarker = curChild), (curChild = OurStuff (curChild)->fNextSubMarker)) {
        Assert (marker != curChild);

#if qKeepTrackOfChildCountAndAvoidSomePossiblyAdds
        ++specificInsideMarkerChildCount;
#endif

        /*
         * Important optimization. Check if the two markers are equal, and if so, then insert right here,
         * replacing the marker (by position) and pushing him down to be one of our new children.
         *
         * The reason this is important is if we have a very tall tree with lots of markers all at the same
         * position (e.g. 2500 zero length sentence extents in LVEJ) then inserting a new one could
         * involve walking a very long linked list all the way down to the leaf of the tree. Not wrong,
         * but not worth the long walk!
         *
         * Note - no good reason to not do this for OurStuff (marker)->fFirstSubMarker == NULL - except more
         * complex case - ignore for now - LGP 950525
         */
        size_t curChildStart = GetMarkerStart_ (curChild);
        size_t curChildEnd = curChildStart + GetMarkerLength_ (curChild);
        if (markerStart == curChildStart and markerEnd == curChildEnd and OurStuff (marker)->fFirstSubMarker == NULL) {
            Assert (QUICK_Contains (*marker, *curChild)); // we could have gone deeper in contains, but why bother?

            /*
             * Set 'marker' up to point all the places curChild used to
             */
            Assert (OurStuff (marker)->fParent == NULL);
            OurStuff (marker)->fParent = OurStuff (curChild)->fParent;

            Assert (OurStuff (marker)->fFirstSubMarker == NULL);
            OurStuff (marker)->fFirstSubMarker = OurStuff (curChild)->fFirstSubMarker;

            Assert (OurStuff (marker)->fNextSubMarker == NULL);
            OurStuff (marker)->fNextSubMarker = OurStuff (curChild)->fNextSubMarker;

            // Make those who used to point to 'curChild' now point to our new marker
            if (prevMarker == NULL) {
                Assert (OurStuff (marker)->fParent == specificInsideMarker);
                Assert (OurStuff (specificInsideMarker)->fFirstSubMarker == curChild);
                OurStuff (specificInsideMarker)->fFirstSubMarker = marker;
            }
            else {
                Assert (OurStuff (prevMarker)->fNextSubMarker == curChild);
                OurStuff (prevMarker)->fNextSubMarker = marker;
            }

            // Now patch 'curChild' to now REALLY become OUR one and only child
            // (prepend curChild onto the child list 'marker' inherited from it)
            OurStuff (curChild)->fNextSubMarker = OurStuff (marker)->fFirstSubMarker;
            OurStuff (marker)->fFirstSubMarker = curChild;
            OurStuff (curChild)->fFirstSubMarker = NULL;
            OurStuff (curChild)->fParent = marker;

            // Now walk all our children and adjust thier parent pointer - since they used to point
            // to 'curChild'
            for (Marker* mm = OurStuff (marker)->fFirstSubMarker; mm != NULL; mm = OurStuff (mm)->fNextSubMarker) {
                Assert (OurStuff (mm)->fParent == curChild or (mm == curChild));
                OurStuff (mm)->fParent = marker;
            }

            goto Done;
        }

        Assert (QUICK_Contains (*marker, *curChild) == QUICK_Contains (markerStart, markerEnd, curChildStart, curChildEnd));
        if (QUICK_Contains (markerStart, markerEnd, curChildStart, curChildEnd)) {
            // Descend: the new marker fits entirely inside this child
            specificInsideMarker = curChild;
            goto Again;
        }
    }

#if qKeepTrackOfChildCountAndAvoidSomePossiblyAdds
    if (specificInsideMarkerChildCount < kEnufChildrenToApplyHackMarkers) {
        canAddHackMarkers = false; // list still narrow enough - skip the PossiblyAddHackMarkers work
    }
#endif

    /*
     * If we got here - then the marker we are adding was not strictly contained
     * inside any of our existing submarkers. We must just add it to our list - for
     * now - maybe later - adding hack markers to help create depth in the tree.
     */
    OurStuff (specificInsideMarker)->AddToChildList (marker);
    Assert (OurStuff (marker)->fParent == NULL);
    OurStuff (marker)->fParent = specificInsideMarker;

Done:
    if (canAddHackMarkers) {
        PossiblyAddHackMarkers (specificInsideMarker);
    }
}
1105
/*
 * If 'insideMarker' has grown too many direct children, split its extent in half with two
 * synthetic "hack" markers and push any child wholly contained in either half down into it -
 * adding depth to the tree so range lookups stay ~log n instead of scanning a wide list.
 */
void ChunkedArrayTextStore::PossiblyAddHackMarkers (Marker* insideMarker)
{
    /*
     * No point in introducing hack markers unless we have any chance of reducing the breadth, and no chance of
     * that if we don't have an inside marker at least two wide!!!
     */
    if (GetMarkerLength_ (insideMarker) >= 2 and OurStuff (insideMarker)->CountChildrenMoreThan (kEnufChildrenToApplyHackMarkers)) {
        size_t insideMarkerStart = GetMarkerStart_ (insideMarker);
        size_t insideMarkerEnd = insideMarkerStart + GetMarkerLength_ (insideMarker);
        size_t insideMarkerLength = insideMarkerEnd - insideMarkerStart;

        /*
         * Add two hack markers splitting the current cell.
         */
        Marker* lhs = NULL;
        Marker* rhs = NULL;

        try {
            // lhs covers the first half; rhs covers the remainder (handles odd lengths)
            lhs = AddHackMarkerHelper_ (insideMarker, insideMarkerStart, insideMarkerLength / 2);
            size_t rhsStart = insideMarkerStart + insideMarkerLength / 2;
            rhs = AddHackMarkerHelper_ (insideMarker, rhsStart, insideMarkerEnd - rhsStart);

            // Now, walk through the list of markers, and see if any of the existing items can be move
            // into the hack-markers...
            Assert (OurStuff (insideMarker)->CountChildren () > 2);
            Marker* prevMarker = NULL;
            for (Marker* curMarker = OurStuff (insideMarker)->fFirstSubMarker; curMarker != NULL;) {
                Marker* nextMarker = OurStuff (curMarker)->fNextSubMarker; // grab before any unlinking below
                if (curMarker != lhs and curMarker != rhs) { // skip hack markers
                    DISABLE_COMPILER_MSC_WARNING_START (6011)
                    if (QUICK_Contains (*curMarker, *lhs)) {
                        AssertNotNull (prevMarker); // cuz two hack markers come first
                        // Now remove old, and re-call add marker
                        OurStuff (prevMarker)->fNextSubMarker = OurStuff (curMarker)->fNextSubMarker;
                        OurStuff (curMarker)->fNextSubMarker = NULL;
                        Assert (OurStuff (curMarker)->fParent == insideMarker);
                        OurStuff (curMarker)->fParent = NULL;
                        AddMarker1 (curMarker, lhs, false); // false: don't recursively add more hack markers
                        // don't reset prev, but make cur now be next
                        curMarker = nextMarker;
                        continue;
                    }
                    else if (QUICK_Contains (*curMarker, *rhs)) {
                        AssertNotNull (prevMarker); // cuz two hack markers come first
                        // Now remove old, and re-call add marker
                        OurStuff (prevMarker)->fNextSubMarker = OurStuff (curMarker)->fNextSubMarker;
                        OurStuff (curMarker)->fNextSubMarker = NULL;
                        Assert (OurStuff (curMarker)->fParent == insideMarker);
                        OurStuff (curMarker)->fParent = NULL;
                        AddMarker1 (curMarker, rhs, false);
                        // don't reset prev, but make cur now be next
                        curMarker = nextMarker;
                        continue;
                    }
                    DISABLE_COMPILER_MSC_WARNING_END (6011)
                }
                prevMarker = curMarker;
                curMarker = nextMarker;
            }
            // If a half ended up with <= 1 child the split bought nothing - undo it
            LoseIfUselessHackMarker (lhs);
            LoseIfUselessHackMarker (rhs);
        }
        catch (...) {
            // MUST BE MORE CAREFUL HOW WE DELETE HACK MARKERS!!!
            // THIS CODE IS CURRENTLY UNSAFE
            Assert (false); // so when debugging at least we get a hint as to what went wrong... LGP 960313
            // delete lhs;
            // delete rhs;

            // NOTE: WE DON'T RE-THROW here!!!
            // Simply eat the exception. No big deal if no memory to add a hack-marker.
        }
    }
}
1180
1181void ChunkedArrayTextStore::RemoveMarkers (Marker* const markerArray[], size_t markerCount)
1182{
1183#if qUseLRUCacheForRecentlyLookedUpMarkers
1184 // could only do this for affected markers, but we want to avoid redundant clearcache calls - and this is simpler...
1185 // Probably no real performance harm...
1186 for (vector<MarkerOwner*>::const_iterator i = GetMarkerOwners ().begin (); i != GetMarkerOwners ().end (); ++i) {
1187 GetAMOH (*i)->ClearCache ();
1188 }
1189#endif
1190 Assert (markerCount == 0 or markerArray != NULL);
1191
1192 /*
1193 * This is a fairly primitive implementation, and I can easily imagine doing much better.
1194 * But this is so simple, and runs at least 20x faster than the old code (for 500K delete
1195 * all text test case). It seems fast enough for now - LGP 950416.
1196 */
1197 for (size_t i = 0; i < markerCount; ++i) {
1198 Assert (not OurStuff (markerArray[i])->fIsHackMarker);
1199 OurStuff (markerArray[i])->fIsHackMarker = true;
1200 OurStuff (markerArray[i])->fIsDeletedMarker = true;
1201#if qKeepChunkedArrayStatistics
1202 Assert (GetAMOH (markerArray[i])->fTotalMarkersPresent >= 1);
1203 GetAMOH (markerArray[i])->fTotalMarkersPresent -= 1;
1204#endif
1205 }
1206
1207 for (size_t i = 0; i < markerCount; ++i) {
1208 Marker* marker = markerArray[i];
1209 if (marker->fTextStoreHook != NULL) { // if its NULL - that means its already been destroyed as a
1210 // hack marker in a previous lap through the loop
1211 Assert (OurStuff (markerArray[i])->fIsHackMarker);
1212 RemoveMarker1 (marker);
1213 SetMarkerOwner_ (marker, NULL);
1214 AssertNotNull (marker->fTextStoreHook);
1215 delete ((ChunkedArrayMarkerHook*)marker->fTextStoreHook);
1216 marker->fTextStoreHook = NULL; // set to NULL so if we encounter it again, we won't try to delete it
1217 }
1218 }
1219}
1220
1221void ChunkedArrayTextStore::PreRemoveMarker (Marker* marker)
1222{
1223 RequireNotNull (marker);
1224 Require (not OurStuff (marker)->fIsPreRemoved);
1225 OurStuff (marker)->fIsPreRemoved = true;
1226}
1227
/*
 * Unlink 'marker' from the containment tree: splice it out of its parent's child list, re-add
 * each of its children to the parent, then clean up any hack-marker subtrees left useless or
 * empty by the removal (both among the parent's children and walking UP the parent chain).
 */
void ChunkedArrayTextStore::RemoveMarker1 (Marker* marker)
{
    AssertNotNull (marker);
    AssertNotNull (OurStuff (marker)->fParent);

    /*
     * In order to remove an element from a tree, we must simply find out parent, and our
     * leftmost sibling, in order to remove ourselves.
     *
     * However, the other big thing todo is that each of our children must get redistributed
     * through the tree.
     */
    Marker* parent = OurStuff (marker)->fParent;
    Marker* ourLeftSibling = OurStuff (parent)->fFirstSubMarker;
    if (ourLeftSibling == marker) {
        ourLeftSibling = NULL; // we are the first child - no left sibling
    }
    else {
        AssertNotNull (ourLeftSibling);
        // linear scan of the sibling list til we find the node just before us
        for (; OurStuff (ourLeftSibling)->fNextSubMarker != marker; ourLeftSibling = OurStuff (ourLeftSibling)->fNextSubMarker) {
            AssertNotNull (ourLeftSibling); // cuz we must be in our parents list someplace before the end!
        }
    }
    Assert (ourLeftSibling == NULL or OurStuff (ourLeftSibling)->fNextSubMarker == marker);

    /*
     * Good. At this point we have our parent and our previous marker, so we can remove ourselves
     * easily. But first some hair.
     */
    if (ourLeftSibling == NULL) {
        Assert (OurStuff (parent)->fFirstSubMarker == marker);
        OurStuff (parent)->fFirstSubMarker = OurStuff (marker)->fNextSubMarker;
    }
    else {
        Assert (OurStuff (ourLeftSibling)->fNextSubMarker == marker);
        OurStuff (ourLeftSibling)->fNextSubMarker = OurStuff (marker)->fNextSubMarker;
    }
    OurStuff (marker)->fNextSubMarker = NULL;
    Assert (OurStuff (marker)->fParent == parent);
    OurStuff (marker)->fParent = NULL;

    /*
     * Now - if that marker had any submarkers - those must all be moved up to the
     * parent. This should probably be done in such a way as to redistribute them -
     * but well do it the quick way for now...
     */
    size_t moveCount = 0;
    for (Marker* markerToMove = OurStuff (marker)->fFirstSubMarker; markerToMove != NULL;) {
        // first find who will be next marker in linked list since the below add will
        // blow our next ptr away
        Marker* nextMarkerToMove = OurStuff (markerToMove)->fNextSubMarker;
        OurStuff (markerToMove)->fNextSubMarker = NULL;
        OurStuff (markerToMove)->fParent = NULL;
        AddMarker1 (markerToMove, parent, false);
        markerToMove = nextMarkerToMove;
        ++moveCount;
    }
    if (moveCount > 1) {
        // parent's child list may have widened - give it a chance to re-split
        PossiblyAddHackMarkers (parent);
    }

    /*
     * Now check and see if the removeall of from parent has created any empty hack trees.
     *
     * <NOTE - THIS CAN PROBABLY BE DONE BETTER - WE CAN CHECK BASED ON IF WE RMEOVED/MOVED HACK MARKJERS AND AVOID THIS TEST>
     * LGP 950525
     */
    for (Marker* hmi = OurStuff (parent)->fFirstSubMarker; hmi != NULL;) {
        ChunkedArrayMarkerHook* hmiStuff = OurStuff (hmi);
        Marker* nexthmi = hmiStuff->fNextSubMarker; // save this away before FreeHackTree call so we don't lose anything!
        if (OurStuff (hmi)->fIsHackMarker) {
            if (AllHackMarkers (hmi)) {
                GetAMOH (hmi)->FreeHackTree (hmi);
            }
            else {
                LoseIfUselessHackMarker (hmi);
            }
        }
        hmi = nexthmi;
    }

    // FIX FOR LONGSTANDING BUG CLEANUPING HACKMARKERS???
    {
        // really must do while loop walking UP parent tree
        for (Marker* hmi = parent; OurStuff (hmi)->fIsHackMarker;) {
            Marker* nextUp = OurStuff (hmi)->fParent;
            if (AllHackMarkers (hmi)) {
                AssertNotNull (nextUp); // cuz top marker is fRootMarker - and thats not a hack marker!
                GetAMOH (hmi)->FreeHackTree (hmi);
                hmi = nextUp;
            }
            else {
                break;
            }
        }
    }
}
1325
/*
 * Allocate a synthetic HackMarker covering [start, start+length) and attach it as a child of
 * 'insideMarker'. Returns the new marker; on failure the partially-built marker is destroyed
 * and the exception re-thrown.
 */
Marker* ChunkedArrayTextStore::AddHackMarkerHelper_ (Marker* insideMarker, size_t start, size_t length)
{
    HackMarker* marker = new HackMarker ();

    AssertNotNull (insideMarker);

    try {
        Assert (marker->fTextStoreHook == NULL);
        marker->fTextStoreHook = new ChunkedArrayMarkerHook ();
        Assert (OurStuff (marker)->fParent == NULL);
        OurStuff (marker)->fParent = insideMarker;
        SetMarkerOwner_ (marker, OurStuff (insideMarker)->fOwner); // owner must be same as that of 'insideMarker' - so that all the caching/tree/stats etc kept coherent...
        SetMarkerStart_ (marker, start);
        SetMarkerLength_ (marker, length);
        OurStuff (insideMarker)->AddToChildList (marker);

        Assert (not OurStuff (marker)->fIsHackMarker);
        OurStuff (marker)->fIsHackMarker = true;
#if qKeepChunkedArrayStatistics
        ++GetAMOH (insideMarker)->fTotalHackMarkersAlloced;
        ++GetAMOH (insideMarker)->fTotalHackMarkersPresent;
        GetAMOH (insideMarker)->fPeakHackMarkersPresent =
            max (GetAMOH (insideMarker)->fPeakHackMarkersPresent, GetAMOH (insideMarker)->fTotalHackMarkersPresent);
#endif
    }
    catch (...) {
        // this is safe cuz only point we can throw is 'new ChunkedArrayMarkerHook ()', and at that point
        // nobody refers to our marker.
        delete marker;
        throw;
    }
    return (marker);
}
1359
/*
 * If the given hack marker has zero or one children it adds no depth to the tree, so remove
 * it - first hoisting a lone child up into the hack marker's parent, then freeing the marker.
 */
void ChunkedArrayTextStore::LoseIfUselessHackMarker (Marker* potentiallyUselessHackMarker)
{
    AssertNotNull (potentiallyUselessHackMarker);
    Assert (OurStuff (potentiallyUselessHackMarker)->fIsHackMarker);

    Marker* firstChild = OurStuff (potentiallyUselessHackMarker)->fFirstSubMarker;

    // "useless" == zero children, or exactly one (next-of-first is NULL)
    bool truelyUseless = (firstChild == NULL) or (OurStuff (firstChild)->fNextSubMarker == NULL);

    /*
     * Any hack marker with zero or one children is serving no purpose. So remove them.
     *
     * Here we first check for the harder case of one child. In that case we must
     * move the markers children up into our parent right where we are.
     */
    if (firstChild != NULL and OurStuff (firstChild)->fNextSubMarker == NULL) {
        Assert (truelyUseless);
        Marker* goodParent = OurStuff (potentiallyUselessHackMarker)->fParent;
        Assert (OurStuff (firstChild)->fParent == potentiallyUselessHackMarker);
        OurStuff (firstChild)->fParent = goodParent;
        // prepend the orphaned child onto the parent's child list
        OurStuff (firstChild)->fNextSubMarker = OurStuff (goodParent)->fFirstSubMarker;
        OurStuff (goodParent)->fFirstSubMarker = firstChild;
        OurStuff (potentiallyUselessHackMarker)->fFirstSubMarker = NULL; // avoid deleting subtree in FreeHackTree () below
    }

    // Now we are left with a truelyUseless marker, with no children, so just remove it from parent, and dispose of it.
    if (truelyUseless) {
        GetAMOH (potentiallyUselessHackMarker)->FreeHackTree (potentiallyUselessHackMarker);
    }
}
1390
1391void ChunkedArrayTextStore::SetMarkerRange (Marker* marker, size_t start, size_t end) noexcept
1392{
1393 RequireNotNull (marker);
1394#if qUseLRUCacheForRecentlyLookedUpMarkers
1395 GetAMOH (marker)->ClearCache ();
1396#endif
1397 /*
1398 * NB: There is still alot of room to improve this routine's efficency, by
1399 * being more clever about how we move the marker and its children when we no
1400 * longer fit. If we are growing marker - then the children still fit, and can be
1401 * moved with the marker. If they DON'T fit, we can move then directly up to our parent,
1402 * and then move the marker. And we can inline make decisions here about whether to
1403 * add/remove hack markers, and where (maybe replace ourselves with a hack marker instead
1404 * of moving children up to parent? - yes - probably a VERY good idea!)
1405 *
1406 * These optimizations can - and probably will - wait til after the 1.0 release - LGP 950527.
1407 *
1408 */
1409 Assert (start >= 0);
1410 Assert (end >= 0);
1411 Assert (start <= end);
1412 AssertNotNull (marker);
1413
1414 size_t len = end - start;
1415
1416 // changing the start, or end may force a re-ordering...
1417 if (GetMarkerStart_ (marker) != start or GetMarkerLength_ (marker) != len) {
1418 /*
1419 * If this marker is still contained in its parent - no need to remove it.
1420 * Furthermore - if all its children remain in it - no need to move them up.
1421 * But the worst case is - remove marker - and re-add it.
1422 */
1423 Marker* parent = OurStuff (marker)->fParent;
1424 AssertNotNull (parent);
1425 Assert (QUICK_Contains (*marker, *parent));
1426 if (Contains (start, end, *parent)) {
1428 // before we re-adjust anything. - make sure all is well...
1429 for (Marker* mi = OurStuff (marker)->fFirstSubMarker; mi != NULL; mi = OurStuff (mi)->fNextSubMarker) {
1430 Assert (QUICK_Contains (*mi, *marker));
1431 }
1432 }
1433
1434 SetMarkerStart_ (marker, start);
1435 SetMarkerLength_ (marker, len);
1436
1437 /*
1438 * Now check each child - and if it is still contained in the new range - leave it -
1439 * otherwise - remove it, and move it to the parent.
1440 */
1441 Marker* prevMarker = NULL;
1442 for (Marker* mi = OurStuff (marker)->fFirstSubMarker; mi != NULL;) {
1443 Marker* nextMarker = OurStuff (mi)->fNextSubMarker;
1444 if (not QUICK_Contains (*mi, *marker)) { // cuz we just patched our start/size
1445 if (prevMarker == NULL) {
1446 Assert (OurStuff (marker)->fFirstSubMarker == mi);
1447 OurStuff (marker)->fFirstSubMarker = OurStuff (mi)->fNextSubMarker;
1448 }
1449 else {
1450 Assert (OurStuff (prevMarker)->fNextSubMarker == mi);
1451 OurStuff (prevMarker)->fNextSubMarker = nextMarker;
1452 }
1453 OurStuff (mi)->fNextSubMarker = NULL;
1454 OurStuff (mi)->fParent = NULL;
1455 AddMarker1 (mi, parent, false); // don't bother with adding hack markers - possibly might help but unlikely since we are going
1456 // into same list we started in (though possibly with different size)
1457 }
1458 else {
1459 prevMarker = mi;
1460 }
1461 mi = nextMarker;
1462 }
1463 }
1464 else {
1465 MarkerOwner* owner = marker->GetOwner ();
1466 RemoveMarker (marker);
1467 AddMarker (marker, start, len, owner);
1468 }
1469 }
1470}
1471
// One saved "activation record" for CollectAllMarkersInRangeInto_Helper_MO's hand-unwound
// recursion: the node being descended from, and the child iterator position to resume at.
struct StackContext {
    const Marker* saved_belowHere; // the 'belowHere' formal argument at push time
    Marker* saved_mi;              // loop cursor into belowHere's child list at push time
};
/*
 * Append to 'output' every marker overlapping [from, to) - for one given owner, or (with
 * kAnyMarkerOwner) for every registered owner. Small lookups (range <= 3) may be answered
 * from, or recorded into, the per-owner LRU cache.
 */
void ChunkedArrayTextStore::CollectAllMarkersInRangeInto (size_t from, size_t to, const MarkerOwner* owner, MarkerSink& output) const
{
    RequireNotNull (owner); // though it can be TextStore::kAnyMarkerOwner.
    Require (from <= to);

    /*
     * Please pardon the complexity of this routine. It was once written simply and clearly as a recursive function.
     * But it was a central bottleneck to Led's speed. So the recursion was unwound, and changed to use an
     * explicit, inline stack -- purely for speed.
     *
     * Similarly for this LRUCache code (SPR#0652).
     */
    if (owner == kAnyMarkerOwner) {
        // fan out over every owner's tree
        for (vector<MarkerOwner*>::const_iterator i = GetMarkerOwners ().begin (); i != GetMarkerOwners ().end (); ++i) {
            CollectAllMarkersInRangeInto_Helper_MO (from, to, *i, output);
        }
    }
    else {
#if qUseLRUCacheForRecentlyLookedUpMarkers
        ChunkedArrayMarkerOwnerHook* camoh = GetAMOH (owner);
        CollectLookupCacheElt* cacheItem = camoh->LookupElement (CollectLookupCacheElt::COMPARE_ITEM (from, to));
        if (cacheItem != NULL) {
            // cache hit - replay the cached marker list (skipping any since pre-removed)
            for (vector<Marker*>::const_iterator i = cacheItem->fMarkers.begin (); i != cacheItem->fMarkers.end (); ++i) {
                if (not OurStuff (*i)->fIsPreRemoved) {
                    output.Append (*i);
                }
            }
            return;
        }
        if (cacheItem == NULL and (to - from) <= 3) { // only try to cache small guys (but at least size 3 cuz of _Surrounding() calls...
            cacheItem = camoh->AddNew (CollectLookupCacheElt::COMPARE_ITEM (from, to));
            cacheItem->fFrom = from;
            cacheItem->fTo = to;
            cacheItem->fMarkers.clear ();
            // helper fills BOTH the output sink and the new cache entry
            CollectAllMarkersInRangeInto_Helper_MO (from, to, owner, output, cacheItem);
            return;
        }
#endif
        CollectAllMarkersInRangeInto_Helper_MO (from, to, owner, output);
    }
}
1517
1518void ChunkedArrayTextStore::CollectAllMarkersInRangeInto_Helper_MO (size_t from, size_t to, const MarkerOwner* owner, MarkerSink& output
1519#if qUseLRUCacheForRecentlyLookedUpMarkers
1520 ,
1521 CollectLookupCacheElt* fillingCache
1522#endif
1523) const
1524{
1525 ChunkedArrayMarkerOwnerHook* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> (owner->fTextStoreHook);
1526 AssertNotNull (camoh);
1527 const Marker* belowHere = &camoh->fRootMarker;
1528 size_t stackDepth = 0;
1530
1531RoutineTop:
1532 AssertNotNull (belowHere);
1533 Marker* mi = OurStuff (belowHere)->fFirstSubMarker;
1534 ChunkedArrayMarkerHook* mio = mi == NULL ? NULL : OurStuff (mi); // be SURE to always keep 'mio' in sync with 'mi'
1535 for (; mi != NULL; (mi = mio->fNextSubMarker), (mio = mi == NULL ? NULL : OurStuff (mi))) {
1536 AssertNotNull (mi);
1537 AssertNotNull (mio);
1538 Assert (mio == OurStuff (mi));
1539 Assert (QUICK_Overlap (mio, from, to) == QUICK_Overlap (*mi, from, to));
1540 if (QUICK_Overlap (mio, from, to)) {
1541 if (not mio->fIsHackMarker and not mio->fIsPreRemoved) {
1542#if qUseLRUCacheForRecentlyLookedUpMarkers
1543 if (fillingCache != NULL) {
1544 fillingCache->fMarkers.push_back (mi);
1545 }
1546#endif
1547 output.Append (mi);
1548 }
1549 // Save args, then push new args
1550 stack.GrowToSize (stackDepth + 1);
1551 stack[stackDepth].saved_belowHere = belowHere;
1552 stack[stackDepth].saved_mi = mi;
1553 ++stackDepth;
1554 belowHere = mi; // bind formal arg
1555 goto RoutineTop;
1556 AfterCallPoint:
1557 Assert (stackDepth > 0);
1558 --stackDepth;
1559 belowHere = stack[stackDepth].saved_belowHere;
1560 mi = stack[stackDepth].saved_mi;
1561 AssertNotNull (mi);
1562 mio = OurStuff (mi);
1563 }
1564 }
1565 if (stackDepth != 0) {
1566 // pop the stack and continue in the loop
1567 goto AfterCallPoint;
1568 }
1569}
1570
1571ChunkedArrayTextStore::ChunkAndOffset ChunkedArrayTextStore::FindChunkIndex_ (size_t charPos) const
1572{
1573 Assert (charPos >= 0);
1574 Assert (charPos <= GetEnd ());
1575 size_t totalChunks = fTextChunks.size ();
1576 size_t bytePosSoFar = 0;
1577
1578 /*
1579 * A note of caution about the cahing of the indexes here. We currently never bother
1580 * invalidating the fCachedChunkIndexesOffset/fCachedChunkIndex because the only
1581 * way we can get in trouble with them is if somebody inserts stuff or deletes stuff
1582 * BEFORE our cached indexes. But all the code that does the insert/delete calls this
1583 * routine first, and only modifies stuff AFTER the ChunkAndOffset () we return - this
1584 * not affecting our cache (since we never return an answer < our cached value).
1585 *
1586 * This is a somewhat fragile strategy - and care should be taken in modifying code
1587 * which might change this assumption. Of course - if there is a change that breaks this-
1588 * all that needs to be done is to call the invalidate routine.
1589 */
1590 size_t startAt = 0;
1591 if (charPos >= fCachedChunkIndexesOffset) {
1592 startAt = fCachedChunkIndex;
1593 Assert (fCachedChunkIndexesOffset >= 0);
1594 bytePosSoFar = fCachedChunkIndexesOffset;
1595 }
1596 for (size_t i = startAt; i < totalChunks; ++i) {
1597 AssertNotNull (fTextChunks[i]);
1598 size_t curChunkSize = fTextChunks[i]->GetLength ();
1599 if (bytePosSoFar + curChunkSize > charPos) {
1600 Assert (charPos >= bytePosSoFar + 0);
1601 // set new cache value
1602 fCachedChunkIndex = i;
1603 fCachedChunkIndexesOffset = bytePosSoFar + 0;
1604 return (ChunkAndOffset (i, charPos - bytePosSoFar));
1605 }
1606 bytePosSoFar += curChunkSize;
1607 }
1608 return (ChunkAndOffset (totalChunks, 0)); // we might be appending - so this is OK.. Refer to non-existent chunk
1609}
1610
1611void ChunkedArrayTextStore::AdjustMarkersForInsertAfter (size_t after, size_t howMany)
1612{
1613 for (auto i = GetMarkerOwners ().begin (); i != GetMarkerOwners ().end (); ++i) {
1614 ChunkedArrayMarkerOwnerHook* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> ((*i)->fTextStoreHook);
1615 AssertNotNull (camoh);
1616 AdjustMarkersForInsertAfter1 (after, howMany, &camoh->fRootMarker);
1617 }
1618}
1619
/*
 * Adjust 'startAt' and (recursively) its subtree for an insertion of 'howMany' chars after
 * position 'after': markers starting past the insertion shift right; markers spanning it grow;
 * markers ending at/before it (and hence all their contained children) are untouched. The
 * recursion is hand-unwound onto an explicit StackBuffer stack.
 */
void ChunkedArrayTextStore::AdjustMarkersForInsertAfter1 (size_t after, size_t howMany, Marker* startAt)
{
    AssertNotNull (startAt);

    // Try unwinding tail recusion using explicit stack
    size_t stackDepth = 0;
    StackBuffer<Marker*> stack{0};

    Marker* mi = NULL; // declared up here instead of down below to avoid MSVC 2.1 compiler bug.
    // LGP 950527

RoutineTop:
    /*
     * Adjust THIS marker.
     */
    {
        size_t start = GetMarkerStart_ (startAt);
        size_t len = GetMarkerLength_ (startAt);
        size_t end = start + len;
        if (after < start) {
            // insertion strictly before us: slide right, keep length
            SetMarkerStart_ (startAt, start + howMany);
        }
        else if (after >= start and after < end) {
            // insertion inside us: grow to absorb the new text
            SetMarkerLength_ (startAt, len + howMany);
        }
        else {
            Assert (after >= end); // then we don't need to adjust any of our children either!
            if (stackDepth == 0) {
                return;
            }
            else {
                goto AfterCallPoint; // "return" into the caller's child loop
            }
        }
    }

    /*
     * Adjust its children...
     */
    for (mi = OurStuff (startAt)->fFirstSubMarker; mi != NULL; mi = OurStuff (mi)->fNextSubMarker) {
        // Save args, then push new args
        stack.GrowToSize (stackDepth + 1);
        stack[stackDepth] = mi;
        ++stackDepth;
        startAt = mi; // bind formal arg
        goto RoutineTop;
    AfterCallPoint:
        Assert (stackDepth > 0);
        --stackDepth;
        // only 'mi' needs restoring here: the loop increment follows mi's sibling chain,
        // and 'startAt' is not read again after the simulated call returns
        mi = stack[stackDepth];
    }
    if (stackDepth != 0) {
        // pop the stack and continue in the loop
        goto AfterCallPoint;
    }
}
1676
1677void ChunkedArrayTextStore::AdjustMarkersForDeleteAfter (size_t after, size_t howMany)
1678{
1679 for (auto i = GetMarkerOwners ().begin (); i != GetMarkerOwners ().end (); ++i) {
1680 ChunkedArrayMarkerOwnerHook* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> ((*i)->fTextStoreHook);
1681 AssertNotNull (camoh);
1682 AdjustMarkersForDeleteAfter1 (after, howMany, &camoh->fRootMarker);
1683 }
1684}
1685
1686void ChunkedArrayTextStore::AdjustMarkersForDeleteAfter1 (size_t after, size_t howMany, Marker* startAt)
1687{
1688 AssertNotNull (startAt);
1689
1690 // Try unwinding tail recusion using explicit stack
1691 size_t stackDepth = 0;
1692 StackBuffer<Marker*> stack{0};
1693
1694 Marker* mi = NULL; // declared up here instead of down below to avoid MSVC 2.1 compiler bug.
1695// LGP 950527
1696RoutineTop:
1697 /*
1698 * Adjust THIS marker.
1699 */
1700 {
1701 size_t start = GetMarkerStart_ (startAt);
1702 size_t len = GetMarkerLength_ (startAt);
1703 size_t end = start + len;
1704 if (after < start) {
1705 // size_t newStart = start;
1706 if (howMany + after <= start) {
1707 Assert (start >= howMany + 0);
1708 SetMarkerStart_ (startAt, start - howMany);
1709 }
1710 else {
1711 Assert (howMany > (start - after));
1712 size_t deleteNCharsOffFront = howMany - (start - after);
1713 size_t moveFront = howMany - deleteNCharsOffFront;
1714 SetMarkerStart_ (startAt, start - moveFront);
1715 /*
1716 * Note when the whole extent is deleted - we simply pin the size to zero.
1717 */
1718 SetMarkerLength_ (startAt, (len > deleteNCharsOffFront) ? (len - deleteNCharsOffFront) : 0);
1719 }
1720 }
1721 else if (after >= start and after < end) {
1722 size_t newEnd = end;
1723 if (end - after < howMany) {
1724 newEnd = after;
1725 }
1726 else {
1727 newEnd -= howMany;
1728 }
1729 Assert (newEnd >= start);
1730 size_t newLen = newEnd - start;
1731 SetMarkerLength_ (startAt, newLen);
1732 }
1733 else {
1734 Assert (after >= end); // then we don't need to adjust any of our children either!
1735 if (stackDepth == 0) {
1736 return;
1737 }
1738 else {
1739 goto AfterCallPoint;
1740 }
1741 }
1742 }
1743
1744 /*
1745 * Adjust its children...
1746 */
1747 for (mi = OurStuff (startAt)->fFirstSubMarker; mi != NULL; mi = OurStuff (mi)->fNextSubMarker) {
1748 // Save args, then push new args
1749 stack.GrowToSize (stackDepth + 1);
1750 stack[stackDepth] = mi;
1751 ++stackDepth;
1752 startAt = mi; // bind formal arg
1753 goto RoutineTop;
1754 AfterCallPoint:
1755 Assert (stackDepth > 0);
1756 --stackDepth;
1757 mi = stack[stackDepth];
1758 }
1759
1760 if (stackDepth != 0) {
1761 // pop the stack and continue in the loop
1762 goto AfterCallPoint;
1763 }
1764}
1765
1766bool ChunkedArrayTextStore::AllSubMarkersAreHackMarkerTrees (const Marker* m)
1767{
1768 AssertNotNull (m);
1769 Assert (OurStuff (m)->fIsHackMarker);
1770
1771 // Try unwinding tail recusion using explicit stack
1772 size_t stackDepth = 0;
1773 StackBuffer<Marker*> stack{0};
1774
1775RoutineTop:
1776 // Do in TWO loops rather than one simply because it is much faster to check the tree
1777 // breadth-wise than depth-wise (no recursion/function call) so probably faster to check
1778 // this way (assuming we get negative answers) even though we visit each node twice (assuming
1779 // we end up with a yes
1780 for (Marker* mi = OurStuff (m)->fFirstSubMarker; mi != NULL;) {
1781 ChunkedArrayMarkerHook* misStuff = OurStuff (mi);
1782 Assert (misStuff->fParent == m);
1783 if (not misStuff->fIsHackMarker) {
1784 return false;
1785 }
1786 mi = misStuff->fNextSubMarker;
1787 }
1788
1789 Marker* mi = OurStuff (m)->fFirstSubMarker;
1790 for (; mi != NULL; mi = OurStuff (mi)->fNextSubMarker) {
1791 Assert (OurStuff (mi)->fIsHackMarker);
1792 // Save args, then push new args
1793 stack.GrowToSize (stackDepth + 1);
1794 stack[stackDepth] = mi;
1795 ++stackDepth;
1796 m = mi; // bind formal arg
1797 goto RoutineTop;
1798 AfterCallPoint:
1799 Assert (stackDepth > 0);
1800 --stackDepth;
1801 mi = stack[stackDepth];
1802 }
1803
1804 if (stackDepth != 0) {
1805 // pop the stack and continue in the loop
1806 goto AfterCallPoint;
1807 }
1808 return (true);
1809}
1810
#if qStroika_Foundation_Debug_AssertionsChecked
void ChunkedArrayTextStore::Invariant_ () const
{
    // Run the base-class checks first, then verify each marker owner's entire marker tree.
    TextStore::Invariant_ ();
    for (auto mo : GetMarkerOwners ()) {
        auto* camoh = dynamic_cast<ChunkedArrayMarkerOwnerHook*> (mo->fTextStoreHook);
        AssertNotNull (camoh);
        WalkSubTreeAndCheckInvariants (&camoh->fRootMarker);
    }
}
#endif
1822
#if qStroika_Foundation_Debug_AssertionsChecked
void ChunkedArrayTextStore::WalkSubTreeAndCheckInvariants (const Marker* m) const
{
    // Recursively verify the subtree rooted at 'm': every child must point back to its
    // parent, and every child's extent must be contained within the parent's.
    AssertNotNull (m);
    AssertNotNull (OurStuff (m));
    const Marker* child = OurStuff (m)->fFirstSubMarker;
    while (child != nullptr) {
        AssertNotNull (child);
        AssertNotNull (OurStuff (child));
        Assert (OurStuff (child)->fParent == m);
        Assert (QUICK_Contains (*child, *m));
#if qHeavyMarkerDebugging
        Assert (not OurStuff (child)->fIsHackMarker or OurStuff (child)->fFirstSubMarker != NULL); // leaf hacks should be deleted!
#endif
        WalkSubTreeAndCheckInvariants (child);
        child = OurStuff (child)->fNextSubMarker;
    }
}
#endif
#define AssertNotNull(p)
Definition Assertions.h:333
#define EnsureNotNull(p)
Definition Assertions.h:340
#define qStroika_Foundation_Debug_AssertionsChecked
The qStroika_Foundation_Debug_AssertionsChecked flag determines if assertions are checked and validat...
Definition Assertions.h:48
#define RequireNotNull(p)
Definition Assertions.h:347
#define AssertMember(p, c)
Definition Assertions.h:312
conditional_t< qStroika_Foundation_Memory_PreferBlockAllocation and andTrueCheck, BlockAllocationUseHelper< T >, Common::Empty > UseBlockAllocationIfAppropriate
Use this to enable block allocation for a particular class. Beware of subclassing.
LRUCache implements a simple least-recently-used caching strategy, with optional hashing (of keys) to...
Definition LRUCache.h:94
Logically halfway between std::array and std::vector; Smart 'direct memory array' - which when needed...