Stroika Library 3.0d18
 
SharedMemoryStream.inl
/*
 * Copyright(c) Sophist Solutions, Inc. 1990-2025. All rights reserved
 */
#include <mutex>

#include "Stroika/Foundation/Containers/Support/ReserveTweaks.h"
#include "Stroika/Foundation/Execution/NullMutex.h"
#include "Stroika/Foundation/Execution/WaitableEvent.h"

namespace Stroika::Foundation::Streams::SharedMemoryStream {

    namespace Private_ {
        template <typename ELEMENT_TYPE>
        class IRep_ : public InputOutputStream::IRep<ELEMENT_TYPE> {
        public:
            virtual Options GetOptions () const = 0;
            virtual vector<ELEMENT_TYPE> AsVector () const = 0;
            virtual string AsString () const = 0;
        };
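        // Note: AsVector/AsString return whole-buffer snapshots; Ptr<ELEMENT_TYPE>::As<T> () (below) forwards to them.
        // The unseekable rep returns empty results from both (presumably because already-consumed data may have been discarded).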
        template <typename ELEMENT_TYPE, typename LOCK_IMPL>
        class SeekableRep_ : public IRep_<ELEMENT_TYPE> {
        public:
            using ElementType = ELEMENT_TYPE;

        private:
            bool fIsOpenForRead_{true};

        private:
            static constexpr bool kLocking_ = same_as<LOCK_IMPL, recursive_mutex>;

        public:
            SeekableRep_ ()
                : fReadCursor_{fData_.begin ()}
                , fWriteCursor_{fData_.begin ()}
            {
            }
            SeekableRep_ (const SeekableRep_&) = delete;
            ~SeekableRep_ () = default;
            nonvirtual SeekableRep_& operator= (const SeekableRep_&) = delete;

            // InputOutputStream::IRep<ELEMENT_TYPE> overrides
            virtual bool IsSeekable () const override
            {
                return true;
            }
            virtual void CloseWrite () override
            {
                {
                    [[maybe_unused]] lock_guard critSec{fMutex_};
                    fClosedForWrites_ = true;
                }
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set ();
                }
                Ensure (not IsOpenWrite ());
            }
            virtual bool IsOpenWrite () const override
            {
                return not fClosedForWrites_;
            }
            virtual void CloseRead () override
            {
                fIsOpenForRead_ = false;
                Ensure (not IsOpenRead ());
            }
            virtual bool IsOpenRead () const override
            {
                return fIsOpenForRead_;
            }
            virtual optional<size_t> AvailableToRead () override
            {
                Require (IsOpenRead ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                size_t nDefinitelyAvail = distance (fReadCursor_, fData_.cend ());
                if (nDefinitelyAvail > 0) {
                    return nDefinitelyAvail;
                }
                else if (fClosedForWrites_) {
                    return 0;
                }
                else {
                    return nullopt; // if nothing available, but not closed for write, no idea if more to come
                }
            }
            virtual optional<SeekOffsetType> RemainingLength () override
            {
                Require (IsOpenRead ());
                return nullopt; // pretty easy but @todo
            }
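            // Read () below blocks (when requested) without holding fMutex_: it first waits on fMoreDataWaiter_, then
            // takes the lock and re-checks how much data is really available. If another reader consumed it first, it
            // resets the event and loops back to the 'tryAgain' label, since waiting while holding the lock would block writers.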
            DISABLE_COMPILER_GCC_WARNING_START ("GCC diagnostic ignored \"-Wunused-label\"");
            DISABLE_COMPILER_MSC_WARNING_START (4102)
            virtual optional<span<ELEMENT_TYPE>> Read (span<ELEMENT_TYPE> intoBuffer, NoDataAvailableHandling blockFlag) override
            {
                Require (not intoBuffer.empty ());
                Require (IsOpenRead ());
                size_t nRequested = intoBuffer.size ();
            tryAgain:
                switch (blockFlag) {
                    case eDontBlock: {
                        if (this->AvailableToRead () == nullopt) {
                            return nullopt;
                        }
                    } break;
                    [[likely]] case eBlockIfNoDataAvailable: {
                        if constexpr (kLocking_) {
                            fMoreDataWaiter_.Wait ();
                        }
                    } break;
                }
                // at this point, data is available
                [[maybe_unused]] lock_guard critSec{fMutex_}; // hold lock for everything EXCEPT wait
                Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
                size_t nAvail = distance (fReadCursor_, fData_.cend ());
                if (nAvail == 0 and not fClosedForWrites_) {
                    if constexpr (kLocking_) {
                        fMoreDataWaiter_.Reset (); // ?? @todo consider - is this a race? If we reset at same time(apx) as someone else sets
                        goto tryAgain;             // cannot wait while we hold lock
                    }
                    else {
                        Require (blockFlag == eDontBlock); // else would be a deadlock.
                    }
                }
                size_t nCopied = min (nAvail, nRequested);
                {
                    copy (fReadCursor_, fReadCursor_ + nCopied, intoBuffer.data ());
                    fReadCursor_ = fReadCursor_ + nCopied;
                }
                return intoBuffer.subspan (0, nCopied); // this can be empty on EOF
            }
            DISABLE_COMPILER_MSC_WARNING_END (4102)
            DISABLE_COMPILER_GCC_WARNING_END ("GCC diagnostic ignored \"-Wunused-label\"");
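            // Write () below appends at the write cursor, growing fData_ as needed. Because vector::resize () can
            // reallocate and invalidate iterators, the current read/write positions are captured as offsets first and
            // the cursors are rebuilt afterwards. In the locking variant the waitable event is set so blocked readers re-check.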
            virtual void Write (span<const ELEMENT_TYPE> elts) override
            {
                Require (not elts.empty ());
                Require (IsOpenWrite ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                size_t roomLeft = distance (fWriteCursor_, fData_.end ());
                size_t roomRequired = elts.size ();
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
                }
                if (roomLeft < roomRequired) {
                    size_t curReadOffset = distance (fData_.cbegin (), fReadCursor_);
                    size_t curWriteOffset = distance (fData_.begin (), fWriteCursor_);
                    const size_t kChunkSize_ = 128; // WAG: @todo tune number...
                    Containers::Support::ReserveTweaks::Reserve4AddN (fData_, roomRequired - roomLeft, kChunkSize_);
                    fData_.resize (curWriteOffset + roomRequired);
                    fReadCursor_ = fData_.begin () + curReadOffset;
                    fWriteCursor_ = fData_.begin () + curWriteOffset;
                    Assert (fWriteCursor_ < fData_.end ());
                }
                copy (elts.data (), elts.data () + roomRequired, fWriteCursor_);
                fWriteCursor_ += roomRequired;
                Assert (fReadCursor_ < fData_.end ()); // < because we wrote at least one byte and that didn't move read cursor
                Assert (fWriteCursor_ <= fData_.end ());
            }
            virtual void Flush () override
            {
                // nothing todo - write 'writes thru'
            }
            virtual SeekOffsetType GetReadOffset () const override
            {
                Require (IsOpenRead ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return distance (fData_.begin (), fReadCursor_);
            }
            virtual SeekOffsetType SeekRead (Whence whence, SignedSeekOffsetType offset) override
            {
                Require (IsOpenRead ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
                }
                switch (whence) {
                    case eFromStart: {
                        if (offset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        SeekOffsetType uOffset = static_cast<SeekOffsetType> (offset);
                        if (uOffset > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fReadCursor_ = fData_.begin () + static_cast<size_t> (uOffset);
                    } break;
                    case eFromCurrent: {
                        Streams::SeekOffsetType curOffset = distance (fData_.cbegin (), fReadCursor_);
                        Streams::SignedSeekOffsetType newOffset = curOffset + offset;
                        if (newOffset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        SeekOffsetType uNewOffset = static_cast<SeekOffsetType> (newOffset);
                        if (uNewOffset > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fReadCursor_ = fData_.begin () + static_cast<size_t> (uNewOffset);
                    } break;
                    case eFromEnd: {
                        Streams::SignedSeekOffsetType newOffset = fData_.size () + offset;
                        if (newOffset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        SeekOffsetType uNewOffset = static_cast<SeekOffsetType> (newOffset);
                        if (uNewOffset > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fReadCursor_ = fData_.begin () + static_cast<size_t> (uNewOffset);
                    } break;
                }
                Ensure ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
                return distance (fData_.cbegin (), fReadCursor_);
            }
            virtual SeekOffsetType GetWriteOffset () const override
            {
                Require (IsOpenWrite ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return distance (fData_.begin (), static_cast<typename vector<ElementType>::const_iterator> (fWriteCursor_));
            }
            virtual SeekOffsetType SeekWrite (Whence whence, SignedSeekOffsetType offset) override
            {
                Require (IsOpenWrite ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
                }
                switch (whence) {
                    case eFromStart: {
                        if (offset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        if (static_cast<SeekOffsetType> (offset) > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fWriteCursor_ = fData_.begin () + static_cast<size_t> (offset);
                    } break;
                    case eFromCurrent: {
                        Streams::SeekOffsetType curOffset = distance (fData_.begin (), fWriteCursor_);
                        Streams::SignedSeekOffsetType newOffset = curOffset + offset;
                        if (newOffset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        if (static_cast<size_t> (newOffset) > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fWriteCursor_ = fData_.begin () + static_cast<size_t> (newOffset);
                    } break;
                    case eFromEnd: {
                        Streams::SignedSeekOffsetType newOffset = fData_.size () + offset;
                        if (newOffset < 0) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        if (static_cast<size_t> (newOffset) > fData_.size ()) [[unlikely]] {
                            Execution::Throw (kSeekException_);
                        }
                        fWriteCursor_ = fData_.begin () + static_cast<size_t> (newOffset);
                    } break;
                }
                Ensure ((fData_.begin () <= fWriteCursor_) and (fWriteCursor_ <= fData_.end ()));
                return distance (fData_.begin (), fWriteCursor_);
            }
            // Private_::IRep_ overrides
            virtual Options GetOptions () const override
            {
                return Options{.fInternallySynchronized = same_as<LOCK_IMPL, recursive_mutex>
                                                              ? Execution::InternallySynchronized::eInternallySynchronized
                                                              : Execution::InternallySynchronized::eNotKnownInternallySynchronized,
                               .fSeekable = true};
            }
            virtual vector<ElementType> AsVector () const override
            {
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return fData_;
            }
            virtual string AsString () const override
            {
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return string{reinterpret_cast<const char*> (Containers::Start (fData_)), reinterpret_cast<const char*> (Containers::End (fData_))};
            }

        private:
            static inline const auto kSeekException_ = range_error{"seek"};
            mutable LOCK_IMPL fMutex_;
            [[no_unique_address]] conditional_t<kLocking_, Execution::WaitableEvent, Common::Empty> fMoreDataWaiter_{}; // not a race cuz always set/reset when holding fMutex; no need to pre-set cuz auto set when someone adds data (Write)
            vector<ElementType> fData_; // Important data comes before cursors cuz of use in CTOR
            typename vector<ElementType>::const_iterator fReadCursor_;
            typename vector<ElementType>::iterator fWriteCursor_;
            bool fClosedForWrites_{false};
        };
        // @todo re-implement for saving memory - since not seekable
        template <typename ELEMENT_TYPE, typename LOCK_IMPL>
        class UnseekableRep_ : public IRep_<ELEMENT_TYPE> {
        public:
            using ElementType = ELEMENT_TYPE;

        private:
            bool fIsOpenForRead_{true};

        private:
            static constexpr bool kLocking_ = same_as<LOCK_IMPL, recursive_mutex>;

        public:
            UnseekableRep_ ()
                : fReadCursor_{fData_.begin ()}
                , fWriteCursor_{fData_.begin ()}
            {
            }
            UnseekableRep_ (const UnseekableRep_&) = delete;
            ~UnseekableRep_ () = default;
            nonvirtual UnseekableRep_& operator= (const UnseekableRep_&) = delete;

            // InputOutputStream::IRep<ELEMENT_TYPE> overrides
            virtual bool IsSeekable () const override
            {
                return false;
            }
            virtual void CloseWrite () override
            {
                {
                    [[maybe_unused]] lock_guard critSec{fMutex_};
                    fClosedForWrites_ = true;
                }
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set ();
                }
                Ensure (not IsOpenWrite ());
            }
            virtual bool IsOpenWrite () const override
            {
                return not fClosedForWrites_;
            }
            virtual void CloseRead () override
            {
                fIsOpenForRead_ = false;
                Ensure (not IsOpenRead ());
            }
            virtual bool IsOpenRead () const override
            {
                return fIsOpenForRead_;
            }
            virtual optional<size_t> AvailableToRead () override
            {
                Require (IsOpenRead ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                size_t nDefinitelyAvail = distance (fReadCursor_, fData_.cend ());
                if (nDefinitelyAvail > 0) {
                    return nDefinitelyAvail;
                }
                else if (fClosedForWrites_) {
                    return 0;
                }
                else {
                    return nullopt; // if nothing available, but not closed for write, no idea if more to come
                }
            }
            virtual optional<SeekOffsetType> RemainingLength () override
            {
                Require (IsOpenRead ());
                return nullopt; // pretty easy but @todo
            }
            DISABLE_COMPILER_MSC_WARNING_START (4102)
            DISABLE_COMPILER_GCC_WARNING_START ("GCC diagnostic ignored \"-Wunused-label\"");
            virtual optional<span<ELEMENT_TYPE>> Read (span<ELEMENT_TYPE> intoBuffer, NoDataAvailableHandling blockFlag) override
            {
                Require (not intoBuffer.empty ());
                Require (IsOpenRead ());
                size_t nRequested = intoBuffer.size ();
            tryAgain:
                switch (blockFlag) {
                    case eDontBlock: {
                        if (this->AvailableToRead () == nullopt) {
                            return nullopt;
                        }
                    } break;
                    [[likely]] case eBlockIfNoDataAvailable: {
                        if constexpr (kLocking_) {
                            fMoreDataWaiter_.Wait ();
                        }
                    } break;
                }
                // at this point, data is available
                [[maybe_unused]] lock_guard critSec{fMutex_}; // hold lock for everything EXCEPT wait
                Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
                size_t nAvail = distance (fReadCursor_, fData_.cend ());
                if (nAvail == 0 and not fClosedForWrites_) {
                    if constexpr (kLocking_) {
                        fMoreDataWaiter_.Reset (); // ?? @todo consider - is this a race? If we reset at same time(apx) as someone else sets
                        goto tryAgain;             // cannot wait while we hold lock
                    }
                    else {
                        Require (blockFlag == eDontBlock); // else would be a deadlock.
                    }
                }
                size_t nCopied = min (nAvail, nRequested);
                {
                    copy (fReadCursor_, fReadCursor_ + nCopied, intoBuffer.data ());
                    fReadCursor_ = fReadCursor_ + nCopied;
                }
                FreeUpSpaceIfNeeded_ ();
                return intoBuffer.subspan (0, nCopied); // this can be empty on EOF
            }
            DISABLE_COMPILER_GCC_WARNING_END ("GCC diagnostic ignored \"-Wunused-label\"");
            DISABLE_COMPILER_MSC_WARNING_END (4102)
            virtual void Write (span<const ELEMENT_TYPE> elts) override
            {
                Require (not elts.empty ());
                Require (IsOpenWrite ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                size_t roomLeft = distance (fWriteCursor_, fData_.end ());
                size_t roomRequired = elts.size ();
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
                }
                if (roomLeft < roomRequired) {
                    size_t curReadOffset = distance (fData_.cbegin (), fReadCursor_);
                    size_t curWriteOffset = distance (fData_.begin (), fWriteCursor_);
                    const size_t kChunkSize_ = 128; // WAG: @todo tune number...
                    Containers::Support::ReserveTweaks::Reserve4AddN (fData_, roomRequired - roomLeft, kChunkSize_);
                    fData_.resize (curWriteOffset + roomRequired);
                    fReadCursor_ = fData_.begin () + curReadOffset;
                    fWriteCursor_ = fData_.begin () + curWriteOffset;
                    Assert (fWriteCursor_ < fData_.end ());
                }
                copy (elts.data (), elts.data () + roomRequired, fWriteCursor_);
                fWriteCursor_ += roomRequired;
                Assert (fReadCursor_ < fData_.end ()); // < because we wrote at least one byte and that didn't move read cursor
                Assert (fWriteCursor_ <= fData_.end ());
            }
            virtual void Flush () override
            {
                // nothing todo - write 'writes thru'
            }
            virtual SeekOffsetType GetReadOffset () const override
            {
                Require (IsOpenRead ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return fSpaceClearedFromStreamHead_ + distance (fData_.begin (), fReadCursor_);
            }
            virtual SeekOffsetType SeekRead ([[maybe_unused]] Whence whence, [[maybe_unused]] SignedSeekOffsetType offset) override
            {
                RequireNotReached (); // seeking not supported on this rep (assumed restoration of a line elided in the listing)
                return 0;
            }
            virtual SeekOffsetType GetWriteOffset () const override
            {
                Require (IsOpenWrite ());
                [[maybe_unused]] lock_guard critSec{fMutex_};
                return fSpaceClearedFromStreamHead_ +
                       std::distance (fData_.begin (), static_cast<typename vector<ElementType>::const_iterator> (fWriteCursor_));
            }
            virtual SeekOffsetType SeekWrite ([[maybe_unused]] Whence whence, [[maybe_unused]] SignedSeekOffsetType offset) override
            {
                RequireNotReached (); // seeking not supported on this rep (assumed restoration of a line elided in the listing)
                return 0;
            }
            // Private_::IRep_ overrides
            virtual Options GetOptions () const override
            {
                return Options{.fInternallySynchronized = same_as<LOCK_IMPL, recursive_mutex>
                                                              ? Execution::InternallySynchronized::eInternallySynchronized
                                                              : Execution::InternallySynchronized::eNotKnownInternallySynchronized,
                               .fSeekable = false};
            }
            virtual vector<ElementType> AsVector () const override
            {
                return {};
            }
            virtual string AsString () const override
            {
                return {};
            }

        private:
            /*
             *  Since the read stream is not seekable, anything before its read offset can be thrown away. Just adjust the
             *  reported 'seek offsets' so it's not apparent to callers that this has happened.
             *
             *  Also - given the current data structures (could replace with ChunkedArray - maybe better) - it is costly to
             *  throw stuff away. So for now, only do it if it would save significant space.
             */
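            // For example, with ELEMENT_TYPE = byte the reclamation below only kicks in once at least 16 KiB
            // (kMinData2Reclaim_) of already-read data has accumulated, and only while both the read and write
            // sides of the stream remain open.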
            nonvirtual void FreeUpSpaceIfNeeded_ ()
            {
                [[maybe_unused]] lock_guard critSec{fMutex_};
                Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
                Assert (fReadCursor_ <= fWriteCursor_); // cuz cannot seek, and cannot read past where we've written so far
                constexpr size_t kMinData2Reclaim_ = 16 * 1024;
                size_t elts2Reclaim = distance (fData_.cbegin (), fReadCursor_);
                if (elts2Reclaim * sizeof (ELEMENT_TYPE) >= kMinData2Reclaim_ and IsOpenRead () and IsOpenWrite ()) [[unlikely]] {
                    SeekOffsetType readOffset = GetReadOffset ();
                    SeekOffsetType writeOffset = GetWriteOffset ();
                    fData_.erase (fData_.begin (), fData_.begin () + elts2Reclaim);
                    fSpaceClearedFromStreamHead_ += elts2Reclaim;
                    Assert (readOffset == fSpaceClearedFromStreamHead_); // cuz always wrote more than read, and clear all that read
                    fReadCursor_ = fData_.begin () + static_cast<size_t> (readOffset - fSpaceClearedFromStreamHead_);
                    fWriteCursor_ = fData_.begin () + static_cast<size_t> (writeOffset - fSpaceClearedFromStreamHead_);
                    Assert (readOffset == GetReadOffset ());
                    Assert (writeOffset == GetWriteOffset ());
                }
            }

        private:
            [[no_unique_address]] mutable LOCK_IMPL fMutex_;
            size_t fSpaceClearedFromStreamHead_{0};
            [[no_unique_address]] conditional_t<kLocking_, Execution::WaitableEvent, Common::Empty> fMoreDataWaiter_{}; // not a race cuz always set/reset when holding fMutex; no need to pre-set cuz auto set when someone adds data (Write)
            vector<ElementType> fData_; // Important data comes before cursors cuz of use in CTOR
            typename vector<ElementType>::const_iterator fReadCursor_;
            typename vector<ElementType>::iterator fWriteCursor_;
            bool fClosedForWrites_{false};
        };
    }

    /*
     ********************************************************************************
     ********************** SharedMemoryStream<ELEMENT_TYPE> ************************
     ********************************************************************************
     */
    template <typename ELEMENT_TYPE>
    inline auto New (Options options) -> Ptr<ELEMENT_TYPE>
    {
        // @todo - could do better on NullMutex stuff \see http://stroika-bugs.sophists.com/browse/STK-584
        if (options.fSeekable) {
            return options.fInternallySynchronized == Execution::InternallySynchronized::eInternallySynchronized
                       ? Ptr<ELEMENT_TYPE>{make_shared<Private_::SeekableRep_<ELEMENT_TYPE, recursive_mutex>> ()}
                       : Ptr<ELEMENT_TYPE>{make_shared<Private_::SeekableRep_<ELEMENT_TYPE, Execution::NullMutex>> ()};
        }
        else {
            return options.fInternallySynchronized == Execution::InternallySynchronized::eInternallySynchronized
                       ? Ptr<ELEMENT_TYPE>{make_shared<Private_::UnseekableRep_<ELEMENT_TYPE, recursive_mutex>> ()}
                       : Ptr<ELEMENT_TYPE>{make_shared<Private_::UnseekableRep_<ELEMENT_TYPE, Execution::NullMutex>> ()};
        }
    }
    template <typename ELEMENT_TYPE, typename COPY_FROM>
    inline auto New (const COPY_FROM& copyFrom, Options options) -> Ptr<ELEMENT_TYPE>
        requires (same_as<ELEMENT_TYPE, byte> and Common::IAnyOf<COPY_FROM, Memory::BLOB, span<const ELEMENT_TYPE>>)
    {
        auto p = New<ELEMENT_TYPE> (options);
        p.Write (copyFrom);
        return p;
    }

    /*
     ********************************************************************************
     ****************** SharedMemoryStream::Ptr<ELEMENT_TYPE> ***********************
     ********************************************************************************
     */
    template <typename ELEMENT_TYPE>
    inline Ptr<ELEMENT_TYPE>::Ptr (const shared_ptr<Private_::IRep_<ELEMENT_TYPE>>& from)
        : inherited{from}
    {
    }
    template <typename ELEMENT_TYPE>
    inline auto Ptr<ELEMENT_TYPE>::GetRepConstRef_ () const -> const Private_::IRep_<ELEMENT_TYPE>&
    {
        return *Debug::UncheckedDynamicCast<const Private_::IRep_<ELEMENT_TYPE>*> (&inherited::GetRepConstRef ());
    }
    template <typename ELEMENT_TYPE>
    inline Options Ptr<ELEMENT_TYPE>::GetOptions () const
    {
        return GetRepConstRef_ ().GetOptions ();
    }
    template <typename ELEMENT_TYPE>
    template <typename T>
    inline T Ptr<ELEMENT_TYPE>::As () const
        requires (same_as<T, vector<ELEMENT_TYPE>> or (same_as<ELEMENT_TYPE, byte> and Common::IAnyOf<T, Memory::BLOB, string>) or
                  (same_as<ELEMENT_TYPE, Characters::Character> and same_as<T, Characters::String>))
    {
        using Characters::Character;
        using Characters::String;
        using Memory::BLOB;
        if constexpr (same_as<T, vector<ELEMENT_TYPE>>) {
            return GetRepConstRef_ ().AsVector ();
        }
        else if constexpr (same_as<T, Memory::BLOB>) {
            return GetRepConstRef_ ().AsVector ();
        }
        else if constexpr (same_as<T, string>) {
            return GetRepConstRef_ ().AsString ();
        }
        else if constexpr (same_as<T, String>) {
            auto tmp = GetRepConstRef_ ().AsVector ();
            return String{span{tmp}};
        }
    }

    /// deprecated
    template <typename ELEMENT_TYPE>
    [[deprecated ("Since Stroika v3.0d5 - use span overload")]] inline auto New (const ELEMENT_TYPE* start, const ELEMENT_TYPE* end)
        -> Ptr<ELEMENT_TYPE>
    {
        return New (span<ELEMENT_TYPE>{start, end});
    }

}
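A minimal usage sketch of the pieces above, assuming the public SharedMemoryStream.h header (and its include path) declares New, Ptr, Options, and the stream Write method as they are used in this file; ExampleUsage_ and kData_ are illustrative names only:

#include "Stroika/Foundation/Streams/SharedMemoryStream.h"

using namespace Stroika::Foundation;

void ExampleUsage_ ()
{
    using std::byte;
    // Options{} picks whatever defaults SharedMemoryStream.h declares
    Streams::SharedMemoryStream::Ptr<byte> s = Streams::SharedMemoryStream::New<byte> (Streams::SharedMemoryStream::Options{});
    const byte kData_[] = {byte{'h'}, byte{'i'}};
    s.Write (std::span<const byte>{kData_});
    std::string asString = s.As<std::string> ();  // "hi" - whole-buffer snapshot via AsString ()
    Memory::BLOB asBlob = s.As<Memory::BLOB> ();  // snapshot via AsVector ()

    // An internally synchronized, unseekable stream behaves like a producer/consumer pipe: blocking reads wait
    // for Write () or CloseWrite (), and already-consumed data may be reclaimed (FreeUpSpaceIfNeeded_ above).
    auto pipe = Streams::SharedMemoryStream::New<byte> (Streams::SharedMemoryStream::Options{
        .fInternallySynchronized = Execution::InternallySynchronized::eInternallySynchronized, .fSeekable = false});
}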