Stroika Library 3.0d23x
 
Loading...
Searching...
No Matches
SharedMemoryStream.inl
1/*
2 * Copyright(c) Sophist Solutions, Inc. 1990-2026. All rights reserved
3 */
4#include <mutex>
5
7#include "Stroika/Foundation/Containers/Support/ReserveTweaks.h"
9#include "Stroika/Foundation/Execution/NullMutex.h"
10#include "Stroika/Foundation/Execution/WaitableEvent.h"
12
13namespace Stroika::Foundation::Streams::SharedMemoryStream {
14
15 namespace Private_ {
/*
 *  Private rep interface for SharedMemoryStream: extends the generic
 *  InputOutputStream rep with the extra operations Ptr<ELEMENT_TYPE> needs
 *  (option introspection and whole-buffer snapshot accessors used by
 *  Ptr::GetOptions and Ptr::As).
 */
template <typename ELEMENT_TYPE>
class IRep_ : public InputOutputStream::IRep<ELEMENT_TYPE> {
public:
    // Options this rep was constructed with (synchronization policy, seekability)
    virtual Options GetOptions () const = 0;
    // Snapshot of the stream's buffered contents as a vector (unseekable reps may return empty - see UnseekableRep_)
    virtual vector<ELEMENT_TYPE> AsVector () const = 0;
    // Snapshot of the stream's buffered contents as a string (unseekable reps may return empty - see UnseekableRep_)
    virtual string AsString () const = 0;
};
/*
 *  SeekableRep_ - backing rep for a seekable SharedMemoryStream.
 *
 *  All data lives in one contiguous vector<ELEMENT_TYPE>; independent read and write
 *  cursors (iterators into fData_) support random-access seeking on both sides.
 *
 *  LOCK_IMPL selects the threading policy:
 *    - recursive_mutex       => internally synchronized; kLocking_ is true, and
 *                               fMoreDataWaiter_ (a WaitableEvent) lets blocking reads
 *                               wait for a writer to produce more data.
 *    - Execution::NullMutex  => no synchronization; a blocking read that would have to
 *                               wait is disallowed (would deadlock - see Read).
 */
template <typename ELEMENT_TYPE, typename LOCK_IMPL>
class SeekableRep_ : public IRep_<ELEMENT_TYPE> {
public:
    using ElementType = ELEMENT_TYPE;

private:
    bool fIsOpenForRead_{true}; // read side stays open until CloseRead ()

private:
    // true iff this instantiation is the internally-synchronized variant
    static constexpr bool kLocking_ = same_as<LOCK_IMPL, recursive_mutex>;

public:
    SeekableRep_ ()
        : fReadCursor_{fData_.begin ()}
        , fWriteCursor_{fData_.begin ()}
    {
    }
    SeekableRep_ (const SeekableRep_&) = delete;
    ~SeekableRep_ () = default;
    nonvirtual SeekableRep_& operator= (const SeekableRep_&) = delete;

    // InputOutputStream::IRep<ELEMENT_TYPE> overrides
    virtual bool IsSeekable () const override
    {
        return true;
    }
    virtual void CloseWrite () override
    {
        {
            [[maybe_unused]] lock_guard critSec{fMutex_};
            fClosedForWrites_ = true;
        }
        // Set the event AFTER dropping the lock, so a woken reader can immediately
        // acquire fMutex_ and observe fClosedForWrites_ (EOF).
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set ();
        }
        Ensure (not IsOpenWrite ());
    }
    virtual bool IsOpenWrite () const override
    {
        return not fClosedForWrites_;
    }
    virtual void CloseRead () override
    {
        fIsOpenForRead_ = false;
        Ensure (not IsOpenRead ());
    }
    virtual bool IsOpenRead () const override
    {
        return fIsOpenForRead_;
    }
    // Returns: >0 = elements immediately readable; 0 = EOF (writer closed);
    // nullopt = nothing buffered but writer still open (unknown if more will come).
    virtual optional<size_t> AvailableToRead () override
    {
        Require (IsOpenRead ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        size_t nDefinitelyAvail = distance (fReadCursor_, fData_.cend ());
        if (nDefinitelyAvail > 0) {
            return nDefinitelyAvail;
        }
        else if (fClosedForWrites_) {
            return 0;
        }
        else {
            return nullopt; // if nothing available, but not closed for write, no idea if more to come
        }
    }
    virtual optional<SeekOffsetType> RemainingLength () override
    {
        Require (IsOpenRead ());
        return nullopt; // pretty easy but @todo
    }
    DISABLE_COMPILER_GCC_WARNING_START ("GCC diagnostic ignored \"-Wunused-label\"");
    DISABLE_COMPILER_MSC_WARNING_START (4102)
    // Copies up to intoBuffer.size () elements starting at the read cursor.
    // Returns nullopt iff blockFlag==eDontBlock and no data is yet available;
    // returns an EMPTY span at EOF; otherwise the filled prefix of intoBuffer.
    virtual optional<span<ELEMENT_TYPE>> Read (span<ELEMENT_TYPE> intoBuffer, NoDataAvailableHandling blockFlag) override
    {
        Require (not intoBuffer.empty ());
        Require (IsOpenRead ());
        size_t nRequested = intoBuffer.size ();
    tryAgain:
        switch (blockFlag) {
            case eDontBlock: {
                if (this->AvailableToRead () == nullopt) {
                    return nullopt;
                }
            } break;
            [[likely]] case eBlockIfNoDataAvailable: {
                // wait OUTSIDE fMutex_ - a writer must be able to acquire it to produce data
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Wait ();
                }
            } break;
        }
        // at this point, data is available
        [[maybe_unused]] lock_guard critSec{fMutex_}; // hold lock for everything EXCEPT wait
        Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
        size_t nAvail = distance (fReadCursor_, fData_.cend ());
        if (nAvail == 0 and not fClosedForWrites_) {
            if constexpr (kLocking_) {
                fMoreDataWaiter_.Reset (); // ?? @todo consider - is this a race? If we reset at same time(apx) as someone else sets
                goto tryAgain;             // cannot wait while we hold lock
            }
            else {
                Require (blockFlag == eDontBlock); // else would be a deadlock.
            }
        }
        size_t nCopied = min (nAvail, nRequested);
        {
            copy (fReadCursor_, fReadCursor_ + nCopied, intoBuffer.data ());
            fReadCursor_ = fReadCursor_ + nCopied;
        }
        return intoBuffer.subspan (0, nCopied); // this can be empty on EOF
    }
    DISABLE_COMPILER_MSC_WARNING_END (4102)
    DISABLE_COMPILER_GCC_WARNING_END ("GCC diagnostic ignored \"-Wunused-label\"");
    // Writes elts at the write cursor (appending, or overwriting previously written data
    // if the write cursor was seeked backwards), growing fData_ as needed; in the locking
    // variant, wakes any readers blocked in Read ().
    virtual void Write (span<const ELEMENT_TYPE> elts) override
    {
        Require (not elts.empty ());
        Require (IsOpenWrite ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        size_t roomLeft = distance (fWriteCursor_, fData_.end ());
        size_t roomRequired = elts.size ();
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
        }
        if (roomLeft < roomRequired) {
            // Must grow fData_: capture cursor OFFSETS first, since resize () invalidates iterators.
            size_t curReadOffset = distance (fData_.cbegin (), fReadCursor_);
            size_t curWriteOffset = distance (fData_.begin (), fWriteCursor_);
            const size_t kChunkSize_ = 128; // WAG: @todo tune number...
            Containers::Support::ReserveTweaks::Reserve4AddN (fData_, roomRequired - roomLeft, kChunkSize_);
            fData_.resize (curWriteOffset + roomRequired);
            fReadCursor_ = fData_.begin () + curReadOffset;
            fWriteCursor_ = fData_.begin () + curWriteOffset;
            Assert (fWriteCursor_ < fData_.end ());
        }
        copy (elts.data (), elts.data () + roomRequired, fWriteCursor_);
        fWriteCursor_ += roomRequired;
        Assert (fReadCursor_ < fData_.end ()); // < because we wrote at least one byte and that didn't move read cursor
        Assert (fWriteCursor_ <= fData_.end ());
    }
    virtual void Flush () override
    {
        // nothing todo - write 'writes thru'
    }
    virtual SeekOffsetType GetReadOffset () const override
    {
        Require (IsOpenRead ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        return distance (fData_.begin (), fReadCursor_);
    }
    // Repositions the read cursor; throws range_error (kSeekException_) on any attempt
    // to land before offset 0 or past the current end of data. Returns new absolute offset.
    virtual SeekOffsetType SeekRead (Whence whence, SignedSeekOffsetType offset) override
    {
        Require (IsOpenRead ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
        }
        switch (whence) {
            case eFromStart: {
                if (offset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                SeekOffsetType uOffset = static_cast<SeekOffsetType> (offset);
                if (uOffset > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fReadCursor_ = fData_.begin () + static_cast<size_t> (uOffset);
            } break;
            case eFromCurrent: {
                Streams::SeekOffsetType curOffset = distance (fData_.cbegin (), fReadCursor_);
                Streams::SignedSeekOffsetType newOffset = curOffset + offset;
                if (newOffset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                SeekOffsetType uNewOffset = static_cast<SeekOffsetType> (newOffset);
                if (uNewOffset > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fReadCursor_ = fData_.begin () + static_cast<size_t> (uNewOffset);
            } break;
            case eFromEnd: {
                Streams::SignedSeekOffsetType newOffset = fData_.size () + offset;
                if (newOffset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                SeekOffsetType uNewOffset = static_cast<SeekOffsetType> (newOffset);
                if (uNewOffset > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fReadCursor_ = fData_.begin () + static_cast<size_t> (uNewOffset);
            } break;
        }
        Ensure ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
        return distance (fData_.cbegin (), fReadCursor_);
    }
    virtual SeekOffsetType GetWriteOffset () const override
    {
        Require (IsOpenWrite ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        return distance (fData_.begin (), static_cast<typename vector<ElementType>::const_iterator> (fWriteCursor_));
    }
    // Repositions the write cursor with the same range checks as SeekRead.
    // Note this cannot grow the stream: seeking past end throws rather than zero-filling.
    virtual SeekOffsetType SeekWrite (Whence whence, SignedSeekOffsetType offset) override
    {
        Require (IsOpenWrite ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
        }
        switch (whence) {
            case eFromStart: {
                if (offset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                if (static_cast<SeekOffsetType> (offset) > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fWriteCursor_ = fData_.begin () + static_cast<size_t> (offset);
            } break;
            case eFromCurrent: {
                Streams::SeekOffsetType curOffset = distance (fData_.begin (), fWriteCursor_);
                Streams::SignedSeekOffsetType newOffset = curOffset + offset;
                if (newOffset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                if (static_cast<size_t> (newOffset) > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fWriteCursor_ = fData_.begin () + static_cast<size_t> (newOffset);
            } break;
            case eFromEnd: {
                Streams::SignedSeekOffsetType newOffset = fData_.size () + offset;
                if (newOffset < 0) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                if (static_cast<size_t> (newOffset) > fData_.size ()) [[unlikely]] {
                    Execution::Throw (kSeekException_);
                }
                fWriteCursor_ = fData_.begin () + static_cast<size_t> (newOffset);
            } break;
        }
        Ensure ((fData_.begin () <= fWriteCursor_) and (fWriteCursor_ <= fData_.end ()));
        return distance (fData_.begin (), fWriteCursor_);
    }
    // Private_::IRep_ overrides
    virtual Options GetOptions () const override
    {
        return Options{.fInternallySynchronized = same_as<LOCK_IMPL, recursive_mutex>
                           ? Execution::InternallySynchronized::eInternallySynchronized
                           : Execution::InternallySynchronized::eNotKnownInternallySynchronized,
                       .fSeekable = true};
    }
    virtual vector<ElementType> AsVector () const override
    {
        [[maybe_unused]] lock_guard critSec{fMutex_};
        return fData_;
    }
    virtual string AsString () const override
    {
        // NOTE(review): reinterprets raw element bytes as chars - presumably only used with
        // ELEMENT_TYPE == byte/char instantiations; confirm against callers.
        [[maybe_unused]] lock_guard critSec{fMutex_};
        return string{reinterpret_cast<const char*> (Containers::Start (fData_)), reinterpret_cast<const char*> (Containers::End (fData_))};
    }

private:
    static inline const auto kSeekException_ = range_error{"seek"};
    mutable LOCK_IMPL fMutex_;
    [[no_unique_address]] conditional_t<kLocking_, Execution::WaitableEvent, Common::Empty> fMoreDataWaiter_{}; // not a race cuz always set/reset when holding fMutex; no need to pre-set cuz auto set when someone adds data (Write)
    vector<ElementType> fData_; // Important data comes before cursors cuz of use in CTOR
    typename vector<ElementType>::const_iterator fReadCursor_;
    typename vector<ElementType>::iterator fWriteCursor_;
    bool fClosedForWrites_{false};
};
/*
 *  UnseekableRep_ - backing rep for an UNseekable SharedMemoryStream.
 *
 *  Same vector-backed FIFO structure as SeekableRep_, but since the consumer can never
 *  seek backwards, data already read can be reclaimed (FreeUpSpaceIfNeeded_); the
 *  fSpaceClearedFromStreamHead_ counter keeps reported offsets monotone so callers
 *  cannot tell reclamation happened. LOCK_IMPL policy is identical to SeekableRep_.
 *
 *  @todo re-implement for saving memory - since not seekable
 */
template <typename ELEMENT_TYPE, typename LOCK_IMPL>
class UnseekableRep_ : public IRep_<ELEMENT_TYPE> {
public:
    using ElementType = ELEMENT_TYPE;

private:
    bool fIsOpenForRead_{true}; // read side stays open until CloseRead ()

private:
    // true iff this instantiation is the internally-synchronized variant
    static constexpr bool kLocking_ = same_as<LOCK_IMPL, recursive_mutex>;

public:
    UnseekableRep_ ()
        : fReadCursor_{fData_.begin ()}
        , fWriteCursor_{fData_.begin ()}
    {
    }
    UnseekableRep_ (const UnseekableRep_&) = delete;
    ~UnseekableRep_ () = default;
    nonvirtual UnseekableRep_& operator= (const UnseekableRep_&) = delete;

    // InputOutputStream::IRep<ELEMENT_TYPE> overrides
    virtual bool IsSeekable () const override
    {
        return false;
    }
    virtual void CloseWrite () override
    {
        {
            [[maybe_unused]] lock_guard critSec{fMutex_};
            fClosedForWrites_ = true;
        }
        // Set the event AFTER dropping the lock, so a woken reader can immediately
        // acquire fMutex_ and observe fClosedForWrites_ (EOF).
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set ();
        }
        Ensure (not IsOpenWrite ());
    }
    virtual bool IsOpenWrite () const override
    {
        return not fClosedForWrites_;
    }
    virtual void CloseRead () override
    {
        fIsOpenForRead_ = false;
        Ensure (not IsOpenRead ());
    }
    virtual bool IsOpenRead () const override
    {
        return fIsOpenForRead_;
    }
    // Returns: >0 = elements immediately readable; 0 = EOF (writer closed);
    // nullopt = nothing buffered but writer still open (unknown if more will come).
    virtual optional<size_t> AvailableToRead () override
    {
        Require (IsOpenRead ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        size_t nDefinitelyAvail = distance (fReadCursor_, fData_.cend ());
        if (nDefinitelyAvail > 0) {
            return nDefinitelyAvail;
        }
        else if (fClosedForWrites_) {
            return 0;
        }
        else {
            return nullopt; // if nothing available, but not closed for write, no idea if more to come
        }
    }
    virtual optional<SeekOffsetType> RemainingLength () override
    {
        Require (IsOpenRead ());
        return nullopt; // pretty easy but @todo
    }
    DISABLE_COMPILER_MSC_WARNING_START (4102)
    DISABLE_COMPILER_GCC_WARNING_START ("GCC diagnostic ignored \"-Wunused-label\"");
    // Copies up to intoBuffer.size () elements starting at the read cursor, then reclaims
    // consumed buffer space. nullopt iff blockFlag==eDontBlock and no data yet available;
    // empty span at EOF; otherwise the filled prefix of intoBuffer.
    virtual optional<span<ELEMENT_TYPE>> Read (span<ELEMENT_TYPE> intoBuffer, NoDataAvailableHandling blockFlag) override
    {
        Require (not intoBuffer.empty ());
        Require (IsOpenRead ());
        size_t nRequested = intoBuffer.size ();
    tryAgain:
        switch (blockFlag) {
            case eDontBlock: {
                if (this->AvailableToRead () == nullopt) {
                    return nullopt;
                }
            } break;
            [[likely]] case eBlockIfNoDataAvailable: {
                // wait OUTSIDE fMutex_ - a writer must be able to acquire it to produce data
                if constexpr (kLocking_) {
                    fMoreDataWaiter_.Wait ();
                }
            } break;
        }
        // at this point, data is available
        [[maybe_unused]] lock_guard critSec{fMutex_}; // hold lock for everything EXCEPT wait
        Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
        size_t nAvail = distance (fReadCursor_, fData_.cend ());
        if (nAvail == 0 and not fClosedForWrites_) {
            if constexpr (kLocking_) {
                fMoreDataWaiter_.Reset (); // ?? @todo consider - is this a race? If we reset at same time(apx) as someone else sets
                goto tryAgain;             // cannot wait while we hold lock
            }
            else {
                Require (blockFlag == eDontBlock); // else would be a deadlock.
            }
        }
        size_t nCopied = min (nAvail, nRequested);
        {
            copy (fReadCursor_, fReadCursor_ + nCopied, intoBuffer.data ());
            fReadCursor_ = fReadCursor_ + nCopied;
        }
        FreeUpSpaceIfNeeded_ ();
        return intoBuffer.subspan (0, nCopied); // this can be empty on EOF
    }
    DISABLE_COMPILER_GCC_WARNING_END ("GCC diagnostic ignored \"-Wunused-label\"");
    DISABLE_COMPILER_MSC_WARNING_END (4102)
    // Appends elts at the write cursor, growing fData_ as needed; in the locking variant,
    // wakes any readers blocked in Read ().
    virtual void Write (span<const ELEMENT_TYPE> elts) override
    {
        Require (not elts.empty ());
        Require (IsOpenWrite ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        size_t roomLeft = distance (fWriteCursor_, fData_.end ());
        size_t roomRequired = elts.size ();
        if constexpr (kLocking_) {
            fMoreDataWaiter_.Set (); // just means MAY be more data - readers check
        }
        if (roomLeft < roomRequired) {
            // Must grow fData_: capture cursor OFFSETS first, since resize () invalidates iterators.
            size_t curReadOffset = distance (fData_.cbegin (), fReadCursor_);
            size_t curWriteOffset = distance (fData_.begin (), fWriteCursor_);
            const size_t kChunkSize_ = 128; // WAG: @todo tune number...
            Containers::Support::ReserveTweaks::Reserve4AddN (fData_, roomRequired - roomLeft, kChunkSize_);
            fData_.resize (curWriteOffset + roomRequired);
            fReadCursor_ = fData_.begin () + curReadOffset;
            fWriteCursor_ = fData_.begin () + curWriteOffset;
            Assert (fWriteCursor_ < fData_.end ());
        }
        copy (elts.data (), elts.data () + roomRequired, fWriteCursor_);
        fWriteCursor_ += roomRequired;
        Assert (fReadCursor_ < fData_.end ()); // < because we wrote at least one byte and that didn't move read cursor
        Assert (fWriteCursor_ <= fData_.end ());
    }
    virtual void Flush () override
    {
        // nothing todo - write 'writes thru'
    }
    virtual SeekOffsetType GetReadOffset () const override
    {
        Require (IsOpenRead ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        // add back reclaimed prefix so offsets appear monotone despite FreeUpSpaceIfNeeded_
        return fSpaceClearedFromStreamHead_ + distance (fData_.begin (), fReadCursor_);
    }
    virtual SeekOffsetType SeekRead ([[maybe_unused]] Whence whence, [[maybe_unused]] SignedSeekOffsetType offset) override
    {
        // Unseekable stream - seeking is a caller bug.
        // NOTE(review): the upstream source listing shows an omitted line here (most likely
        // 'RequireNotReached ();' per the Assertions.h reference) - confirm against the original file.
        return 0;
    }
    virtual SeekOffsetType GetWriteOffset () const override
    {
        Require (IsOpenWrite ());
        [[maybe_unused]] lock_guard critSec{fMutex_};
        // add back reclaimed prefix so offsets appear monotone despite FreeUpSpaceIfNeeded_
        return fSpaceClearedFromStreamHead_ +
               std::distance (fData_.begin (), static_cast<typename vector<ElementType>::const_iterator> (fWriteCursor_));
    }
    virtual SeekOffsetType SeekWrite ([[maybe_unused]] Whence whence, [[maybe_unused]] SignedSeekOffsetType offset) override
    {
        // Unseekable stream - seeking is a caller bug.
        // NOTE(review): the upstream source listing shows an omitted line here (most likely
        // 'RequireNotReached ();') - confirm against the original file.
        return 0;
    }
    // Private_::IRep_ overrides
    virtual Options GetOptions () const override
    {
        return Options{.fInternallySynchronized = same_as<LOCK_IMPL, recursive_mutex>
                           ? Execution::InternallySynchronized::eInternallySynchronized
                           : Execution::InternallySynchronized::eNotKnownInternallySynchronized,
                       .fSeekable = false};
    }
    virtual vector<ElementType> AsVector () const override
    {
        // Not supported on unseekable reps (already-read data may have been reclaimed).
        // NOTE(review): the upstream source listing shows an omitted line here (most likely
        // 'RequireNotReached ();') - confirm against the original file.
        return {};
    }
    virtual string AsString () const override
    {
        // Not supported on unseekable reps (already-read data may have been reclaimed).
        // NOTE(review): the upstream source listing shows an omitted line here (most likely
        // 'RequireNotReached ();') - confirm against the original file.
        return {};
    }

private:
    /*
     * Since the read stream is not seekable, anything before its read offset can be thrown away. Just adjust the reported 'seek offsets' so its not
     * clear to anyone this has happened.
     *
     * Also - given current data structures (could replace with ChunkedArray - maybe better) - costly to throw stuff away. So for now
     * only do if would save significant space.
     */
    nonvirtual void FreeUpSpaceIfNeeded_ ()
    {
        [[maybe_unused]] lock_guard critSec{fMutex_};
        Assert ((fData_.begin () <= fReadCursor_) and (fReadCursor_ <= fData_.end ()));
        Assert (fReadCursor_ <= fWriteCursor_); // cuz cannot seek, and cannot read past where we've written so far
        constexpr size_t kMinData2Reclaim_ = 16 * 1024; // bytes worth reclaiming; below this, erase () cost outweighs savings
        size_t elts2Reclaim = distance (fData_.cbegin (), fReadCursor_);
        if (elts2Reclaim * sizeof (ELEMENT_TYPE) >= kMinData2Reclaim_ and IsOpenRead () and IsOpenWrite ()) [[unlikely]] {
            // capture reported (virtual) offsets BEFORE erasing, so cursors can be rebuilt afterwards
            SeekOffsetType readOffset = GetReadOffset ();
            SeekOffsetType writeOffset = GetWriteOffset ();
            fData_.erase (fData_.begin (), fData_.begin () + elts2Reclaim);
            fSpaceClearedFromStreamHead_ += elts2Reclaim;
            Assert (readOffset == fSpaceClearedFromStreamHead_); // cuz always wrote more than read, and clear all that read
            fReadCursor_ = fData_.begin () + static_cast<size_t> (readOffset - fSpaceClearedFromStreamHead_);
            fWriteCursor_ = fData_.begin () + static_cast<size_t> (writeOffset - fSpaceClearedFromStreamHead_);
            Assert (readOffset == GetReadOffset ());
            Assert (writeOffset == GetWriteOffset ());
        }
    }

private:
    [[no_unique_address]] mutable LOCK_IMPL fMutex_;
    size_t fSpaceClearedFromStreamHead_{0}; // count of elements erased from the front; added into reported offsets
    [[no_unique_address]] conditional_t<kLocking_, Execution::WaitableEvent, Common::Empty> fMoreDataWaiter_{}; // not a race cuz always set/reset when holding fMutex; no need to pre-set cuz auto set when someone adds data (Write)
    vector<ElementType> fData_; // Important data comes before cursors cuz of use in CTOR
    typename vector<ElementType>::const_iterator fReadCursor_;
    typename vector<ElementType>::iterator fWriteCursor_;
    bool fClosedForWrites_{false};
};
513 }
514
515 /*
516 ********************************************************************************
517 ********************** SharedMemoryStream<ELEMENT_TYPE> ************************
518 ********************************************************************************
519 */
520 template <typename ELEMENT_TYPE>
521 inline auto New (Options options) -> Ptr<ELEMENT_TYPE>
522 {
523 // @todo - could do better on NullMutex stuff \see http://stroika-bugs.sophists.com/browse/STK-584
524 if (options.fSeekable) {
525 return options.fInternallySynchronized == Execution::InternallySynchronized::eInternallySynchronized
526 ? Ptr<ELEMENT_TYPE>{Memory::MakeSharedPtr<Private_::SeekableRep_<ELEMENT_TYPE, recursive_mutex>> ()}
527 : Ptr<ELEMENT_TYPE>{Memory::MakeSharedPtr<Private_::SeekableRep_<ELEMENT_TYPE, Execution::NullMutex>> ()};
528 }
529 else {
530 return options.fInternallySynchronized == Execution::InternallySynchronized::eInternallySynchronized
531 ? Ptr<ELEMENT_TYPE>{Memory::MakeSharedPtr<Private_::UnseekableRep_<ELEMENT_TYPE, recursive_mutex>> ()}
532 : Ptr<ELEMENT_TYPE>{Memory::MakeSharedPtr<Private_::UnseekableRep_<ELEMENT_TYPE, Execution::NullMutex>> ()};
533 }
534 }
535 template <typename ELEMENT_TYPE, typename COPY_FROM>
536 inline auto New (const COPY_FROM& copyFrom, Options options) -> Ptr<ELEMENT_TYPE>
537 requires (same_as<ELEMENT_TYPE, byte> and Common::IAnyOf<COPY_FROM, Memory::BLOB, span<const ELEMENT_TYPE>>)
538 {
539 auto p = New<ELEMENT_TYPE> (options);
540 p.Write (copyFrom);
541 return p;
542 }
543
544 /*
545 ********************************************************************************
546 ****************** SharedMemoryStream::Ptr<ELEMENT_TYPE> ***********************
547 ********************************************************************************
548 */
/*
 *  Wrap an existing SharedMemoryStream rep in a Ptr handle; all state lives in
 *  the shared rep, so the handle itself needs no further initialization.
 */
template <typename ELEMENT_TYPE>
inline Ptr<ELEMENT_TYPE>::Ptr (const shared_ptr<Private_::IRep_<ELEMENT_TYPE>>& from)
    : inherited{from}
{
}
554 template <typename ELEMENT_TYPE>
555 inline auto Ptr<ELEMENT_TYPE>::GetRepConstRef_ () const -> const Private_::IRep_<ELEMENT_TYPE>&
556 {
557 return *Debug::UncheckedDynamicCast<const Private_::IRep_<ELEMENT_TYPE>*> (&inherited::GetRepConstRef ());
558 }
/*
 *  Return the Options this stream was created with (fixed at New () time);
 *  simply delegates to the rep.
 */
template <typename ELEMENT_TYPE>
inline Options Ptr<ELEMENT_TYPE>::GetOptions () const
{
    return GetRepConstRef_ ().GetOptions ();
}
564 template <typename ELEMENT_TYPE>
565 template <typename T>
566 inline T Ptr<ELEMENT_TYPE>::As () const
567 requires (same_as<T, vector<ELEMENT_TYPE>> or (same_as<ELEMENT_TYPE, byte> and Common::IAnyOf<T, Memory::BLOB, string>) or
568 (same_as<ELEMENT_TYPE, Characters::Character> and same_as<T, Characters::String>))
569 {
570 using Characters::Character;
571 using Characters::String;
572 using Memory::BLOB;
573 if constexpr (same_as<T, vector<ELEMENT_TYPE>>) {
574 return GetRepConstRef_ ().AsVector ();
575 }
576 else if constexpr (same_as<T, Memory::BLOB>) {
577 return GetRepConstRef_ ().AsVector ();
578 }
579 else if constexpr (same_as<T, string>) {
580 return GetRepConstRef_ ().AsString ();
581 }
582 else if constexpr (same_as<T, String>) {
583 auto tmp = GetRepConstRef_ ().AsVector ();
584 return String{span{tmp}};
585 }
586 }
587
588 /// deprecated
589 template <typename ELEMENT_TYPE>
590 [[deprecated ("Since Stroika v3.0d5 - use span overload")]] inline auto New (const ELEMENT_TYPE* start, const ELEMENT_TYPE* end)
591 -> Ptr<ELEMENT_TYPE>
592 {
593 return New (span<ELEMENT_TYPE>{start, end});
594 }
595
596}
#define RequireNotReached()
Definition Assertions.h:385
CONTAINER::value_type * End(CONTAINER &c)
For a contiguous container (such as a vector or basic_string) - find the pointer to the end of the co...
CONTAINER::value_type * Start(CONTAINER &c)
For a contiguous container (such as a vector or basic_string) - find the pointer to the start of the ...
void Throw(T &&e2Throw)
identical to builtin C++ 'throw' except that it does helpful, type dependent DbgTrace() messages firs...
Definition Throw.inl:43