diff --git a/src/vault/gear/block-flow.hpp b/src/vault/gear/block-flow.hpp index 3ff453119..d709e2329 100644 --- a/src/vault/gear/block-flow.hpp +++ b/src/vault/gear/block-flow.hpp @@ -28,7 +28,7 @@ ** as messages through the scheduler, the actual implementation requires a fixed ** descriptor record sitting at a stable memory location while the computation ** is underway. Moreover, activities can spawn further activities, implying that - ** activity descriptor records can emanate from multiple threads concurrently, + ** activity descriptor records for various deadlines need to be accommodated ** and the duration to keep those descriptors in valid state is contingent. ** On the other hand, ongoing rendering produces a constant flow of further ** activities, necessitating timely clean-up of obsolete descriptors. @@ -36,6 +36,34 @@ ** pooled allocation tiles, extending the underlying block ** allocation on increased throughput. ** + ** # Implementation technique + ** + ** The usage within the [Scheduler](\ref scheduler.hpp) can be arranged in a way + ** to avoid concurrency issues altogether; while allocations are not always done + ** by _the same thread,_ it can be ensured at any given time that only a single + ** Worker performs Scheduler administrative tasks (queue management and allocation); + ** a read/write barrier is issued whenever some Worker enters this management mode. + ** + ** Memory is allocated in larger _extents,_ which are then used to place individual + ** fixed-size allocations. These are not managed further, assuming that the storage + ** is used for POD data records, and the destructors need not be invoked at all. + ** This arrangement is achieved by interpreting the storage extents as temporal + ** *Epochs*. Each #Epoch holds an Epoch::EpochGate to define a deadline and to allow + ** blocking this Epoch by pending IO operations (with the help of a count-down latch). 
+ ** The rationale is based on the observation that any render activity for late and + ** obsolete goals is pointless and can be just side stepped. Once the scheduling has + ** passed a defined deadline (and no further pending IO operations are around), the + ** Epoch can be abandoned as a whole and the storage extent can be re-used. + ** + ** Dynamic adjustments are necessary to keep this scheme running efficiently. + ** Ideally, the temporal stepping between subsequent Epochs should be chosen such + ** as to accommodate all render activities with deadlines falling into this Epoch, + ** without wasting much space for unused storage slots. But the throughput and thus + ** the allocation pressure of the scheduler can change intermittently, necessitating + ** to handle excess allocations by shifting them into the next Epoch. These _overflow + ** events_ are registered, and on clean-up the actual usage ratio of each Epoch is + ** detected, leading to exponentially damped adjustments of the actual Epoch duration. + ** ** @note currently this rather marks the intended memory management pattern, ** while the actual allocations are still performed on the heap. ** @see BlockFlow_test @@ -84,7 +112,7 @@ namespace gear { const Duration INITIAL_EPOCH_STEP{FRAMES_PER_EPOCH * FrameRate{50}.duration()}; - const Rat OVERFLOW_BOOST_FACTOR = 9_r/10; ///< increase capacity on each Epoch overflow event + const Rat OVERFLOW_BOOST_FACTOR = 9_r/10; ///< increase capacity on each Epoch overflow event const size_t AVERAGE_EPOCHS = 10; ///< moving average len for exponential convergence towards average Epoch fill /** raw allocator to provide a sequence of Extents to place Activity records */ @@ -92,6 +120,7 @@ namespace gear { } + /** * Allocation Extent holding _scheduler Activities_ to be performed altogether @@ -187,7 +216,7 @@ namespace gear { - /** + /******************************************************//** * Allocation scheme for the Scheduler, based on Epoch(s). 
* Scheduling entails to provide a chain of Activity definitions, * which will then »flow« through the priority queue until invocation. @@ -237,7 +266,7 @@ namespace gear { } - /** Adapted storage-extent iterator, directly exposing Extent& */ + /** Adapted storage-Extent iterator, directly exposing Epoch& */ using EpochIter = lib::IterableDecorator; @@ -279,19 +308,29 @@ namespace gear { void* claimSlot() ///< EX_SANE { + bool first{true}; while (not (epoch_ and epoch_->gate().hasFreeSlot())) // Epoch overflow // use following Epoch; possibly allocate - if (not epoch_) - { - auto lastDeadline = flow_->lastEpoch().deadline(); - epoch_.expandAlloc(); - ENSURE (epoch_); - Epoch::setup (epoch_, lastDeadline + flow_->getEpochStep()); - } - else - ++epoch_; + { + if (first) + {// each shifted allocation accounted once as overflow + flow_->markEpochOverflow(); + first = false; + } + if (not epoch_) + { + auto lastDeadline = flow_->lastEpoch().deadline(); + epoch_.expandAlloc(); // may throw out-of-memory.. + ENSURE (epoch_); + Epoch::setup (epoch_, lastDeadline + flow_->getEpochStep()); + } + else + { + ++epoch_; + } + } return epoch_->gate().claimNextSlot(); } }; @@ -299,6 +338,10 @@ namespace gear { /* ===== public BlockFlow API ===== */ + /** + * initiate allocations for activities to happen until some deadline + * @return opaque handle allowing to perform several allocations. + */ AllocatorHandle until (Time deadline) { @@ -323,7 +366,7 @@ namespace gear { ENSURE (not nextEpoch); // not valid yet, but we will allocate starting there... 
auto requiredNew = distance / _raw(epochStep_); if (distance % _raw(epochStep_) > 0) - ++requiredNew; // fractional: requested deadline lies within last epoch + ++requiredNew; // fractional: requested deadline lies within last epoch alloc_.openNew(requiredNew); // Note: epochHandle now points to the first new Epoch for ( ; 0 < requiredNew; --requiredNew) { @@ -348,6 +391,13 @@ namespace gear { } } + /** + * Clean-up all storage related to activities before the given deadline. + * @note when some Epoch is blocked by pending IO, all subsequent Epochs + * will be kept alive too, since the returning IO operation may trigger + * activities there (at least up to the point where the control logic + * detects a timeout and abandons the execution chain). + */ void discardBefore (Time deadline) { @@ -376,7 +426,7 @@ namespace gear { void markEpochOverflow() { - UNIMPLEMENTED ("adjust size after overflow"); + adjustEpochStep (OVERFLOW_BOOST_FACTOR); } /** diff --git a/tests/vault/gear/block-flow-test.cpp b/tests/vault/gear/block-flow-test.cpp index 2ff4f8a73..fb326b0ab 100644 --- a/tests/vault/gear/block-flow-test.cpp +++ b/tests/vault/gear/block-flow-test.cpp @@ -66,7 +66,6 @@ namespace test { run (Arg) { simpleUsage(); - verifyAPI(); handleEpoch(); placeActivity(); adjustEpochs(); @@ -96,19 +95,6 @@ namespace test { - /** @test verify the primary BlockFlow API functions in isolation - * @todo WIP 7/23 ⟶ define ⟶ implement - */ - void - verifyAPI() - { -//SHOW_EXPR(watch(bFlow).cntEpochs()); -//SHOW_EXPR(watch(bFlow).poolSize()); -//SHOW_EXPR(watch(bFlow).first()); - } - - - /** @test cover properties and handling of Epochs (low-level) * - demonstrate that Epoch is placed into an Extent * - verify that both Extent and Epoch access the same memory block @@ -200,8 +186,16 @@ namespace test { - /** @test TODO place Activity record into storage - * @todo WIP 7/23 ⟶ ✔define ⟶ 🔁implement + /** @test place Activity record into storage + * - new Activity without any previously 
established Epoch + * - place Activity into future, expanding the Epoch grid + * - locate Activity relative to established Epoch grid + * - fill up existing Epoch, causing overflow to next one + * - exhaust multiple adjacent Epochs, overflowing to first free one + * - exhaust last Epoch, causing setup of new Epoch, with reduced spacing + * - use this reduced spacing also for subsequently created Epochs + * - clean up obsoleted Epochs, based on given deadline + * @todo WIP 7/23 ⟶ ✔define ⟶ ✔implement */ void placeActivity() @@ -212,24 +206,28 @@ namespace test { Time t2 = Time{500,10}; Time t3 = Time{ 0,11}; + // no Epoch established yet... auto& a1 = bFlow.until(t1).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms"_expect); CHECK (watch(bFlow).find(a1) == "10s200ms"_expect); + // setup Epoch grid into the future auto& a3 = bFlow.until(t3).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a3) == "11s"_expect); + // associate to existing Epoch auto& a2 = bFlow.until(t2).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a2) == "10s600ms"_expect); Time t0 = Time{0,5}; - + // late(past) Activity is placed in the oldest Epoch alive auto& a0 = bFlow.until(t0).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a0) == "10s200ms"_expect); + // provoke Epoch overflow by exhausting all available storage slots BlockFlow::AllocatorHandle allocHandle = bFlow.until(Time{300,10}); for (uint i=1; i -
+
//Integration effort to promote the development of rendering, playback and video display in the GUI//
 This IntegrationSlice was started in {{red{2023}}} as [[Ticket #1221|https://issues.lumiera.org/ticket/1221]] to coordinate the completion and integration of various implementation facilities, planned, drafted and built during the last years; this effort marks the return of development focus to the lower layers (after years of focussed UI development) and will implement the asynchronous and time-bound rendering coordinated by the [[Scheduler]] in the [[Vault|Vault-Layer]]
 
@@ -6222,6 +6222,11 @@ The Scheduler will be structured into two Layers, where the lower layer is imple
 :* thus needs to //understand Job dependencies//
 :* will be decomposed into several implementation layers
 :SchedulerMemory will be managed by an //Extent scheme.//
+;Asynchronous IO
+:is treated as a responsibility and may block dependent planned activities
+:a dependency check is operationalised as an activity primitive and is also used to hold allocations alive
+:operational control and data management are //performed by the workers// interspersed with render activities
+:however, only one worker at any time is allowed to perform these meta tasks, avoiding further synchronisation
 
@@ -7095,7 +7100,7 @@ The Scheduler is now considered an implementation-level facility with an interfa &rarr; [[Workers|SchedulerWorker]]
-
+
//The Scheduler uses an »Extent« based memory management scheme known as {{{BlockFlow}}}.//
 The organisation of rendering happens in terms of [[Activities|RenderActivity]], which may bound by //dependencies// and limited by //deadlines.// For the operational point of view this implies that a sequence of allocations must be able to „flow through the Scheduler“ -- in fact, only references to these {{{Activity}}}-records are passed, while the actual descriptors reside at fixed memory locations. This is essential to model the dependencies and conditional execution structures efficiently. At some point however, any {{{Activity}}}-record will either be //performed// or //obsoleted// -- and this leads to the idea of managing the allocations in //extents// of memory here termed as »Epochs«
 * a new Activity is planted into a suitable //Epoch,// based on its deadline
@@ -7110,7 +7115,7 @@ Unfortunately this tricky arrangement also implies that many safety barriers of
 * each »Epoch« gets an associated //deadline//
 * when the next [[job|RenderJob]] processed by a worker starts //after this Epoch's deadline//, the worker //has left the Epoch.//
 * when all workers have left an Epoch, only ''pending async IO tasks'' need to be considered, since IO can possibly be delayed for an extended period of time.<br/>For an IO task, buffers //need to be kept available,// and those buffers are indirectly tied to the job depending on them.
-* ⟹ thus a count of pending IO activities must be maintained //for each Epoch//  -- implemented by the same mechanism also employed for dependencies between render jobs, which is a notification message causing a local counter to be decremented.
+* ⟹ thus a count of pending IO activities must be maintained //for each Epoch//  -- implemented by the same mechanism also employed for dependencies between render jobs, which is a notification message causing a local counter to be decremented.<br/>All Epochs ''following the blocked one must be blocked as well'' -- since the callback from IO may immediately pass control there; only later, when the execution logic detects a passed deadline, it is possible to side step further activities; this is achieved by inserting {{{GATE}}}-[[Activity records|RenderActivity]] before any render job invocation bound by deadline.
 
 !operational capabilities
 The memory management for the scheduler is arranged into three layers...
diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm
index 54c098407..49f738c8c 100644
--- a/wiki/thinkPad.ichthyo.mm
+++ b/wiki/thinkPad.ichthyo.mm
@@ -78830,8 +78830,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -78841,21 +78841,20 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + - - - - - + + + + - - + + @@ -78867,8 +78866,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -78885,8 +78884,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -78902,20 +78901,20 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + + + - - - - -
- - - - + + @@ -79331,6 +79330,12 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + @@ -79711,8 +79716,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -79732,9 +79737,20 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + + + + +

+ Entscheidung: Allokation entgleist nur ausnahmsweise +

+ + +
+ +
@@ -79747,11 +79763,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - - - + + @@ -79808,8 +79821,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -79833,8 +79846,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -79886,10 +79899,10 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + - + @@ -79994,7 +80007,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -80010,8 +80023,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + +
@@ -80029,10 +80042,10 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - - + + + + @@ -80068,8 +80081,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -80195,15 +80208,21 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + - - + + + + + + + + @@ -80226,7 +80245,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- +