From 18904e5b580f31ae607f47d26e517486e30186d4 Mon Sep 17 00:00:00 2001 From: Ichthyostega Date: Wed, 12 Jul 2023 21:55:50 +0200 Subject: [PATCH] Block-Flow: completed implementation of low-level cyclic extent storage ..verified boundary cases for expansion while retaining addresses of currently active extents... --- src/vault/mem/extent-family.hpp | 11 +-- tests/11vault-memory.tests | 2 +- tests/vault/mem/extent-family-test.cpp | 105 +++++++++++++++++++------ wiki/renderengine.html | 11 ++- wiki/thinkPad.ichthyo.mm | 99 +++++++++++++++-------- 5 files changed, 160 insertions(+), 68 deletions(-) diff --git a/src/vault/mem/extent-family.hpp b/src/vault/mem/extent-family.hpp index fd55a993b..c637855c3 100644 --- a/src/vault/mem/extent-family.hpp +++ b/src/vault/mem/extent-family.hpp @@ -34,9 +34,6 @@ ** ** @see ExtentFamily_test ** @see gear::BlockFlow usage example - ** - ** @todo WIP-WIP-WIP 7/2023 »Playback Vertical Slice« - ** */ @@ -49,7 +46,6 @@ #include "lib/nocopy.hpp" #include "lib/util.hpp" -//#include #include #include #include @@ -59,13 +55,12 @@ namespace vault{ namespace mem { -// using util::isnil; -// using std::string; using util::unConst; template class ExtentDiagnostic; - + + /** * Memory manager to provide a sequence of Extents for cyclic usage. @@ -73,7 +68,6 @@ namespace mem { * ctors or dtors on the objects nominally located in »slots« within the Extent. 
* @tparam T payload type living in the Extent's »slots« * @tparam siz number of »slots« per Extent - * @todo WIP-WIP 7/2023 * @see ExtentFamily_test */ template @@ -237,6 +231,7 @@ namespace mem { friend iterator end (ExtentFamily& exFam) { return exFam.end(); } + private: /* ====== storage management implementation ====== */ bool isWrapped() const diff --git a/tests/11vault-memory.tests b/tests/11vault-memory.tests index e24544c4e..083c2b0db 100644 --- a/tests/11vault-memory.tests +++ b/tests/11vault-memory.tests @@ -2,6 +2,6 @@ TESTING "Low-level Test Suite: Memory Management" ./test-suite --group=memory -PLANNED "Cyclic Extent Sequence" ExtentFamily_test < using test::Test; -//using std::move; using util::isnil; using util::isSameObject; using lib::explore; @@ -46,10 +42,6 @@ namespace vault{ namespace mem { namespace test { -// using lib::time::FrameRate; -// using lib::time::Offset; -// using lib::time::Time; - using Extents = ExtentFamily; using Extent = Extents::Extent; using Iter = Extents::iterator; @@ -57,7 +49,6 @@ namespace test { - /***************************************************************//** * @test document and verify a memory management scheme to maintain * a flexible set of _»memory extents«_ for cyclic usage. @@ -243,10 +234,32 @@ namespace test { /** @test verify in detail how iteration wraps around to also reuse * previously dropped extents, possibly rearranging the internal * management-vector to allow growing new extents at the end. 
+ * - existing allocations are re-used cyclically + * - this may lead to a »wrapped« internal state + * - necessitating to expand allocations in the middle + * - yet all existing Extent addresses remain stable */ void wrapAround() { + // Helper to capture the storage addresses of all currently active Extents + auto snapshotAdr = [](Extents& extents) + { + auto takeAdr = [](auto& x){ return &*x; }; + return explore(extents).transform(takeAdr).effuse(); + }; + auto verifyAdr = [](auto snapshot, auto it) + { + for (auto oldAddr : snapshot) + { + if (not isSameObject(*oldAddr, *it)) + return false; + ++it; + } + return true; + }; + + Extents extents{5}; CHECK ( 0 == watch(extents).first()); CHECK ( 0 == watch(extents).last()); @@ -259,27 +272,69 @@ namespace test { CHECK ( 4 == watch(extents).active()); CHECK ( 5 == watch(extents).size()); - auto takeAdr = [](auto& x){ return &*x; }; - auto snapshot = explore(extents).transform(takeAdr).effuse(); -SHOW_EXPR(snapshot.size()) -SHOW_EXPR(snapshot[0]) -SHOW_EXPR(snapshot[3]) + auto snapshot = snapshotAdr(extents); // capture *addresses* of currently active Extents + CHECK (4 == snapshot.size()); extents.openNew(); -SHOW_EXPR(watch(extents).first()) -SHOW_EXPR(watch(extents).last()) -SHOW_EXPR(watch(extents).active()) -SHOW_EXPR(watch(extents).size()) CHECK ( 0 == watch(extents).first()); CHECK ( 5 == watch(extents).last()); CHECK ( 5 == watch(extents).active()); CHECK (10 == watch(extents).size()); // Note: heuristics to over-allocate to some degree - auto it = extents.begin(); - for (auto oldAddr : snapshot) - { - CHECK (isSameObject(*oldAddr, *it)); - ++it; - } + CHECK (verifyAdr (snapshot, extents.begin())); + + extents.dropOld(3); // place the active window such as to start on last snapshotted Extent + CHECK ( 3 == watch(extents).first()); + CHECK ( 5 == watch(extents).last()); + CHECK ( 2 == watch(extents).active()); + CHECK (10 == watch(extents).size()); + CHECK (isSameObject (*extents.begin(), *snapshot.back())); + 
+ extents.openNew(6); // now provoke a »wrapped« state of internal management of active Extents + CHECK ( 3 == watch(extents).first()); // ...Note: the position of the *first* active Extent... + CHECK ( 1 == watch(extents).last()); // ... is *behind* the position of the last active Extent + CHECK ( 8 == watch(extents).active()); // ... implying that the active strike wraps at allocation end + CHECK (10 == watch(extents).size()); + snapshot = snapshotAdr (extents); // take a new snapshot; this also verifies proper iteration + CHECK (8 == snapshot.size()); + + extents.openNew(2); // ask for more than can be accommodated without ambiguity + CHECK ( 8 == watch(extents).first()); // ...Note: new allocation was inserted, existing tail shifted + CHECK ( 3 == watch(extents).last()); // ... allowing for the requested two »slots« to be accommodated + CHECK (10 == watch(extents).active()); + CHECK (15 == watch(extents).size()); + CHECK (verifyAdr (snapshot, extents.begin())); // ... yet all existing Extent addresses have been rotated transparently + + extents.dropOld(10); // close out all active slots, wrapping the first-pos to approach last + CHECK ( 3 == watch(extents).first()); + CHECK ( 3 == watch(extents).last()); + CHECK ( 0 == watch(extents).active()); + CHECK (15 == watch(extents).size()); + + extents.openNew(12); // provoke a special boundary situation, where the end is *just wrapped* + CHECK ( 3 == watch(extents).first()); + CHECK ( 0 == watch(extents).last()); + CHECK (12 == watch(extents).active()); + CHECK (15 == watch(extents).size()); + + extents.dropOld(11); // and make this boundary situation even more nasty, just sitting on the rim + CHECK (14 == watch(extents).first()); + CHECK ( 0 == watch(extents).last()); + CHECK ( 1 == watch(extents).active()); + CHECK (15 == watch(extents).size()); + + CHECK (14 == extents.begin().getIndex()); + snapshot = snapshotAdr (extents); // verify iteration end just after wrapping properly detected + CHECK (1 == 
snapshot.size()); + CHECK (isSameObject (*extents.begin(), *snapshot.front())); + + extents.openNew(14); // and now provoke further expansion, adding new allocation right at start + CHECK (19 == watch(extents).first()); // ...Note: first must be relocated to sit again at the very rim + CHECK (14 == watch(extents).last()); // ... to allow last to sit at the index previously used by first + CHECK (15 == watch(extents).active()); + CHECK (20 == watch(extents).size()); + + CHECK (19 == extents.begin().getIndex()); // ... yet address of the first Extent remains the same, just held in another slot + CHECK (isSameObject (*extents.begin(), *snapshot.front())); } }; diff --git a/wiki/renderengine.html b/wiki/renderengine.html index 3056f0eeb..9733f076d 100644 --- a/wiki/renderengine.html +++ b/wiki/renderengine.html @@ -7095,7 +7095,7 @@ The Scheduler is now considered an implementation-level facility with an interfa &rarr; [[Workers|SchedulerWorker]] -
+
//The Scheduler uses an »Extent« based memory management scheme known as {{{BlockFlow}}}.//
 The organisation of rendering happens in terms of [[Activities|RenderActivity]], which may bound by //dependencies// and limited by //deadlines.// For the operational point of view this implies that a sequence of allocations must be able to „flow through the Scheduler“ -- in fact, only references to these {{{Activity}}}-records are passed, while the actual descriptors reside at fixed memory locations. This is essential to model the dependencies and conditional execution structures efficiently. At some point however, any {{{Activity}}}-record will either be //performed// or //obsoleted// -- and this leads to the idea of managing the allocations in //extents// of memory here termed as »Epochs«
 * a new Activity is planted into a suitable //Epoch,// based on its deadline
@@ -7109,8 +7109,15 @@ This is a rather fragile composition and chosen here for performance reasons; wh
 Unfortunately this tricky arrangement also implies that many safety barriers of the C++ language are circumvented. A strict processing regime must be established, with clear rules as to when activities may, or may no longer be accessed.
 * each »Epoch« gets an associated //deadline//
 * when the next [[job|RenderJob]] processed by a worker starts //after this Epoch's deadline//, the worker //has left the Epoch.//
-* when all workers have left an Epoch, only ''pending async IO tasks'' need to be considered, since such IO task can always be delayed for an extended period of time. For an IO task, buffers need to be kept available, and those buffers are indirectly tied to the job depending on them.
+* when all workers have left an Epoch, only ''pending async IO tasks'' need to be considered, since IO can possibly be delayed for an extended period of time.<br/>For an IO task, buffers //need to be kept available,// and those buffers are indirectly tied to the job depending on them.
 * ⟹ thus a count of pending IO activities must be maintained //for each Epoch//  -- implemented by the same mechanism also employed for dependencies between render jobs, which is a notification message causing a local counter to be decremented.
+
+!operational capabilities
+The memory management for the scheduler is arranged into three layers...
+* raw memory is allocated in large blocks of {{{Extent}}} size -- {{red{currently as of 7/23}}} claimed from regular heap storage
+* a low-level allocation scheme, the {{{ExtentFamily}}} uses a //pool of extents cyclically,// with the ability to claim more extents on-demand
+* the high-level {{{BlockFlow}}} allocation manager is aware of scheduler semantics and dresses up those extents as {{{Epoch}}}
+For each new RenderActivity, the API usage with the help of the {{{ActivityLang}}} is required to designate a ''deadline'' -- which can be used to associate the corresponding {{{Activity}}}-records with a suitable {{{Epoch}}}. The //temporal spacing// of epochs, as well as the number of active epochs (=extents) must be managed dynamically. {{red{As of 7/23, a scheme to avoid control oscillations}}} needs to be devised, see [[#1316|https://issues.lumiera.org/ticket/1316]]. When the reserved allocation for an epoch turns out to be insufficient (i.e. the underlying extent has been filled up prior to maturity), further {{{Activity}}} records will be //„borrowed“// from the next epoch, while reducing the epoch spacing for compensation. Each {{{Epoch}}} automatically maintains a specifically rigged »''~EpochGuard''«-{{{Activity}}}, always located in the first »slot« of the epoch storage. This guard models the deadline and additionally allows blocking deallocation with a count-down latch, which can be tied to pending IO operations.
 
diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm index 469b2978b..2755dcea1 100644 --- a/wiki/thinkPad.ichthyo.mm +++ b/wiki/thinkPad.ichthyo.mm @@ -78789,9 +78789,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + @@ -78854,19 +78854,34 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + - - + + + + - - + + - - + + + + + + + + + + + + + + + @@ -79107,12 +79122,12 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + - + @@ -79132,9 +79147,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + @@ -79203,7 +79218,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200

- +
@@ -79221,14 +79236,14 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - + + + - + @@ -79371,8 +79386,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -79389,8 +79404,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + @@ -79423,6 +79439,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + +
@@ -79436,12 +79455,14 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + - - + + + + @@ -79474,7 +79495,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -80530,9 +80551,23 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + + + + + +

+ tatsächlich ... wenn die Datenfelder base values sind +

+ +
+ +
+ + +