From 3716a5b3d4aec2caca57a7a0c1312caa09e05e4f Mon Sep 17 00:00:00 2001 From: Ichthyostega Date: Tue, 26 Dec 2023 20:15:04 +0100 Subject: [PATCH] Scheduler-test: address defects in memory manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ...discovered during investigation of latest Scheduler failures. The root of the problems is that block overflow can potentially trigger expansion of the allocation pool. Under some circumstances, this on-the-fly allocation requires a rotation of index slots, thereby invalidating existing iterators. While such behaviour is not uncommon with storage data structures (see std::vector), in this case it turns out problematic because due to performance considerations, a usage pattern emerged which exploits re-using existing storage »Slots« with known deadline. This optimisation seems to have significant leverage on the planning jobs, which happen to allocate and arrange a whole strike of Activities with similar deadlines. One of these problem situations can easily be fixed, since it is triggered through the iterator itself, using a delegate function to request a storage expansion, at which point the iterator is able to re-link and fix its internal index. This solution also has no tangible performance implications in optimised code. Unfortunately there remains one obscure corner case where such a pool expansion could also have invalidated other iterators, which are then used later to attach dependency relations; even a partial fix for that problem seems to cause considerable performance cost of about -14% in optimised code. 
--- src/vault/gear/block-flow.hpp | 54 +++++++++++++++++-- src/vault/mem/extent-family.hpp | 12 ++++- tests/vault/gear/block-flow-test.cpp | 2 +- wiki/thinkPad.ichthyo.mm | 78 ++++++++++++++++++++++------ 4 files changed, 124 insertions(+), 22 deletions(-) diff --git a/src/vault/gear/block-flow.hpp b/src/vault/gear/block-flow.hpp index 5aee13b9b..43957415e 100644 --- a/src/vault/gear/block-flow.hpp +++ b/src/vault/gear/block-flow.hpp @@ -372,15 +372,58 @@ namespace gear { return static_cast (extent); } - struct StorageAdaptor : RawIter + /** + * Adapt the access to the raw storage to present the Extents as Epoch; + * also caches the address resolution for performance reasons (+20%). + */ + class StorageAdaptor + : public RawIter { + Epoch* curr_{nullptr}; + + Epoch* + accessEpoch() + { + return RawIter::checkPoint()? & asEpoch (RawIter::yield()) + : nullptr; + } + + public: StorageAdaptor() = default; - StorageAdaptor(RawIter it) : RawIter{it} { } - Epoch& yield() const { return asEpoch (RawIter::yield()); } + StorageAdaptor(RawIter it) + : RawIter{it} + , curr_{accessEpoch()} + { } + + bool + checkPoint() const + { + return bool(curr_); + } + + Epoch& + yield() const + { + return *curr_; + } + + void + iterNext() + { + RawIter::iterNext(); + curr_ = accessEpoch(); + } + + void + expandAlloc (size_t cnt =1) + { + RawIter::expandAlloc(cnt); + curr_ = accessEpoch(); + } }; - + public: BlockFlow() : alloc_{Strategy::initialEpochCnt()} @@ -499,7 +542,8 @@ namespace gear { ___sanityCheckAlloc(requiredNew); if (distance % _raw(epochStep_) > 0) ++requiredNew; // fractional: requested deadline lies within last epoch - alloc_.openNew(requiredNew); // Note: nextEpoch now points to the first new Epoch + nextEpoch.expandAlloc (requiredNew); + // nextEpoch now points to the first new Epoch for ( ; 0 < requiredNew; --requiredNew) { REQUIRE (nextEpoch); diff --git a/src/vault/mem/extent-family.hpp b/src/vault/mem/extent-family.hpp index 57e21e5a4..6717be493 100644 --- 
a/src/vault/mem/extent-family.hpp +++ b/src/vault/mem/extent-family.hpp @@ -163,7 +163,17 @@ namespace mem { /* === pass-through extended functionality === */ size_t getIndex() { return index; } - void expandAlloc(){ exFam->openNew();} + + void + expandAlloc (size_t cnt =1) + { + size_t prevStart = exFam->start_; + exFam->openNew(cnt); + if (index >= prevStart) + index += (exFam->start_-prevStart); + // was in a segment that might be moved up + ENSURE (exFam->isValidPos (index)); + } }; diff --git a/tests/vault/gear/block-flow-test.cpp b/tests/vault/gear/block-flow-test.cpp index 5969a27ac..b52be0f74 100644 --- a/tests/vault/gear/block-flow-test.cpp +++ b/tests/vault/gear/block-flow-test.cpp @@ -524,7 +524,7 @@ namespace test { gear::BlockFlow blockFlow; // Note: using the RenderConfig, which uses larger blocks and more pre-allocation auto blockFlowAlloc = [&]{ - auto allocHandle = blockFlow.until(Time{400,0}); + auto allocHandle = blockFlow.until(Time{BASE_DEADLINE}); auto allocate = [&, j=0](Time t, size_t check) mutable -> Activity& { if (++j >= 10) // typically several Activities are allocated on the same deadline diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm index cb5b40cbc..91b5d50f4 100644 --- a/wiki/thinkPad.ichthyo.mm +++ b/wiki/thinkPad.ichthyo.mm @@ -86024,7 +86024,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -86574,10 +86574,19 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + + + + + @@ -86610,13 +86619,39 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -106549,10 +106584,10 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - - + + + + @@ -107858,7 +107893,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -108464,9 +108499,6 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - @@ -108486,7 +108518,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -109133,8 +109165,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -109154,6 +109186,22 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + +
+ + + + + + + + + + + +