/* BlockFlow(Test) - verify scheduler memory management scheme
   Copyright (C) 2023, Hermann Vosseler

   **Lumiera** is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2 of the License, or (at your
   option) any later version. See the file COPYING for further details.
* *****************************************************************/

/** @file block-flow-test.cpp
 ** unit test \ref BlockFlow_test
 */


#include "lib/test/run.hpp"
#include "lib/test/test-helper.hpp"
#include "vault/gear/block-flow.hpp"
#include "lib/test/microbenchmark.hpp"
#include "lib/time/timevalue.hpp"
#include "lib/meta/function.hpp"
#include "lib/format-cout.hpp"
#include "lib/util.hpp"

// NOTE(review): the following three header names were lost in a text-extraction
//               pass (bare `#include` directives remained). Restored from the
//               std:: names brought into scope below — TODO confirm against upstream.
#include <functional>      //  std::reference_wrapper
#include <utility>         //  std::pair
#include <vector>          //  std::vector

using test::Test;
using util::isSameObject;
using lib::test::randTime;
using lib::test::showType;
using lib::time::Offset;
using std::vector;
using std::pair;
using std::reference_wrapper;


namespace vault{
namespace gear {
namespace test {

  namespace { // shorthand for test parametrisation

    using BlockFlow = gear::BlockFlow<>;
    using Allocator = BlockFlow::Allocator;
    using Strategy  = BlockFlow::Strategy;
    using Extent    = BlockFlow::Extent;
    using Epoch     = BlockFlow::Epoch;

    // parameters of the default Strategy, used as reference values in the tests
    const size_t EXTENT_SIZ       = Extent::SIZ();
    Duration INITIAL_EPOCH_STEP   = Strategy{}.initialEpochStep();
    const size_t AVERAGE_EPOCHS   = Strategy{}.averageEpochs();
    const double BOOST_OVERFLOW   = Strategy{}.boostFactorOverflow();
    const double TARGET_FILL      = Strategy{}.config().TARGET_FILL;
    const double ACTIVITIES_P_FR  = Strategy{}.config().ACTIVITIES_PER_FRAME;
  }


  /*****************************************************************//**
   * @test document the memory management scheme used by the Scheduler.
* @see SchedulerActivity_test * @see SchedulerUsage_test */ class BlockFlow_test : public Test { virtual void run (Arg) { seedRand(); simpleUsage(); handleEpoch(); placeActivity(); adjustEpochs(); announceLoad(); storageFlow(); } /** @test demonstrate a simple usage scenario * - open new Epoch to allocate an Activity * - clean-up at a future time point */ void simpleUsage() { BlockFlow bFlow; Time deadline = randTime(); Activity& tick = bFlow.until(deadline).create(); CHECK (tick.verb_ == Activity::TICK); CHECK (1 == watch(bFlow).cntElm()); CHECK (1 == watch(bFlow).cntEpochs()); CHECK (watch(bFlow).first() > deadline); CHECK (watch(bFlow).first() - deadline == bFlow.getEpochStep()); bFlow.discardBefore (deadline + Time{0,5}); CHECK (0 == watch(bFlow).cntEpochs()); CHECK (0 == watch(bFlow).cntElm()); } /** @test cover properties and handling of Epochs (low-level) * - demonstrate that each Epoch is placed into an Extent * - verify that both Extent and Epoch access the same memory block * - demonstrate the standard setup and initialisation of an Epoch * - allocate some Activities into the storage and observe free-managment * - detect when the Epoch is filled up * - verify alive / dead decision relative to given deadline * @note this test covers helpers and implementation structures of BlockFlow, * without actually using a BlockFlow instance; rather, the typical handling * and low-level bookkeeping aspects are emulated and observed */ void handleEpoch() { Allocator alloc; alloc.openNew(); // the raw storage Extent is a compact block // providing uninitialised storage typed as `vault::gear::Activity` Extent& extent = *alloc.begin(); CHECK (extent.size() == Extent::SIZ::value); CHECK (sizeof(extent) == extent.size() * sizeof(Activity)); CHECK (showType() == "vault::gear::Activity"_expect); // we can just access some slot and place data there extent[55].data_.feed.one = 555555555555555; // now establish an Epoch placed into this storage block: Epoch& epoch = Epoch::setup 
(alloc.begin(), Time{0,10}); // the underlying storage is not touched yet... CHECK (epoch[55].data_.feed.one == 555555555555555); // but in the first slot, an »EpochGate« has been implanted Epoch::EpochGate& gate = epoch.gate(); CHECK (isSameObject (gate, epoch[0])); CHECK (isSameObject (epoch[0], extent[0])); CHECK (Time{gate.deadline()} == Time(0,10)); CHECK (Time{gate.deadline()} == Time{epoch[0].data_.condition.dead}); CHECK (epoch[0].is (Activity::GATE)); // the gate's `next`-pointer is (ab)used to manage the next allocation slot CHECK (isSameObject (*gate.next, epoch[extent.size()-1])); CHECK (0 == gate.filledSlots()); CHECK (0 == epoch.getFillFactor()); // the storage there is not used yet.... epoch[extent.size()-1].data_.timing.instant = Time{5,5}; // ....but will be overwritten by the following ctor call // allocate a new Activity into the next free slot (using a faked AllocatorHandle) BlockFlow::AllocatorHandle allocHandle{alloc.begin(), nullptr}; Activity& timeStart = allocHandle.create (Activity::WORKSTART); CHECK (isSameObject (timeStart, epoch[extent.size()-1])); // this Activity object is properly initialised (and memory was altered) CHECK (epoch[extent.size()-1].data_.timing.instant != Time(5,5)); CHECK (epoch[extent.size()-1].data_.timing.instant == Time::NEVER); CHECK (timeStart.verb_ == Activity::WORKSTART); CHECK (timeStart.data_.timing.instant == Time::NEVER); CHECK (timeStart.data_.timing.quality == 0); // and the free-pointer was decremented to point to the next free slot CHECK (isSameObject (*gate.next, epoch[extent.size()-2])); // which also implies that there is still ample space left... CHECK (1 == gate.filledSlots()); CHECK (gate.hasFreeSlot()); CHECK (epoch.getFillFactor() == double(gate.filledSlots()) / (EXTENT_SIZ-1)); // so let's eat this space up... 
for (uint i=extent.size()-2; i>1; --i) gate.claimNextSlot(); // one final slot is left (beyond the EpochGate itself) CHECK (isSameObject (*gate.next, epoch[1])); CHECK (gate.filledSlots() == EXTENT_SIZ-2); CHECK (gate.hasFreeSlot()); gate.claimNextSlot(); // aaand the boat is full... CHECK (not gate.hasFreeSlot()); CHECK (isSameObject (*gate.next, epoch[0])); CHECK (gate.filledSlots() == EXTENT_SIZ-1); CHECK (epoch.getFillFactor() == 1); // a given Epoch can be checked for relevance against a deadline CHECK (gate.deadline() == Time(0,10)); CHECK ( gate.isAlive (Time(0,5))); CHECK ( gate.isAlive (Time(999,9))); CHECK (not gate.isAlive (Time(0,10))); CHECK (not gate.isAlive (Time(1,10))); } /** @test place Activity record into storage * - new Activity without any previously established Epoch * - place Activity into future, expanding the Epoch grid * - locate Activity relative to established Epoch grid * - fill up existing Epoch, causing overflow to next one * - exhaust multiple adjacent Epochs, overflowing to first free one * - exhaust last Epoch, causing setup of new Epoch, with reduced spacing * - use this reduced spacing also for subsequently created Epochs * - clean up obsoleted Epochs, based on given deadline */ void placeActivity() { BlockFlow bFlow; Time t1 = Time{ 0,10}; Time t2 = Time{500,10}; Time t3 = Time{ 0,11}; // no Epoch established yet... 
auto& a1 = bFlow.until(t1).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms"_expect); CHECK (watch(bFlow).find(a1) == "10s200ms"_expect); // setup Epoch grid into the future auto& a3 = bFlow.until(t3).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a3) == "11s"_expect); // associate to existing Epoch auto& a2 = bFlow.until(t2).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a2) == "10s600ms"_expect); Time t0 = Time{0,5}; // late(past) Activity is placed in the oldest Epoch alive auto& a0 = bFlow.until(t0).create(); CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect); CHECK (watch(bFlow).find(a0) == "10s200ms"_expect); // provoke Epoch overflow by exhausting all available storage slots BlockFlow::AllocatorHandle allocHandle = bFlow.until(Time{300,10}); for (uint i=1; i>; using Subjects = vector>; // pre-generate random test data TestData testData{INSTANCES}; for (size_t i=0; i size_t { // allocate Activity record for deadline and with given random payload ASSERT_VALID_SIGNATURE (decltype(allocate), Activity&(Time, size_t)); // access the given Activity, read the payload, then trigger disposal ASSERT_VALID_SIGNATURE (decltype(invoke), size_t(Activity&)); size_t checksum{0}; for (size_t i=0; i storage{INSTANCES}; auto noAlloc = [&]{ // use pre-allocated storage block auto allocate = [i=0, &storage](Time, size_t check) mutable -> Activity& { return *new(&storage[i++]) Activity{check, size_t{55}}; }; auto invoke = [](Activity& feedActivity) { return feedActivity.data_.feed.one; }; sum1 = runTest (allocate, invoke); }; /* =========== Test-Setup-2: individual heap allocations ========== */ size_t sum2{0}; auto heapAlloc = [&]{ auto allocate = [](Time, size_t check) mutable -> Activity& { return *new Activity{check, size_t{55}}; }; auto invoke = [](Activity& feedActivity) { size_t check = 
feedActivity.data_.feed.one; delete &feedActivity; return check; }; sum2 = runTest (allocate, invoke); }; /* =========== Test-Setup-3: manage individually by ref-cnt ========== */ size_t sum3{0}; vector> manager{INSTANCES}; auto sharedAlloc = [&]{ auto allocate = [&, i=0](Time, size_t check) mutable -> Activity& { Activity* a = new Activity{check, size_t{55}}; manager[i].reset(a); ++i; return *a; }; auto invoke = [&, i=0](Activity& feedActivity) mutable { size_t check = feedActivity.data_.feed.one; manager[i].reset(); return check; }; sum3 = runTest (allocate, invoke); }; /* =========== Test-Setup-4: use BlockFlow allocation scheme ========== */ size_t sum4{0}; gear::BlockFlow blockFlow; // Note: using the RenderConfig, which uses larger blocks and more pre-allocation auto blockFlowAlloc = [&]{ auto allocHandle = blockFlow.until(Time{BASE_DEADLINE}); auto allocate = [&, j=0](Time t, size_t check) mutable -> Activity& { if (++j >= 10) // typically several Activities are allocated on the same deadline { allocHandle = blockFlow.until(t); j = 0; } return allocHandle.create (check, size_t{55}); }; auto invoke = [&, i=0](Activity& feedActivity) mutable { size_t check = feedActivity.data_.feed.one; if (i % CLEAN_UP == 0) blockFlow.discardBefore (Time{TimeValue{i*STP}}); ++i; return check; }; sum4 = runTest (allocate, invoke); }; // INVOKE Setup-1 auto time_noAlloc = benchmark(noAlloc); // INVOKE Setup-2 auto time_heapAlloc = benchmark(heapAlloc); // INVOKE Setup-3 auto time_sharedAlloc = benchmark(sharedAlloc); cout<<"\n\n■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■"<