/*
  BlockFlow(Test)  -  verify scheduler memory management scheme

  Copyright (C)         Lumiera.org
    2023,               Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

* *****************************************************/

/** @file block-flow-test.cpp
 ** unit test \ref BlockFlow_test
 */
#include "lib/test/run.hpp"
#include "lib/test/test-helper.hpp"
#include "vault/gear/block-flow.hpp"
//#include "lib/time/timevalue.hpp"
//#include "lib/format-cout.hpp"
#include "lib/test/diagnostic-output.hpp"/////////////////////////////////////TODO
#include "lib/meta/function.hpp"
#include "lib/util.hpp"
//#include <utility>

#include <vector>
#include <tuple>


using test::Test;
//using std::move;
using util::isSameObject;
using lib::test::randTime;
using lib::test::showType;
using lib::time::Offset;
using std::vector;
using std::pair;
using std::reference_wrapper;
2023-07-03 18:40:37 +02:00
namespace vault {
2023-07-05 15:10:34 +02:00
namespace gear {
2023-07-03 18:40:37 +02:00
namespace test {
// using lib::time::FrameRate;
// using lib::time::Time;
/*****************************************************************/ /**
* @ test document the memory management scheme used by the Scheduler .
* @ see SchedulerActivity_test
* @ see SchedulerUsage_test
*/
class BlockFlow_test : public Test
{
virtual void
run ( Arg )
{
simpleUsage ( ) ;
2023-07-13 01:51:21 +02:00
handleEpoch ( ) ;
placeActivity ( ) ;
adjustEpochs ( ) ;
storageFlow ( ) ;
2023-07-03 18:40:37 +02:00
}
2023-07-13 19:19:55 +02:00
/** @test demonstrate a simple usage scenario
* - open new Epoch to allocate an Activity
* - clean - up at a future time point
2023-07-03 18:40:37 +02:00
*/
void
simpleUsage ( )
{
2023-07-05 15:10:34 +02:00
BlockFlow bFlow ;
Time deadline = randTime ( ) ;
2023-07-13 03:41:24 +02:00
2023-07-13 01:51:21 +02:00
Activity & tick = bFlow . until ( deadline ) . create ( ) ;
2023-07-13 03:41:24 +02:00
CHECK ( tick . verb_ = = Activity : : TICK ) ;
CHECK ( 1 = = watch ( bFlow ) . cntEpochs ( ) ) ;
CHECK ( watch ( bFlow ) . first ( ) > deadline ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . first ( ) - deadline = = bFlow . getEpochStep ( ) ) ;
2023-07-05 15:10:34 +02:00
bFlow . discardBefore ( deadline + Time { 0 , 5 } ) ;
2023-07-13 03:41:24 +02:00
CHECK ( 0 = = watch ( bFlow ) . cntEpochs ( ) ) ;
2023-07-03 18:40:37 +02:00
}
2023-07-14 01:51:00 +02:00
/** @test cover properties and handling of Epochs (low-level)
* - demonstrate that Epoch is placed into an Extent
* - verify that both Extent and Epoch access the same memory block
* - demonstrate the standard setup and initialisation of an Epoch
* - allocate some Activities into the storage and observe free - managment
* - detect when the Epoch is filled up
* - verify alive / dead decision relative to given deadline
* @ note this test covers helpers and implementation structures of BlockFlow ,
* without actually using a BlockFlow instance ; rather , the typical handling
* and low - level bookkeeping aspects are emulated and observed
2023-07-03 18:40:37 +02:00
*/
void
2023-07-13 01:51:21 +02:00
handleEpoch ( )
{
2023-07-14 01:51:00 +02:00
using Extent = Allocator : : Extent ;
// the raw storage Extent is a compact block
// providing uninitialised storage typed as `vault::gear::Activity`
Allocator alloc ;
alloc . openNew ( ) ;
Extent & extent = * alloc . begin ( ) ;
CHECK ( extent . size ( ) = = Extent : : SIZ : : value ) ;
CHECK ( sizeof ( extent ) = = extent . size ( ) * sizeof ( Activity ) ) ;
CHECK ( showType < Extent : : value_type > ( ) = = " vault::gear::Activity " _expect ) ;
// we can just access some slot and place data there
extent [ 55 ] . data_ . feed . one = 555555555555555 ;
// now establish an Epoch in this storage block:
Epoch & epoch = Epoch : : setup ( alloc . begin ( ) , Time { 0 , 10 } ) ;
// the underlying storage is not touched yet...
CHECK ( epoch [ 55 ] . data_ . feed . one = = 555555555555555 ) ;
// but in the first slot, an »EpochGate« has been implanted
Epoch : : EpochGate & gate = epoch . gate ( ) ;
CHECK ( isSameObject ( gate , epoch [ 0 ] ) ) ;
CHECK ( isSameObject ( epoch [ 0 ] , extent [ 0 ] ) ) ;
CHECK ( Time { gate . deadline ( ) } = = Time ( 0 , 10 ) ) ;
CHECK ( Time { gate . deadline ( ) } = = Time { epoch [ 0 ] . data_ . condition . dead } ) ;
CHECK ( Activity : : GATE = = epoch [ 0 ] . verb_ ) ;
// the gate's `next`-pointer is (ab)used to manage the next allocation slot
CHECK ( isSameObject ( * gate . next , epoch [ extent . size ( ) - 1 ] ) ) ;
2023-07-17 04:32:10 +02:00
CHECK ( 0 = = gate . filledSlots ( ) ) ;
CHECK ( 0 = = epoch . getFillFactor ( ) ) ;
2023-07-14 01:51:00 +02:00
// the storage there is not yet used, but will be overwritten by the ctor call
epoch [ extent . size ( ) - 1 ] . data_ . timing . instant = Time { 5 , 5 } ;
2023-07-16 18:03:27 +02:00
// allocate a new Activity into the next free slot (using a faked AllocatorHandle)
BlockFlow : : AllocatorHandle allocHandle { alloc . begin ( ) , nullptr } ;
2023-07-14 01:51:00 +02:00
Activity & timeStart = allocHandle . create ( Activity : : TIMESTART ) ;
CHECK ( isSameObject ( timeStart , epoch [ extent . size ( ) - 1 ] ) ) ;
// this Activity object is properly initialised (and memory was altered)
CHECK ( epoch [ extent . size ( ) - 1 ] . data_ . timing . instant ! = Time ( 5 , 5 ) ) ;
CHECK ( epoch [ extent . size ( ) - 1 ] . data_ . timing . instant = = Time : : NEVER ) ;
CHECK ( timeStart . verb_ = = Activity : : TIMESTART ) ;
CHECK ( timeStart . data_ . timing . instant = = Time : : NEVER ) ;
CHECK ( timeStart . data_ . timing . quality = = 0 ) ;
// and the free-pointer was decremented to point to the next free slot
CHECK ( isSameObject ( * gate . next , epoch [ extent . size ( ) - 2 ] ) ) ;
// which also implies that there is still ample space left...
2023-07-17 04:32:10 +02:00
CHECK ( 1 = = gate . filledSlots ( ) ) ;
2023-07-14 01:51:00 +02:00
CHECK ( gate . hasFreeSlot ( ) ) ;
2023-07-17 04:32:10 +02:00
CHECK ( epoch . getFillFactor ( ) = = Rat ( gate . filledSlots ( ) , Epoch : : SIZ ( ) - 1 ) ) ;
2023-07-14 01:51:00 +02:00
// so let's eat this space up...
for ( uint i = extent . size ( ) - 2 ; i > 1 ; - - i )
2023-07-16 18:03:27 +02:00
gate . claimNextSlot ( ) ;
2023-07-14 01:51:00 +02:00
// one final slot is left (beyond of the EpochGate itself)
CHECK ( isSameObject ( * gate . next , epoch [ 1 ] ) ) ;
2023-07-17 04:32:10 +02:00
CHECK ( gate . filledSlots ( ) = = Epoch : : SIZ ( ) - 2 ) ;
2023-07-14 01:51:00 +02:00
CHECK ( gate . hasFreeSlot ( ) ) ;
2023-07-16 18:03:27 +02:00
gate . claimNextSlot ( ) ;
2023-07-14 01:51:00 +02:00
// aaand the boat is full...
CHECK ( not gate . hasFreeSlot ( ) ) ;
CHECK ( isSameObject ( * gate . next , epoch [ 0 ] ) ) ;
2023-07-17 04:32:10 +02:00
CHECK ( gate . filledSlots ( ) = = Epoch : : SIZ ( ) - 1 ) ;
CHECK ( epoch . getFillFactor ( ) = = 1 ) ;
2023-07-14 01:51:00 +02:00
// a given Epoch can be checked for relevance against a deadline
CHECK ( gate . deadline ( ) = = Time ( 0 , 10 ) ) ;
CHECK ( gate . isAlive ( Time ( 0 , 5 ) ) ) ;
CHECK ( gate . isAlive ( Time ( 999 , 9 ) ) ) ;
CHECK ( not gate . isAlive ( Time ( 0 , 10 ) ) ) ;
CHECK ( not gate . isAlive ( Time ( 1 , 10 ) ) ) ;
////////////////////////////////////////////////////////////////////////////////////////TICKET #1298 : actually use a GATE implementation and then also check the count-down latch
2023-07-13 01:51:21 +02:00
}
2023-07-16 20:47:39 +02:00
/** @test place Activity record into storage
* - new Activity without any previously established Epoch
* - place Activity into future , expanding the Epoch grid
* - locate Activity relative to established Epoch grid
* - fill up existing Epoch , causing overflow to next one
* - exhaust multiple adjacent Epochs , overflowing to first free one
* - exhaust last Epoch , causing setup of new Epoch , with reduced spacing
* - use this reduced spacing also for subsequently created Epochs
* - clean up obsoleted Epochs , based on given deadline
* @ todo WIP 7 / 23 ⟶ ✔ define ⟶ ✔ implement
2023-07-13 01:51:21 +02:00
*/
void
placeActivity ( )
{
2023-07-14 02:58:00 +02:00
BlockFlow bFlow ;
Time t1 = Time { 0 , 10 } ;
Time t2 = Time { 500 , 10 } ;
Time t3 = Time { 0 , 11 } ;
2023-07-16 20:47:39 +02:00
// no Epoch established yet...
2023-07-14 02:58:00 +02:00
auto & a1 = bFlow . until ( t1 ) . create ( ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a1 ) = = " 10s200ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// setup Epoch grid into the future
2023-07-14 02:58:00 +02:00
auto & a3 = bFlow . until ( t3 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a3 ) = = " 11s " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// associate to existing Epoch
2023-07-14 02:58:00 +02:00
auto & a2 = bFlow . until ( t2 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a2 ) = = " 10s600ms " _expect ) ;
2023-07-14 02:58:00 +02:00
Time t0 = Time { 0 , 5 } ;
2023-07-16 20:47:39 +02:00
// late(past) Activity is placed in the oldest Epoch alive
2023-07-14 02:58:00 +02:00
auto & a0 = bFlow . until ( t0 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . find ( a0 ) = = " 10s200ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// provoke Epoch overflow by exhausting all available storage slots
2023-07-14 02:58:00 +02:00
BlockFlow : : AllocatorHandle allocHandle = bFlow . until ( Time { 300 , 10 } ) ;
for ( uint i = 1 ; i < Epoch : : SIZ ( ) ; + + i )
allocHandle . create ( ) ;
CHECK ( allocHandle . currDeadline ( ) = = Time ( 400 , 10 ) ) ;
CHECK ( not allocHandle . hasFreeSlot ( ) ) ;
2023-07-16 20:47:39 +02:00
// ...causing next allocation to be shifted into subsequent Epoch
2023-07-16 03:06:02 +02:00
auto & a4 = allocHandle . create ( ) ;
2023-07-14 02:58:00 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 600 , 10 ) ) ;
CHECK ( allocHandle . hasFreeSlot ( ) ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . find ( a4 ) = = " 10s600ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// fill up and exhaust this Epoch too....
2023-07-14 02:58:00 +02:00
for ( uint i = 1 ; i < Epoch : : SIZ ( ) ; + + i )
allocHandle . create ( ) ;
2023-07-16 20:47:39 +02:00
// so the handle has moved to the after next Epoch
2023-07-14 02:58:00 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 800 , 10 ) ) ;
2023-07-16 03:06:02 +02:00
CHECK ( allocHandle . hasFreeSlot ( ) ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// even allocation with way earlier deadline is shifted here now
2023-07-14 02:58:00 +02:00
auto & a5 = bFlow . until ( Time { 220 , 10 } ) . create ( ) ;
2023-07-16 03:06:02 +02:00
CHECK ( watch ( bFlow ) . find ( a5 ) = = " 10s800ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// now repeat the same pattern, but now towards uncharted Epochs
2023-07-14 02:58:00 +02:00
allocHandle = bFlow . until ( Time { 900 , 10 } ) ;
2023-07-16 03:06:02 +02:00
for ( uint i = 2 ; i < Epoch : : SIZ ( ) ; + + i )
2023-07-14 02:58:00 +02:00
allocHandle . create ( ) ;
2023-07-16 03:06:02 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 0 , 11 ) ) ;
2023-07-14 02:58:00 +02:00
CHECK ( not allocHandle . hasFreeSlot ( ) ) ;
auto & a6 = bFlow . until ( Time { 850 , 10 } ) . create ( ) ;
2023-07-16 20:47:39 +02:00
// Note: encountered four overflow-Events, leading to decreased Epoch spacing for new Epochs
CHECK ( watch ( bFlow ) . find ( a6 ) = = " 11s131ms " _expect ) ;
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s|11s131ms " _expect ) ;
2023-07-14 02:58:00 +02:00
auto & a7 = bFlow . until ( Time { 500 , 11 } ) . create ( ) ;
2023-07-16 20:47:39 +02:00
// this allocation does not count as overflow, but has to expand the Epoch grid, now using the reduced Epoch spacing
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s|11s131ms|11s262ms|11s393ms|11s524ms " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a7 ) = = " 11s524ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-17 04:32:10 +02:00
CHECK ( bFlow . getEpochStep ( ) = = " ≺131ms≻ " _expect ) ;
2023-07-14 02:58:00 +02:00
bFlow . discardBefore ( Time { 999 , 10 } ) ;
2023-07-17 04:32:10 +02:00
CHECK ( bFlow . getEpochStep ( ) = = " ≺149ms≻ " _expect ) ;
2023-07-16 20:47:39 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 11s|11s131ms|11s262ms|11s393ms|11s524ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// placed into the oldest Epoch still alive
2023-07-14 02:58:00 +02:00
auto & a8 = bFlow . until ( Time { 500 , 10 } ) . create ( ) ;
2023-07-16 20:47:39 +02:00
CHECK ( watch ( bFlow ) . find ( a8 ) = = " 11s131ms " _expect ) ;
2023-07-13 01:51:21 +02:00
}
2023-07-17 03:00:56 +02:00
/** @test load based regulation of Epoch spacing
* - on overflow , capacity is boosted by a fixed factor
* - on clean - up , a moving average of ( in hindsight ) optimal length is
* computed and used as the new Epoch spacing
2023-07-16 20:47:39 +02:00
* @ todo WIP 7 / 23 ⟶ ✔ define ⟶ 🔁 implement
2023-07-13 01:51:21 +02:00
*/
void
adjustEpochs ( )
{
2023-07-15 18:54:59 +02:00
BlockFlow bFlow ;
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP ) ;
2023-07-17 03:00:56 +02:00
// whenever an Epoch overflow happens, capacity is boosted by reducing the Epoch duration
2023-07-15 18:54:59 +02:00
bFlow . markEpochOverflow ( ) ;
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP * OVERFLOW_BOOST_FACTOR ) ;
bFlow . markEpochOverflow ( ) ;
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP * OVERFLOW_BOOST_FACTOR * OVERFLOW_BOOST_FACTOR ) ;
2023-07-17 03:00:56 +02:00
// To counteract this increase, on clean-up the actual fill rate of the Extent
// serves to guess an optimal Epoch duration, which is averaged exponentially
// Using just arbitrary demo values for some fictional Epochs
TimeVar dur1 = INITIAL_EPOCH_STEP ;
Rat fill1 = 8 _r / 10 ;
TimeVar dur2 = INITIAL_EPOCH_STEP * OVERFLOW_BOOST_FACTOR ;
Rat fill2 = 3 _r / 10 ;
2023-07-15 18:54:59 +02:00
Rat N = AVERAGE_EPOCHS ;
2023-07-17 03:00:56 +02:00
TimeVar step = bFlow . getEpochStep ( ) ;
2023-07-15 18:54:59 +02:00
2023-07-17 03:00:56 +02:00
bFlow . markEpochUnderflow ( dur1 , fill1 ) ;
CHECK ( bFlow . getEpochStep ( ) = = Duration { FSecs { step } * ( N - 1 ) / N + FSecs { dur1 } / fill1 / N } ) ;
2023-07-15 18:54:59 +02:00
step = bFlow . getEpochStep ( ) ;
2023-07-17 03:00:56 +02:00
bFlow . markEpochUnderflow ( dur2 , fill2 ) ;
CHECK ( bFlow . getEpochStep ( ) = = Duration { FSecs { step } * ( N - 1 ) / N + FSecs { dur2 } / fill2 / N } ) ;
} // Note: for verification the exponential average is computed via FSecs
// which is a different calculation path but yields the same result
2023-07-13 01:51:21 +02:00
2023-07-17 18:36:12 +02:00
/** @test investigate progression of epochs under realistic load
* - expose the allocator to a load of 200f ps for simulated 60 sec
* - assuming 10 Activities per frame , this means a throughput of 120000 Activities
* - run this load exposure under saturation for performance measurement
* - use a planning to deadline delay of 500 ms , but with ± 200 ms random spread
* - after 250 ms ( 500 steps ) , » invoke « by accessing and adding the random checksum
* - run a comparison of all - pre - allocated ⟷ heap allocated ⟷ BlockFlow
* @ todo WIP 7 / 23 ⟶ 🔁 define ⟶ 🔁 implement
2023-07-13 01:51:21 +02:00
*/
void
storageFlow ( )
2023-07-03 18:40:37 +02:00
{
2023-07-17 18:36:12 +02:00
const uint ACTIVITIES = 120000 ; // Activities to send through the test subject
const uint MAX_TIME = 121000 ; // Test steps to perform, with 2 steps / ms
Offset BASE_DEADLINE { FSecs { 1 , 2 } } ; // base pre-roll before deadline
Offset SPREAD_DEAD { FSecs { 2 , 100 } } ; // random spread of deadline around base
const uint INVOKE_LAG = 500 ; // „invoke“ the Activity after 500 steps (≙ simulated 250ms)
const uint CLEAN_UP = 200 ; // perform clean-up every 200 steps
using TestData = vector < pair < TimeVar , size_t > > ;
using Subjects = vector < reference_wrapper < Activity > > ;
using Storage = vector < Activity > ;
TestData testData { ACTIVITIES } ;
for ( auto & [ t , r ] : testData )
{
const size_t SPREAD = 2 * _raw ( SPREAD_DEAD ) ;
const size_t MIN_DEAD = _raw ( BASE_DEADLINE ) - _raw ( SPREAD_DEAD ) ;
r = rand ( ) % SPREAD ;
t = TimeValue ( MIN_DEAD + r ) ;
}
Activity dummy ;
Subjects subject { ACTIVITIES , std : : ref ( dummy ) } ;
auto runTest = [ & ] ( auto allocate , auto invoke ) - > size_t
{
// allocate Activity record for deadline and with given random payload
ASSERT_VALID_SIGNATURE ( decltype ( allocate ) , Activity & ( Time , size_t ) ) ;
// access the given Activity, read the payload, then trigger disposal
ASSERT_VALID_SIGNATURE ( decltype ( invoke ) , size_t ( Activity & ) ) ;
size_t checksum { 0 } ;
for ( size_t i = 0 ; i < MAX_TIME ; + + i )
{
if ( i < ACTIVITIES )
{
auto const & data = testData [ i ] ;
subject [ i ] = allocate ( data . first , data . second ) ;
}
if ( i > = INVOKE_LAG )
checksum + = invoke ( subject [ i - INVOKE_LAG ] ) ;
}
} ;
2023-07-03 18:40:37 +02:00
}
} ;
/** Register this test class... */
LAUNCHER ( BlockFlow_test , " unit engine " ) ;
2023-07-05 15:10:34 +02:00
} } } // namespace vault::gear::test