/*
  BlockFlow(Test)  -  verify scheduler memory management scheme

  Copyright (C)         Lumiera.org
    2023,               Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

* *****************************************************************/

/** @file block-flow-test.cpp
 ** unit test \ref BlockFlow_test
 */
#include "lib/test/run.hpp"
#include "lib/test/test-helper.hpp"
#include "vault/gear/block-flow.hpp"
#include "lib/test/microbenchmark.hpp"
#include "lib/time/timevalue.hpp"
#include "lib/meta/function.hpp"
#include "lib/format-cout.hpp"
#include "lib/util.hpp"

#include <chrono>
#include <vector>
#include <tuple>

using test::Test;
using util::isSameObject;
using lib::test::randTime;
using lib::test::showType;
using lib::time::Offset;
using std::vector;
using std::pair;
using std::reference_wrapper;
2023-07-03 18:40:37 +02:00
namespace vault {
2023-07-05 15:10:34 +02:00
namespace gear {
2023-07-03 18:40:37 +02:00
namespace test {
2023-07-20 21:47:18 +02:00
namespace { // shorthand for test parametrisation
2023-07-20 19:28:20 +02:00
using BlockFlow = gear : : BlockFlow < > ;
2023-07-20 21:47:18 +02:00
using Allocator = BlockFlow : : Allocator ;
using Strategy = BlockFlow : : Strategy ;
using Extent = BlockFlow : : Extent ;
using Epoch = BlockFlow : : Epoch ;
2023-07-20 19:28:20 +02:00
2023-07-20 21:47:18 +02:00
const size_t EXTENT_SIZ = Extent : : SIZ ( ) ;
Duration INITIAL_EPOCH_STEP = Strategy { } . initialEpochStep ( ) ;
const size_t AVERAGE_EPOCHS = Strategy { } . averageEpochs ( ) ;
const double BOOST_OVERFLOW = Strategy { } . boostFactorOverflow ( ) ;
const double TARGET_FILL = Strategy { } . config ( ) . TARGET_FILL ;
2023-07-22 01:54:25 +02:00
const double ACTIVITIES_P_FR = Strategy { } . config ( ) . ACTIVITIES_PER_FRAME ;
2023-07-20 19:28:20 +02:00
}
2023-07-03 18:40:37 +02:00
/*****************************************************************/ /**
* @ test document the memory management scheme used by the Scheduler .
* @ see SchedulerActivity_test
* @ see SchedulerUsage_test
*/
class BlockFlow_test : public Test
{
virtual void
run ( Arg )
{
simpleUsage ( ) ;
2023-07-13 01:51:21 +02:00
handleEpoch ( ) ;
2023-07-21 04:20:55 +02:00
placeActivity ( ) ;
adjustEpochs ( ) ;
2023-07-13 01:51:21 +02:00
storageFlow ( ) ;
2023-07-03 18:40:37 +02:00
}
2023-07-13 19:19:55 +02:00
/** @test demonstrate a simple usage scenario
* - open new Epoch to allocate an Activity
* - clean - up at a future time point
2023-07-03 18:40:37 +02:00
*/
void
simpleUsage ( )
{
2023-07-05 15:10:34 +02:00
BlockFlow bFlow ;
Time deadline = randTime ( ) ;
2023-07-13 03:41:24 +02:00
2023-07-13 01:51:21 +02:00
Activity & tick = bFlow . until ( deadline ) . create ( ) ;
2023-07-13 03:41:24 +02:00
CHECK ( tick . verb_ = = Activity : : TICK ) ;
2023-08-29 18:46:37 +02:00
CHECK ( 1 = = watch ( bFlow ) . cntElm ( ) ) ;
2023-07-13 03:41:24 +02:00
CHECK ( 1 = = watch ( bFlow ) . cntEpochs ( ) ) ;
CHECK ( watch ( bFlow ) . first ( ) > deadline ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . first ( ) - deadline = = bFlow . getEpochStep ( ) ) ;
2023-07-05 15:10:34 +02:00
bFlow . discardBefore ( deadline + Time { 0 , 5 } ) ;
2023-07-13 03:41:24 +02:00
CHECK ( 0 = = watch ( bFlow ) . cntEpochs ( ) ) ;
2023-08-29 18:46:37 +02:00
CHECK ( 0 = = watch ( bFlow ) . cntElm ( ) ) ;
2023-07-03 18:40:37 +02:00
}
2023-07-14 01:51:00 +02:00
/** @test cover properties and handling of Epochs (low-level)
* - demonstrate that Epoch is placed into an Extent
* - verify that both Extent and Epoch access the same memory block
* - demonstrate the standard setup and initialisation of an Epoch
* - allocate some Activities into the storage and observe free - managment
* - detect when the Epoch is filled up
* - verify alive / dead decision relative to given deadline
* @ note this test covers helpers and implementation structures of BlockFlow ,
* without actually using a BlockFlow instance ; rather , the typical handling
* and low - level bookkeeping aspects are emulated and observed
2023-07-03 18:40:37 +02:00
*/
void
2023-07-13 01:51:21 +02:00
handleEpoch ( )
{
2023-07-14 01:51:00 +02:00
Allocator alloc ;
alloc . openNew ( ) ;
2023-07-21 04:20:55 +02:00
// the raw storage Extent is a compact block
// providing uninitialised storage typed as `vault::gear::Activity`
2023-07-14 01:51:00 +02:00
Extent & extent = * alloc . begin ( ) ;
CHECK ( extent . size ( ) = = Extent : : SIZ : : value ) ;
CHECK ( sizeof ( extent ) = = extent . size ( ) * sizeof ( Activity ) ) ;
CHECK ( showType < Extent : : value_type > ( ) = = " vault::gear::Activity " _expect ) ;
// we can just access some slot and place data there
extent [ 55 ] . data_ . feed . one = 555555555555555 ;
2023-07-21 04:20:55 +02:00
// now establish an Epoch placed into this storage block:
2023-07-14 01:51:00 +02:00
Epoch & epoch = Epoch : : setup ( alloc . begin ( ) , Time { 0 , 10 } ) ;
// the underlying storage is not touched yet...
CHECK ( epoch [ 55 ] . data_ . feed . one = = 555555555555555 ) ;
// but in the first slot, an »EpochGate« has been implanted
Epoch : : EpochGate & gate = epoch . gate ( ) ;
CHECK ( isSameObject ( gate , epoch [ 0 ] ) ) ;
CHECK ( isSameObject ( epoch [ 0 ] , extent [ 0 ] ) ) ;
CHECK ( Time { gate . deadline ( ) } = = Time ( 0 , 10 ) ) ;
CHECK ( Time { gate . deadline ( ) } = = Time { epoch [ 0 ] . data_ . condition . dead } ) ;
2023-09-01 17:39:55 +02:00
CHECK ( epoch [ 0 ] . is ( Activity : : GATE ) ) ;
2023-07-14 01:51:00 +02:00
// the gate's `next`-pointer is (ab)used to manage the next allocation slot
CHECK ( isSameObject ( * gate . next , epoch [ extent . size ( ) - 1 ] ) ) ;
2023-07-17 04:32:10 +02:00
CHECK ( 0 = = gate . filledSlots ( ) ) ;
CHECK ( 0 = = epoch . getFillFactor ( ) ) ;
2023-07-14 01:51:00 +02:00
2023-07-21 04:20:55 +02:00
// the storage there is not used yet....
2023-07-14 01:51:00 +02:00
epoch [ extent . size ( ) - 1 ] . data_ . timing . instant = Time { 5 , 5 } ;
2023-07-21 04:20:55 +02:00
// ....but will be overwritten by the following ctor call
2023-07-14 01:51:00 +02:00
2023-07-16 18:03:27 +02:00
// allocate a new Activity into the next free slot (using a faked AllocatorHandle)
BlockFlow : : AllocatorHandle allocHandle { alloc . begin ( ) , nullptr } ;
2023-07-30 21:45:31 +02:00
Activity & timeStart = allocHandle . create ( Activity : : WORKSTART ) ;
2023-07-14 01:51:00 +02:00
CHECK ( isSameObject ( timeStart , epoch [ extent . size ( ) - 1 ] ) ) ;
// this Activity object is properly initialised (and memory was altered)
CHECK ( epoch [ extent . size ( ) - 1 ] . data_ . timing . instant ! = Time ( 5 , 5 ) ) ;
CHECK ( epoch [ extent . size ( ) - 1 ] . data_ . timing . instant = = Time : : NEVER ) ;
2023-07-30 21:45:31 +02:00
CHECK ( timeStart . verb_ = = Activity : : WORKSTART ) ;
2023-07-14 01:51:00 +02:00
CHECK ( timeStart . data_ . timing . instant = = Time : : NEVER ) ;
CHECK ( timeStart . data_ . timing . quality = = 0 ) ;
// and the free-pointer was decremented to point to the next free slot
CHECK ( isSameObject ( * gate . next , epoch [ extent . size ( ) - 2 ] ) ) ;
// which also implies that there is still ample space left...
2023-07-17 04:32:10 +02:00
CHECK ( 1 = = gate . filledSlots ( ) ) ;
2023-07-14 01:51:00 +02:00
CHECK ( gate . hasFreeSlot ( ) ) ;
2023-07-20 19:28:20 +02:00
CHECK ( epoch . getFillFactor ( ) = = double ( gate . filledSlots ( ) ) / ( EXTENT_SIZ - 1 ) ) ;
2023-07-17 04:32:10 +02:00
2023-07-14 01:51:00 +02:00
// so let's eat this space up...
for ( uint i = extent . size ( ) - 2 ; i > 1 ; - - i )
2023-07-16 18:03:27 +02:00
gate . claimNextSlot ( ) ;
2023-07-14 01:51:00 +02:00
// one final slot is left (beyond of the EpochGate itself)
CHECK ( isSameObject ( * gate . next , epoch [ 1 ] ) ) ;
2023-07-20 19:28:20 +02:00
CHECK ( gate . filledSlots ( ) = = EXTENT_SIZ - 2 ) ;
2023-07-14 01:51:00 +02:00
CHECK ( gate . hasFreeSlot ( ) ) ;
2023-07-16 18:03:27 +02:00
gate . claimNextSlot ( ) ;
2023-07-14 01:51:00 +02:00
// aaand the boat is full...
CHECK ( not gate . hasFreeSlot ( ) ) ;
CHECK ( isSameObject ( * gate . next , epoch [ 0 ] ) ) ;
2023-07-20 19:28:20 +02:00
CHECK ( gate . filledSlots ( ) = = EXTENT_SIZ - 1 ) ;
2023-07-17 04:32:10 +02:00
CHECK ( epoch . getFillFactor ( ) = = 1 ) ;
2023-07-14 01:51:00 +02:00
// a given Epoch can be checked for relevance against a deadline
CHECK ( gate . deadline ( ) = = Time ( 0 , 10 ) ) ;
CHECK ( gate . isAlive ( Time ( 0 , 5 ) ) ) ;
CHECK ( gate . isAlive ( Time ( 999 , 9 ) ) ) ;
CHECK ( not gate . isAlive ( Time ( 0 , 10 ) ) ) ;
CHECK ( not gate . isAlive ( Time ( 1 , 10 ) ) ) ;
////////////////////////////////////////////////////////////////////////////////////////TICKET #1298 : actually use a GATE implementation and then also check the count-down latch
2023-07-13 01:51:21 +02:00
}
2023-07-16 20:47:39 +02:00
/** @test place Activity record into storage
* - new Activity without any previously established Epoch
* - place Activity into future , expanding the Epoch grid
* - locate Activity relative to established Epoch grid
* - fill up existing Epoch , causing overflow to next one
* - exhaust multiple adjacent Epochs , overflowing to first free one
* - exhaust last Epoch , causing setup of new Epoch , with reduced spacing
* - use this reduced spacing also for subsequently created Epochs
* - clean up obsoleted Epochs , based on given deadline
2023-07-13 01:51:21 +02:00
*/
void
placeActivity ( )
{
2023-07-14 02:58:00 +02:00
BlockFlow bFlow ;
Time t1 = Time { 0 , 10 } ;
Time t2 = Time { 500 , 10 } ;
Time t3 = Time { 0 , 11 } ;
2023-07-16 20:47:39 +02:00
// no Epoch established yet...
2023-07-14 02:58:00 +02:00
auto & a1 = bFlow . until ( t1 ) . create ( ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a1 ) = = " 10s200ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// setup Epoch grid into the future
2023-07-14 02:58:00 +02:00
auto & a3 = bFlow . until ( t3 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a3 ) = = " 11s " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// associate to existing Epoch
2023-07-14 02:58:00 +02:00
auto & a2 = bFlow . until ( t2 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a2 ) = = " 10s600ms " _expect ) ;
2023-07-14 02:58:00 +02:00
Time t0 = Time { 0 , 5 } ;
2023-07-16 20:47:39 +02:00
// late(past) Activity is placed in the oldest Epoch alive
2023-07-14 02:58:00 +02:00
auto & a0 = bFlow . until ( t0 ) . create ( ) ;
2023-07-15 21:37:58 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s " _expect ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . find ( a0 ) = = " 10s200ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// provoke Epoch overflow by exhausting all available storage slots
2023-07-14 02:58:00 +02:00
BlockFlow : : AllocatorHandle allocHandle = bFlow . until ( Time { 300 , 10 } ) ;
2023-07-20 19:28:20 +02:00
for ( uint i = 1 ; i < EXTENT_SIZ ; + + i )
2023-07-14 02:58:00 +02:00
allocHandle . create ( ) ;
CHECK ( allocHandle . currDeadline ( ) = = Time ( 400 , 10 ) ) ;
CHECK ( not allocHandle . hasFreeSlot ( ) ) ;
2023-07-16 20:47:39 +02:00
// ...causing next allocation to be shifted into subsequent Epoch
2023-07-16 03:06:02 +02:00
auto & a4 = allocHandle . create ( ) ;
2023-07-14 02:58:00 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 600 , 10 ) ) ;
CHECK ( allocHandle . hasFreeSlot ( ) ) ;
2023-07-15 18:54:59 +02:00
CHECK ( watch ( bFlow ) . find ( a4 ) = = " 10s600ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// fill up and exhaust this Epoch too....
2023-07-20 19:28:20 +02:00
for ( uint i = 1 ; i < EXTENT_SIZ ; + + i )
2023-07-14 02:58:00 +02:00
allocHandle . create ( ) ;
2023-07-16 20:47:39 +02:00
// so the handle has moved to the after next Epoch
2023-07-14 02:58:00 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 800 , 10 ) ) ;
2023-07-16 03:06:02 +02:00
CHECK ( allocHandle . hasFreeSlot ( ) ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// even allocation with way earlier deadline is shifted here now
2023-07-14 02:58:00 +02:00
auto & a5 = bFlow . until ( Time { 220 , 10 } ) . create ( ) ;
2023-07-16 03:06:02 +02:00
CHECK ( watch ( bFlow ) . find ( a5 ) = = " 10s800ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// now repeat the same pattern, but now towards uncharted Epochs
2023-07-14 02:58:00 +02:00
allocHandle = bFlow . until ( Time { 900 , 10 } ) ;
2023-07-20 19:28:20 +02:00
for ( uint i = 2 ; i < EXTENT_SIZ ; + + i )
2023-07-14 02:58:00 +02:00
allocHandle . create ( ) ;
2023-07-16 03:06:02 +02:00
CHECK ( allocHandle . currDeadline ( ) = = Time ( 0 , 11 ) ) ;
2023-07-14 02:58:00 +02:00
CHECK ( not allocHandle . hasFreeSlot ( ) ) ;
auto & a6 = bFlow . until ( Time { 850 , 10 } ) . create ( ) ;
2023-07-16 20:47:39 +02:00
// Note: encountered four overflow-Events, leading to decreased Epoch spacing for new Epochs
2023-07-22 01:54:25 +02:00
CHECK ( watch ( bFlow ) . find ( a6 ) = = " 11s192ms " _expect ) ;
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms " _expect ) ;
2023-07-14 02:58:00 +02:00
auto & a7 = bFlow . until ( Time { 500 , 11 } ) . create ( ) ;
2023-07-16 20:47:39 +02:00
// this allocation does not count as overflow, but has to expand the Epoch grid, now using the reduced Epoch spacing
2023-07-22 01:54:25 +02:00
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms|11s384ms|11s576ms " _expect ) ;
CHECK ( watch ( bFlow ) . find ( a7 ) = = " 11s576ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-08-29 18:46:37 +02:00
// we created 8 elements (a0...a7) and caused three epochs to overflow...
CHECK ( watch ( bFlow ) . cntElm ( ) = = 8 + EXTENT_SIZ - 1 + EXTENT_SIZ - 1 + EXTENT_SIZ - 2 ) ;
2023-07-18 21:23:00 +02:00
// on clean-up, actual fill ratio is used to adjust to optimise Epoch length for better space usage
2023-07-22 01:54:25 +02:00
CHECK ( bFlow . getEpochStep ( ) = = " ≺192ms≻ " _expect ) ;
2023-07-14 02:58:00 +02:00
bFlow . discardBefore ( Time { 999 , 10 } ) ;
2023-07-22 01:54:25 +02:00
CHECK ( bFlow . getEpochStep ( ) = = " ≺218ms≻ " _expect ) ;
CHECK ( watch ( bFlow ) . allEpochs ( ) = = " 11s|11s192ms|11s384ms|11s576ms " _expect ) ;
2023-07-14 02:58:00 +02:00
2023-07-16 20:47:39 +02:00
// placed into the oldest Epoch still alive
2023-07-14 02:58:00 +02:00
auto & a8 = bFlow . until ( Time { 500 , 10 } ) . create ( ) ;
2023-07-22 01:54:25 +02:00
CHECK ( watch ( bFlow ) . find ( a8 ) = = " 11s192ms " _expect ) ;
2023-07-13 01:51:21 +02:00
}
2023-07-17 03:00:56 +02:00
/** @test load based regulation of Epoch spacing
* - on overflow , capacity is boosted by a fixed factor
2023-07-22 01:54:25 +02:00
* - on clean - up , a moving average of ( in hindsight ) optimal length
* is computed and used as the new Epoch spacing
2023-07-13 01:51:21 +02:00
*/
void
adjustEpochs ( )
{
2023-07-15 18:54:59 +02:00
BlockFlow bFlow ;
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP ) ;
2023-07-17 03:00:56 +02:00
// whenever an Epoch overflow happens, capacity is boosted by reducing the Epoch duration
2023-07-15 18:54:59 +02:00
bFlow . markEpochOverflow ( ) ;
2023-07-18 21:23:00 +02:00
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP * BOOST_OVERFLOW ) ;
2023-07-15 18:54:59 +02:00
bFlow . markEpochOverflow ( ) ;
2023-07-18 21:23:00 +02:00
CHECK ( bFlow . getEpochStep ( ) = = INITIAL_EPOCH_STEP * BOOST_OVERFLOW * BOOST_OVERFLOW ) ;
2023-07-15 18:54:59 +02:00
2023-07-17 03:00:56 +02:00
// To counteract this increase, on clean-up the actual fill rate of the Extent
// serves to guess an optimal Epoch duration, which is averaged exponentially
// Using just arbitrary demo values for some fictional Epochs
TimeVar dur1 = INITIAL_EPOCH_STEP ;
2023-07-18 21:23:00 +02:00
double fac1 = 0.8 ;
TimeVar dur2 = INITIAL_EPOCH_STEP * BOOST_OVERFLOW ;
2023-07-19 03:29:09 +02:00
double fac2 = 0.3 ;
2023-07-15 18:54:59 +02:00
2023-07-19 03:29:09 +02:00
double goal1 = double ( _raw ( dur1 ) ) / ( fac1 / TARGET_FILL ) ;
double goal2 = double ( _raw ( dur2 ) ) / ( fac2 / TARGET_FILL ) ;
2023-07-18 21:23:00 +02:00
auto movingAverage = [ & ] ( TimeValue old , double contribution )
{
auto N = AVERAGE_EPOCHS ;
auto averageTicks = double ( _raw ( old ) ) * ( N - 1 ) / N + contribution / N ;
return TimeValue { gavl_time_t ( floor ( averageTicks ) ) } ;
} ;
2023-07-19 03:29:09 +02:00
TimeVar step = bFlow . getEpochStep ( ) ;
2023-07-18 21:23:00 +02:00
bFlow . markEpochUnderflow ( dur1 , fac1 ) ;
2023-07-19 03:29:09 +02:00
CHECK ( bFlow . getEpochStep ( ) = = movingAverage ( step , goal1 ) ) ;
2023-07-15 18:54:59 +02:00
step = bFlow . getEpochStep ( ) ;
2023-07-18 21:23:00 +02:00
bFlow . markEpochUnderflow ( dur2 , fac2 ) ;
2023-07-19 03:29:09 +02:00
CHECK ( bFlow . getEpochStep ( ) = = movingAverage ( step , goal2 ) ) ;
2023-07-18 21:23:00 +02:00
}
2023-07-13 01:51:21 +02:00
2023-07-22 01:54:25 +02:00
2023-07-17 18:36:12 +02:00
/** @test investigate progression of epochs under realistic load
2023-07-22 01:54:25 +02:00
* - expose the allocator to a load of 200f ps for simulated 3 Minutes
* - assuming 10 Activities per frame , this means a throughput of 360000 Activities
2023-07-17 18:36:12 +02:00
* - run this load exposure under saturation for performance measurement
* - use a planning to deadline delay of 500 ms , but with ± 200 ms random spread
* - after 250 ms ( 500 steps ) , » invoke « by accessing and adding the random checksum
2023-07-19 23:43:44 +02:00
* - run a comparison of all - pre - allocated ⟷ heap allocated ⟷ Refcount ⟷ BlockFlow
2023-07-22 01:54:25 +02:00
* @ remarks
* This test setup can be used to investigate different load scenarios .
* In the standard as defined , the BlockFlow allocator is overloaded initially ;
* within 5 seconds , the algorithm should have regulated the Epoch stepping down
* to accommodate the load peak . As immediate response , excess allocation requests
* are shifted into later Epochs . To cope with a persisting higher load , the spacing
* is reduced swiftly , by growing the internal pool with additional heap allocated Extents .
* In the following balancing phase , the mechanism aims at bringing back the Epoch duration
* into a narrow corridor , to keep the usage quotient as close as possible to 90 %
2023-07-13 01:51:21 +02:00
*/
void
storageFlow ( )
2023-07-03 18:40:37 +02:00
{
2023-07-22 01:54:25 +02:00
const size_t FPS = 200 ;
const size_t TICK_P_S = FPS * ACTIVITIES_P_FR ; // Simulated throughput 200 frames per second
const gavl_time_t STP = Time : : SCALE / TICK_P_S ; // Simulation stepping (here 2 steps per ms)
const gavl_time_t RUN = _raw ( Time { 0 , 0 , 3 } ) ; // nominal length of the simulation time axis
Offset BASE_DEADLINE { FSecs { 1 , 2 } } ; // base pre-roll before deadline
Offset SPREAD_DEAD { FSecs { 2 , 100 } } ; // random spread of deadline around base
const uint INVOKE_LAG = _raw ( Time { 250 , 0 } ) / STP ; // „invoke“ the Activity after simulated 250ms (≙ 500 steps)
const uint CLEAN_UP = _raw ( Time { 100 , 0 } ) / STP ; // perform clean-up every 200 steps
const uint INSTANCES = RUN / STP ; // 120000 Activity records to send through the test subject
const uint MAX_TIME = INSTANCES
+ INVOKE_LAG + 2 * CLEAN_UP ; // overall count of Test steps to perform
2023-07-17 18:36:12 +02:00
using TestData = vector < pair < TimeVar , size_t > > ;
using Subjects = vector < reference_wrapper < Activity > > ;
2023-07-18 01:59:17 +02:00
// pre-generate random test data
TestData testData { INSTANCES } ;
2023-07-18 21:23:00 +02:00
for ( size_t i = 0 ; i < INSTANCES ; + + i )
2023-07-17 18:36:12 +02:00
{
const size_t SPREAD = 2 * _raw ( SPREAD_DEAD ) ;
const size_t MIN_DEAD = _raw ( BASE_DEADLINE ) - _raw ( SPREAD_DEAD ) ;
2023-07-18 21:23:00 +02:00
auto & [ t , r ] = testData [ i ] ;
2023-07-17 18:36:12 +02:00
r = rand ( ) % SPREAD ;
2023-07-18 21:23:00 +02:00
t = TimeValue ( i * STP + MIN_DEAD + r ) ;
2023-07-17 18:36:12 +02:00
}
2023-07-18 01:59:17 +02:00
Activity dummy ; // reserve memory for test subject index
Subjects subject { INSTANCES , std : : ref ( dummy ) } ;
2023-07-17 18:36:12 +02:00
auto runTest = [ & ] ( auto allocate , auto invoke ) - > size_t
{
// allocate Activity record for deadline and with given random payload
ASSERT_VALID_SIGNATURE ( decltype ( allocate ) , Activity & ( Time , size_t ) ) ;
// access the given Activity, read the payload, then trigger disposal
ASSERT_VALID_SIGNATURE ( decltype ( invoke ) , size_t ( Activity & ) ) ;
size_t checksum { 0 } ;
for ( size_t i = 0 ; i < MAX_TIME ; + + i )
{
2023-07-18 01:59:17 +02:00
if ( i < INSTANCES )
2023-07-17 18:36:12 +02:00
{
auto const & data = testData [ i ] ;
subject [ i ] = allocate ( data . first , data . second ) ;
}
2023-07-18 01:59:17 +02:00
if ( INVOKE_LAG < = i and i - INVOKE_LAG < INSTANCES )
2023-07-17 18:36:12 +02:00
checksum + = invoke ( subject [ i - INVOKE_LAG ] ) ;
}
2023-07-18 01:59:17 +02:00
return checksum ;
} ;
auto benchmark = [ INSTANCES ] ( auto invokeTest )
2023-09-24 18:05:17 +02:00
{ // does the timing measurement with result in µ-seconds
2023-07-22 01:54:25 +02:00
return lib : : test : : benchmarkTime ( invokeTest , INSTANCES ) ;
2023-07-17 18:36:12 +02:00
} ;
2023-07-18 01:59:17 +02:00
2023-07-22 01:54:25 +02:00
2023-07-18 01:59:17 +02:00
/* =========== Test-Setup-1: no individual allocations/deallocations ========== */
size_t sum1 { 0 } ;
vector < Activity > storage { INSTANCES } ;
auto noAlloc = [ & ] { // use pre-allocated storage block
auto allocate = [ i = 0 , & storage ] ( Time , size_t check ) mutable - > Activity &
{
return * new ( & storage [ i + + ] ) Activity { check , size_t { 55 } } ;
} ;
auto invoke = [ ] ( Activity & feedActivity )
{
return feedActivity . data_ . feed . one ;
} ;
sum1 = runTest ( allocate , invoke ) ;
} ;
/* =========== Test-Setup-2: individual heap allocations ========== */
size_t sum2 { 0 } ;
auto heapAlloc = [ & ] {
auto allocate = [ ] ( Time , size_t check ) mutable - > Activity &
{
return * new Activity { check , size_t { 55 } } ;
} ;
auto invoke = [ ] ( Activity & feedActivity )
{
2023-07-22 01:54:25 +02:00
size_t check = feedActivity . data_ . feed . one ;
2023-07-18 01:59:17 +02:00
delete & feedActivity ;
return check ;
} ;
sum2 = runTest ( allocate , invoke ) ;
} ;
2023-07-19 03:29:09 +02:00
/* =========== Test-Setup-3: manage individually by ref-cnt ========== */
2023-07-18 01:59:17 +02:00
size_t sum3 { 0 } ;
2023-07-19 03:29:09 +02:00
vector < std : : shared_ptr < Activity > > manager { INSTANCES } ;
auto sharedAlloc = [ & ] {
auto allocate = [ & , i = 0 ] ( Time , size_t check ) mutable - > Activity &
{
Activity * a = new Activity { check , size_t { 55 } } ;
manager [ i ] . reset ( a ) ;
+ + i ;
return * a ;
} ;
auto invoke = [ & , i = 0 ] ( Activity & feedActivity ) mutable
{
2023-07-22 01:54:25 +02:00
size_t check = feedActivity . data_ . feed . one ;
2023-07-19 03:29:09 +02:00
manager [ i ] . reset ( ) ;
return check ;
} ;
sum3 = runTest ( allocate , invoke ) ;
} ;
/* =========== Test-Setup-4: use BlockFlow allocation scheme ========== */
2023-07-21 04:20:55 +02:00
2023-07-19 03:29:09 +02:00
size_t sum4 { 0 } ;
2023-07-21 04:20:55 +02:00
gear : : BlockFlow < blockFlow : : RenderConfig > blockFlow ;
// Note: using the RenderConfig, which uses larger blocks and more pre-allocation
auto blockFlowAlloc = [ & ] {
auto allocHandle = blockFlow . until ( Time { 400 , 0 } ) ;
2023-07-19 03:29:09 +02:00
auto allocate = [ & , j = 0 ] ( Time t , size_t check ) mutable - > Activity &
2023-07-18 01:59:17 +02:00
{
2023-07-19 03:29:09 +02:00
if ( + + j > = 10 ) // typically several Activities are allocated on the same deadline
{
allocHandle = blockFlow . until ( t ) ;
j = 0 ;
}
return allocHandle . create ( check , size_t { 55 } ) ;
2023-07-18 01:59:17 +02:00
} ;
auto invoke = [ & , i = 0 ] ( Activity & feedActivity ) mutable
{
size_t check = feedActivity . data_ . feed . one ;
if ( i % CLEAN_UP = = 0 )
blockFlow . discardBefore ( Time { TimeValue { i * STP } } ) ;
2023-07-18 21:23:00 +02:00
+ + i ;
2023-07-18 01:59:17 +02:00
return check ;
} ;
2023-07-19 03:29:09 +02:00
sum4 = runTest ( allocate , invoke ) ;
2023-07-18 01:59:17 +02:00
} ;
// INVOKE Setup-1
auto time_noAlloc = benchmark ( noAlloc ) ;
// INVOKE Setup-2
auto time_heapAlloc = benchmark ( heapAlloc ) ;
2023-07-19 03:29:09 +02:00
// INVOKE Setup-3
auto time_sharedAlloc = benchmark ( sharedAlloc ) ;
2023-07-18 21:23:00 +02:00
2023-07-22 01:54:25 +02:00
cout < < " \n \n ■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■ " < < endl ;
2023-07-19 03:29:09 +02:00
// INVOKE Setup-4
2023-07-21 04:20:55 +02:00
auto time_blockFlow = benchmark ( blockFlowAlloc ) ;
2023-07-22 01:54:25 +02:00
Duration expectStep { FSecs { blockFlow . framesPerEpoch ( ) , FPS } * 9 / 10 } ;
cout < < " \n ___Microbenchmark____ "
< < " \n noAlloc : " < < time_noAlloc
< < " \n heapAlloc : " < < time_heapAlloc
< < " \n sharedAlloc : " < < time_sharedAlloc
< < " \n blockFlow : " < < time_blockFlow
< < " \n _____________________ \n "
< < " \n instances.... " < < INSTANCES
< < " \n fps.......... " < < FPS
< < " \n Activities/s. " < < TICK_P_S
< < " \n Epoch(expect) " < < expectStep
< < " \n Epoch (real) " < < blockFlow . getEpochStep ( )
< < " \n cnt Epochs... " < < watch ( blockFlow ) . cntEpochs ( )
< < " \n alloc pool... " < < watch ( blockFlow ) . poolSize ( )
< < endl ;
// all Activities have been read in all test cases,
// yielding identical checksum
CHECK ( sum1 = = sum2 ) ;
CHECK ( sum1 = = sum3 ) ;
CHECK ( sum1 = = sum4 ) ;
// Epoch spacing regulation must be converge up to ±10ms
CHECK ( expectStep - blockFlow . getEpochStep ( ) < Time ( 10 , 0 ) ) ;
// after the initial overload is levelled,
// only a small number of Epochs should be active
CHECK ( watch ( blockFlow ) . cntEpochs ( ) < 8 ) ;
// Due to Debug / Release builds, we can not check the runtime only a very rough margin.
// With -O3, this amortised allocation time should be way below time_sharedAlloc
CHECK ( time_blockFlow < 800 ) ;
2023-07-03 18:40:37 +02:00
}
} ;
/** Register this test class... */
LAUNCHER ( BlockFlow_test , " unit engine " ) ;
2023-07-05 15:10:34 +02:00
} } } // namespace vault::gear::test