/*
  SchedulerStress(Test)  -  verify scheduler performance characteristics

  Copyright (C)         Lumiera.org
    2023,               Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

* *****************************************************************/

/** @file scheduler-stress-test.cpp
 ** unit test \ref SchedulerStress_test
 */
#include "lib/test/run.hpp"
#include "test-chain-load.hpp"
#include "stress-test-rig.hpp"
#include "vault/gear/scheduler.hpp"
#include "lib/time/timevalue.hpp"
#include "lib/format-string.hpp"
#include "lib/format-cout.hpp"
#include "lib/test/diagnostic-output.hpp" //////////////////////////TODO work in distress
//#include "lib/format-string.hpp"
#include "lib/test/transiently.hpp"
//#include "lib/test/microbenchmark.hpp"
//#include "lib/util.hpp"
//#include <utility>
//#include <vector>
#include <cmath>
#include <array>


using test::Test;
//using std::move;
//using util::isSameObject;


namespace vault{
namespace gear {
namespace test {

//  using lib::time::FrameRate;
//  using lib::time::Offset;
//  using lib::time::Time;
  using util::_Fmt;
//  using std::vector;
  using std::array;
2023-10-19 22:04:30 +02:00
2023-12-19 21:06:23 +01:00
namespace { // Test definitions and setup...
}
2023-10-19 22:04:30 +02:00
/***************************************************************************/ /**
* @ test Investigate and verify non - functional characteristics of the Scheduler .
* @ see SchedulerActivity_test
* @ see SchedulerInvocation_test
* @ see SchedulerCommutator_test
2024-01-08 22:58:16 +01:00
* @ see stress - test - rig . hpp
2023-10-19 22:04:30 +02:00
*/
class SchedulerStress_test : public Test
{
virtual void
run ( Arg )
{
2023-12-28 23:56:52 +01:00
//smokeTest();
2024-01-10 20:39:20 +01:00
// setup_systematicSchedule();
2024-02-18 23:02:32 +01:00
search_breaking_point ( ) ;
// verify_instrumentation();
2024-02-15 00:43:03 +01:00
// investigateWorkProcessing();
2023-10-19 22:04:30 +02:00
walkingDeadline ( ) ;
}
2023-12-19 21:06:23 +01:00
/** @test TODO demonstrate sustained operation under load
2023-12-28 23:56:52 +01:00
* - TODO this is a placeholder and works now , but need a better example
* - it should not produce so much overload , rather some stretch of steady - state processing
2023-12-19 21:06:23 +01:00
* @ todo WIP 12 / 23 🔁 define ⟶ implement
2023-10-19 22:04:30 +02:00
*/
void
2023-12-19 21:06:23 +01:00
smokeTest ( )
2023-10-19 22:04:30 +02:00
{
2023-12-19 21:06:23 +01:00
MARK_TEST_FUN
2023-12-21 23:26:01 +01:00
TestChainLoad testLoad { 512 } ;
2023-12-19 21:06:23 +01:00
testLoad . configureShape_chain_loadBursts ( )
2024-01-02 23:51:47 +01:00
. buildTopology ( )
2023-12-21 23:26:01 +01:00
// .printTopologyDOT()
;
2023-12-19 21:06:23 +01:00
auto stats = testLoad . computeGraphStatistics ( ) ;
cout < < _Fmt { " Test-Load: Nodes: %d Levels: %d ∅Node/Level: %3.1f Forks: %d Joins: %d " }
% stats . nodes
% stats . levels
% stats . indicators [ STAT_NODE ] . pL
% stats . indicators [ STAT_FORK ] . cnt
% stats . indicators [ STAT_JOIN ] . cnt
< < endl ;
// while building the calculation-plan graph
// node hashes were computed, observing dependencies
size_t expectedHash = testLoad . getHash ( ) ;
// some jobs/nodes are marked with a weight-step
// these can be instructed to spend some CPU time
auto LOAD_BASE = 500u s ;
testLoad . performGraphSynchronously ( LOAD_BASE ) ;
CHECK ( testLoad . getHash ( ) = = expectedHash ) ;
double referenceTime = testLoad . calcRuntimeReference ( LOAD_BASE ) ;
cout < < " refTime(singleThr): " < < referenceTime / 1000 < < " ms " < < endl ;
// Perform through Scheduler----------
BlockFlowAlloc bFlow ;
EngineObserver watch ;
Scheduler scheduler { bFlow , watch } ;
double performanceTime =
testLoad . setupSchedule ( scheduler )
. withLoadTimeBase ( LOAD_BASE )
2023-12-26 15:00:35 +01:00
. withJobDeadline ( 150 ms )
2023-12-23 19:33:55 +01:00
. withPlanningStep ( 200u s )
2023-12-21 23:26:01 +01:00
. withChunkSize ( 20 )
2023-12-19 21:06:23 +01:00
. launch_and_wait ( ) ;
cout < < " runTime(Scheduler): " < < performanceTime / 1000 < < " ms " < < endl ;
// invocation through Scheduler has reproduced all node hashes
CHECK ( testLoad . getHash ( ) = = expectedHash ) ;
2023-10-19 22:04:30 +02:00
}
2024-01-08 22:58:16 +01:00
/** @test build a scheme to adapt the schedule to expected runtime.
* - as in many other tests , use the massively forking load pattern
* - demonstrate how TestChainLoad computes an idealised level expense
* - verify how schedule times are derived from this expense sequence
* @ todo WIP 12 / 23 ✔ define ⟶ ✔ implement
2023-12-28 23:56:52 +01:00
*/
void
setup_systematicSchedule ( )
{
TestChainLoad testLoad { 64 } ;
testLoad . configureShape_chain_loadBursts ( )
2024-01-02 23:51:47 +01:00
. buildTopology ( )
2024-01-01 23:59:02 +01:00
// .printTopologyDOT()
// .printTopologyStatistics()
2023-12-28 23:56:52 +01:00
;
auto LOAD_BASE = 500u s ;
ComputationalLoad cpuLoad ;
cpuLoad . timeBase = LOAD_BASE ;
cpuLoad . calibrate ( ) ;
double micros = cpuLoad . invoke ( ) ;
CHECK ( micros < 550 ) ;
CHECK ( micros > 450 ) ;
2024-01-01 20:06:53 +01:00
// build a schedule sequence based on
// summing up weight factors, with example concurrency ≔ 4
uint concurrency = 4 ;
2024-01-01 23:59:02 +01:00
auto stepFactors = testLoad . levelScheduleSequence ( concurrency ) . effuse ( ) ;
CHECK ( stepFactors . size ( ) = = 1 + testLoad . topLevel ( ) ) ;
CHECK ( stepFactors . size ( ) = = 27 ) ;
2023-12-31 21:59:16 +01:00
2024-01-01 20:06:53 +01:00
// Build-Performance-test-setup--------
BlockFlowAlloc bFlow ;
EngineObserver watch ;
Scheduler scheduler { bFlow , watch } ;
2023-12-31 21:59:16 +01:00
2024-01-01 20:06:53 +01:00
auto testSetup =
testLoad . setupSchedule ( scheduler )
. withLoadTimeBase ( LOAD_BASE )
2024-01-01 23:59:02 +01:00
. withJobDeadline ( 50 ms )
2024-01-01 20:06:53 +01:00
. withUpfrontPlanning ( ) ;
2023-12-31 21:59:16 +01:00
2024-01-01 20:06:53 +01:00
auto schedule = testSetup . getScheduleSeq ( ) . effuse ( ) ;
CHECK ( schedule . size ( ) = = testLoad . topLevel ( ) + 2 ) ;
CHECK ( schedule [ 0 ] = = _uTicks ( 0 ms ) ) ;
CHECK ( schedule [ 1 ] = = _uTicks ( 1 ms ) ) ;
2024-01-01 23:59:02 +01:00
CHECK ( schedule [ 2 ] = = _uTicks ( 2 ms ) ) ;
// ....
CHECK ( schedule [ 25 ] = = _uTicks ( 25 ms ) ) ;
2024-01-01 20:06:53 +01:00
CHECK ( schedule [ 26 ] = = _uTicks ( 26 ms ) ) ;
CHECK ( schedule [ 27 ] = = _uTicks ( 27 ms ) ) ;
2023-12-31 21:59:16 +01:00
2024-01-01 23:59:02 +01:00
// Adapted Schedule----------
2024-01-01 22:42:51 +01:00
double stressFac = 1.0 ;
testSetup . withAdaptedSchedule ( stressFac , concurrency ) ;
schedule = testSetup . getScheduleSeq ( ) . effuse ( ) ;
CHECK ( schedule . size ( ) = = testLoad . topLevel ( ) + 2 ) ;
2024-01-01 23:59:02 +01:00
CHECK ( schedule [ 0 ] = = _uTicks ( 0 ms ) ) ;
CHECK ( schedule [ 1 ] = = _uTicks ( 0 ms ) ) ;
// verify the numbers in detail....
_Fmt stepFmt { " lev:%-2d stepFac:%-6.3f schedule:%6.3f " } ;
auto stepStr = [ & ] ( uint i ) { return string { stepFmt % i % stepFactors [ i > 0 ? i - 1 : 0 ] % ( _raw ( schedule [ i ] ) / 1000.0 ) } ; } ;
CHECK ( stepStr ( 0 ) = = " lev:0 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 1 ) = = " lev:1 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 2 ) = = " lev:2 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 3 ) = = " lev:3 stepFac:2.000 schedule: 1.000 " _expect ) ;
CHECK ( stepStr ( 4 ) = = " lev:4 stepFac:2.000 schedule: 1.000 " _expect ) ;
CHECK ( stepStr ( 5 ) = = " lev:5 stepFac:2.000 schedule: 1.000 " _expect ) ;
CHECK ( stepStr ( 6 ) = = " lev:6 stepFac:2.000 schedule: 1.000 " _expect ) ;
CHECK ( stepStr ( 7 ) = = " lev:7 stepFac:3.000 schedule: 1.500 " _expect ) ;
CHECK ( stepStr ( 8 ) = = " lev:8 stepFac:5.000 schedule: 2.500 " _expect ) ;
CHECK ( stepStr ( 9 ) = = " lev:9 stepFac:7.000 schedule: 3.500 " _expect ) ;
CHECK ( stepStr ( 10 ) = = " lev:10 stepFac:8.000 schedule: 4.000 " _expect ) ;
CHECK ( stepStr ( 11 ) = = " lev:11 stepFac:8.000 schedule: 4.000 " _expect ) ;
CHECK ( stepStr ( 12 ) = = " lev:12 stepFac:8.000 schedule: 4.000 " _expect ) ;
CHECK ( stepStr ( 13 ) = = " lev:13 stepFac:9.000 schedule: 4.500 " _expect ) ;
CHECK ( stepStr ( 14 ) = = " lev:14 stepFac:10.000 schedule: 5.000 " _expect ) ;
CHECK ( stepStr ( 15 ) = = " lev:15 stepFac:12.000 schedule: 6.000 " _expect ) ;
CHECK ( stepStr ( 16 ) = = " lev:16 stepFac:12.000 schedule: 6.000 " _expect ) ;
CHECK ( stepStr ( 17 ) = = " lev:17 stepFac:13.000 schedule: 6.500 " _expect ) ;
CHECK ( stepStr ( 18 ) = = " lev:18 stepFac:16.000 schedule: 8.000 " _expect ) ;
CHECK ( stepStr ( 19 ) = = " lev:19 stepFac:16.000 schedule: 8.000 " _expect ) ;
CHECK ( stepStr ( 20 ) = = " lev:20 stepFac:20.000 schedule:10.000 " _expect ) ;
CHECK ( stepStr ( 21 ) = = " lev:21 stepFac:22.500 schedule:11.250 " _expect ) ;
CHECK ( stepStr ( 22 ) = = " lev:22 stepFac:24.167 schedule:12.083 " _expect ) ;
CHECK ( stepStr ( 23 ) = = " lev:23 stepFac:26.167 schedule:13.083 " _expect ) ;
CHECK ( stepStr ( 24 ) = = " lev:24 stepFac:28.167 schedule:14.083 " _expect ) ;
CHECK ( stepStr ( 25 ) = = " lev:25 stepFac:30.867 schedule:15.433 " _expect ) ;
CHECK ( stepStr ( 26 ) = = " lev:26 stepFac:31.867 schedule:15.933 " _expect ) ;
CHECK ( stepStr ( 27 ) = = " lev:27 stepFac:32.867 schedule:16.433 " _expect ) ;
// Adapted Schedule with lower stress level and higher concurrency....
stressFac = 0.3 ;
concurrency = 6 ;
stepFactors = testLoad . levelScheduleSequence ( concurrency ) . effuse ( ) ;
testSetup . withAdaptedSchedule ( stressFac , concurrency ) ;
schedule = testSetup . getScheduleSeq ( ) . effuse ( ) ;
CHECK ( stepStr ( 0 ) = = " lev:0 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 1 ) = = " lev:1 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 2 ) = = " lev:2 stepFac:0.000 schedule: 0.000 " _expect ) ;
CHECK ( stepStr ( 3 ) = = " lev:3 stepFac:2.000 schedule: 3.333 " _expect ) ;
CHECK ( stepStr ( 4 ) = = " lev:4 stepFac:2.000 schedule: 3.333 " _expect ) ;
CHECK ( stepStr ( 5 ) = = " lev:5 stepFac:2.000 schedule: 3.333 " _expect ) ;
CHECK ( stepStr ( 6 ) = = " lev:6 stepFac:2.000 schedule: 3.333 " _expect ) ;
CHECK ( stepStr ( 7 ) = = " lev:7 stepFac:3.000 schedule: 5.000 " _expect ) ;
CHECK ( stepStr ( 8 ) = = " lev:8 stepFac:5.000 schedule: 8.333 " _expect ) ;
CHECK ( stepStr ( 9 ) = = " lev:9 stepFac:7.000 schedule:11.666 " _expect ) ;
CHECK ( stepStr ( 10 ) = = " lev:10 stepFac:8.000 schedule:13.333 " _expect ) ;
CHECK ( stepStr ( 11 ) = = " lev:11 stepFac:8.000 schedule:13.333 " _expect ) ;
CHECK ( stepStr ( 12 ) = = " lev:12 stepFac:8.000 schedule:13.333 " _expect ) ;
CHECK ( stepStr ( 13 ) = = " lev:13 stepFac:9.000 schedule:15.000 " _expect ) ;
CHECK ( stepStr ( 14 ) = = " lev:14 stepFac:10.000 schedule:16.666 " _expect ) ;
CHECK ( stepStr ( 15 ) = = " lev:15 stepFac:12.000 schedule:20.000 " _expect ) ;
CHECK ( stepStr ( 16 ) = = " lev:16 stepFac:12.000 schedule:20.000 " _expect ) ;
CHECK ( stepStr ( 17 ) = = " lev:17 stepFac:13.000 schedule:21.666 " _expect ) ;
CHECK ( stepStr ( 18 ) = = " lev:18 stepFac:16.000 schedule:26.666 " _expect ) ;
CHECK ( stepStr ( 19 ) = = " lev:19 stepFac:16.000 schedule:26.666 " _expect ) ;
CHECK ( stepStr ( 20 ) = = " lev:20 stepFac:18.000 schedule:30.000 " _expect ) ; // note: here the higher concurrency allows to process all 5 concurrent nodes at once
CHECK ( stepStr ( 21 ) = = " lev:21 stepFac:20.500 schedule:34.166 " _expect ) ;
CHECK ( stepStr ( 22 ) = = " lev:22 stepFac:22.167 schedule:36.944 " _expect ) ;
CHECK ( stepStr ( 23 ) = = " lev:23 stepFac:23.167 schedule:38.611 " _expect ) ;
CHECK ( stepStr ( 24 ) = = " lev:24 stepFac:24.167 schedule:40.277 " _expect ) ;
CHECK ( stepStr ( 25 ) = = " lev:25 stepFac:25.967 schedule:43.277 " _expect ) ;
CHECK ( stepStr ( 26 ) = = " lev:26 stepFac:26.967 schedule:44.944 " _expect ) ;
CHECK ( stepStr ( 27 ) = = " lev:27 stepFac:27.967 schedule:46.611 " _expect ) ;
2024-01-02 15:45:40 +01:00
// perform a Test with this low stress level (0.3)
double runTime = testSetup . launch_and_wait ( ) ;
double expected = testSetup . getExpectedEndTime ( ) ;
CHECK ( fabs ( runTime - expected ) < 5000 ) ;
} // Scheduler should able to follow the expected schedule
2024-02-17 21:55:21 +01:00
/** @test verify capability for instrumentation of job invocations
* @ see IncidenceCount_test
* @ todo WIP 2 / 24 ✔ define ⟶ ✔ implement
2024-02-15 00:43:03 +01:00
*/
void
verify_instrumentation ( )
{
2024-02-17 21:55:21 +01:00
const size_t NODES = 20 ;
const size_t CORES = work : : Config : : COMPUTATION_CAPACITY ;
auto LOAD_BASE = 5 ms ;
TestChainLoad testLoad { NODES } ;
2024-02-15 00:43:03 +01:00
BlockFlowAlloc bFlow ;
EngineObserver watch ;
Scheduler scheduler { bFlow , watch } ;
auto testSetup =
2024-02-17 21:55:21 +01:00
testLoad . setWeight ( 1 )
. setupSchedule ( scheduler )
. withLoadTimeBase ( LOAD_BASE )
. withJobDeadline ( 50 ms )
. withInstrumentation ( ) // activate an instrumentation bracket around each job invocation
;
2024-02-15 00:43:03 +01:00
double runTime = testSetup . launch_and_wait ( ) ;
2024-02-17 21:55:21 +01:00
auto stat = testSetup . getInvocationStatistic ( ) ; // retrieve observed invocation statistics
CHECK ( runTime < stat . activeTime ) ;
CHECK ( isLimited ( 4900 , stat . activeTime / NODES , 8000 ) ) ; // should be close to 5000
CHECK ( stat . coveredTime < runTime ) ;
CHECK ( NODES = = stat . activationCnt ) ; // each node activated once
CHECK ( isLimited ( CORES / 2 , stat . avgConcurrency , CORES ) ) ; // should ideally come close to hardware concurrency
CHECK ( 0 = = stat . timeAtConc ( 0 ) ) ;
CHECK ( 0 = = stat . timeAtConc ( CORES + 1 ) ) ;
CHECK ( runTime / 2 < stat . timeAtConc ( CORES - 1 ) + stat . timeAtConc ( CORES ) ) ;
2024-02-18 18:01:21 +01:00
} // should ideally spend most of the time at highest concurrency levels
2024-02-15 00:43:03 +01:00
2024-01-02 15:45:40 +01:00
/** @test TODO determine the breaking point towards scheduler overload
2024-01-08 22:58:16 +01:00
* - use the integrated StressRig
* - demonstrate how parameters can be tweaked
* - perform a run , leading to a binary search for the breaking point
* @ note on my machine , I observe stress factors close below 0.5 , due to the fact
* that the ComputationalLoad typically takes 2 times as long in concurrent
* usage compared to its calibration , which is done in a tight loop . This
* is strange and may well be due to some peculiarity of my system . Which
* also implies that this test ' s behaviour might be difficult to verify ,
* other than by qualitative interpretation of the log output on STDOUT .
* @ see stress - test - rig . hpp
* @ todo WIP 1 / 24 ✔ define ⟶ ✔ implement
2024-01-02 15:45:40 +01:00
*/
void
search_breaking_point ( )
{
MARK_TEST_FUN
2024-01-02 21:46:44 +01:00
struct Setup : StressRig
{
usec LOAD_BASE = 500u s ;
uint CONCURRENCY = 4 ;
2024-01-03 23:53:44 +01:00
bool showRuns = true ;
2024-01-02 21:46:44 +01:00
auto testLoad ( ) { return TestChainLoad < > { 64 } . configureShape_chain_loadBursts ( ) ; }
} ;
2024-02-23 03:04:24 +01:00
auto [ stress , delta , time ] = StressRig : : with < Setup > ( )
. perform < bench : : BreakingPoint > ( ) ;
2024-02-19 17:36:46 +01:00
CHECK ( delta > 2.5 ) ;
CHECK ( 1.15 > stress and stress > 0.9 ) ;
2023-12-28 23:56:52 +01:00
}
2023-10-19 22:04:30 +02:00
/** @test TODO
2024-01-02 15:45:40 +01:00
* @ todo WIP 1 / 24 🔁 define ⟶ implement
2023-10-19 22:04:30 +02:00
*/
void
2024-01-10 20:39:20 +01:00
investigateWorkProcessing ( )
2023-10-19 22:04:30 +02:00
{
2024-01-10 20:39:20 +01:00
MARK_TEST_FUN
2024-02-11 03:53:42 +01:00
TestChainLoad < 8 > testLoad { 256 } ;
2024-01-11 22:03:36 +01:00
testLoad . seedingRule ( testLoad . rule ( ) . probability ( 0.6 ) . minVal ( 2 ) )
. pruningRule ( testLoad . rule ( ) . probability ( 0.44 ) )
. setSeed ( 55 )
. buildTopology ( )
2024-01-23 18:29:09 +01:00
// .printTopologyDOT()
// .printTopologyStatistics()
2024-01-11 22:03:36 +01:00
;
// ////////////////////////////////////////////////////////WIP : Run test directly for investigation of SEGFAULT....
// BlockFlowAlloc bFlow;
// EngineObserver watch;
// Scheduler scheduler{bFlow, watch};
2024-01-23 18:29:09 +01:00
auto LOAD_BASE = 500u s ;
2024-01-11 22:03:36 +01:00
// auto stressFac = 1.0;
// auto concurrency = 8;
//
2024-01-23 18:29:09 +01:00
ComputationalLoad cpuLoad ;
cpuLoad . timeBase = LOAD_BASE ;
cpuLoad . calibrate ( ) ;
2024-01-11 22:03:36 +01:00
//
2024-01-23 18:29:09 +01:00
double loadMicros = cpuLoad . invoke ( ) ;
2024-01-11 22:03:36 +01:00
// double refTime = testLoad.calcRuntimeReference(LOAD_BASE);
2024-01-23 18:29:09 +01:00
SHOW_EXPR ( loadMicros )
2024-01-11 22:03:36 +01:00
//
// auto testSetup =
// testLoad.setupSchedule(scheduler)
// .withLoadTimeBase(LOAD_BASE)
// .withJobDeadline(50ms)
// .withUpfrontPlanning()
// .withAdaptedSchedule (stressFac, concurrency);
// double runTime = testSetup.launch_and_wait();
// double expected = testSetup.getExpectedEndTime();
//SHOW_EXPR(runTime)
//SHOW_EXPR(expected)
//SHOW_EXPR(refTime)
2024-01-10 20:39:20 +01:00
struct Setup : StressRig
{
usec LOAD_BASE = 500u s ;
2024-01-23 18:29:09 +01:00
usec BASE_EXPENSE = 200u s ;
2024-02-11 03:53:42 +01:00
double UPPER_STRESS = 12 ;
//
2024-02-11 17:38:20 +01:00
double FAIL_LIMIT = 1.0 ; //0.7;
double TRIGGER_SDEV = 1.0 ; //0.25;
double TRIGGER_DELTA = 2.0 ; //0.5;
2024-01-10 20:39:20 +01:00
// uint CONCURRENCY = 4;
// bool SCHED_DEPENDS = true;
bool showRuns = true ;
auto
testLoad ( )
{
2024-02-11 03:53:42 +01:00
TestChainLoad < 8 > testLoad { 256 } ;
2024-01-10 20:39:20 +01:00
testLoad . seedingRule ( testLoad . rule ( ) . probability ( 0.6 ) . minVal ( 2 ) )
. pruningRule ( testLoad . rule ( ) . probability ( 0.44 ) )
2024-02-11 17:38:20 +01:00
. weightRule ( testLoad . value ( 1 ) )
2024-01-10 20:39:20 +01:00
. setSeed ( 55 ) ;
return testLoad ;
}
} ;
2024-02-23 03:04:24 +01:00
auto [ stress , delta , time ] = StressRig : : with < Setup > ( )
. perform < bench : : BreakingPoint > ( ) ;
2024-01-10 20:39:20 +01:00
SHOW_EXPR ( stress )
SHOW_EXPR ( delta )
SHOW_EXPR ( time )
2023-10-19 22:04:30 +02:00
}
/** @test TODO
2024-01-02 15:45:40 +01:00
* @ todo WIP 1 / 24 🔁 define ⟶ implement
2023-10-19 22:04:30 +02:00
*/
void
2023-12-19 21:06:23 +01:00
walkingDeadline ( )
2023-10-19 22:04:30 +02:00
{
}
} ;
/** Register this test class... */
LAUNCHER ( SchedulerStress_test , " unit engine " ) ;
} } } // namespace vault::gear::test