diff --git a/tests/vault/gear/scheduler-stress-test.cpp b/tests/vault/gear/scheduler-stress-test.cpp index 974672ab7..003ea90a4 100644 --- a/tests/vault/gear/scheduler-stress-test.cpp +++ b/tests/vault/gear/scheduler-stress-test.cpp @@ -296,6 +296,7 @@ SHOW_EXPR(micros); { usec LOAD_BASE = 500us; uint CONCURRENCY = 4; + bool SCHED_DEPENDS = true; bool showRuns = true; auto testLoad() { return TestChainLoad<>{64}.configureShape_chain_loadBursts(); } @@ -306,7 +307,7 @@ SHOW_EXPR(micros); SHOW_EXPR(stress) SHOW_EXPR(delta) SHOW_EXPR(time) - CHECK (delta > 4.0); + CHECK (delta > 2.0); CHECK (0.55 > stress and stress > 0.4); } diff --git a/tests/vault/gear/stress-test-rig.hpp b/tests/vault/gear/stress-test-rig.hpp index 60d6496f1..f125755cd 100644 --- a/tests/vault/gear/stress-test-rig.hpp +++ b/tests/vault/gear/stress-test-rig.hpp @@ -159,7 +159,9 @@ namespace test { void configureTest (TestSetup& testSetup, double stressFac) { - testSetup.withLoadTimeBase (CONF::LOAD_BASE) + testSetup.withLoadTimeBase(CONF::LOAD_BASE) + .withBaseExpense (CONF::BASE_EXPENSE) + .withSchedDepends(CONF::SCHED_DEPENDS) .withAdaptedSchedule(stressFac, CONF::CONCURRENCY); } @@ -328,6 +330,8 @@ namespace test { using usec = std::chrono::microseconds; usec LOAD_BASE = 500us; + usec BASE_EXPENSE = 0us; + bool SCHED_DEPENDS = false; uint CONCURRENCY = work::Config::getDefaultComputationCapacity(); double EPSILON = 0.01; ///< error bound to abort binary search double UPPER_STRESS = 0.6; ///< starting point for the upper limit, likely to fail diff --git a/tests/vault/gear/test-chain-load.hpp b/tests/vault/gear/test-chain-load.hpp index 4126b20cc..cbb4ac354 100644 --- a/tests/vault/gear/test-chain-load.hpp +++ b/tests/vault/gear/test-chain-load.hpp @@ -193,8 +193,8 @@ namespace test { const Duration SCHEDULE_LEVEL_STEP{_uTicks(1ms)}; ///< time budget to plan for the calculation of each »time level« of jobs const Duration SCHEDULE_NODE_STEP{Duration::NIL}; ///< additional time step to include in 
the plan for each job (node). const Duration SCHEDULE_PLAN_STEP{_uTicks(100us)}; ///< time budget to reserve for each node to be planned and scheduled - const bool SCHEDULE_DEPENDENCY = false; ///< explicitly schedule a dependent job (or rely on NOTIFY) - const bool SCHEDULE_NOTIFY = true; ///< explicitly set notify dispatch time to the dependencie's start time. + const bool SCHED_DEPENDS = false; ///< explicitly schedule a dependent job (or rely on NOTIFY) + const bool SCHED_NOTIFY = true; ///< explicitly set notify dispatch time to the dependency's start time. inline uint defaultConcurr() @@ -1358,7 +1358,7 @@ namespace test { { auto round = roundsNeeded (scaleStep); Sink sink; - size_t scree{0x55DEAD55}; + size_t scree{sink}; for ( ; 0 < round; --round) boost::hash_combine (scree,scree); sink = scree; @@ -1369,12 +1369,14 @@ namespace test { causeMemProcessLoad (uint scaleStep) { auto [siz,round] = allocNeeded (scaleStep); - lib::UninitialisedDynBlock memBlock{siz}; - ++*memBlock.front(); + lib::UninitialisedDynBlock memBlock{siz}; + Sink sink; + *memBlock.front() = sink+1; for ( ; 0 < round; --round) for (size_t i=0; i - - - +

anders kann ich mir das nicht erklären, denn es ist ja nun immer nur eine kleine Zahl an Einträgen in der Queue, d.h. es besteht durchaus die Gefahr, daß sich Worker schlafen legen, weil die Queue scheinbar leer ist. Tatsächlich passiert das aber nur in den Abschnitten mit geringer Parallelität (ja der Doppel-Strang wird nun nur von einem Worker bearbeitet)

- -
+
@@ -108026,16 +108023,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

muß dann die Info über zwei Ebenen durchreichen...?

- -
+
@@ -110653,22 +110647,17 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

die typische Test-Load ~ 500µs braucht im Scheduler-Kontext oft doppelt so lange wie kalibriert

- -
+ - - - +

nachdem ich aber nur noch per NOTIFY triggere,
@@ -110680,11 +110669,58 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
Nur an einer Stelle mit vielen aktiven Workern treten mal wieder 2ms auf

- -
+ + + + + + +

+ 0.42 statt 0.56 +

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +

+ ohne schedule-dependency ist die Performance aber minimal schlechter +

+ +
+ + + + + +