From 93729e566748f49e103f647bf9b2a433883ffd98 Mon Sep 17 00:00:00 2001 From: Ichthyostega Date: Mon, 19 Feb 2024 17:36:46 +0100 Subject: [PATCH] Scheduler-test: more precise accounting for expected concurrency It turns out to be incorrect to use all the divergence in concurrency as a form factor, since it is quite common that not all cores can be active at every level, given the structural constraints as dictated by the load graph. On the other hand, if the empirical work (non-wait-time) concurrency systematically differs from the simple model used for establishing the schedule, then this should indeed be considered a form factor and deducted from the effective stress factor, since it is not a reserve available for speed-up. The solution entertained here is to derive an effective compounded sum of weights from the calculation used to build the schedule. This compounded weight sum is typically lower than the plain sum of all node weights, which is precisely due to the theoretical amount of expense reduction assumed in the schedule generation. So this gives us a handle on the theoretically expected expense, and through the plain weight sum, we may draw conclusions about the effective concurrency expected in this schedule. Taking only this part as base for the empirical deviations yields search results very close to stressFactor ~1 -- implying that the test setup now observes what it was intended to observe... 
--- tests/vault/gear/scheduler-stress-test.cpp | 5 ++- tests/vault/gear/stress-test-rig.hpp | 6 ++-- tests/vault/gear/test-chain-load.hpp | 18 ++++++++-- wiki/thinkPad.ichthyo.mm | 40 +++++++++++++++++++++- 4 files changed, 59 insertions(+), 10 deletions(-) diff --git a/tests/vault/gear/scheduler-stress-test.cpp b/tests/vault/gear/scheduler-stress-test.cpp index 9a16567c8..e86f5db5f 100644 --- a/tests/vault/gear/scheduler-stress-test.cpp +++ b/tests/vault/gear/scheduler-stress-test.cpp @@ -350,15 +350,14 @@ namespace test { { usec LOAD_BASE = 500us; uint CONCURRENCY = 4; - bool SCHED_DEPENDS = true; bool showRuns = true; auto testLoad() { return TestChainLoad<>{64}.configureShape_chain_loadBursts(); } }; auto [stress,delta,time] = StressRig::with().searchBreakingPoint(); - CHECK (delta > 2.0); - CHECK (0.55 > stress and stress > 0.4); + CHECK (delta > 2.5); + CHECK (1.15 > stress and stress > 0.9); } diff --git a/tests/vault/gear/stress-test-rig.hpp b/tests/vault/gear/stress-test-rig.hpp index 69a084fd2..3a2ef78b5 100644 --- a/tests/vault/gear/stress-test-rig.hpp +++ b/tests/vault/gear/stress-test-rig.hpp @@ -183,7 +183,7 @@ namespace test { runTime[i] = testSetup.launch_and_wait() / 1000; avgT += runTime[i]; testSetup.adaptEmpirically (stressFac, CONF::CONCURRENCY); - this->adjustmentFac = testSetup.getStressFac() / stressFac; + this->adjustmentFac = 1 / (testSetup.getStressFac() / stressFac); } expT = testSetup.getExpectedEndTime() / 1000; avgT /= CONF::REPETITIONS; @@ -249,7 +249,7 @@ namespace test { _Fmt fmtRun_ {"....·%-2d: Δ=%4.1f t=%4.1f %s %s"}; // i % Δ % t % t>avg? % fail? 
- _Fmt fmtStep_{ "%4.2f| : ∅Δ=%4.1f±%-4.2f ∅t=%4.1f %s %%%3.1f -- expect:%4.1fms"}; // stress % ∅Δ % σ % ∅t % fail % pecentOff % t-expect + _Fmt fmtStep_{ "%4.2f| : ∅Δ=%4.1f±%-4.2f ∅t=%4.1f %s %%%-3.0f -- expect:%4.1fms"};// stress % ∅Δ % σ % ∅t % fail % pecentOff % t-expect _Fmt fmtResSDv_{"%9s= %5.2f ±%4.2f%s"}; _Fmt fmtResVal_{"%9s: %5.2f%s"}; @@ -267,7 +267,7 @@ namespace test { if (CONF::showStep) cout << fmtStep_ % res.stressFac % res.avgDelta % res.stdDev % res.avgTime % (decideBreakPoint(res)? "—◆—":"—◇—") - % res.percentOff % res.expTime + % (100*res.percentOff) % res.expTime << endl; } diff --git a/tests/vault/gear/test-chain-load.hpp b/tests/vault/gear/test-chain-load.hpp index 0dcadf263..7f788d990 100644 --- a/tests/vault/gear/test-chain-load.hpp +++ b/tests/vault/gear/test-chain-load.hpp @@ -834,7 +834,7 @@ namespace test { /** overall sum of configured node weights **/ size_t - getWeightSum() + calcWeightSum() { return allNodes() .transform([](Node& n){ return n.weight; }) @@ -870,6 +870,15 @@ namespace test { }); } + /** calculate the simplified/theoretic reduction of compounded weight through concurrency */ + double + calcExpectedCompoundedWeight (uint concurrency =1) + { + return allLevelWeights() + .transform([concurrency](LevelWeight const& lw){ return computeWeightFactor (lw, concurrency); }) + .resultSum(); + } + Statistic computeGraphStatistics(); @@ -1969,8 +1978,11 @@ namespace test { concurrency = defaultConcurrency(); double worktimeRatio = 1 - stat.timeAtConc(0) / stat.coveredTime; double workConcurrency = stat.avgConcurrency / worktimeRatio; - double formFac = concurrency / workConcurrency; - double expectedNodeTime = _uSec(compuLoad_->timeBase) * chainLoad_.getWeightSum() / chainLoad_.size(); + double weightSum = chainLoad_.calcWeightSum(); + double expectedCompoundedWeight = chainLoad_.calcExpectedCompoundedWeight(concurrency); + double expectedConcurrency = weightSum / expectedCompoundedWeight; + double formFac = 1 / (workConcurrency 
/ expectedConcurrency); + double expectedNodeTime = _uSec(compuLoad_->timeBase) * weightSum / chainLoad_.size(); double realAvgNodeTime = stat.activeTime / stat.activationCnt; formFac *= realAvgNodeTime / expectedNodeTime; return withAdaptedSchedule (stressFac, concurrency, formFac); diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm index b23dba96d..e2c256678 100644 --- a/wiki/thinkPad.ichthyo.mm +++ b/wiki/thinkPad.ichthyo.mm @@ -111634,7 +111634,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -111665,6 +111665,44 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + +

+ ...die Berechnung läuft zwar genauso, nämlich über eine Gruppierung per Level, jedoch müssen dann nur die reinen Nodes pro Level berücksichtigt werden +

+ + +
+
+ + + + + + +

+ weil die Gewichte entsprechend proportional auch in die durchschnittliche empirische Concurrency eingehen +

+ + +
+
+ + + + + + + + +