diff --git a/src/vault/gear/block-flow.hpp b/src/vault/gear/block-flow.hpp index b7f8b5111..70f25559a 100644 --- a/src/vault/gear/block-flow.hpp +++ b/src/vault/gear/block-flow.hpp @@ -559,7 +559,7 @@ namespace gear { * Activity deadlines are evenly spaced, for a simple heuristic we can just divide * actual Epoch duration by the fill factor (longer Epoch => less capacity). * To avoid control oscillations however, it seems prudent to use damping by - * an exponential moving average, nominally over #AVERAGE_EPOCHS. + * an exponential moving average, nominally over #Strategy::averageEpochs(). * The current epochStep_ is assumed to be such a moving average, * and will be updated accordingly. */ @@ -578,7 +578,7 @@ namespace gear { // damped adjustment towards ideal size double contribution = double(_raw(actualLen)) / _raw(epochStep_) / adjust; - // Exponential MA: mean ≔ mean * (N-1)/N + newVal/N + // Exponential MA: mean ≔ mean · (N-1)/N + newVal/N auto N = Strategy::averageEpochs(); double avgFactor = (contribution + N-1) / N; adjustEpochStep (avgFactor); diff --git a/src/vault/gear/load-controller.hpp b/src/vault/gear/load-controller.hpp index 3444b8ae5..a0fd936df 100644 --- a/src/vault/gear/load-controller.hpp +++ b/src/vault/gear/load-controller.hpp @@ -29,9 +29,9 @@ ** scenarios, a wide array of load patterns may be encountered, complicating ** any generic performance optimisation. Rather, the participating components ** are designed to withstand a short-term imbalance, expecting that general - ** engine parametrisation will be adjusted based on moving averages. + ** engine parametrisation will be adjusted based on moving averages. 
** - ** @see scheduler.hpp + ** @see scheduler.hpp ** @see SchedulerStress_test ** ** @todo WIP-WIP-WIP 10/2023 »Playback Vertical Slice« @@ -45,11 +45,13 @@ #include "lib/error.hpp" //#include "vault/gear/block-flow.hpp" +#include "vault/gear/activity-lang.hpp" //#include "lib/symbol.hpp" #include "lib/nocopy.hpp" //#include "lib/util.hpp" //#include +#include namespace vault{ @@ -57,6 +59,7 @@ namespace gear { // using util::isnil; // using std::string; + using std::chrono::microseconds; /** @@ -75,6 +78,43 @@ namespace gear { LoadController (BlockFlowAlloc& blockFlow) : allocator_{blockFlow} { } + + + /** + * @note const and non-grooming + */ + bool + tendedNext() const + { + UNIMPLEMENTED ("Predicate to determine if next foreseeable Activity was tended for"); + } + + void + tendNext() + { + UNIMPLEMENTED ("tend for the next foreseeable Activity"); + } + + enum + Capacity {SPINTIME ///< imminent activities + ,NEARTIME ///< capacity for active processing required + ,WORKTIME ///< typical stable work task rhythm expected + ,IDLETIME ///< time to go to sleep + }; + + Capacity + classifyCapacity() const + { + UNIMPLEMENTED ("establish a categorisation for available capacity"); + } + + + microseconds + scatteredDelayTime() + { + UNIMPLEMENTED ("establish a randomised targeted delay time"); + } + }; diff --git a/src/vault/gear/scheduler.hpp b/src/vault/gear/scheduler.hpp index 2680328a8..ce847368c 100644 --- a/src/vault/gear/scheduler.hpp +++ b/src/vault/gear/scheduler.hpp @@ -33,7 +33,7 @@ ** @see SchedulerInvocation Layer-1 ** @see SchedulerCommutator Layer-2 ** - ** @todo WIP-WIP-WIP 6/2023 »Playback Vertical Slice« + ** @todo WIP-WIP 10/2023 »Playback Vertical Slice« ** */ @@ -75,7 +75,7 @@ namespace gear { /******************************************************//** * »Scheduler-Service« : coordinate render activities. 
- * @todo WIP-WIP 6/2023 + * @todo WIP-WIP 10/2023 * @see BlockFlow * @see SchedulerUsage_test */ @@ -112,6 +112,16 @@ namespace gear { { } + /** + * + */ + void + ignite() + { + UNIMPLEMENTED("suicide"); + } + + /** * */ @@ -169,6 +179,17 @@ namespace gear { } + /** + * send this thread into a targeted short-time wait. + * @return how to proceed further with this worker + */ + activity::Proc + scatteredDelay() + { + UNIMPLEMENTED("scattered short-term delay"); + } + + /** @internal expose a binding for Activity execution */ class ExecutionCtx; }; diff --git a/tests/32scheduler.tests b/tests/32scheduler.tests index e6a6d3c57..3619dab13 100644 --- a/tests/32scheduler.tests +++ b/tests/32scheduler.tests @@ -32,6 +32,12 @@ END +TEST "Scheduler Load-Control" SchedulerLoadControl_test < + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ +* *****************************************************/ + +/** @file scheduler-load-control-test.cpp + ** unit test \ref SchedulerLoadControl_test + */ + + +#include "lib/test/run.hpp" +#include "vault/gear/load-controller.hpp" +//#include "lib/time/timevalue.hpp" +//#include "lib/format-cout.hpp" +//#include "lib/util.hpp" + +//#include + +using test::Test; +//using std::move; +//using util::isSameObject; + + +namespace vault{ +namespace gear { +namespace test { + +// using lib::time::FrameRate; +// using lib::time::Offset; +// using lib::time::Time; + + + + + + /*************************************************************************//** + * @test verify behaviour patterns relevant for Scheduler load control. + * @see SchedulerCommutator_test + * @see SchedulerService_test + * @see SchedulerStress_test + */ + class SchedulerLoadControl_test : public Test + { + + virtual void + run (Arg) + { + simpleUsage(); + walkingDeadline(); + setupLalup(); + } + + + /** @test TODO demonstrate a simple usage scenario + * @todo WIP 10/23 ✔ define ⟶ 🔁 implement + */ + void + simpleUsage() + { + BlockFlowAlloc bFlow; + LoadController lcontrl{bFlow}; + } + + + + /** @test TODO + * @todo WIP 10/23 🔁 define ⟶ implement + */ + void + walkingDeadline() + { + UNIMPLEMENTED ("walking Deadline"); + } + + + + /** @test TODO + * @todo WIP 10/23 🔁 define ⟶ implement + */ + void + setupLalup() + { + } + }; + + + /** Register this test class... 
*/ + LAUNCHER (SchedulerLoadControl_test, "unit engine"); + + + +}}} // namespace vault::gear::test diff --git a/tests/vault/gear/scheduler-service-test.cpp b/tests/vault/gear/scheduler-service-test.cpp index c2aef3aa2..8db9e7100 100644 --- a/tests/vault/gear/scheduler-service-test.cpp +++ b/tests/vault/gear/scheduler-service-test.cpp @@ -55,6 +55,7 @@ namespace test { * @see SchedulerActivity_test * @see SchedulerInvocation_test * @see SchedulerCommutator_test + * @see SchedulerLoadControl_test */ class SchedulerService_test : public Test { diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm index 2b6c0e694..4299286ff 100644 --- a/wiki/thinkPad.ichthyo.mm +++ b/wiki/thinkPad.ichthyo.mm @@ -80558,7 +80558,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + @@ -82122,6 +82124,314 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + +

+ um das System nicht unnötig zu belasten +

+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +

+ Das bedeutet: bei nur vereinzelt vorhandenen Tasks dürfen diese nicht zufällig verteilt werden (sonst bleiben alle Worker am Leben). Vielmehr muß bevorzugt derjenige den nächsten Task bekommen, der ohnehin gerade gearbeitet hat

+ + +
+
+ + + + + + + + +

+ Idler: findet direkt beim Eintritt in pullWork() nichts zu tun vor +

+ + +
+
+ + + + + + +

+ Worker: findet nach einem Work-Zyklus nichts zu tun vor +

+ + +
+
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + +

+ ...weil der water-level ein internes Implementierungsdetail von Layer-1 ist, und der eigentliche Dispatch mit "now" erfolgt. Möglicherweise hab' ich mich da verrannt — andererseits wollte ich ganz explizit nicht überall und in jeder Activity auch noch eine Startzeit mitschleppen, sondern habe mich darauf verlegt, diese Information kontextuell zu handhaben

+ + +
+
+ + + + + + + +

+ ...und ich hatte kürzlich noch solche Zweifel ob das Design komplett entgleist, habe mich dann aber entschieden, locker zu lassen +

+ + +
+ + +
+
+
+ + + +
+ + + +
+ + + + + + + + + + + +

+ damit im Schnitt jedes 1/Nte - Warte-Intervall sich ein Worker meldet +

+ + +
+ + + + + + + + + + + +

+ denn: innerhalb der zeitnah-Phase wird jeder verfügbare Worker per gezieltem Schlaf auf die nächste headTime gesetzt, und damit ist er bis dahin geblockt. Also sollte es extrem unwahrscheinlich sein, daß inzwischen so kurzfristig noch was dazwischen geplant wird

+ + +
+
+ + +
+ + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ man schickt einen Worker in »scattered delay« +

+ + +
+ + + + + + + + +
+
+ + + +
+
@@ -82133,6 +82443,21 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + @@ -87086,6 +87411,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + @@ -88314,6 +88642,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -89312,6 +89641,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -89399,6 +89729,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+
@@ -89637,6 +89968,53 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + +

+ ⟹ bedeutet für jedes Ereignis auch eine Kategorisierung zu erfassen, so daß dann in der Auswertung später proportionale Anteile beobachtbar werden +

+ + +
+ + + + + + + + + + + + + + + + @@ -90423,9 +90801,18 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + @@ -90519,6 +90906,10 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + +