diff --git a/src/vault/gear/activity.hpp b/src/vault/gear/activity.hpp index 31d10d536..7201404ca 100644 --- a/src/vault/gear/activity.hpp +++ b/src/vault/gear/activity.hpp @@ -524,13 +524,6 @@ namespace gear { return activity::PASS; } - template - activity::Proc - dispatchNotify (Time now, EXE& executionCtx) - { - return executionCtx.post (now, this, executionCtx); - } - template activity::Proc notifyTarget (Time now, EXE& executionCtx) @@ -649,7 +642,7 @@ namespace gear { case WORKSTOP: return signalStop (now, executionCtx); case NOTIFY: - return dispatchNotify (now, executionCtx); + return dispatch (now, executionCtx); //▷ special processing for the Notification case GATE: return checkGate (now, executionCtx); case POST: @@ -678,9 +671,7 @@ namespace gear { * @note special twist for the `NOTIFY`-Activity: it is not _activated_ * itself, rather the #notify operation is invoked on its target argument; * this is necessary since a notification passes control-flow outside - * the regular linear `next`-chain; when a `NOTIFY` is _activated,_ - * it will `post()` itself to acquire the `GroomingToken` and then - * invoke this dispatch() function to pass the notification + * the regular linear `next`-chain. */ template activity::Proc @@ -690,12 +681,7 @@ namespace gear { switch (verb_) { case NOTIFY: - { - auto res = notifyTarget (now, executionCtx); - if (activity::PASS == res) - res=activity::SKIP; // prevent activation of NOTIFY.next - return res; - } + return notifyTarget (now, executionCtx); case POST: case FEED: // signal just to proceed with next... return activity::PASS; @@ -711,6 +697,9 @@ namespace gear { * a notification is passed to a `GATE`-Activity, the embedded counter is * decremented; after all prerequisites are „checked off“ this way, the * Activity-chain behind the Gate is activated. 
+ * @note this function is invoked from the context of the source, and + * thus any follow-up actions beyond that scope are re-POSTed, + * after possibly performing the GATE-check. */ template activity::Proc diff --git a/tests/vault/gear/scheduler-service-test.cpp b/tests/vault/gear/scheduler-service-test.cpp index 57bd6dcab..9960c4e4c 100644 --- a/tests/vault/gear/scheduler-service-test.cpp +++ b/tests/vault/gear/scheduler-service-test.cpp @@ -552,6 +552,7 @@ namespace test { processSchedule() { MARK_TEST_FUN + auto LOAD_BASE = 200us; TestChainLoad testLoad{64}; // .configureShape_short_segments3_interleaved() @@ -565,13 +566,13 @@ namespace test { // node hashes were computed, observing dependencies size_t expectedHash = testLoad.getHash(); - testLoad.performGraphSynchronously(); + testLoad.performGraphSynchronously(LOAD_BASE); CHECK (testLoad.getHash() == expectedHash); testLoad.printTopologyDOT() .printTopologyStatistics() ; - double referenceTime = testLoad.calcRuntimeReference(); + double referenceTime = testLoad.calcRuntimeReference(LOAD_BASE); SHOW_EXPR(referenceTime) BlockFlowAlloc bFlow; @@ -579,6 +580,7 @@ SHOW_EXPR(referenceTime) Scheduler scheduler{bFlow, watch}; testLoad.setupSchedule(scheduler) + .withLoadTimeBase(LOAD_BASE) .launch_and_wait(); // invocation through Scheduler has reproduced all node hashes diff --git a/tests/vault/gear/test-chain-load.hpp b/tests/vault/gear/test-chain-load.hpp index b07c5a9d6..56f2fcc22 100644 --- a/tests/vault/gear/test-chain-load.hpp +++ b/tests/vault/gear/test-chain-load.hpp @@ -1658,12 +1658,14 @@ namespace test { withLevelDuration (microseconds plannedTime_per_level) { levelSpeed_ = FrameRate{1, Duration{_uTicks(plannedTime_per_level)}}; + return move(*this); } ScheduleCtx&& withLoadFactor (uint factor_on_levelSpeed) { blockLoadFactor_ = factor_on_levelSpeed; + return move(*this); } ScheduleCtx&& @@ -1671,36 +1673,42 @@ namespace test { { chunkSize_ = nodes_per_chunk; preRoll_ = guessPlanningPreroll 
(chunkSize_); + return move(*this); } ScheduleCtx&& withPreRoll (microseconds planning_headstart) { preRoll_ = planning_headstart; + return move(*this); } ScheduleCtx&& withJobDeadline (microseconds deadline_after_start) { deadline_ = deadline_after_start; + return move(*this); } ScheduleCtx&& withManifestation (ManifestationID manID) { manID_ = manID; + return move(*this); } ScheduleCtx&& withLoadTimeBase (microseconds timeBase =LOAD_DEFAULT_TIME) { compuLoad_->timeBase = timeBase; + return move(*this); } ScheduleCtx&& deactivateLoad() { compuLoad_->timeBase = 0us; + return move(*this); } ScheduleCtx&& @@ -1716,6 +1724,7 @@ namespace test { compuLoad_->sizeBase = sizeBase; compuLoad_->useAllocation =true; } + return move(*this); } private: diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm index 82ba4b558..a0af7fbb6 100644 --- a/wiki/thinkPad.ichthyo.mm +++ b/wiki/thinkPad.ichthyo.mm @@ -80982,7 +80982,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -81034,6 +81034,19 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + +
@@ -81208,6 +81221,105 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + +

+ da am Beginn der Kette ohnehin ein Ctx.post vorangegangen ist +

+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ bestimme: dies soll jetzt geschehen +

+ +
+
+ + + + +

+ prüfe das Fenster +

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -85349,7 +85461,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -85553,7 +85665,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -85585,6 +85697,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+
@@ -95935,6 +96048,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + +
@@ -96364,7 +96480,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -102970,6 +103086,294 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ 164.458 / 1.016*100µs - 1 +

+ + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ in einem Fall sehe ich das sogar in einer Zeile als "HT" (head time) +

+ + +
+
+ + + + + + + + + + + + + + + + +

+ man sieht sofort wo das Problem liegt: die Deadlines +

+ + +
+ + + + + + + + + + + +

+ nebenbei bemerkt: jeder Dispatch geht auch nochmal durch ctx.post +

+ + +
+ + + + + + + + + + + + + + + + + + +

+ ...und zwar liegt die Wurzel in der Offenheit der Activity-Language; ich wollte (und will) diese nicht auf eine Implementierungs-Logik des Schedulers reduzieren; dadurch sind Redundanzen entstanden, und aus logischen Gründen müßte eigentlich das Zeitfenster [start,dead] vom initialen POST am Anfang der Kette gelten, zumindest »sinngemäß«

+ + +
+
+ + + + + + + + + + + + + + + +

+ wenn überhaupt, sollte die Deadline des Target gelten +

+ + +
+
+ + + + + + +

+ Deadlines betreffen die Aktivierung. Es ist die Aufgabe des Job-Planning, das per Verkettung zurückzuführen auf die gewünschte Ankunftszeit. Die Activity-Language könnte das gar nicht tun, denn ihr fehlt dazu die Information über Erfahrungswerte die Ausführungszeit betreffend +

+ + +
+
+
+ + + + + + + + + + + + + + +

+ Der Code ist wirklich performance-kritisch, und bis jetzt hab ich richtig gute Werte erzielt, durch genau diese Art Maßnahmen.

+ + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ ich weiß nicht, wie gut die CPU-Lasterzeugung funktioniert; wohl möglich, daß Cache-Effekte die tatsächliche Zeit in die Höhe treiben

+ + +
+
+ + + + + +
+
+
+
+
+
@@ -103180,6 +103584,24 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + +

+ tatsächlich ... wenn die Datenfelder base values sind +

+ +
+ +
+ + + +
@@ -103340,6 +103762,118 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Tracking-Ticket #1228 Implement Vertical Slice: play a clip +

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -104720,24 +105254,6 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - - - - - -

- tatsächlich ... wenn die Datenfelder base values sind -

- -
- -
- - - -