diff --git a/src/vault/gear/work-force.hpp b/src/vault/gear/work-force.hpp index e708dd063..523c759cc 100644 --- a/src/vault/gear/work-force.hpp +++ b/src/vault/gear/work-force.hpp @@ -63,10 +63,24 @@ namespace gear { short nothing_; public: + template explicit - WorkForce (char const& b) - : nothing_(b) + WorkForce (FUN&& fun) + : nothing_{42} { } + + + void + activate (double degree =1.0) + { + UNIMPLEMENTED ("scale up"); + } + + void + deactivate() + { + UNIMPLEMENTED ("scale down to halt"); + } }; diff --git a/tests/32scheduler.tests b/tests/32scheduler.tests index 97a900a22..5d1286fd4 100644 --- a/tests/32scheduler.tests +++ b/tests/32scheduler.tests @@ -8,7 +8,7 @@ END -Test "BlockFlow memory management scheme" BlockFlow_test < + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of + the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ +* *****************************************************/ + +/** @file work-force-test.cpp + ** unit test \ref WorkForce_test + */ + + +#include "lib/test/run.hpp" +#include "vault/gear/work-force.hpp" +//#include "lib/time/timevalue.hpp" +//#include "lib/format-cout.hpp" +//#include "lib/util.hpp" + +//#include +//#include +#include + +using test::Test; +//using std::move; +//using util::isSameObject; + + +namespace vault{ +namespace gear { +namespace test { + + using std::this_thread::sleep_for; + using namespace std::chrono_literals; +// using lib::time::FrameRate; +// using lib::time::Offset; +// using lib::time::Time; + + + + + + /*************************************************************************//** + * @test WorkForce-Service: maintain a pool of active worker threads. + * @see SchedulerUsage_test + */ + class WorkForce_test : public Test + { + + virtual void + run (Arg) + { + simpleUsage(); + walkingDeadline(); + setupLalup(); + } + + + /** @test demonstrate simple worker pool usage + */ + void + simpleUsage() + { + uint check{0}; + + WorkForce wof{[&]{ ++check; return true; }}; + + CHECK (0 == check); + + wof.activate(); + sleep_for(20ms); + + CHECK (0 < check); + } + + + + /** @test TODO + */ + void + walkingDeadline() + { + } + + + + /** @test TODO + */ + void + setupLalup() + { + } + }; + + + /** Register this test class... */ + LAUNCHER (WorkForce_test, "unit engine"); + + + +}}} // namespace vault::mem::test diff --git a/wiki/renderengine.html b/wiki/renderengine.html index 72901cd16..9ddc34d8b 100644 --- a/wiki/renderengine.html +++ b/wiki/renderengine.html @@ -7245,6 +7245,19 @@ The primary scaling effects exploited to achieve this level of performance are t The way other parts of the system are built, requires us to obtain a guaranteed knowledge of some job's termination. It is possible to obtain that knowledge with some limited delay, but it nees to be absoultely reliable (violations leading to segfault). 
The requirements stated above assume this can be achieved through //jobs with guaranteed execution.// Alternatively we could consider installing specific callbacks -- in this case the scheduler itself has to guarantee the invocation of these callbacks, even if the corresponding job fails or is never invoked. It doesn't seem there is any other option. +
+
The Scheduler //maintains a ''Work Force'' (a pool of workers) to perform the next [[render activities|RenderActivity]] continuously.//
+Each worker runs in a dedicated thread; the Activities are arranged in a way to avoid blocking those worker threads
+* IO operations are performed asynchronously {{red{planned as of 9/23}}}
+* the actual calculation job is started only when all prerequisites are available
+!Workload and invocation scheme
+Using a pool of workers to perform small isolated steps of work atomically and in parallel is a well-established pattern in high performance computing. However, the workload for rendering media is known to have some distinctive traits, calling for a slightly different approach compared with an operating system scheduler or a load balancer. Notably, the demand for resources is high, often using „whatever it takes“ -- driving the system into load saturation. The individual chunks of work, which can be computed independently, are comparatively large, and must often be computed in a constrained order. For real-time performance, it is desirable to compute data as late as possible, to avoid blocking memory with computed results. And for the final quality render, for the same reason it is advisable to proceed in data dependency order to keep as much data as possible in memory and avoid writing temporary files.
+
+This leads to a situation where it is more adequate to //distribute the scarce computation resources// to the tasks //sequenced in temporal and dependency order//. The computation tasks must be prepared and ordered -- but beyond that, there is not much that can be »managed« with a computation task. For this reason, the Scheduler in the Lumiera Render Engine uses a pool of workers, each providing one unit of computation resource (a »core«), and these workers will ''pull work'' actively, rather than distributing, queuing and dispatching tasks to a passive set of workers.
+
+Moreover, the actual computation tasks, which can be parallelised, are at least an order of magnitude more expensive than any administrative work for sorting tasks, checking dependencies and maintaining process state. This leads to a scheme where a worker first performs some »management work«, until encountering the next actual computation job, at which point the worker leaves the //management mode// and transitions into //concurrent work mode//. All workers are expected to be in work mode almost all of the time, and thus we can expect little contention between workers performing »management work« -- allowing us to confine this management work to //single threaded operation,// thereby drastically reducing the complexity of management data structures and memory allocation.
+
+
A link to relate a compound of [[nested placement scopes|PlacementScope]] to the //current// session and the //current//&nbsp; [[focus for querying|QueryFocus]] and exploring the structure. ScopeLocator is a singleton service, allowing to ''explore'' a [[Placement]] as a scope, i.e. discover any other placements within this scope, and allowing to locate the position of this scope by navigating up the ScopePath finally to reach the root scope of the HighLevelModel.
 
diff --git a/wiki/thinkPad.ichthyo.mm b/wiki/thinkPad.ichthyo.mm
index 8dd51823b..9fcc8b18a 100644
--- a/wiki/thinkPad.ichthyo.mm
+++ b/wiki/thinkPad.ichthyo.mm
@@ -79314,10 +79314,15 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + @@ -79440,11 +79445,188 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ ↯ eine Standard-Lösung impliziert auch ein Ausführungs-Framework +

+ +
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ die Limitierungen und der Threadpool waren gut gemeint +

+ +
+ +
+ + + + + + +

+ ...wenn es sich ohnehin nur um eine Einrichtung innerhalb einer bestimmten Applikation handelt, dann bleibt lediglich die Frage übrig, wohin der Quell/Objektcode gepackt wird +

+ +
+
+ + + + +

+ Da Library-Funktionen ohne Weiteres genutzt werden, zwingt das dazu, statische Instanzen und versteckte magische Initialisierung zu verwenden +

+ +
+
+ + + + +

+ Sprachmittel sind stets aus gutem Grund da. Entweder es gibt aktuell adäquaten Nutzen, oder es gab früher mal einen relevanten Nutzen. Daher stellt es eine Anmaßung dar, wenn jemand nun einfach rundheraus behauptet, es sei falsch dieses Sprachmittel zu verwenden. Wenn man einen Anfänger ohne ausreichende Kompetenzen entlasten möchte, oder wenn man unter Einschränkung ein bestimmtes Nutmuster vorgeben möchte, dann baut man ein Framework, und keine Library. +

+ +
+
+
+ + + + +

+ in allen sonstigen Eigenschaften hatte unsere Library-Lösung genau das Design gewählt, +

+

+ das sich nun auch im Standard als angemessen herausgebildet hat — +

+

+ aber die Standard-Lösung ist ausgereifter und verwendet moderne Sprachmittel +

+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -80574,6 +80756,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -83367,6 +83550,17 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + +
@@ -83396,7 +83590,12 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + @@ -83430,6 +83629,51 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +