From 90ab20be61403c4f03cb71c919b1ea5fa609c534 Mon Sep 17 00:00:00 2001 From: Ichthyostega Date: Thu, 21 Dec 2023 23:26:01 +0100 Subject: [PATCH] Scheduler-test: press harder with long and massive graph ...observing multiple failures, which seem to be interconnected - the test-setup with the planning chunk pre-roll is insufficient - basically it is not possible to perform further concurrent planning, without getting behind the actual schedule; at least in the setup with DUMP print statements (which slowdown everything) - muliple chained re-entrant calls into the planning function can result - the **ASSERTION in the Allocator** was triggered again - the log+stacktrace indicate that there **is still a Gap** in the logic to protect the allocations via Grooming-Token --- tests/vault/gear/scheduler-stress-test.cpp | 8 +- tests/vault/gear/test-chain-load.hpp | 2 +- wiki/thinkPad.ichthyo.mm | 932 ++++++++++++++++++--- 3 files changed, 819 insertions(+), 123 deletions(-) diff --git a/tests/vault/gear/scheduler-stress-test.cpp b/tests/vault/gear/scheduler-stress-test.cpp index ef0dd6a50..08487792e 100644 --- a/tests/vault/gear/scheduler-stress-test.cpp +++ b/tests/vault/gear/scheduler-stress-test.cpp @@ -84,10 +84,11 @@ namespace test { smokeTest() { MARK_TEST_FUN - TestChainLoad testLoad{128}; + TestChainLoad testLoad{512}; testLoad.configureShape_chain_loadBursts() .buildToplolgy() - .printTopologyDOT(); +// .printTopologyDOT() + ; auto stats = testLoad.computeGraphStatistics(); cout << _Fmt{"Test-Load: Nodes: %d Levels: %d ∅Node/Level: %3.1f Forks: %d Joins: %d"} @@ -120,7 +121,8 @@ namespace test { double performanceTime = testLoad.setupSchedule(scheduler) .withLoadTimeBase(LOAD_BASE) - .withJobDeadline(50ms) + .withJobDeadline(100ms) + .withChunkSize(20) .launch_and_wait(); cout << "runTime(Scheduler): "< - - - +

..zum einen der Namespace: das führt dann zu using-Klauseln an vielen Stellen @@ -57608,43 +57606,34 @@ ...außerdem die Sichtbarkeit: solche Definitionen erzeugen einen Sog

- -
+
- - - +

...und zwar, weil for all practical purposes entweder igendwo iteriert wird (iter-adapter sind includiert) oder eine Format-Operation stattfindet oder zumindest eines der elementaren Metaprogrammnig-Hilfsmittel indirekt zum Einsatz kommt. D.h. die Wahrscheinlichkeit, daß lib/meta/util.hpp »zufällig schon« includiert wird, ist hoch, und damit kann dieser Sündenfall unter dem Radar fliegen

- -
+
- - - +

solange es nur limitiert genutzt wird, ist mir das lieber, als einen zentralen Header zu schaffen, der dann überall includiert wird und Begehrlichkeiten wecken könnte

- -
+
- - - +

lb(1GiB) ≡ 30 @@ -57653,16 +57642,13 @@ wrap-around droht

- -
+ - - - +
  • @@ -57676,8 +57662,7 @@
- -
+
@@ -105002,7 +104987,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -105012,9 +104997,9 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + - + @@ -105041,6 +105026,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200

+ @@ -105409,7 +105395,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -105422,10 +105408,20 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + + + + + + @@ -105774,7 +105770,17 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + +

+ das eigentliche Problem ist, daß der Planer sein eigenes Schedule überfährt +

+ +
+ + @@ -105813,6 +105819,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + +
@@ -105870,8 +105883,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -106468,7 +106481,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -106519,9 +106532,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

das ist aber keine dramatische Verschlechterung, vielmehr nur sichtbar, wenn man genau nachrechnet @@ -106543,16 +106554,14 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + - + - - - +

... dispose(i=66,lev:25) -> @25000 @@ -106685,22 +106694,20 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + - - - +

extent-family.hpp line 105

- -
+
@@ -106711,22 +106718,17 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

extent-family.hpp l.201

- -
+
- - - +

          if (not canAccomodate (cnt)) @@ -106750,8 +106752,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
              extents_.resize (oldSiz + addSiz);

- -
+
@@ -106770,21 +106771,16 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

block-flow.hpp, line 489

- -
+ - - - +

              if (lastEpoch().deadline() < deadline) @@ -106820,8 +106816,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
                  alloc_.openNew(requiredNew);

- -
+
@@ -106846,16 +106841,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

Fazit: es ist ein einziger Call — der verlangt Unmögiches vom Memory-Manager

- -
+
@@ -106874,59 +106866,47 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

...es geht hier um Performance-Messungen, die allerdings auch schon im Debug-Build aussagekräftig sein sollen (den Release-Build betrachte ich dann als zusätzlichen Bonus); deshalb habe ich auf viele Sicherheitsmaßnamen und Diagnose-Hilfsmittel verzichtet, die ich normalerweise in Tests einsetze

- -
+
- - - +

nun schon mehrfach aufgefallen: die Allokationen passieren schon vor der Verarbeitung

- -
+ - - - +

ist gut, und setzt auch eine Begrenzung durch, die den Allokator vor Überforderung schützen soll, kommt aber leider zu spät für den Allokator, der ist dann schon tot

- -
+
- - - +

...hab ich in den letzten Tagen aufgeklärt und gefixt: das Grooming-Token wurde im Scheduler gedroppt, wird aber im Planungs-Job gebraucht, denn die Allokation erfolgt dort (also früher als gedacht)

- -
+
@@ -106949,9 +106929,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

commit 892099412cdf171d9a7f960c4a5e2d2b063bd5fc @@ -107008,9 +106986,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

der sanityCheck prüft also vornehmlich daß es eine Deadline gibt @@ -107022,9 +106998,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

...dazu müßte nämlich der BlockFlow viel dynamischer seine Config auswerten — lästiges Thema — YAGNI @@ -107040,9 +107014,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

wähle hier 8 GiB für die totale Allokation @@ -107056,9 +107028,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

begrenze hier konsistent mit dem Scheduler auf +3000 neue Blöcke pro Schritt @@ -107068,9 +107038,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

weil das Limit im BlockFlow-Allokator nur greift, wenn wir tatsächlich  auf Minimal-Blockggröße unten sind, während der Scheduler eine feste Obergrenze für die Deadlines erzwingt. @@ -107089,9 +107057,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

und ist jetzt sogar schenller als der single-threaded Fall @@ -107106,7 +107072,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -107124,6 +107090,693 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + +
+ + + + + + + + + + +

+ die veranschlagte Zeit ist viel zu kurz +

+ +
+
+ + + + + + + + + + + + + + + + + + + +

+ Die Grundannahme war, daß die Planung in einem sicheren Environment erfolgt und nicht korrupt ist. Das gilt für die Lumiera-Engine insgesamt. Deshalb wird auf spezielle Härtung bei der Planung verzichtet (zusätzliche Konsistenz-Checks, Synchronisation, Commits/Transaktionen) +

+ +
+
+
+ + + + + + + + + + + + + + + + + + +

+           Extent& +

+

+           access() +

+

+             { +

+

+               auto* rawStorage = this->get(); +

+

+               ENSURE (rawStorage != nullptr);  +

+

+               return static_cast<Extent&> (rawStorage->array()); +

+

+             } +

+ +
+
+ + + + + + +

+ vault::mem::ExtentFamily<vault::gear::Activity, 500ul>::access(unsigned long) const+0xdd +

+

+ +

+

+ vault::mem::ExtentFamily<vault::gear::Activity, 500ul>::IdxLink::yield() const+0x26 +

+

+ +

+

+ vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::StorageAdaptor::yield() const+0x18 +

+

+ +

+

+ lib::IterableDecorator<vault::gear::blockFlow::Epoch<vault::mem::ExtentFamily<vault::gear::Activity, 500ul> >, vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::StorageAdaptor>::operator->() const+0x20 +

+

+ +

+

+ vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::AllocatorHandle::claimSlot()+0x2b +

+

+ +

+

+ vault::gear::Activity& vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::AllocatorHandle::create<vault::gear::Activity::Verb>(vault::gear::Activity::Verb&&)+0x2e +

+

+ +

+

+ vault::gear::activity::Term::appendNotificationTo(vault::gear::activity::Term&)+0x2a +

+

+ +

+

+ vault::gear::ScheduleSpec::linkToSuccessor(vault::gear::ScheduleSpec&)+0x43 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::setDependency(vault::gear::test::TestChainLoad<16ul>::Node*, vault::gear::test::TestChainLoad<16ul>::Node*)+0xad +

+

+ +

+

+ auto vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(auto:1*, auto:2*)#2}::operator()<vault::gear::test::TestChainLoad<16ul>::Node, {lambda(auto:1*, auto:2*)#2}::operator()>(vault::gear::test::TestChainLoad<16ul>::Node*, {lambda(auto:1*, auto:2*)#2}::operator()*) const+0x2e +

+

+ +

+

+ std::_Function_handler<void (vault::gear::test::TestChainLoad<16ul>::Node*, vault::gear::test::TestChainLoad<16ul>::Node*), vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(auto:1*, auto:2*)#2}>::_M_invoke(std::_Any_data const&, vault::gear::test::TestChainLoad<16ul>::Node*&&, std::_Any_data const&)+0x52 +

+

+ +

+

+ std::function<void (vault::gear::test::TestChainLoad<16ul>::Node*, vault::gear::test::TestChainLoad<16ul>::Node*)>::operator()(vault::gear::test::TestChainLoad<16ul>::Node*, vault::gear::test::TestChainLoad<16ul>::Node*) const+0x61 +

+

+ +

+

+ vault::gear::test::RandomChainPlanFunctor<16ul>::invokeJobOperation(lumiera_jobParameter_struct const&)+0x2fc +

+

+ +

+

+ vault::gear::Activity::invokeFunktor(lib::time::Time)+0x740 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::Activity::activate<vault::gear::Scheduler::ExecutionCtx>(lib::time::Time, vault::gear::Scheduler::ExecutionCtx&)+0x70 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::activateChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x4e +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::dispatchChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x68 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::SchedulerCommutator::postDispatch<vault::gear::Scheduler::ExecutionCtx>(vault::gear::ActivationEvent, vault::gear::Scheduler::ExecutionCtx&, vault::gear::SchedulerInvocation&)+0xc1 +

+

+ +

+

+ vault::gear::Scheduler::postChain(vault::gear::ActivationEvent)+0x2a9 +

+

+ +

+

+ vault::gear::Scheduler::continueMetaJob(lib::time::Time, vault::gear::Job, vault::gear::ManifestationID)+0x164 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::continuation(unsigned long, unsigned long, bool)+0x21a +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}::operator()(unsigned long, unsigned long, bool) const+0x34 +

+

+ +

+

+ std::_Function_handler<void (unsigned long, unsigned long, bool), vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}>::_M_invoke(std::_Any_data const&, unsigned long&&, std::_Any_data const&, bool&&)+0x6e +

+

+ +

+

+ std::function<void (unsigned long, unsigned long, bool)>::operator()(unsigned long, unsigned long, bool) const+0x77 +

+

+ +

+

+ vault::gear::test::RandomChainPlanFunctor<16ul>::invokeJobOperation(lumiera_jobParameter_struct const&)+0x463 +

+

+ +

+

+ vault::gear::Activity::invokeFunktor(lib::time::Time)+0x740 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::Activity::activate<vault::gear::Scheduler::ExecutionCtx>(lib::time::Time, vault::gear::Scheduler::ExecutionCtx&)+0x70 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::activateChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x4e +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::dispatchChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x68 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::SchedulerCommutator::postDispatch<vault::gear::Scheduler::ExecutionCtx>(vault::gear::ActivationEvent, vault::gear::Scheduler::ExecutionCtx&, vault::gear::SchedulerInvocation&)+0xc1 +

+

+ +

+

+ vault::gear::Scheduler::postChain(vault::gear::ActivationEvent)+0x2a9 +

+

+ +

+

+ vault::gear::Scheduler::continueMetaJob(lib::time::Time, vault::gear::Job, vault::gear::ManifestationID)+0x164 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::continuation(unsigned long, unsigned long, bool)+0x21a +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}::operator()(unsigned long, unsigned long, bool) const+0x34 +

+

+ +

+

+ std::_Function_handler<void (unsigned long, unsigned long, bool), vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}>::_M_invoke(std::_Any_data const&, unsigned long&&, std::_Any_data const&, bool&&)+0x6e +

+

+ +

+

+ std::function<void (unsigned long, unsigned long, bool)>::operator()(unsigned long, unsigned long, bool) const+0x77 +

+

+ +

+

+ vault::gear::test::RandomChainPlanFunctor<16ul>::invokeJobOperation(lumiera_jobParameter_struct const&)+0x463 +

+

+ +

+

+ vault::gear::Activity::invokeFunktor(lib::time::Time)+0x740 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::Activity::activate<vault::gear::Scheduler::ExecutionCtx>(lib::time::Time, vault::gear::Scheduler::ExecutionCtx&)+0x70 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::activateChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x4e +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::dispatchChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x68 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::SchedulerCommutator::postDispatch<vault::gear::Scheduler::ExecutionCtx>(vault::gear::ActivationEvent, vault::gear::Scheduler::ExecutionCtx&, vault::gear::SchedulerInvocation&)+0xc1 +

+

+ +

+

+ vault::gear::Scheduler::postChain(vault::gear::ActivationEvent)+0x2a9 +

+

+ +

+

+ vault::gear::Scheduler::continueMetaJob(lib::time::Time, vault::gear::Job, vault::gear::ManifestationID)+0x164 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::continuation(unsigned long, unsigned long, bool)+0x21a +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}::operator()(unsigned long, unsigned long, bool) const+0x34 +

+

+ +

+

+ std::_Function_handler<void (unsigned long, unsigned long, bool), vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}>::_M_invoke(std::_Any_data const&, unsigned long&&, std::_Any_data const&, bool&&)+0x6e +

+

+ +

+

+ std::function<void (unsigned long, unsigned long, bool)>::operator()(unsigned long, unsigned long, bool) const+0x77 +

+

+ +

+

+ vault::gear::test::RandomChainPlanFunctor<16ul>::invokeJobOperation(lumiera_jobParameter_struct const&)+0x463 +

+

+ +

+

+ vault::gear::Activity::invokeFunktor(lib::time::Time)+0x740 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::Activity::activate<vault::gear::Scheduler::ExecutionCtx>(lib::time::Time, vault::gear::Scheduler::ExecutionCtx&)+0x70 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::activateChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x4e +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::dispatchChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x68 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::SchedulerCommutator::postDispatch<vault::gear::Scheduler::ExecutionCtx>(vault::gear::ActivationEvent, vault::gear::Scheduler::ExecutionCtx&, vault::gear::SchedulerInvocation&)+0xc1 +

+

+ +

+

+ vault::gear::Scheduler::postChain(vault::gear::ActivationEvent)+0x2a9 +

+

+ +

+

+ vault::gear::Scheduler::continueMetaJob(lib::time::Time, vault::gear::Job, vault::gear::ManifestationID)+0x164 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::continuation(unsigned long, unsigned long, bool)+0x21a +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}::operator()(unsigned long, unsigned long, bool) const+0x34 +

+

+ +

+

+ std::_Function_handler<void (unsigned long, unsigned long, bool), vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::ScheduleCtx(vault::gear::test::TestChainLoad<16ul>&, vault::gear::Scheduler&)::{lambda(unsigned long, unsigned long, bool)#3}>::_M_invoke(std::_Any_data const&, unsigned long&&, std::_Any_data const&, bool&&)+0x6e +

+

+ +

+

+ std::function<void (unsigned long, unsigned long, bool)>::operator()(unsigned long, unsigned long, bool) const+0x77 +

+

+ +

+

+ vault::gear::test::RandomChainPlanFunctor<16ul>::invokeJobOperation(lumiera_jobParameter_struct const&)+0x463 +

+

+ +

+

+ vault::gear::Activity::invokeFunktor(lib::time::Time)+0x740 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::Activity::activate<vault::gear::Scheduler::ExecutionCtx>(lib::time::Time, vault::gear::Scheduler::ExecutionCtx&)+0x70 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::activateChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x4e +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::ActivityLang::dispatchChain<vault::gear::Scheduler::ExecutionCtx>(vault::gear::Activity*, vault::gear::Scheduler::ExecutionCtx&)+0x68 +

+

+ +

+

+ vault::gear::activity::Proc vault::gear::SchedulerCommutator::postDispatch<vault::gear::Scheduler::ExecutionCtx>(vault::gear::ActivationEvent, vault::gear::Scheduler::ExecutionCtx&, vault::gear::SchedulerInvocation&)+0xc1 +

+

+ +

+

+ vault::gear::Scheduler::getWork()::{lambda()#2}::operator()() const+0x3ce +

+

+ +

+

+ vault::gear::Scheduler::WorkerInstruction vault::gear::Scheduler::WorkerInstruction::performStep<vault::gear::Scheduler::getWork()::{lambda()#2}>(vault::gear::Scheduler::getWork()::{lambda()#2})+0x26 +

+

+ +

+

+ vault::gear::Scheduler::getWork()+0x3d +

+

+ +

+

+ vault::gear::Scheduler::Setup::doWork()+0x1c +

+

+ +

+

+ vault::gear::work::Worker<vault::gear::Scheduler::Setup>::pullWork()+0x26 +

+

+ +

+ +
+ + + + + + + + + + + +

+ und diese re-entrance 4-Mal wiederholt +

+ +
+
+ + + + + + + + + +

+                 |n.(412,lev:72) +

+

+ ... dispose(i=412,lev:72) -> @72000 +

+

+ ‖•△•‖     wof:8 HT:154587 +

+

+ ‖SCH‖ 0F: @154927 ○ start=72000 dead:100000 +

+

+ ..!   FD: @154922  ⤴       (i=380) +

+

+    ·‖ 74: @154936 HT:154587  -> ∘ +

+

+ ‖PST‖ FD: @154954 ◒ start=68000▹▹69000 dead:100000 +

+

+    ·‖ 74: @154958 HT:307445672727859657  -> ▶ 154587 +

+

+ ‖▷▷▷‖ 74: @ 154976 EMPTY +

+

+ !◆!   0F: @154940  ⚙  calc(i=412, lev:72) +

+

+ +

+

+ ‖PST‖ FD: @154987 ◒ start=68000▹▹69000 dead:100000 +

+

+ ..!   BF: @154998  ⤴       (i=381) +

+

+ ‖PST‖ FD: @155018 ◒ start=68000▹▹69000 dead:100000 +

+

+ ‖PST‖ BF: @155029 ◒ start=68000▹▹69000 dead:100000 +

+

+ ‖PST‖ FD: @155033 ◒ start=68000▹▹69000 dead:100000 +

+

+ ‖PST‖ BF: @155047 ◒ start=68000▹▹69000 dead:100000 +

+

+    ·‖ FD: @155056 HT:69000  -> ∘ +

+

+ ‖PST‖ BF: @155063 ◒ start=68000▹▹69000 dead:100000 +

+

+    ·‖ FD: @155071 HT:69000  -> ∘ +

+

+ ‖PST‖ BF: @155079 ◒ start=68000▹▹69000 dead:100000 +

+

+    ·‖ FD: @155087 HT:69000  -> ∘ +

+

+    ·‖ FD: @155108 HT:69000  -> ∘ +

+

+    ·‖ BF: @155114 HT:69000  -> ∘ +

+

+    ·‖ 74: @155076 HT:69000  -> ▶ 69000 +

+

+    ·‖ FD: @155131 HT:69000  -> ∘ +

+

+    ·‖ BF: @155134 HT:69000  -> ∘ +

+

+    ·‖ BF: @155178 HT:69000  -> ▶ 69000 +

+

+ +

+

+ !◆!   74: @155162  ⚙  calc(i=383, lev:69) +

+

+ +

+

+ !◆!   BF: @155194  ⚙  calc(i=384, lev:69) +

+

+    ·‖ FD: @155354 HT:69000  -> ▶ 69000 +

+

+ +

+

+ !◆!   FD: @155371  ⚙  calc(i=387, lev:69) +

+

+ ..!   0F: @155533  ⤴       (i=412) +

+

+ 0000000615: PRECONDITION: extent-family.hpp:321: thread_4: access: (isValidPos (idx)) +

+ +
+ + + + + + + + + + + + + + + + +

+ Das ist ein Problem mit der Steuerung des Grooming-Tokens +

+ +
+ + +
+
+
@@ -107179,6 +107832,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+
@@ -107572,6 +108226,48 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + +

+ des Weiteren: wir schützen den Planer durch externe Maßnahmen +

+ +
+
+ + + + + + + + + + + + + +

+ ⟹ Forderung: ein echter Planer muß das erkennen und abbrechen +

+ +
+
+
+
@@ -113535,9 +114231,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

the corresponding unsigned type has the same rank