diff --git a/src/vault/gear/block-flow.hpp b/src/vault/gear/block-flow.hpp index 6ee76e53b..5aee13b9b 100644 --- a/src/vault/gear/block-flow.hpp +++ b/src/vault/gear/block-flow.hpp @@ -112,7 +112,7 @@ namespace gear { namespace blockFlow {///< Parametrisation of Scheduler memory management scheme /** limit for maximum number of blocks allowed in Epoch expansion - * @note SchedulerCommutator::sanityCheck() defines a similar limit, + * @note Scheduler::sanityCheck() defines a similar limit, * but there the same reasoning is translated into a hard limit for * deadlines to be < 20sec, while this limit here will only be triggered * if the current block duration has been lowered to the OVERLOAD_LIMIT diff --git a/src/vault/gear/scheduler-commutator.hpp b/src/vault/gear/scheduler-commutator.hpp index d76718de3..2c95bf8e4 100644 --- a/src/vault/gear/scheduler-commutator.hpp +++ b/src/vault/gear/scheduler-commutator.hpp @@ -57,10 +57,10 @@ ** - silently dispose of any outdated entries ** - use the [Activity Language environment](\ref ActivityLang) to _perform_ ** the retrieved chain within some worker thread; this is called _dispatch_ - ** The central cross road of this implementation is the #postDispatch function. + ** The main entrance point into this implementation is the #postChain function. 
** @see SchedulerCommutator::acquireGroomingToken() ** @see SchedulerCommutator::findWork() - ** @see SchedulerCommutator::postDispatch() + ** @see SchedulerCommutator::postChain() ** @see SchedulerCommutator_test ** @see scheduler.hpp usage ** @@ -74,6 +74,7 @@ #include "vault/common.hpp" #include "vault/gear/activity.hpp" #include "vault/gear/scheduler-invocation.hpp" +#include "vault/gear/load-controller.hpp" #include "vault/gear/activity-lang.hpp" #include "lib/time/timevalue.hpp" #include "lib/format-string.hpp" @@ -98,7 +99,6 @@ namespace gear { namespace { // Configuration / Scheduling limit - Offset FUTURE_PLANNING_LIMIT{FSecs{20}}; ///< limit timespan of deadline into the future (~360 MiB max) microseconds GROOMING_WAIT_CYCLE{70us}; ///< wait-sleep in case a thread must forcibly acquire the Grooming-Token /** convenient short-notation, also used by SchedulerService */ @@ -174,37 +174,6 @@ namespace gear { - void - sanityCheck (ActivationEvent const& event, Time now) - { - if (event.startTime() == Time::ANYTIME) - throw error::Fatal ("Attempt to schedule an Activity without valid start time"); - if (event.deathTime() == Time::NEVER) - throw error::Fatal ("Attempt to schedule an Activity without valid deadline"); - Offset toDeadline{now, event.deathTime()}; - if (toDeadline > FUTURE_PLANNING_LIMIT) - throw error::Fatal (util::_Fmt{"Attempt to schedule Activity %s " - "with a deadline by %s into the future"} - % *event.activity - % toDeadline); - } - - /** - * Decide if Activities shall be performed now and in this thread. - * @param when the indicated time of start of the first Activity - * @param now current scheduler time - * @return allow dispatch if time is due and GroomingToken was acquired - * @warning accesses current ThreadID and changes GroomingToken state. 
- */ - bool - decideDispatchNow (Time when, Time now) - { - return (when <= now) - and (holdsGroomingToken (thisThread()) - or acquireGoomingToken()); - } - - /** tend to the input queue if possible */ void maybeFeed (SchedulerInvocation& layer1) @@ -236,48 +205,22 @@ namespace gear { } - /***************************************************//** + /***********************************************************//** * This is the primary entrance point to the Scheduler. - * Engage into activity as controlled by given start time. - * Attempts to acquire the GroomingToken if Activity is due - * immediately, otherwise just enqueue it for prioritisation. - * @param chain the Render Activities to put into action - * @param when the indicated time of start for these - * @param executionCtx abstracted execution environment for - * Render Activities (typically backed by the - * Scheduler as a whole, including notifications + * Place the given event into the schedule, with prioritisation + * according to its start time. + * @param event the chain of Render Activities to be scheduled, + * including start time and deadline * @return Status value to indicate how to proceed processing * - activity::PASS continue processing in regular operation * - activity::WAIT nothing to do now, check back later * - activity::HALT serious problem, cease processing - * - activity::KICK to contend (spin) on GroomingToken - * @note Attempts to acquire the GroomingToken for immediate - * processing, but not for just enqueuing planned tasks. - * Never drops the GroomingToken explicitly (unless when - * switching from grooming-mode to work-mode in the course - * of processing the given Activity chain regularly). + * @note Never attempts to acquire the GroomingToken itself, + * but if current thread holds the token, the task can + * be placed directly into the scheduler queue. 
*/ - template activity::Proc - postDispatch (ActivationEvent event - ,EXE& executionCtx - ,SchedulerInvocation& layer1 - ) - { - if (!event) return activity::KICK; - - Time now = executionCtx.getSchedTime(); - sanityCheck (event, now); - if (decideDispatchNow (event.startTime(), now)) - return ActivityLang::dispatchChain (event, executionCtx); - else - instructFollowUp (event,layer1); - return activity::PASS; - } - - activity::Proc - instructFollowUp (ActivationEvent event - ,SchedulerInvocation& layer1 ) + postChain (ActivationEvent event, SchedulerInvocation& layer1) { if (holdsGroomingToken (thisThread())) layer1.feedPrioritisation (move (event)); @@ -285,11 +228,183 @@ namespace gear { layer1.instruct (move (event)); return activity::PASS; } + + + /** + * Implementation of the worker-Functor: + * - redirect work capacity in accordance to current scheduler and load + * - dequeue and dispatch the Activity chains from the queue to perform the render jobs. + */ + template + activity::Proc + dispatchCapacity (SchedulerInvocation&, LoadController&, DISPATCH, CLOCK); + + + + private: + activity::Proc + scatteredDelay (Time now, Time head + ,LoadController& loadController + ,LoadController::Capacity capacity); + + void + ensureDroppedGroomingToken() + { + if (holdsGroomingToken (thisThread())) + dropGroomingToken(); + } + + /** + * monad-like step sequence: perform sequence of steps, + * as long as the result remains activity::PASS + */ + struct WorkerInstruction + { + activity::Proc lastResult = activity::PASS; + + /** exposes the latest verdict as overall result + * @note returning activity::SKIP from the dispatch + * signals early exit, which is acquitted here. */ + operator activity::Proc() + { + return activity::SKIP == lastResult? 
activity::PASS + : lastResult; + } + + template + WorkerInstruction + performStep (FUN step) + { + if (activity::PASS == lastResult) + lastResult = step(); + return move(*this); + } + }; }; + + + /** + * @remarks this function is invoked from within the worker thread(s) and will + * - decide if and how the capacity of this worker shall be used right now + * - possibly go into a short targeted wait state to redirect capacity at a better time point + * - and most notably commence with dispatch of render Activities, to calculate media data. + * @return an instruction for the work::Worker how to proceed next: + * - activity::PASS causes the worker to poll again immediately + * - activity::KICK to contend (spin) on GroomingToken + * - activity::WAIT induces a sleep state + * - activity::HALT terminates the worker + * @note Under some circumstances, this function depends on acquiring the »grooming-token«, + * which is an atomic lock to ensure only one thread at a time can alter scheduler internals. + * In the regular processing sequence, this token is dropped after dequeuing and processing + * some Activities, yet prior to invoking the actual »Render Job«. Explicitly dropping the + * token at the end of this function is a safeguard against deadlocking the system. + * If some other thread happens to hold the token, SchedulerCommutator::findWork + * will bail out, leading to active spinning wait for the current thread. 
+ */ + template + inline activity::Proc + SchedulerCommutator::dispatchCapacity (SchedulerInvocation& layer1 + ,LoadController& loadController + ,DISPATCH executeActivity + ,CLOCK getSchedTime + ) + { + try { + auto res = WorkerInstruction{} + .performStep([&]{ + maybeFeed(layer1); + Time now = getSchedTime(); + Time head = layer1.headTime(); + return scatteredDelay(now, head, loadController, + loadController.markIncomingCapacity (head,now)); + }) + .performStep([&]{ + Time now = getSchedTime(); + auto toDispatch = findWork (layer1,now); + if (not toDispatch) return activity::KICK; // contention + return executeActivity (toDispatch); + }) + .performStep([&]{ + maybeFeed(layer1); + Time now = getSchedTime(); + Time head = layer1.headTime(); + return scatteredDelay(now, head, loadController, + loadController.markOutgoingCapacity (head,now)); + }); + + // ensure lock clean-up + if (res != activity::PASS) + ensureDroppedGroomingToken(); + return res; + } + catch(...) + { + ensureDroppedGroomingToken(); + throw; + } + } + + + /** + * A worker [asking for work](\ref #doWork) constitutes free capacity, + * which can be redirected into a focused zone of the scheduler time axis + * where it is most likely to be useful, unless there is active work to + * be carried out right away. + * @param capacity classification of the capacity to employ this thread + * @return how to proceed further with this worker + * - activity::PASS indicates to proceed or call back immediately + * - activity::SKIP causes to exit this round, yet call back again + * - activity::KICK signals contention (not emitted here) + * - activity::WAIT exits and places the worker into sleep mode + * @note as part of the regular work processing, this function may + * place the current thread into a short-term targeted sleep. 
+ */ + inline activity::Proc + SchedulerCommutator::scatteredDelay (Time now, Time head + ,LoadController& loadController + ,LoadController::Capacity capacity) + { + auto doTargetedSleep = [&] + { // ensure not to block the Scheduler after management work + ensureDroppedGroomingToken(); + // relocate this thread(capacity) to a time where its more useful + Offset targetedDelay = loadController.scatteredDelayTime (now, capacity); + std::this_thread::sleep_for (std::chrono::microseconds (_raw(targetedDelay))); + }; + auto doTendNextHead = [&] + { + if (not loadController.tendedNext(head) + and (holdsGroomingToken(thisThread()) + or acquireGoomingToken())) + loadController.tendNext(head); + }; + + switch (capacity) { + case LoadController::DISPATCH: + return activity::PASS; + case LoadController::SPINTIME: + std::this_thread::yield(); + return activity::SKIP; // prompts to abort chain but call again immediately + case LoadController::IDLEWAIT: + return activity::WAIT; // prompts to switch this thread into sleep mode + case LoadController::TENDNEXT: + doTendNextHead(); + doTargetedSleep(); // let this thread wait until next head time is due + return activity::SKIP; + default: + doTargetedSleep(); + return activity::SKIP; // prompts to abort this processing-chain for good + } + } + + + + + class SchedulerCommutator::ScopedGroomingGuard : util::MoveOnly { diff --git a/src/vault/gear/scheduler-invocation.hpp b/src/vault/gear/scheduler-invocation.hpp index dce1de6c8..200a0293d 100644 --- a/src/vault/gear/scheduler-invocation.hpp +++ b/src/vault/gear/scheduler-invocation.hpp @@ -50,7 +50,7 @@ ** is triggered in the SchedulerService, should such an entry ** [miss it's deadline](\ref SchedulerInvocation::isOutOfTime()) ** @see SchedulerCommutator::findWork() - ** @see SchedulerCommutator::postDispatch() + ** @see SchedulerCommutator::postChain() ** @see SchedulerInvocation_test ** @see SchedulerUsage_test integrated usage ** diff --git a/src/vault/gear/scheduler.hpp 
b/src/vault/gear/scheduler.hpp index 2592c074e..87831a401 100644 --- a/src/vault/gear/scheduler.hpp +++ b/src/vault/gear/scheduler.hpp @@ -57,7 +57,7 @@ ** in single-threaded mode. This is achieved by an atomic lock, maintained in ** [Layer-2 of the Scheduler implementation](\ref SchedulerCommutator::groomingToken_). ** Any thread looking for more work will pull a pre-configured functor, which - ** is implemented by the [work-function](\ref Scheduler::getWork()). The thread + ** is implemented by the [work-function](\ref Scheduler::doWork()). The thread ** will attempt to acquire the lock, designated as »grooming-token« -- but only ** if this is necessary to perform internal changes. Since workers are calling ** in randomly, in many cases there might be no task to perform at the moment, @@ -83,7 +83,7 @@ ** Notably _internal processing_ (e.g. planning of new jobs) will _not_ drop ** the token, since it must be able to change the schedule. Such internal ** tasks can be processed in row and will be confined to a single thread - ** (there is a special treatment at the end of #getWork() to achieve that). + ** (there is a special treatment at the end of #doWork() to achieve that). ** As a safety net, the grooming-token will automatically be dropped after ** catching an exception, or when a thread is sent to sleep. ** @@ -130,7 +130,6 @@ namespace gear { using lib::time::Time; using lib::time::FSecs; using lib::time::Offset; - using lib::time::Duration; namespace test { // declared friend for test access class SchedulerService_test; @@ -142,6 +141,7 @@ namespace gear { const size_t DISMISS_CYCLES = 100; ///< number of wait cycles before an idle worker terminates completely Offset DUTY_CYCLE_PERIOD{FSecs(1,20)}; ///< period of the regular scheduler »tick« for state maintenance. 
Offset DUTY_CYCLE_TOLERANCE{FSecs(1,10)}; ///< maximum slip tolerated on duty-cycle start before triggering Scheduler-emergency + Offset FUTURE_PLANNING_LIMIT{FSecs{20}}; ///< limit timespan of deadline into the future (~360 MiB max) } @@ -224,7 +224,7 @@ namespace gear { struct Setup : work::Config { Scheduler& scheduler; - activity::Proc doWork() { return scheduler.getWork(); } + activity::Proc doWork() { return scheduler.doWork(); } void finalHook (bool _) { scheduler.handleWorkerTermination(_);} }; @@ -291,7 +291,9 @@ namespace gear { /** - * + * @return a synthetic indicator fused from several observations + * - 1.0 defines full work capacity yet no significant congestion + * - values > 2.0 indicate overload */ double getLoadIndicator() @@ -341,7 +343,6 @@ namespace gear { { bool isCompulsory = true; Time deadline = nextStart + DUTY_CYCLE_TOLERANCE; - auto guard = layer2_.requireGroomingTokenHere(); // protect allocation // place the meta-Job into the timeline... postChain ({activityLang_.buildMetaJob(planningJob, nextStart, deadline) .post() @@ -371,11 +372,12 @@ namespace gear { * The worker-Functor: called by the active Workers from the * \ref WorkForce to pull / perform the actual render Activities. */ - activity::Proc getWork(); + activity::Proc doWork(); private: void postChain (ActivationEvent); + void sanityCheck (ActivationEvent const&); void handleDutyCycle (Time now, bool =false); void handleWorkerTermination (bool isFailure); void maybeScaleWorkForce (Time startHorizon); @@ -383,38 +385,6 @@ namespace gear { void triggerEmergency(); - /** send this thread into a targeted short-time wait. 
*/ - activity::Proc scatteredDelay (Time now, LoadController::Capacity); - - - /** - * monad-like step sequence: perform sequence of steps, - * as long as the result remains activity::PASS - */ - struct WorkerInstruction - { - activity::Proc lastResult = activity::PASS; - - /** exposes the latest verdict as overall result - * @note returning activity::SKIP from the dispatch - * signals early exit, which is acquitted here. */ - operator activity::Proc() - { - return activity::SKIP == lastResult? activity::PASS - : lastResult; - } - - template - WorkerInstruction - performStep (FUN step) - { - if (activity::PASS == lastResult) - lastResult = step(); - return move(*this); - } - }; - - /** @internal connect state signals for use by the LoadController */ LoadController::Wiring connectMonitoring() @@ -426,13 +396,6 @@ namespace gear { return setup; } - void - ensureDroppedGroomingToken() - { - if (layer2_.holdsGroomingToken (thisThread())) - layer2_.dropGroomingToken(); - } - /** access high-resolution-clock, rounded to µ-Ticks */ Time getSchedTime() @@ -454,6 +417,8 @@ namespace gear { + + /** work-timing event for performance observation */ class WorkTiming : public EngineEvent @@ -500,9 +465,6 @@ namespace gear { * of activations and notifications. The concrete implementation * needs some further contextual information, which is passed * down here as a nested sub-context. - * @note different than Scheduler::postChain(), this operation here - * always enqueues the \a chain, never dispatches directly. - * This special twist helps to improve parallelisation. 
*/ activity::Proc post (Time when, Time dead, Activity* chain, ExecutionCtx& ctx) @@ -510,7 +472,8 @@ namespace gear { REQUIRE (chain); ActivationEvent chainEvent = ctx.rootEvent; chainEvent.refineTo (chain, when, dead); - return scheduler_.layer2_.instructFollowUp (chainEvent, scheduler_.layer1_); + scheduler_.sanityCheck (chainEvent); + return scheduler_.layer2_.postChain (chainEvent, scheduler_.layer1_); } /** @@ -550,6 +513,22 @@ namespace gear { }; + inline activity::Proc + Scheduler::doWork() + { + return layer2_.dispatchCapacity (layer1_ + ,loadControl_ + ,[this](ActivationEvent toDispatch) + { + ExecutionCtx ctx{*this, toDispatch}; + return ActivityLang::dispatchChain (toDispatch, ctx); + } + ,[this]{ return getSchedTime(); } + ); + } + + + /** @note after invoking this terminal operation, @@ -564,7 +543,7 @@ namespace gear { inline ScheduleSpec ScheduleSpec::post() { // protect allocation - auto guard = theScheduler_->layer2_.requireGroomingTokenHere(); +// auto guard = theScheduler_->layer2_.requireGroomingTokenHere();//////////////////////////////////////TODO can we avoid that? 
term_ = move( theScheduler_->activityLang_ .buildCalculationJob (job_, start_,death_)); @@ -578,22 +557,39 @@ namespace gear { inline ScheduleSpec ScheduleSpec::linkToSuccessor (ScheduleSpec& succSpec) - { // protect allocation - auto guard = theScheduler_->layer2_.requireGroomingTokenHere(); + { term_->appendNotificationTo (*succSpec.term_); return move(*this); } inline ScheduleSpec ScheduleSpec::linkToPredecessor (ScheduleSpec& predSpec) - { // protect allocation - auto guard = theScheduler_->layer2_.requireGroomingTokenHere(); + { predSpec.term_->appendNotificationTo (*term_); return move(*this); } + inline void + Scheduler::sanityCheck (ActivationEvent const& event) + { + if (not event) + throw error::Logic ("Empty event passed into Scheduler entrance"); + if (event.startTime() == Time::ANYTIME) + throw error::Fatal ("Attempt to schedule an Activity without valid start time"); + if (event.deathTime() == Time::NEVER) + throw error::Fatal ("Attempt to schedule an Activity without valid deadline"); + Time now{getSchedTime()}; + Offset toDeadline{now, event.deathTime()}; + if (toDeadline > FUTURE_PLANNING_LIMIT) + throw error::Fatal (util::_Fmt{"Attempt to schedule Activity %s " + "with a deadline by %s into the future"} + % *event.activity + % toDeadline); + } + + /** * Enqueue for time-bound execution, possibly dispatch immediately. * This is the »main entrance« to get some Activity scheduled. 
@@ -603,118 +599,9 @@ namespace gear { inline void Scheduler::postChain (ActivationEvent actEvent) { + sanityCheck (actEvent); maybeScaleWorkForce (actEvent.startTime()); - ExecutionCtx ctx{*this, actEvent}; - layer2_.postDispatch (actEvent, ctx, layer1_); - } - - - - /** - * @remarks this function is invoked from within the worker thread(s) and will - * - decide if and how the capacity of this worker shall be used right now - * - possibly go into a short targeted wait state to redirect capacity at a better time point - * - and most notably commence with dispatch of render Activities, to calculate media data. - * @return an instruction for the work::Worker how to proceed next: - * - activity::PROC causes the worker to poll again immediately - * - activity::SLEEP induces a sleep state - * - activity::HALT terminates the worker - * @note Under some circumstances, this function depends on acquiring the »grooming-token«, - * which is an atomic lock to ensure only one thread at a time can alter scheduler internals. - * In the regular processing sequence, this token is dropped after dequeuing and processing - * some Activities, yet prior to invoking the actual »Render Job«. Explicitly dropping the - * token at the end of this function is a safeguard against deadlocking the system. - * If some other thread happens to hold the token, SchedulerCommutator::findWork - * will bail out, leading to active spinning wait for the current thread. 
- */ - inline activity::Proc - Scheduler::getWork() - { - try { - auto res = WorkerInstruction{} - .performStep([&]{ - layer2_.maybeFeed(layer1_); - Time now = getSchedTime(); - Time head = layer1_.headTime(); - return scatteredDelay(now, - loadControl_.markIncomingCapacity (head,now)); - }) - .performStep([&]{ - Time now = getSchedTime(); - auto toDispatch = layer2_.findWork (layer1_,now); - ExecutionCtx ctx{*this, toDispatch}; - return layer2_.postDispatch (toDispatch, ctx, layer1_); - }) - .performStep([&]{ - layer2_.maybeFeed(layer1_); - Time now = getSchedTime(); - Time head = layer1_.headTime(); - return scatteredDelay(now, - loadControl_.markOutgoingCapacity (head,now)); - }); - - // ensure lock clean-up - if (res != activity::PASS) - ensureDroppedGroomingToken(); - return res; - } - catch(...) - { - ensureDroppedGroomingToken(); - throw; - } - } - - - /** - * A worker [asking for work](\ref #getWork) constitutes free capacity, - * which can be redirected into a focused zone of the scheduler time axis - * where it is most likely to be useful, unless there is active work to - * be carried out right away. - * @param capacity classification of the capacity to employ this thread - * @return how to proceed further with this worker - * - activity::PASS indicates to proceed or call back immediately - * - activity::SKIP causes to exit this round, yet call back again - * - activity::KICK signals contention (not emitted here) - * - activity::WAIT exits and places the worker into sleep mode - * @note as part of the regular work processing, this function may - * place the current thread into a short-term targeted sleep. 
- */ - inline activity::Proc - Scheduler::scatteredDelay (Time now, LoadController::Capacity capacity) - { - auto doTargetedSleep = [&] - { // ensure not to block the Scheduler after management work - ensureDroppedGroomingToken(); - // relocate this thread(capacity) to a time where its more useful - Offset targetedDelay = loadControl_.scatteredDelayTime (now, capacity); - std::this_thread::sleep_for (std::chrono::microseconds (_raw(targetedDelay))); - }; - auto doTendNextHead = [&] - { - Time head = layer1_.headTime(); - if (not loadControl_.tendedNext(head) - and (layer2_.holdsGroomingToken(thisThread()) - or layer2_.acquireGoomingToken())) - loadControl_.tendNext(head); - }; - - switch (capacity) { - case LoadController::DISPATCH: - return activity::PASS; - case LoadController::SPINTIME: - std::this_thread::yield(); - return activity::SKIP; // prompts to abort chain but call again immediately - case LoadController::IDLEWAIT: - return activity::WAIT; // prompts to switch this thread into sleep mode - case LoadController::TENDNEXT: - doTendNextHead(); - doTargetedSleep(); // let this thread wait until next head time is due - return activity::SKIP; - default: - doTargetedSleep(); - return activity::SKIP; // prompts to abort this processing-chain for good - } + layer2_.postChain (actEvent, layer1_); } @@ -764,8 +651,7 @@ namespace gear { Time deadline = nextTick + DUTY_CYCLE_TOLERANCE; Activity& tickActivity = activityLang_.createTick (deadline); ActivationEvent tickEvent{tickActivity, nextTick, deadline, ManifestationID(), true}; - ExecutionCtx ctx{*this, tickEvent}; - layer2_.postDispatch (tickEvent, ctx, layer1_); + layer2_.postChain (tickEvent, layer1_); } // *deliberately* use low-level entrance } // to avoid ignite() cycles and derailed load-regulation diff --git a/tests/vault/gear/scheduler-commutator-test.cpp b/tests/vault/gear/scheduler-commutator-test.cpp index 3c28ea2ab..dfa0fe968 100644 --- a/tests/vault/gear/scheduler-commutator-test.cpp +++ 
b/tests/vault/gear/scheduler-commutator-test.cpp @@ -97,10 +97,9 @@ namespace test { verify_GroomingToken(); verify_GroomingGuard(); torture_GroomingToken(); - verify_DispatchDecision(); verify_findWork(); verify_Significance(); - verify_postDispatch(); + verify_postChain(); integratedWorkCycle(); } @@ -124,7 +123,7 @@ namespace test { // prepare scenario: some activity is enqueued queue.instruct ({activity, when, dead}); - sched.postDispatch (sched.findWork(queue,now), detector.executionCtx,queue); +//// sched.postChain (sched.findWork(queue,now), detector.executionCtx,queue);///////////////TODO CHECK (detector.verifyInvocation("CTX-tick").arg(now)); CHECK (queue.empty()); @@ -279,51 +278,51 @@ namespace test { - /** @test verify the logic to decide where and when to perform - * the dispatch of a Scheduler Activity chain. - */ - void - verify_DispatchDecision() - { - SchedulerCommutator sched; - ___ensureGroomingTokenReleased(sched); - - Time t1{10,0}; - Time t2{20,0}; - Time t3{30,0}; - Time now{t2}; - - auto myself = std::this_thread::get_id(); - CHECK (sched.decideDispatchNow (t1, now)); // time is before now => good to execute - CHECK (sched.holdsGroomingToken (myself)); // Side-Effect: acquired the Grooming-Token - - CHECK (sched.decideDispatchNow (t1, now)); // also works if Grooming-Token is already acquired - CHECK (sched.holdsGroomingToken (myself)); - - CHECK (sched.decideDispatchNow (t2, now)); // Boundary case time == now => good to execute - CHECK (sched.holdsGroomingToken (myself)); - - CHECK (not sched.decideDispatchNow (t3, now)); // Task in the future shall not be dispatched now - CHECK (sched.holdsGroomingToken (myself)); // ...and this case has no impact on the Grooming-Token - sched.dropGroomingToken(); - - CHECK (not sched.decideDispatchNow (t3, now)); - CHECK (not sched.holdsGroomingToken (myself)); - - blockGroomingToken(sched); - CHECK (not sched.acquireGoomingToken()); - - CHECK (not sched.decideDispatchNow (t1, now)); // unable to acquire 
=> can not decide positively - CHECK (not sched.holdsGroomingToken (myself)); - - CHECK (not sched.decideDispatchNow (t2, now)); - CHECK (not sched.holdsGroomingToken (myself)); - - unblockGroomingToken(); - - CHECK (sched.decideDispatchNow (t2, now)); - CHECK (sched.holdsGroomingToken (myself)); - } +// /** @test verify the logic to decide where and when to perform +// * the dispatch of a Scheduler Activity chain. +// */ +// void +// verify_DispatchDecision() +// { +// SchedulerCommutator sched; +// ___ensureGroomingTokenReleased(sched); +// +// Time t1{10,0}; +// Time t2{20,0}; +// Time t3{30,0}; +// Time now{t2}; +// +// auto myself = std::this_thread::get_id(); +// CHECK (sched.decideDispatchNow (t1, now)); // time is before now => good to execute +// CHECK (sched.holdsGroomingToken (myself)); // Side-Effect: acquired the Grooming-Token +// +// CHECK (sched.decideDispatchNow (t1, now)); // also works if Grooming-Token is already acquired +// CHECK (sched.holdsGroomingToken (myself)); +// +// CHECK (sched.decideDispatchNow (t2, now)); // Boundary case time == now => good to execute +// CHECK (sched.holdsGroomingToken (myself)); +// +// CHECK (not sched.decideDispatchNow (t3, now)); // Task in the future shall not be dispatched now +// CHECK (sched.holdsGroomingToken (myself)); // ...and this case has no impact on the Grooming-Token +// sched.dropGroomingToken(); +// +// CHECK (not sched.decideDispatchNow (t3, now)); +// CHECK (not sched.holdsGroomingToken (myself)); +// +// blockGroomingToken(sched); +// CHECK (not sched.acquireGoomingToken()); +// +// CHECK (not sched.decideDispatchNow (t1, now)); // unable to acquire => can not decide positively +// CHECK (not sched.holdsGroomingToken (myself)); +// +// CHECK (not sched.decideDispatchNow (t2, now)); +// CHECK (not sched.holdsGroomingToken (myself)); +// +// unblockGroomingToken(); +// +// CHECK (sched.decideDispatchNow (t2, now)); +// CHECK (sched.holdsGroomingToken (myself)); +// } @@ -468,7 +467,7 @@ 
namespace test { /** @test verify entrance point for performing an Activity chain. */ void - verify_postDispatch() + verify_postChain() { // rigged execution environment to detect activations-------------- ActivityDetector detector; @@ -488,18 +487,18 @@ namespace test { CHECK (not sched.holdsGroomingToken (myself)); // no effect when empty / no Activity given (usually this can happen due to lock contention) - CHECK (activity::KICK == sched.postDispatch (ActivationEvent(), detector.executionCtx, queue)); + CHECK (activity::KICK == sched.postChain (ActivationEvent(), queue)); CHECK (not sched.holdsGroomingToken (myself)); - // Activity immediately dispatched when on time and GroomingToken can be acquired - CHECK (activity::PASS == sched.postDispatch (makeEvent(past), detector.executionCtx, queue)); + // Activity immediately dispatched when on time and GroomingToken can be acquired ///////////////////////////TODO + CHECK (activity::PASS == sched.postChain (makeEvent(past), queue)); CHECK (detector.verifyInvocation("testActivity").timeArg(now)); // was invoked immediately CHECK ( sched.holdsGroomingToken (myself)); CHECK ( queue.empty()); detector.incrementSeq(); // Seq-point-1 in the detector log // future Activity is enqueued by short-circuit directly into the PriorityQueue if possible - CHECK (activity::PASS == sched.postDispatch (makeEvent(future), detector.executionCtx, queue)); + CHECK (activity::PASS == sched.postChain (makeEvent(future), queue)); CHECK ( sched.holdsGroomingToken (myself)); CHECK (not queue.empty()); CHECK (isSameObject (activity, *queue.peekHead())); // appears at Head, implying it's in Priority-Queue @@ -510,14 +509,14 @@ namespace test { CHECK (queue.empty()); // ...but GroomingToken is not acquired explicitly; Activity is just placed into the Instruct-Queue - CHECK (activity::PASS == sched.postDispatch (makeEvent(future), detector.executionCtx, queue)); + CHECK (activity::PASS == sched.postChain (makeEvent(future), queue)); CHECK (not 
sched.holdsGroomingToken (myself)); CHECK (not queue.peekHead()); // not appearing at Head this time, CHECK (not queue.empty()); // rather waiting in the Instruct-Queue blockGroomingToken(sched); - CHECK (activity::PASS == sched.postDispatch (makeEvent(now), detector.executionCtx, queue)); + CHECK (activity::PASS == sched.postChain (makeEvent(now), queue)); CHECK (not sched.holdsGroomingToken (myself)); CHECK (not queue.peekHead()); // was enqueued, not executed @@ -586,7 +585,7 @@ namespace test { TimeVar now{Time::ZERO}; // rig the ExecutionCtx to allow manipulating "current scheduler time" - detector.executionCtx.getSchedTime = [&]{ return Time{now}; }; + detector.executionCtx.getSchedTime = [&]{ return Time{now}; };///////////////////////////TODO RLY? // rig the λ-work to verify GroomingToken and to drop it then detector.executionCtx.work.implementedAs( [&](Time, size_t) @@ -598,7 +597,7 @@ namespace test { // ·=================================================================== actual test sequence // Add the Activity-Term to be scheduled for planned start-Time - sched.postDispatch (ActivationEvent{anchor, start}, detector.executionCtx, queue); + sched.postChain (ActivationEvent{anchor, start}, queue); CHECK (detector.ensureNoInvocation("testJob")); CHECK (not sched.holdsGroomingToken (myself)); CHECK (not queue.empty()); @@ -612,7 +611,7 @@ namespace test { CHECK (sched.holdsGroomingToken (myself)); // acquired the GroomingToken CHECK (isSameObject(*act, anchor)); // "found" the rigged Activity as next piece of work - sched.postDispatch (act, detector.executionCtx, queue); + sched.postChain (act, queue); ////////////////////////TODO must dispatch here CHECK (queue.empty()); CHECK (not sched.holdsGroomingToken (myself)); // the λ-work was invoked and dropped the GroomingToken diff --git a/tests/vault/gear/scheduler-service-test.cpp b/tests/vault/gear/scheduler-service-test.cpp index bb1bec9ca..e89cfd132 100644 --- a/tests/vault/gear/scheduler-service-test.cpp 
+++ b/tests/vault/gear/scheduler-service-test.cpp @@ -129,8 +129,7 @@ namespace test { postNewTask (Scheduler& scheduler, Activity& chain, Time start) { ActivationEvent actEvent{chain, start, start + Time{50,0}}; // add dummy deadline +50ms - Scheduler::ExecutionCtx ctx{scheduler, actEvent}; - scheduler.layer2_.postDispatch (actEvent, ctx, scheduler.layer1_); + scheduler.layer2_.postChain (actEvent, scheduler.layer1_); } @@ -299,7 +298,7 @@ namespace test { - /** @test verify visible behaviour of the [work-pulling function](\ref Scheduler::getWork) + /** @test verify visible behaviour of the [work-pulling function](\ref Scheduler::doWork) * - use a rigged Activity probe to capture the schedule time on invocation * - additionally perform a timing measurement for invoking the work-function * - invoking the Activity probe itself costs 50...150µs, Scheduler internals < 50µs @@ -308,8 +307,8 @@ namespace test { * + an Activity already due will be dispatched immediately by post() * + an Activity due at the point when invoking the work-function is dispatched * + while queue is empty, the work-function returns immediately, indicating sleep - * + invoking the work-function when there is still some time span up to the next - * planned Activity will enter a targeted sleep, returning shortly after the + * + invoking the work-function, when there is still some time span up to the next + * planned Activity, will cause a targeted sleep, returning shortly after the * next schedule. Entering then again will cause dispatch of that activity. * + if the work-function dispatches an Activity while the next entry is planned * for some time ahead, the work-function will likewise go into a targeted @@ -349,7 +348,7 @@ namespace test { }; auto pullWork = [&] { - delay_us = lib::test::benchmarkTime([&]{ res = scheduler.getWork(); }); + delay_us = lib::test::benchmarkTime([&]{ res = scheduler.doWork(); }); slip_us = _raw(detector.invokeTime(probe)) - _raw(start); cout << "res:"< + + + + + +

+ ...die Eingangs-Schnittstelle zum Scheduler ist nebenbei beim Bauen entstanden — und erscheint doch als ein rechtes Stückwerk, zumal auch erhebliche Probleme unter Druck auftreten können; hab daher beschlossen, dieses Thema nochmal insgesamt zu durchdenken +

+ +
+ +
@@ -80359,6 +80371,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -80900,7 +80913,16 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + +

+ ...oh wie wahr (seinerzeit nur aus allgemeiner Argumentation abgeleitet ... inzwischen geprüft und bestätigt gefunden) +

+ +
+
@@ -80922,6 +80944,36 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + +

+ ...in diese Falle bin ich nun schon x-mal gegangen; das ist alles so schön hinter einem Builder verpackt, so daß man nicht sieht, wo der Effekt passiert; festzuhalten bleibt: bereits das Konstruieren der Activity-Records ist gefährlich, und das kann durchaus passieren, bevor wir überhaupt in den Scheduler selber eingestiegen sind +

+ +
+ + +
+ + + + +

+ die ActivityLang steht gefährlich außerhalb des Scheduler-Kernsystems +

+ +
+ + + +

+ ...hängt aber hinten unten am Allokator, und ist deshalb darauf angewiesen, daß allozierende Operationen nur in einer sicheren »Management-Zone« passieren +

+ +
+
@@ -83127,6 +83179,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -83370,7 +83423,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -83379,6 +83432,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200

+
@@ -85873,6 +85927,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -85946,6 +86001,622 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Ein einfaches Schedule läßt sich damit ohne weiteres aufbauen und betreiben; die Probleme beginnen, wenn dieser Plan unzulänglich ist oder in der Ausführung durch Verzögerungen bricht. Dann erweist es sich als schwierig, das Grooming-Token korrekt (und effizient) zu handhaben — und es kann im Extremfall zur Korruption im Memory-Management kommen +

+ +
+ + + + + + + + + + +

+ dies folgt aus der Eigenschaft, daß wir (bisher) eine Abkürzung an der Queue vorbei nehmen, wenn ein Job bereits hinter seiner Startzeit liegt — denn im Zuge der normalen Job-Verarbeitung wird der »Management-Modus« regulär verlassen, in einem expliziten Prozeß-Schritt. Zusammen bewirkt das eine Unsicherheit, ob auf Ebene des Planungs-Jobs das Grooming-Token gehalten wird, und ob es dort gehalten werden muß. Diese Unsicherheit steht im Widerspruch zum Konzept, welches diesen Belang an die Zugehörigkeit zu Zonen gebunden hat, um auf eine aufwendige, feingranulare Steuerung verzichten zu können. Und die Beobachtungen in den ersten Lasttests scheinen diese Entscheidung zu bestätigen: anscheinend kann das Prüfen und Wechseln des Grooming-Tokens zu Verzögerungen im Bereich 100µs führen (wohl durch stall und Cache-Effekte), sobald das System concurrent läuft. +

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +

+ möchte gefühlsmäßig trotzdem daran festhalten +

+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ ...und damit auch keine Möglichkeit, auf natürlichem Weg ein zweifach unterschiedliches Verhalten anzuordnen. +

+ +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Grooming-Token wird nur noch hier erlangt +

+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + +

+ Haupt-Eingang: postChain() +

+ +
+
+ + + + +

+ Work-Verteilung: dispatchCapacity() +

+ +
+
+ + + + +

+ Work-Funktion: doWork() +

+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ deshalb treffen hier ein Zustand im Epochen-Pool zusammen mit einem kontextuellen Zustand aus dem Handle, welches aus Performance-Gründen eben die internen Koordinaten aus dem Epochen-Pool verdeckt einbettet +

+ + +
+
+ + + + + + +

+ ...der untere Layer kann und darf nichts wissen davon, daß sein Zustand auf einem höheren Layer in eine Abstraktion materialisiert wird. Deshalb wird er im Zuge einer Überlauf-Behandlung seine internen Strukturen reorganisieren und damit das Handle auf dem höheren Layer indirekt invalidieren +

+ + +
+
+ + + + + + +

+ ...also nicht nur in dem Fall, an dem ich das Problem entdeckt habe; generell kann das Handle durch jede Allokation invalidiert werden, denn jede Allokation kann eine Kettenreaktion mit Überlauf nach sich ziehen, und dadurch viele Handles entwerten — der Effekt ist nicht lokal +

+ +
+
+
+ + + + + + + + + + + +

+ es handelt sich nämlich hier um einen hoch problematischen Durchgriff durch die Layer-Struktur des Allokators — und zu allem Überfluß auch noch mitten aus einer anderen Operation heraus als Seiteneffekt, welcher explizit nicht Teil des Iterator-Konzepts ist und daran vorbei auf das Basis-API zugreift. Mit einem Wort, schrecklich. +

+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + +

+ und zwar wenn isWrapped() und idx im oberen Bereich hinter start_ +

+

+ Falls die Neu-Allokation nicht so groß ist, verschiebt sich dadurch idx „nur“ in eine Epoche mit früherer Deadline. Ab dem Punkt gehen alle weitere Allokationen in diese frühere Epoche, ohne daß der Aufrufkontext davon etwas mitbekommt. +

+ + +
+
+ + + + + + +

+ ...und dann wird die Lage noch schröcklicher: entweder der Extent ist noch uninitialisierte Storage und der Positionszeiger schickt uns dann irgendwohin  und in die allgemeine Speicherkorruption, oder es handelt sich um einen alten, re-cycleten Extent, dessen Werte noch plausibel sind und zu einer funktionierenden Allokation führen, die dann irgendwann später ohne Vorankündigung mit einer neuen Nutzung überschrieben wird. +

+ + +
+
+
+
+
+
+ + + + + + +

+ selbst-Gefährdung könnte man reparieren +

+

+  — fremd-Gefährdung nicht +

+ + +
+ + + + + + + + + + +

+ wenn aber etwas passiert, sind die Folgen destruktiv und nicht zu diagnostizieren +

+ + +
+ +
+
+ + + + + + + + + + + + + + +

+ das Weiterverwenden der Handles ist der größte Hebel, mit dem der Allocator überhaupt in eine akzeptable Performance-Zone kommt +

+ + +
+
+ + + + + + + + +

+ aber einen Check für jeden Zugriff erfordern (teuer) +

+ + +
+
+
+ + + +
+
+
+
+
@@ -102859,6 +103530,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ @@ -105829,9 +106501,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

...und diese wird im aktuellen Test-Setup nicht von der Grooming-Token-Klammer erfaßt @@ -105843,7 +106513,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -105878,6 +106548,11 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + @@ -105906,7 +106581,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -106897,7 +107572,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -106906,6 +107581,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200

+ @@ -107181,7 +107857,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -107217,9 +107893,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

vault::mem::ExtentFamily<vault::gear::Activity, 500ul>::access(unsigned long) const+0xdd @@ -107827,16 +108501,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

< now ⟹ re-entrant

- -
+ @@ -107844,16 +108515,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

== now ⟹ re-entrant

- -
+ @@ -107861,9 +108529,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

< now ⟹ re-entrant @@ -107877,9 +108543,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

< now ⟹ re-entrant @@ -107893,9 +108557,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

< now ⟹ re-entrant @@ -107909,9 +108571,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

... @@ -107923,14 +108583,11 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
...

- -
+ - - - +

< now ⟹ re-entrant @@ -107943,16 +108600,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

@36141 Continuation(lastNode=304, levelDone=43) --> to ...324  ○ start=45000

- -
+
@@ -107979,16 +108633,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

Level 19(lang) * 5 wird von 5 Threads bearbeitet

- -
+ @@ -108028,9 +108679,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

        scheduleCalcJob_(currIdx_, n->level); @@ -108042,12 +108691,11 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
          markDependency_(pred,n);

- -
+
- + @@ -108057,9 +108705,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

@@ -108119,8 +108765,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
PRECONDITION: extent-family.hpp:321: thread_4: access: (isValidPos (idx))

- -
+
@@ -108128,16 +108773,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

...und der Stack-Trace zeigt ja, daß der Call-Pfad über SchedulerCtx::setDependency()  lief

- -
+
@@ -108155,16 +108797,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

sichtbar wurde das durch eine zu späte Startzeit

- -
+ @@ -108181,16 +108820,13 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - - +

der reale Planer muß dafür sogar elaborierte Kontroll-Logik  haben

- -
+
@@ -108208,6 +108844,299 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + + + + + + + +

+ vault::mem::ExtentFamily<vault::gear::Activity, 500ul>::access(unsigned long) const+0xdd +

+

+ +

+

+ vault::mem::ExtentFamily<vault::gear::Activity, 500ul>::IdxLink::yield() const+0x26 +

+

+ +

+

+ vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::StorageAdaptor::yield() const+0x18 +

+

+ +

+

+ lib::IterableDecorator<vault::gear::blockFlow::Epoch<vault::mem::ExtentFamily<vault::gear::Activity, 500ul> >, vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::StorageAdaptor>::operator->() const+0x20 +

+

+ +

+

+ vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::AllocatorHandle::claimSlot()+0x2b +

+

+ +

+

+ vault::gear::Activity& vault::gear::BlockFlow<vault::gear::blockFlow::RenderConfig>::AllocatorHandle::create<vault::gear::Activity::Verb>(vault::gear::Activity::Verb&&)+0x2e +

+

+ +

+

+ vault::gear::activity::Term::appendNotificationTo(vault::gear::activity::Term&)+0x2a +

+

+ +

+

+ vault::gear::ScheduleSpec::linkToSuccessor(vault::gear::ScheduleSpec&)+0x61 +

+

+ +

+

+ vault::gear::test::TestChainLoad<16ul>::ScheduleCtx::setDependency(vault::gear::test::TestChainLoad<16ul>::Node*, vault::gear::test::TestChainLoad<16ul>::Node*)+0xad +

+ +
+
+ + + + +

+                 |n.(361,lev:62) +

+

+ ... dispose(i=361,lev:62) -> @62000 +

+

+ |·?·| +

+

+ |·◆·| +

+

+ ‖•△•‖     wof:8 HT:162454 +

+

+ ‖SCH‖ 99: @161506 ○ start=62000 dead:100000 +

+

+ +

+

+ !◆!   99: @161518  ⚙  calc(i=361, lev:62) +

+

+ ..!   99: @162551  ⤴       (i=361) +

+

+ |·○·| +

+

+ |·?·| +

+

+    ·‖ D5: @162567 HT:307445574858905914  -> ▶ 162454 +

+

+ ‖▷▷▷‖ D5: @ 162633 EMPTY +

+

+ |·◆·| +

+

+ |·◇·| +

+

+ |·?·| +

+

+ |·◆·| +

+

+ |·◇·| +

+

+                 |n.(362,lev:62) +

+

+ ... dispose(i=362,lev:62) -> @62000 +

+

+ |·?·| +

+

+ |·◆·| +

+

+ ‖▷▷▷‖ 99: @ 162887 EMPTY +

+

+ ‖IGN‖     wof:8 +

+

+ ‖SCH‖ 99: @162979 ○ start=62000 dead:100000 +

+

+ |·◇·| +

+

+ |·?·| +

+

+ |·◆·| +

+

+ |↯↯↯| wrapped:true start=7 idx=6 after=2 slcnt:15 +

+

+ 0000000620: PRECONDITION: extent-family.hpp:327: thread_6: access: (isValidPos (idx)) +

+

+    ·‖ 8C: @167889 HT:167887  -> ∘ +

+

+    ·‖ 0B: @167888 HT:167887  -> ∘ +

+

+    ·‖ 8C: @167920 HT:167887  -> ∘ +

+ +
+ + + + + + + + + + + + + +

+ falsche Schlußfolgerung vmtl auch schon für die vorherigen Incidents +

+ +
+ + + +

+ ich hatte bisher die konkreten Koordinaten nicht zu fassen gekriegt, und deshalb eine Daten-Korruption durch concurrent Access vermutet +

+ +
+
+ + + + + + + +

+ ein use-after-free oder ein bereits überschriebener oder rotierter Block +

+ +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + +

+ isValid = isWrapped()? (start_ <= idx and idx < slotCnt()) +

+

+                         or idx < after_ +

+

+                       : (start_ <= idx and idx < after_); +

+ +
+ +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ auch eine Rotation invalidiert bestehende Alloc-Handle +

+ +
+
+
+ + + + + +