/*
  WorkForce(Test)  -  verify worker thread service

  Copyright (C)
     2023,           Hermann Vosseler <Ichthyostega@web.de>

  **Lumiera** is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the
  Free Software Foundation; either version 2 of the License, or (at your
  option) any later version. See the file COPYING for further details.

* *****************************************************************/


/** @file work-force-test.cpp
 ** unit test \ref WorkForce_test
 */
#include "lib/test/run.hpp"
#include "vault/gear/work-force.hpp"
#include "lib/thread.hpp"
#include "lib/sync.hpp"

#include <functional>
#include <thread>
#include <chrono>
#include <set>


using test::Test;

namespace vault{
namespace gear {
namespace test {

  using std::this_thread::sleep_for;
  using namespace std::chrono_literals;
  using std::chrono::milliseconds;
  using lib::Thread;
namespace {
using WorkFun = std::function<work::SIG_WorkFun>;
using FinalFun = std::function<work::SIG_FinalHook>;
2023-09-09 23:42:13 +02:00
/**
* Helper: setup a Worker-Pool configuration for the test.
* Derived from the default configuration, it allows to bind
* a lambda as work-functor and to tweak other parameters.
*/
template<class FUN>
auto
setup (FUN&& workFun)
{
struct Setup
: work::Config
{
WorkFun doWork;
FinalFun finalHook = [](bool){ /*NOP*/ };
milliseconds IDLE_WAIT = work::Config::IDLE_WAIT;
size_t DISMISS_CYCLES = work::Config::DISMISS_CYCLES;
Setup (FUN&& workFun)
: doWork{std::forward<FUN> (workFun)}
{ }
Setup&&
withFinalHook (FinalFun finalFun)
{
finalHook = move (finalFun);
return move(*this);
}
Setup&&
withSleepPeriod (std::chrono::milliseconds millis)
{
IDLE_WAIT = millis;
return move(*this);
}
Setup&&
dismissAfter (size_t cycles)
{
DISMISS_CYCLES = cycles;
return move(*this);
}
};
return Setup{std::forward<FUN> (workFun)};
}
}
  /*************************************************************************//**
   * @test WorkForce-Service: maintain a pool of active worker threads.
   * @warning this test relies on empirical timings and can be brittle.
   * @see SchedulerUsage_test
   */
class WorkForce_test : public Test
{
virtual void
run (Arg)
{
simpleUsage();
verify_pullWork();
verify_workerHalt();
verify_workerSleep();
verify_workerRetard();
verify_workerDismiss();
verify_finalHook();
verify_detectError();
verify_defaultPool();
verify_scalePool();
verify_countActive();
verify_dtor_blocks();
2023-09-04 18:31:45 +02:00
}
/** @test demonstrate simple worker pool usage
2023-09-04 18:31:45 +02:00
*/
void
simpleUsage()
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{ ++check; return activity::PASS; })};
2023-09-09 23:42:13 +02:00
// ^^^ this is the doWork-λ
2023-09-04 18:31:45 +02:00
CHECK (0 == check);
wof.activate();
sleep_for(20ms);
2023-09-09 23:42:13 +02:00
CHECK (0 < check); // λ invoked in the worker threads
2023-09-04 18:31:45 +02:00
}
/** @test the given work-functor is invoked repeatedly, once activated.
2023-09-04 18:31:45 +02:00
*/
void
verify_pullWork()
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{ ++check; return activity::PASS; })};
CHECK (0 == check);
wof.incScale();
sleep_for(20ms);
uint invocations = check;
CHECK (0 < invocations);
sleep_for(2ms);
CHECK (invocations < check);
invocations = check;
sleep_for(2ms);
CHECK (invocations < check);
wof.awaitShutdown();
invocations = check;
sleep_for(2ms);
CHECK (invocations == check);
}
/** @test can cause a worker to terminate by return-value from the work-functor
*/
void
verify_workerHalt()
{
atomic<uint> check{0};
atomic<activity::Proc> control{activity::PASS};
WorkForce wof{setup ([&]{ ++check; return activity::Proc(control); })};
wof.incScale();
sleep_for(1ms);
uint invocations = check;
CHECK (0 < invocations);
control = activity::HALT;
sleep_for(1ms);
invocations = check;
sleep_for(10ms);
CHECK (invocations == check);
}
2023-09-09 23:42:13 +02:00
/** @test a worker can be sent to sleep, throttling the poll frequency.
*/
void
verify_workerSleep()
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{ ++check; return activity::WAIT; })
.withSleepPeriod (10ms)};
wof.incScale();
sleep_for(1ms);
CHECK (1 == check);
sleep_for(10us);
CHECK (1 == check);
sleep_for(12ms); // after waiting one sleep-period...
CHECK (2 == check); // ...functor invoked again
}
/** @test a worker can be retarded and throttled in case of contention.
*/
void
verify_workerRetard()
{
atomic<uint> check{0};
{ // ▽▽▽▽ regular work-cycles without delay
WorkForce wof{setup ([&]{ ++check; return activity::PASS; })};
wof.incScale();
sleep_for(5ms);
}
uint cyclesPASS{check};
check = 0;
{ // ▽▽▽▽ signals »contention«
WorkForce wof{setup ([&]{ ++check; return activity::KICK; })};
wof.incScale();
sleep_for(5ms);
}
uint cyclesKICK{check};
CHECK (cyclesKICK < cyclesPASS);
CHECK (cyclesKICK < 50);
}
/** @test when a worker is sent into sleep-cycles for an extended time,
2023-09-09 23:42:13 +02:00
* the worker terminates itself.
*/
void
verify_workerDismiss()
2023-09-04 18:31:45 +02:00
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{ ++check; return activity::WAIT; })
.withSleepPeriod (10ms)
.dismissAfter(5)};
wof.incScale();
sleep_for(1ms);
CHECK (1 == check);
sleep_for(12ms);
CHECK (2 == check); // after one wait cycle, one further invocation
sleep_for(100ms);
CHECK (5 == check); // only 5 invocations total...
CHECK (0 == wof.size()); // ...after that, the worker terminated
2023-09-04 18:31:45 +02:00
}
/** @test verify invocation of a thread-termination callback
2023-09-04 18:31:45 +02:00
*/
void
verify_finalHook()
{
2023-09-09 23:42:13 +02:00
atomic<uint> exited{0};
atomic<activity::Proc> control{activity::PASS};
WorkForce wof{setup([&]{ return activity::Proc(control); })
2023-09-09 23:42:13 +02:00
.withFinalHook([&](bool){ ++exited; })};
2023-09-09 23:42:13 +02:00
CHECK (0 == exited);
wof.activate();
sleep_for(10ms);
CHECK (wof.size() == work::Config::COMPUTATION_CAPACITY);
2023-09-09 23:42:13 +02:00
CHECK (0 == exited);
control = activity::HALT;
sleep_for(10ms);
CHECK (0 == wof.size());
2023-09-09 23:42:13 +02:00
CHECK (exited == work::Config::COMPUTATION_CAPACITY);
}
2023-09-09 23:42:13 +02:00
/** @test exceptions emanating from within the worker are catched
* and reported by setting the isFailure argument flag of
* the `finalHook` functor invoked at worker termination.
*/
void
verify_detectError()
{
atomic<uint> check{0};
atomic<uint> errors{0};
WorkForce wof{setup ([&]{
if (++check == 555)
throw error::State("evil");
return activity::PASS;
})
.withFinalHook([&](bool isFailure)
{
if (isFailure)
++errors;
})};
CHECK (0 == check);
CHECK (0 == errors);
wof.incScale();
wof.incScale();
wof.incScale();
sleep_for(10us);
CHECK (3 == wof.size());
2023-09-09 23:42:13 +02:00
CHECK (0 < check);
CHECK (0 == errors);
sleep_for(200ms); // wait for the programmed disaster
CHECK (2 == wof.size());
CHECK (1 == errors);
}
/** @test by default, the WorkForce is initially inactive;
* once activated, it scales up to the number of cores
* reported by the runtime system.
*/
void
verify_defaultPool()
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{ ++check; return activity::PASS; })};
// after construction, the WorkForce is inactive
CHECK (0 == wof.size());
CHECK (0 == check);
wof.activate();
sleep_for(20ms);
CHECK (0 < check);
CHECK (wof.size() == work::Config::COMPUTATION_CAPACITY);
CHECK (work::Config::COMPUTATION_CAPACITY == std::thread::hardware_concurrency());
}
2023-09-09 23:42:13 +02:00
/** @test the number of (separate) workers can be scaled up,
* both stepwise and as fraction of full hardware concurrency
*/
void
verify_scalePool()
{
2023-09-09 23:42:13 +02:00
/** helper to count distinct thread-IDs */
class UniqueCnt
: public std::set<std::thread::id>
, public lib::Sync<>
{
public:
void
mark (std::thread::id const& tID)
{
Lock guard{this};
2023-09-09 23:42:13 +02:00
this->insert(tID);
}
operator size_t() const
{
Lock guard{this};
2023-09-09 23:42:13 +02:00
return this->size();
}
}
uniqueCnt;
WorkForce wof{setup ([&]{
uniqueCnt.mark(std::this_thread::get_id());
return activity::PASS;
})};
CHECK (0 == uniqueCnt);
CHECK (0 == wof.size());
wof.incScale();
sleep_for(1ms);
2023-09-09 23:42:13 +02:00
CHECK (1 == uniqueCnt);
CHECK (1 == wof.size());
wof.incScale();
sleep_for(1ms);
2023-09-09 23:42:13 +02:00
CHECK (2 == uniqueCnt);
CHECK (2 == wof.size());
auto fullCnt = work::Config::COMPUTATION_CAPACITY;
wof.activate (1.0);
sleep_for(5ms);
2023-09-09 23:42:13 +02:00
CHECK (fullCnt == uniqueCnt);
CHECK (fullCnt == wof.size());
wof.activate (2.0);
sleep_for(10ms);
2023-09-09 23:42:13 +02:00
CHECK (2*fullCnt == uniqueCnt);
CHECK (2*fullCnt == wof.size());
wof.awaitShutdown();
CHECK (0 == wof.size());
uniqueCnt.clear();
sleep_for(5ms);
2023-09-09 23:42:13 +02:00
CHECK (0 == uniqueCnt);
wof.activate (0.5);
sleep_for(5ms);
2023-09-09 23:42:13 +02:00
CHECK (fullCnt/2 == uniqueCnt);
CHECK (fullCnt/2 == wof.size());
}
/** @test dynamically determine count of currently active workers.
*/
void
verify_countActive()
{
atomic<uint> check{0};
WorkForce wof{setup ([&]{
++check;
if (check == 5'000 or check == 5'110)
return activity::HALT;
else
return activity::PASS;
})};
CHECK (0 == wof.size());
wof.incScale();
wof.incScale();
wof.incScale();
2025-06-07 23:59:57 +02:00
sleep_for(15us); // this may be fragile; must be sufficiently short
CHECK (3 == wof.size());
while (check < 6'000)
sleep_for(15ms); // .....sufficiently long to count way beyond 10'000
CHECK (check > 6'000);
CHECK (1 == wof.size());
}
/** @test verify that the WorkForce dtor waits for all active threads to disappear
* - use a work-functor which keeps all workers blocked
* - start the WorkForce within a separate thread
* - in this separate thread, cause the WorkForce destructor to be called
2023-09-09 23:42:13 +02:00
* - in the test main thread release the work-functor blocking
* - at this point, all workers return, detect shutdown and terminate
*/
void
verify_dtor_blocks()
{
atomic<bool> trapped{true};
auto blockingWork = [&]{
while (trapped)
/* spin */;
return activity::PASS;
};
atomic<bool> pool_scaled_up{false};
atomic<bool> shutdown_done{false};
Thread operate{"controller"
,[&] {
{// nested scope...
WorkForce wof{setup (blockingWork)};
wof.activate();
sleep_for (10ms);
CHECK (wof.size() == work::Config::COMPUTATION_CAPACITY);
pool_scaled_up = true;
} // WorkForce goes out of scope => dtor called
// when reaching this point, dtor has terminated
shutdown_done = true;
}};
CHECK (operate); // operate-thread is in running state
sleep_for(100ms);
CHECK (pool_scaled_up);
CHECK (not shutdown_done); // all workers are trapped in the work-functor
// thus the destructor can't dismantle the pool
trapped = false;
2025-06-07 23:59:57 +02:00
sleep_for(40ms);
CHECK (shutdown_done);
CHECK (not operate); // operate-thread has detached and terminated
2023-09-04 18:31:45 +02:00
}
};
  /** Register this test class... */
  LAUNCHER (WorkForce_test, "unit engine");


}}} // namespace vault::gear::test