2023-06-24 03:14:17 +02:00
|
|
|
/*
|
|
|
|
|
BLOCK-FLOW.hpp - specialised custom allocator to manage scheduler data
|
|
|
|
|
|
|
|
|
|
Copyright (C) Lumiera.org
|
|
|
|
|
2023, Hermann Vosseler <Ichthyostega@web.de>
|
|
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or
|
|
|
|
|
modify it under the terms of the GNU General Public License as
|
|
|
|
|
published by the Free Software Foundation; either version 2 of
|
|
|
|
|
the License, or (at your option) any later version.
|
|
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
|
along with this program; if not, write to the Free Software
|
|
|
|
|
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
|
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/** @file block-flow.hpp
|
|
|
|
|
** Memory management scheme for activities and parameter data passed through
|
|
|
|
|
** the Scheduler within the Lumiera render engine. While — conceptually — the
|
|
|
|
|
** intended render operations are described as connected activity terms, sent
|
|
|
|
|
** as messages through the scheduler, the actual implementation requires a fixed
|
|
|
|
|
** descriptor record sitting at a stable memory location while the computation
|
|
|
|
|
** is underway. Moreover, activities can spawn further activities, implying that
|
|
|
|
|
** activity descriptor records can emanate from multiple threads concurrently,
|
|
|
|
|
** and the duration to keep those descriptors in valid state is contingent.
|
|
|
|
|
** On the other hand, ongoing rendering produces a constant flow of further
|
|
|
|
|
** activities, necessitating timely clean-up of obsolete descriptors.
|
|
|
|
|
** Used memory should be recycled, calling for an arrangement of
|
|
|
|
|
** pooled allocation tiles, extending the underlying block
|
2023-07-09 01:32:27 +02:00
|
|
|
** allocation on increased throughput.
|
2023-06-24 03:14:17 +02:00
|
|
|
**
|
|
|
|
|
** @note currently this rather marks the intended memory management pattern,
|
|
|
|
|
** while the actual allocations are still performed on the heap.
|
2023-07-03 18:40:37 +02:00
|
|
|
** @see BlockFlow_test
|
|
|
|
|
** @see SchedulerUsage_test
|
|
|
|
|
** @see extent-family.hpp underlying allocation scheme
|
2023-06-24 03:14:17 +02:00
|
|
|
**
|
|
|
|
|
** @todo WIP-WIP-WIP 6/2023 »Playback Vertical Slice«
|
|
|
|
|
**
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef SRC_VAULT_GEAR_BLOCK_FLOW_H_
|
|
|
|
|
#define SRC_VAULT_GEAR_BLOCK_FLOW_H_
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "vault/common.hpp"
|
2023-07-05 15:10:34 +02:00
|
|
|
#include "vault/gear/activity.hpp"
|
|
|
|
|
#include "vault/mem/extent-family.hpp"
|
|
|
|
|
#include "lib/time/timevalue.hpp"
|
2023-06-24 03:14:17 +02:00
|
|
|
#include "lib/nocopy.hpp"
|
2023-07-13 03:41:24 +02:00
|
|
|
#include "lib/util.hpp"
|
2023-06-24 03:14:17 +02:00
|
|
|
|
2023-07-09 01:32:27 +02:00
|
|
|
#include <utility>
|
2023-06-24 03:14:17 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace vault{
|
|
|
|
|
namespace gear {
|
|
|
|
|
|
2023-07-13 03:41:24 +02:00
|
|
|
using util::isnil;
|
2023-07-05 15:10:34 +02:00
|
|
|
using lib::time::Time;
|
2023-07-13 03:41:24 +02:00
|
|
|
using lib::time::TimeVar;
|
|
|
|
|
using lib::time::Duration;
|
|
|
|
|
using lib::time::FrameRate;
|
2023-07-05 15:10:34 +02:00
|
|
|
|
2023-07-13 18:35:10 +02:00
|
|
|
|
2023-07-05 15:10:34 +02:00
|
|
|
namespace {// hard-wired parametrisation
  // number of Activity records per allocation Extent (≙ one Epoch),
  // fixed as template parameter of the ExtentFamily allocator below
  const size_t EPOCH_SIZ = 100;
  // assumed average count of Activity records needed per frame — heuristic, TODO confirm
  const size_t ACTIVITIES_PER_FRAME = 10;
  // frames expected to fit into one Epoch under that assumption
  const size_t FRAMES_PER_EPOCH = EPOCH_SIZ/ACTIVITIES_PER_FRAME;
  // frames worth of storage to provision up-front
  const size_t INITIAL_FRAMES = 50;
  // number of Extents to pre-allocate initially (the +1 adds one spare over the exact quotient)
  const size_t INITIAL_ALLOC = 1 + (INITIAL_FRAMES * ACTIVITIES_PER_FRAME) / EPOCH_SIZ;

  // initial time spacing per Epoch: FRAMES_PER_EPOCH times FrameRate{50}.duration()
  // (presumably the duration of one frame at 50 fps — verify against lib::time)
  const Duration INITIAL_EPOCH_STEP{FRAMES_PER_EPOCH * FrameRate{50}.duration()};

  /** raw allocator to provide a sequence of Extents to place Activity records */
  using Allocator = mem::ExtentFamily<Activity, EPOCH_SIZ>;
}
|
2023-06-24 03:14:17 +02:00
|
|
|
|
|
|
|
|
|
2023-07-09 01:32:27 +02:00
|
|
|
|
2023-07-05 15:10:34 +02:00
|
|
|
/**
 * Allocation Extent holding _scheduler Activities_ to be performed altogether
 * before a common _deadline._ Other than the underlying raw Extent, the Epoch
 * maintains a deadline time and keeps track of storage slots already claimed.
 * This is achieved by using the Activity record in the first slot as a GATE term
 * to maintain those administrative information.
 * @remark rationale is to discard the Extent as a whole, once deadline passed.
 */
class Epoch
  : public Allocator::Extent
  {
    /// @warning will be faked, never constructed
    Epoch() = delete;

  public:
    /**
     * specifically rigged GATE Activity,
     * used for managing Epoch metadata
     * - the Condition::rest tracks pending async IO operations
     * - the Condition::deadline is the nominal deadline of this Epoch
     * - the field `next` points to the next free allocation Slot to use
     */
    struct EpochGate
      : Activity
      {
        /** @note initially by default there is...
         *        - effectively no deadline
         *        - no IO operations pending (i.e. we can just discard the Epoch)
         *        - the `next` usable Slot is the last Storage slot, and will be
         *          decremented until there is only one slot left (EpochGate itself)
         *  @warning EpochGate is assumed to sit in the Epoch's first slot
         */
        EpochGate()
          : Activity{int(0), Time::ANYTIME}
          {
            // initialise allocation usage marker: start at last usable slot
            // NOTE(review): pointer arithmetic here steps in units of EpochGate;
            //               this presumes sizeof(EpochGate)==sizeof(Activity) — confirm
            next = this + (Epoch::SIZ() - 1);
            ENSURE (next != this);
          }
        // default copyable

        /** direct access to the deadline time stored within the GATE's condition */
        Instant&
        deadline()
          {
            return data_.condition.dead;
          }

        /** @return `true` when this Epoch's deadline lies strictly after the
         *          given time, i.e. the Epoch may not be discarded yet */
        bool
        isAlive (Time deadline)
          {
            /////////////////////////////////////////////OOO preliminary implementation ... should use the GATE-Activity itself
            return this->deadline() > deadline;
          }

        /** any storage slot left to hand out beyond the GATE itself? */
        bool
        hasFreeSlot() const
          { // see C++ § 5.9 : comparison of pointers within same array
            return next > this;
          }

        /** hand out the next free slot, moving the allocation watermark
         *  backwards, towards the GATE record in the first slot */
        Activity*
        claimNextSlot()
          {
            REQUIRE (hasFreeSlot());
            return next--;
          }
      };

    /** access the GATE Activity, which resides in this Epoch's first storage slot */
    EpochGate& gate() { return static_cast<EpochGate&> ((*this)[0]); }
    /** the nominal deadline of this Epoch, as recorded in the GATE */
    Time deadline() { return Time{gate().deadline()}; }

    /** reinterpret the raw storage Extent at the given allocator position as an
     *  Epoch, and place a default-constructed EpochGate into its first slot
     *  @warning unchecked cast — the Extent's previous contents are overwritten */
    static Epoch&
    implantInto (Allocator::iterator storageSlot)
      {
        Epoch& target = static_cast<Epoch&> (*storageSlot);
        new(&target[0]) EpochGate{};
        return target;
      }

    /** establish a new Epoch within the given storage Extent,
     *  stamping it with the given deadline time */
    static Epoch&
    setup (Allocator::iterator storageSlot, Time deadline)
      {
        Epoch& newEpoch{implantInto (storageSlot)};
        newEpoch.gate().deadline() = deadline;
        return newEpoch;
      }
  };
|
|
|
|
|
|
2023-07-09 01:32:27 +02:00
|
|
|
|
|
|
|
|
|
2023-06-24 03:14:17 +02:00
|
|
|
/**
 * Allocation scheme for the Scheduler, based on Epoch(s).
 * Scheduling entails to provide a chain of Activity definitions,
 * which will then »flow« through the priority queue until invocation.
 *
 * @see SchedulerCommutator
 * @see BlockFlow_test
 */
class BlockFlow
  : util::NonCopyable
  {
    Allocator alloc_;    ///< pool of raw storage Extents, each used as one Epoch
    TimeVar epochStep_;  ///< current time spacing applied when opening a new Epoch

    /** @internal use a raw storage Extent as Epoch (unchecked cast) */
    static Epoch&
    asEpoch (Allocator::Extent& extent)
      {
        return static_cast<Epoch&> (extent);
      }

    /** @internal adapt the raw allocator's iterator to yield Epoch& directly */
    struct StorageAdaptor : Allocator::iterator
      {
        StorageAdaptor() = default;
        StorageAdaptor(Allocator::iterator it) : Allocator::iterator{it} { }
        Epoch& yield() const { return asEpoch (Allocator::iterator::yield()); }
      };


  public:
    BlockFlow()
      : alloc_{INITIAL_ALLOC}
      , epochStep_{INITIAL_EPOCH_STEP}
      { }

    /** the time span currently used as spacing between Epochs */
    Duration
    currEpochStep() const
      {
        return Duration{epochStep_};
      }


    /** Adapted storage-extent iterator, directly exposing Extent& */
    using EpochIter = lib::IterableDecorator<Epoch, StorageAdaptor>;


    /**
     * Local handle to allow allocating a collection of Activities,
     * all sharing a common deadline. Internally, these records are
     * maintained in fixed-sized _extents_ and thus allocations may
     * _overflow_ — leading to allocation of further extents. However,
     * this extension is handled transparently by the embedded iterator.
     */
    class AllocatorHandle
      {
        EpochIter epoch_;   ///< points at the Epoch to place new allocations into

      public:
        AllocatorHandle(Allocator::iterator slot)
          : epoch_{slot}
          { }

        /*************************************************//**
         * Main API operation: allocate a new Activity record
         */
        template<typename...ARGS>
        Activity&
        create (ARGS&& ...args)
          {
            return *new(claimSlot()) Activity {std::forward<ARGS> (args)...};
          }

      private:
        void*
        claimSlot() ///< EX_SANE
          {
            if (epoch_->gate().hasFreeSlot())
              {
                return epoch_->gate().claimNextSlot();
              }
            else // Epoch overflow
              { // use following Epoch; possibly allocate
                UNIMPLEMENTED ("advance to next epoch");
              }
          }
      };


    /* ===== public BlockFlow API ===== */

    /** initiate allocations for activities to happen up to the given deadline
     *  @note when no Epoch exists yet, a first one is opened, stamped with a
     *        deadline one `epochStep_` beyond the requested deadline */
    AllocatorHandle
    until (Time deadline)
      {
        if (isnil (alloc_))
          {//just create new Epoch one epochStep ahead
            alloc_.openNew();
            Epoch::setup (alloc_.begin(), deadline + Time{epochStep_});
            return AllocatorHandle{alloc_.begin()};
          }
        else
          {//find out how the given time relates to existing Epochs
            UNIMPLEMENTED ("search through existing Epochs to locate the latest one to support given deadline");
          }
      }

    /** Clean-up: drop all leading Epochs whose deadline is at or before
     *  the given time; stops at the first Epoch still alive. */
    void
    discardBefore (Time deadline)
      {
        if (isnil (alloc_)
            or firstEpoch().deadline() > deadline)
          return;

        // count consecutive obsolete Epochs from the front of the pool
        size_t toDiscard{0};
        for (Epoch& epoch : allEpochs())
          {
            if (epoch.gate().isAlive (deadline))
              break;
            ++toDiscard;
          }
        // ask to discard the enumerated Extents
        alloc_.dropOld (toDiscard);
      }


  private:
    /** the oldest Epoch currently in the pool
     *  @warning only valid when the allocator is non-empty */
    Epoch&
    firstEpoch()
      {
        REQUIRE (not isnil (alloc_));
        return asEpoch(*alloc_.begin());
      }
    /** the most recently opened Epoch
     *  @warning only valid when the allocator is non-empty */
    Epoch&
    lastEpoch()
      {
        REQUIRE (not isnil (alloc_));
        return asEpoch(*alloc_.last());
      }

    /** iterate over all currently active Epochs */
    EpochIter
    allEpochs()
      {
        return alloc_.begin();
      }


    /// „backdoor“ to watch internals from tests
    friend class FlowDiagnostic;
  };
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* ===== Test / Diagnostic ===== */
|
|
|
|
|
|
|
|
|
|
class FlowDiagnostic
|
|
|
|
|
{
|
|
|
|
|
BlockFlow& flow_;
|
|
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
FlowDiagnostic(BlockFlow& theFlow)
|
|
|
|
|
: flow_{theFlow}
|
|
|
|
|
{ }
|
|
|
|
|
|
2023-07-13 18:35:10 +02:00
|
|
|
Time first() { return flow_.firstEpoch().deadline();}
|
|
|
|
|
Time last() { return flow_.lastEpoch().deadline(); }
|
2023-07-13 01:51:21 +02:00
|
|
|
size_t cntEpochs() { return watch(flow_.alloc_).active(); }
|
2023-07-13 18:35:10 +02:00
|
|
|
size_t poolSize() { return watch(flow_.alloc_).size(); }
|
2023-06-24 03:14:17 +02:00
|
|
|
};
|
|
|
|
|
|
2023-07-13 01:51:21 +02:00
|
|
|
/** entry point to set up a diagnostic facade
 *  for inspecting the given BlockFlow instance */
inline FlowDiagnostic
watch (BlockFlow& theFlow)
{
  FlowDiagnostic inspector{theFlow};
  return inspector;
}
|
|
|
|
|
|
2023-06-24 03:14:17 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
}} // namespace vault::gear
|
|
|
|
|
#endif /*SRC_VAULT_GEAR_BLOCK_FLOW_H_*/
|