implement missing parts of test/dummy buffer provider
This commit is contained in:
parent
0a1256f4e3
commit
91b74ad7bd
5 changed files with 66 additions and 20 deletions
|
|
@ -39,8 +39,12 @@ namespace engine {
|
|||
|
||||
LUMIERA_ERROR_DEFINE (BUFFER_MANAGEMENT, "Problem providing working buffers");
|
||||
|
||||
|
||||
|
||||
/** build a new provider instance, managing a family of buffers.
|
||||
* The metadata of these buffers is organised hierarchically based on
|
||||
* chained hash values, using the #implementationID as a seed.
|
||||
* @param implementationID symbolic ID setting these family of buffers apart.
|
||||
*/
|
||||
BufferProvider::BufferProvider (Literal implementationID)
|
||||
: meta_(new BufferMetadata (implementationID))
|
||||
{ }
|
||||
|
|
@ -53,9 +57,9 @@ namespace engine {
|
|||
* currently locked and usable by client code
|
||||
*/
|
||||
bool
|
||||
BufferProvider::verifyValidity (BufferDescriptor const&)
|
||||
BufferProvider::verifyValidity (BufferDescriptor const& bufferID)
|
||||
{
|
||||
UNIMPLEMENTED ("BufferProvider basic and default implementation");
|
||||
return meta_->isLocked (bufferID);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -81,6 +85,12 @@ namespace engine {
|
|||
}
|
||||
|
||||
|
||||
/** callback from implementation to build and enrol a BufferHandle,
|
||||
* to be returned to the client as result of the #lockBuffer call.
|
||||
* Performs the necessary metadata state transition leading from an
|
||||
* abstract buffer type to a metadata::Entry corresponding to an
|
||||
* actual buffer, which is locked for exclusive use by one client.
|
||||
*/
|
||||
BuffHandle
|
||||
BufferProvider::buildHandle (HashVal typeID, void* storage, LocalKey const& implID)
|
||||
{
|
||||
|
|
@ -177,7 +187,7 @@ namespace engine {
|
|||
bool
|
||||
BufferProvider::was_created_by_this_provider (BufferDescriptor const& descr) const
|
||||
{
|
||||
return isSameObject (this, descr.provider_);
|
||||
return isSameObject (*this, *descr.provider_);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -194,18 +204,20 @@ namespace engine {
|
|||
|
||||
|
||||
void
|
||||
BuffHandle::emit()
|
||||
BufferDescriptor::emit (BuffHandle const& handle) const
|
||||
{
|
||||
UNIMPLEMENTED ("forward buffer emit call to buffer provider");
|
||||
REQUIRE (verifyValidity());
|
||||
provider_->emitBuffer(handle);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
BuffHandle::release()
|
||||
BufferDescriptor::release (BuffHandle const& handle) const
|
||||
{
|
||||
UNIMPLEMENTED ("forward buffer release call to buffer provider");
|
||||
REQUIRE (verifyValidity());
|
||||
provider_->releaseBuffer(handle);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
} // namespace engine
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
/** @file buffer-provider.hpp
|
||||
** Abstraction to represent buffer management and lifecycle within the render engine.
|
||||
** It turns out that -- throughout the render engine implementation -- we never need
|
||||
** It turns out that -- throughout the render engine implementation -- we never need
|
||||
** direct access to the buffers holding media data. Buffers are just some entity to be \em managed,
|
||||
** i.e. "allocated", "locked" and "released"; the actual meaning of these operations is an implementation detail.
|
||||
** The code within the render engine just pushes around BufferHandle objects, which act as a front-end,
|
||||
|
|
@ -71,7 +71,10 @@ namespace engine {
|
|||
* - "locking" a buffer to yield a buffer handle
|
||||
* - dereferencing this smart-handle class
|
||||
*
|
||||
* @warning all of BufferProvider is assumed to run within a threadsafe environment.
|
||||
*
|
||||
* @todo as of 6/2011 buffer management within the engine is still a bit vague
|
||||
* @todo as of 11/11 thread safety within the engine remains to be clarified
|
||||
*/
|
||||
class BufferProvider
|
||||
: boost::noncopyable
|
||||
|
|
|
|||
|
|
@ -54,6 +54,7 @@ namespace engine {
|
|||
|
||||
typedef size_t HashVal; ////////////TICKET #722
|
||||
|
||||
class BuffHandle;
|
||||
class BufferProvider;
|
||||
|
||||
|
||||
|
|
@ -86,6 +87,9 @@ namespace engine {
|
|||
|
||||
bool verifyValidity() const;
|
||||
|
||||
void emit (BuffHandle const&) const;
|
||||
void release (BuffHandle const&) const;
|
||||
|
||||
operator HashVal() const { return subClassification_; }
|
||||
};
|
||||
|
||||
|
|
@ -140,8 +144,18 @@ namespace engine {
|
|||
|
||||
|
||||
|
||||
void emit();
|
||||
void release();
|
||||
void
|
||||
emit()
|
||||
{
|
||||
REQUIRE (pBuffer_);
|
||||
descriptor_.emit (*this);
|
||||
}
|
||||
|
||||
void release()
|
||||
{
|
||||
REQUIRE (pBuffer_);
|
||||
descriptor_.release (*this);
|
||||
}
|
||||
|
||||
|
||||
template<typename BU>
|
||||
|
|
|
|||
|
|
@ -107,12 +107,14 @@ namespace engine {
|
|||
class BlockPool
|
||||
: public lib::BoolCheckable<BlockPool>
|
||||
{
|
||||
uint maxAllocCount_;
|
||||
size_t memBlockSize_;
|
||||
PoolHolder blockList_;
|
||||
|
||||
public:
|
||||
BlockPool()
|
||||
: memBlockSize_(0)
|
||||
: maxAllocCount_(0) // unlimited by default
|
||||
, memBlockSize_(0)
|
||||
, blockList_()
|
||||
{ }
|
||||
|
||||
|
|
@ -127,12 +129,26 @@ namespace engine {
|
|||
|
||||
/** verify on shutdown that no buffer block is still in active use.
 *  Only logs an error (does not throw): destructors must not fail. */
~BlockPool()
  {
    if (!verify_all_children_idle())
      ERROR (test, "Block actively in use while shutting down BufferProvider "
                   "allocation pool. This might lead to Segfault and memory leaks.");
  }
|
||||
|
||||
|
||||
/** announce the intention to allocate a number of additional buffers.
 * @param number_of_expected_buffers amount of buffers the client expects to use
 * @return the number of buffers actually guaranteed to be available;
 *         may be less than requested when a hard allocation limit is set
 *         (maxAllocCount_ != 0), otherwise echoes the request unchanged
 */
uint
prepare_for (uint number_of_expected_buffers)
  {
    if (maxAllocCount_ &&
        maxAllocCount_ < blockList_->size() + number_of_expected_buffers)
      {
        // limited pool: grant only the remaining headroom
        ASSERT (maxAllocCount_ >= blockList_->size());
        return maxAllocCount_ - blockList_->size();
      }
    // currently no hard limit imposed
    return number_of_expected_buffers;
  }
|
||||
|
||||
|
||||
Block&
|
||||
createBlock()
|
||||
{
|
||||
|
|
@ -168,7 +184,7 @@ namespace engine {
|
|||
|
||||
private:
|
||||
bool
|
||||
verify_all_children_closed()
|
||||
verify_all_children_idle()
|
||||
{
|
||||
try {
|
||||
if (blockList_)
|
||||
|
|
@ -217,9 +233,10 @@ namespace engine {
|
|||
/* ==== Implementation of the BufferProvider interface ==== */
|
||||
|
||||
/** pre-register storage for buffers of a specific kind.
 * @param requestedAmount number of buffers the client announces to need
 * @param typeID hash-based classification selecting the responsible pool
 * @return number of buffers the responsible pool guarantees to provide
 */
uint
TrackingHeapBlockProvider::prepareBuffers(uint requestedAmount, HashVal typeID)
{
  diagn::BlockPool& responsiblePool = getBlockPoolFor (typeID);
  return responsiblePool.prepare_for (requestedAmount);
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1133,7 +1133,7 @@ Beyond that, it can be necessary to associate at least a state flag with //indiv
|
|||
__Note__: while the API to access this service is uniform, conceptually there is a difference between just using the (shared) type information and associating individual metadata, like the buffer state. Type-~IDs, once allocated, will never be discarded (within the lifetime of a Lumiera application instance -- buffer associations aren't persistent). To the contrary, individual metadata //will be discarded,// when releasing the corresponding buffer. According to the ''prototype pattern'', individual metadata is treated as a one-way-off specialisation.
|
||||
</pre>
|
||||
</div>
|
||||
<div title="BufferProvider" modifier="Ichthyostega" modified="201111050003" created="201107082330" tags="Rendering spec draft" changecount="21">
|
||||
<div title="BufferProvider" modifier="Ichthyostega" modified="201111192223" created="201107082330" tags="Rendering spec draft" changecount="22">
|
||||
<pre>It turns out that -- throughout the render engine implementation -- we never need direct access to the buffers holding actual media data. Buffers are just some entity to be //managed,// i.e. "allocated", "locked" and "released"; the //actual meaning of these operations can be left to the implementation.// The code within the render engine just pushes around ''smart-ptr like handles''. These [[buffer handles|BuffHandle]] act as a front-end, being created by and linked to a buffer provider implementation. There is no need to manage the lifecycle of buffers automatically, because the use of buffers is embedded into the render calculation cycle, which follows a rather strict protocol anyway. Relying on the [[capabilities of the scheduler|SchedulerRequirements]], the sequence of individual jobs in the engine ensures...
|
||||
* that the availability of a buffer was ensured prior to planning a job ("buffer allocation")
|
||||
* that a buffer handle was obtained ("locked") prior to any operation requiring a buffer
|
||||
|
|
@ -1142,7 +1142,7 @@ __Note__: while the API to access this service is uniform, conceptually there is
|
|||
!operations
|
||||
While BufferProvider is an interface meant to be backed by various different kinds of buffer and memory management approaches, there is a common set of operations to be supported by any of them
|
||||
;announcing
|
||||
:client code may announce beforehand that it expects to get a certain amount of buffers. Usually this causes some allocations to happen right away, or it might trigger similar mechanisms to ensure availability; the BufferProvider will then return the actual number of buffers guaranteed to be available. This announcing step is optional an can happen any time before or even after using the buffers and it can be repeated with different values to adjust to changing requirements. (Currently 9/2011 this is meant to be global for the whole BufferProvider, but it might happen that we need to break that down to individual clients)
|
||||
:client code may announce beforehand that it expects to get a certain amount of buffers. Usually this causes some allocations to happen right away, or it might trigger similar mechanisms to ensure availability; the BufferProvider will then return the actual number of buffers guaranteed to be available. This announcing step is optional and can happen any time before or even after using the buffers, and it can be repeated with different values to adjust to changing requirements. Thus the announced amount of buffers always denotes //additional buffers,// on top of what is actively used at the moment. This safety margin of available buffers usually is accounted separately for each distinct kind of buffer (buffer type). There is no tracking as to which specific client requested buffers, beyond the buffer type.
|
||||
;locking
|
||||
:this operation actually makes a buffer available for a specific client and returns a [[buffer handle|BuffHandle]]. The corresponding buffer is marked as used and can't be locked again unless released. If necessary, at that point the BufferProvider might allocate memory to accommodate (especially when the buffers weren't announced beforehand). The locking may fail and raise an exception. You may expect failure to be unlikely when buffers have been //announced beforehand.// To support additional sanity checks, the client may provide a token-ID with the lock-operation. This token may be retrieved later and it may be used to ensure the buffer is actually locked for //this token.//
|
||||
;attaching
|
||||
|
|
|
|||
Loading…
Reference in a new issue