draft: integrating an engine mock implementation

This commit is contained in:
Fischlurch 2012-01-21 03:13:33 +01:00
parent ec659b7141
commit 7e7ecc5d51
8 changed files with 210 additions and 15 deletions

View file

@ -47,6 +47,21 @@ namespace engine{
// using lumiera::DummyPlayer;
/**
* Abstract definition of the environment
* hosting a given render activity (CalcStream).
* Exposes all the operations necessary to adjust the
* runtime behaviour of the render activity, like e.g.
* re-scheduling with modified playback speed. Since the
CalcStream is a conceptual representation of "the rendering",
* the actual engine implementation is kept opaque this way.
*/
class RenderEnvironmentClosure
{
public:
virtual ~RenderEnvironmentClosure() { } ///< this is an interface
};
@ -69,19 +84,24 @@ namespace engine{
*/
class CalcStream
{
RenderEnvironmentClosure* env_;
protected:
CalcStream (RenderEnvironmentClosure& engine)
: env_(&engine)
{ }
friend class EngineService;
public:
CalcStream()
{
UNIMPLEMENTED("build a disabled/dead calculation stream");
}
: env_(0)
{ }
CalcStream (CalcStream const& o)
{
UNIMPLEMENTED("clone a calculation stream");
}
: env_(o.env_)
{ }
~CalcStream() { }

View file

@ -22,6 +22,7 @@
#include "proc/engine/engine-service-mock.hpp"
#include "proc/engine/worker/dummy-tick.hpp"
//#include <string>
//#include <memory>
@ -49,8 +50,13 @@ namespace engine{
/** */
/**
* Initialise a mock render engine.
* This dummy implementation manages a collection of
* "Processors", each running in a separate thread.
*/
EngineServiceMock::EngineServiceMock()
  : processors_()   // start with an empty collection; processors are added on demand
  { }

View file

@ -51,14 +51,16 @@
//#include "common/instancehandle.hpp"
//#include "lib/singleton-ref.hpp"
#include "proc/engine/engine-service.hpp"
#include "lib/singleton.hpp"
#include "lib/scoped-ptrvect.hpp"
//
#include <boost/noncopyable.hpp>
//#include <boost/scoped_ptr.hpp>
//#include <string>
namespace proc {
namespace proc {
namespace node { class DummyTick; }
namespace engine{
// using std::string;

View file

@ -87,6 +87,42 @@ namespace engine{
}
/** @internal build a representation of a single, ongoing calculation effort.
* This "CalcStream" is tied to the actual engine implementation, but only
* through an opaque link, representing this concrete engine as an
* RenderEnvironmentClosure. This enables the created CalcStream to be
* re-configured and adjusted while running.
*/
CalcStream
EngineService::activateCalculation (RenderEnvironmentClosure& engineCallback)
{
  // bind the new calculation stream to the concrete engine implementation,
  // which remains opaque behind the RenderEnvironmentClosure interface
  CalcStream newStream (engineCallback);
  return newStream;
}
/** @internal extension point
* Install and activate a single, ongoing calculation effort.
* Configure and prepare all the internal components, pre-allocate
* resources and add entries to the registration tables to get this
* render activity into running state
* @return CalcStream representing this newly started rendering activity
* @note variations and especially mock implementations of the render engine
* might choose to configure internals differently. As long as the
* CalcStream and the embedded RenderEnvironmentClosure are consistent,
* such a specific configuration remains opaque for the user of the
* created render activity
*/
CalcStream
EngineService::configureCalculation ()
{
  // Placeholder: *this* still needs to be exposed as a RenderEnvironmentClosure.
  // NOTE(review): the UNIMPLEMENTED macro is presumably expected to signal/abort
  // before control reaches the lines below; otherwise the dereference of the
  // null pointer would crash -- hence the KABOOOM marker. Confirm macro semantics.
  UNIMPLEMENTED ("represent *this as RenderEnvironmentClosure");    // fixed: stray ')' removed from the diagnostic text
  RenderEnvironmentClosure* todo_fake(0);             ////KABOOOM
  return activateCalculation (*todo_fake);
}
/* ===== Quality-of-Service ===== */

View file

@ -150,7 +150,12 @@ namespace engine{
Quality serviceQuality =QoS_BACKGROUND);
private:
CalcStream activateCalculation (RenderEnvironmentClosure&);
protected:
virtual CalcStream configureCalculation ();
void activateTracing();
void disableTracing(); ///< EX_FREE

View file

@ -0,0 +1,126 @@
/*
DUMMY-TICK.hpp - issuing timed callbacks
Copyright (C) Lumiera.org
2009, Joel Holdsworth <joel@airwebreathe.org.uk>,
Hermann Vosseler <Ichthyostega@web.de>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/** @file dummy-tick.hpp
** A timer service invoking a given callback periodically.
** This is a rough preliminary implementation as of 1/2009. We use it to
** drive the frame "creation" of a player dummy (the render engine is not
** ready yet). The intention is to use this service as part of a mock engine
** setup, used to verify the construction of engine components. As an integration
** test, we build a "dummy player", delivering some test data frames to the Gui.
**
** @see proc::play::DummyPlayerService
**
*/
#ifndef PROC_ENGINE_WORKER_DUMMY_TICK_H
#define PROC_ENGINE_WORKER_DUMMY_TICK_H
#include "lib/error.hpp"
#include "backend/thread-wrapper.hpp"
#include <tr1/functional>
#include <limits>
namespace proc {
namespace node {
using std::tr1::function;
using std::tr1::bind;
/************************************************************
* Tick generating service for a periodic callback,
* with adjustable frequency. Quick'n dirty implementation!
*/
class DummyTick
  : backend::ThreadJoinable
  {
    typedef function<void(void)> Tick;

    // Tick period in microseconds. Doubles as the control channel between the
    // client thread and the timer thread: 0 terminates the loop, POLL_TIMEOUT
    // means "paused", any larger value is the active tick period.
    // NOTE(review): `volatile` is relied on for cross-thread visibility here;
    // volatile is not a synchronisation primitive -- confirm this is deemed
    // acceptable for a quick'n dirty mock.
    volatile uint timespan_;

    /** poll interval for new settings in wait state */
    static const uint POLL_TIMEOUT = 1000;

  public:
    /** Launch the timer thread immediately.
     *  @param callback functor invoked once per tick while active */
    DummyTick (Tick callback)
      : ThreadJoinable("Tick generator (dummy)"
                      , bind (&DummyTick::timerLoop, this, callback)
                      )
      {
        INFO (proc, "TickService started.");
      }

    /** Signal the timer loop to exit (timespan_ == 0 terminates it),
     *  then block until the timer thread has joined. */
   ~DummyTick ()
      {
        timespan_ = 0;
        this->join();

        usleep (200000);   // additional delay allowing GTK to dispatch the last output
        INFO (proc, "TickService shutdown.");
      }

    /** set the periodic timer to run with a given frequency,
     *  starting \em now. Well, not actually now, but at the next
     *  opportunity. It should be \em now, but this implementation
     *  is sloppy! setting fps==0 halts (pauses) the timer.
     */
    void activate (uint fps)
      {
        // guard against division overflow and against periods shorter
        // than the poll resolution of the timer loop
        REQUIRE ( 0==fps
                ||( 1000000/fps < std::numeric_limits<uint>::max()
                 && 1000000/fps > POLL_TIMEOUT));
        if (fps)
          timespan_ = 1000000/fps;   // microseconds per tick
        else
          timespan_ = POLL_TIMEOUT;  // pause: keep polling, but skip the callback
      }

  private:
    /** Thread function: spin until timespan_ drops to zero (set by the dtor).
     *  While timespan_ equals POLL_TIMEOUT the loop merely polls for new
     *  settings; any larger value triggers the periodic callback each pass. */
    void timerLoop(Tick periodicFun)
      {
        timespan_ = POLL_TIMEOUT;
        while (0 < timespan_)
          {
            if (timespan_ > POLL_TIMEOUT)
              periodicFun();

            usleep (timespan_);
          }
        TRACE (proc_dbg, "Tick Thread timer loop exiting...");
      }
  };
}} // namespace proc::node
#endif

View file

@ -77,7 +77,7 @@ namespace test {
DataSink sink2 = *++sinks;
// within the frame-calculation "loop"
// we perform an data exchange cycle
// we perform a data exchange cycle
int64_t frameNr = 123;
BuffHandle buff00 = sink1.lockBufferFor (frameNr);
BuffHandle buff10 = sink2.lockBufferFor (frameNr);

View file

@ -1160,14 +1160,14 @@ __see also__
&amp;rarr; RenderMechanics for details on the buffer management within the node invocation for a single render step
</pre>
</div>
<div title="BufferTable" modifier="Ichthyostega" created="201109172253" tags="def spec Rendering draft" changecount="1">
<div title="BufferTable" modifier="Ichthyostega" modified="201201192303" created="201109172253" tags="def spec Rendering draft" changecount="3">
<pre>The invocation of individual [[render nodes|ProcNode]] uses a ''buffer table'' internal helper data structure to encapsulate technical details of the allocation, use, re-use and freeing of data buffers for the media calculations. Here, the management of the physical data buffers is delegated through a BufferProvider, which typically is implemented relying on the ''frame cache'' in the backend. Yet some partially quite involved technical details need to be settled for each invocation: We need input buffers, maybe provided as external input, while in other cases to be filled by a recursive call. We need storage to prepare the (possibly automated) parameters, and finally we need a set of output buffers. All of these buffers and parameters need to be rearranged for invoking the (external) processing function, followed by releasing the input buffers and committing the output buffers to be used as result.
Because there are several flavours of node wiring, the building blocks comprising such a node invocation will be combined depending on the circumstances. Performing all these various steps is indeed the core concern of the render node -- with the help of BufferTable to deal with the repetitive, tedious and technical details.
!requirements
The layout of the buffer table will be planned beforehand for each invocation, alongside with planning the individual invocation jobs for the scheduler. At that point, a generic JobTicket for the whole timeline segment is available, describing the necessary operations in an abstract way, as determined by the preceding planning phase. Jobs are prepared chunk wise, some time in advance (but not all jobs at once). Jobs will be executed concurrently. Thus, buffer tables need to be created repeatedly and placed into a memory block accessed and owned exclusively by the individual job.
* within the buffer table, we need an working area for the output handles, the input handles and the parameter descriptors
* within the buffer table, we need a working area for the output handles, the input handles and the parameter descriptors
* actually, these can be seen as pools holding handle objects which might even be re-used, especially for a chain of effects calculated in-place.
* each of these pools is characterised by a common //buffer type,// represented as buffer descriptor
* we need some way to integrate with the StateProxy, because some of the buffers need to be marked especially, e.g. as result
@ -3226,8 +3226,8 @@ While the general approach and reasoning remains valid, a lot of the details loo
In the most general case the render network may be just a DAG (not just a tree). Especially, multiple exit points may lead down to the same node, and following each of these possible paths the node may be at a different depth on each. This rules out a simple counter starting from the exit level, leaving us with the possibility of either employing a rather convoluted addressing scheme or using arbitrary ID numbers.{{red{...which is what we do for now}}}
</pre>
</div>
<div title="NodeOperationProtocol" modifier="Ichthyostega" modified="201109172254" created="200806010251" tags="Rendering operational" changecount="25">
<pre>The [[nodes|ProcNode]] are wired to form a &quot;Directed Acyclic Graph&quot;; each node knows its predecessor(s), but not its successor(s). The RenderProcess is organized according to the ''pull principle'', thus we find an operation {{{pull()}}} at the core of this process. Meaning that there isn't an central entity invoking nodes consecutively. Rather, the nodes themselves contain the detailed knowledg regarding prerequisites, so the calculation plan is worked out recursively. Yet there are some prerequisite resources to be made available for any calculation to happen. Thus the actual calculation is broken down into atomic chunks of work, resulting in a 2-phase invocation whenever &quot;pulling&quot; a node. For this to work, we need the nodes to adhere to a specific protocol:
<div title="NodeOperationProtocol" modifier="Ichthyostega" modified="201201192303" created="200806010251" tags="Rendering operational" changecount="29">
<pre>The [[nodes|ProcNode]] are wired to form a &quot;Directed Acyclic Graph&quot;; each node knows its predecessor(s), but not its successor(s). The RenderProcess is organized according to the ''pull principle'', thus we find an operation {{{pull()}}} at the core of this process. Meaning that there isn't a central entity to invoke nodes consecutively. Rather, the nodes themselves contain the detailed knowledge regarding prerequisites, so the calculation plan is worked out recursively. Yet still there are some prerequisite resources to be made available for any calculation to happen. So the actual calculation is broken down into atomic chunks of work, resulting in a 2-phase invocation whenever &quot;pulling&quot; a node. For this to work, we need the nodes to adhere to a specific protocol:
;planning phase
:when a node invocation is foreseeable to be required for getting a specific frame for a specific nominal and actual time, the engine has to find out the actual operations to happen
:# the planning is initiated by issuing an &quot;get me output&quot; request, finally resulting in a JobTicket
@ -3251,7 +3251,7 @@ some points to note:
* the WiringDescriptor is {{{const}}} and precalculated while building (remember another thread may call in parallel)
* when a node is &quot;inplace-capable&quot;, input and output buffer may actually point to the same location
* but there is no guarantee for this to happen, because the cache may be involved (and we can't overwrite the contents of a cache frame)
* generally, a node may have N inputs and M output frames, which are expected to be processed in a single call
* nodes in general may require N inputs and M output frames, which are expected to be processed in a single call
* some of the technical details of buffer management are encapsulated within the BufferTable of each invocation
&amp;rarr; the [[&quot;mechanics&quot; of the render process|RenderMechanics]]