WIP pick up on the design work regarding Engine, OutputSlot and Player

This commit is contained in:
Fischlurch 2011-08-14 03:09:05 +02:00
parent f8842c75ed
commit 4a62444ad4
5 changed files with 124 additions and 14 deletions

View file

@ -0,0 +1,91 @@
/*
CALC-STREAM.hpp - abstraction representing a series of scheduled calculations
Copyright (C) Lumiera.org
2011, Hermann Vosseler <Ichthyostega@web.de>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef PROC_ENGINE_CALC_STREAM_H
#define PROC_ENGINE_CALC_STREAM_H
//#include "include/dummy-player-facade.h"
//#include "include/display-facade.h"
//#include "common/instancehandle.hpp"
//#include "lib/singleton-ref.hpp"
//
//#include <boost/noncopyable.hpp>
//#include <boost/scoped_ptr.hpp>
//#include <string>
namespace proc {
namespace play {
// using std::string;
// using lumiera::Subsys;
// using lumiera::Display;
// using lumiera::DummyPlayer;
/*********************************************************
* A calculation stream groups and abstracts a series of
* calculation jobs, delivering frames into the configured
* OutputSlot in a timely fashion. Behind the scenes, this
* gets translated into several jobs enqueued with the
* scheduler in the backend layer. The calculation stream
* implementation cares to create and configure these
* jobs and to manage the necessary dependencies and
* callbacks.
*
* Regarding the implementation, a CalcStream is a const
* value object holding the metadata necessary to manage
* the underlying jobs. The only way to create a CalcStream
* properly is to retrieve it from the factory functions
* of the EngineService. At that point, the corresponding
* jobs will be configured and enqueued.
*/
class CalcStream
  {
    /** Only the EngineService is allowed to create a properly
     *  configured CalcStream; at that point the corresponding
     *  render jobs get configured and enqueued. */
    friend class EngineService;

    CalcStream()
      { }

  public:
    /** CalcStream is a lightweight value object; copying just
     *  duplicates the metadata handle onto the underlying jobs.
     *  The parameter is deliberately left unnamed, since nothing
     *  is copied yet (no members) — this avoids an
     *  unused-parameter warning. */
    CalcStream (CalcStream const&)
      { }

    ~CalcStream() { }
  };
} // namespace play
} // namespace proc
#endif

View file

@ -21,10 +21,14 @@
*/
/** @file engine-service.hpp
** A public service provided by
**
** @see lumiera::DummyPlayer
** @see gui::PlaybackController usage example
** Access point for the (core) calculation service of the render engine.
** This public service is provided by the Proc-Layer, but actually implemented
** using backend services (especially the scheduler). The central concept provided
** through this facade interface is that of a <i>calculation stream</i>. On the
** implementation side, these get translated into a series of jobs invoking
** render nodes, to be invoked through the scheduler in the backend layer.
**
** @see proc::play::PlayerService
*/
@ -34,6 +38,7 @@
//#include "include/dummy-player-facade.h"
//#include "include/display-facade.h"
#include "proc/engine/calc-stream.hpp"
//#include "common/instancehandle.hpp"
//#include "lib/singleton-ref.hpp"
//
@ -55,12 +60,14 @@ namespace play {
/******************************************************
* Actual implementation of the DummyPlayer service.
* Creating an instance of this class automatically
* registers the interface lumieraorg_DummyPlayer with
* the Lumiera Interface/Plugin system and creates
* a forwarding proxy within the application core to
* route calls through this interface.
* A service to schedule a series of calculations,
* delivering the rendered data into an external output
* sink in a timely fashion. Actually the CalculationStream
* instances provided through this (facade) interface are
* backed by jobs executed through the scheduler in the
* backend layer. The implementation of this service
* cares to create the right job entries in the correct
* order and to enqueue these into the scheduler.
*/
class EngineService
: boost::noncopyable
@ -84,6 +91,9 @@ namespace play {
~EngineService() { } /////TODO notifyTermination_(&error_); }
CalcStream
};

View file

@ -31,6 +31,11 @@
using std::list;
/////////////////////////////TODO 7/11 this is a piece of debris, left over from the first attempt to complete the render nodes network.
/////////////////////////////TODO Meanwhile the intention is to treat the render nodes network more like a data structure,
/////////////////////////////TODO consequently this will become some kind of root or anchor point for this network
//////////TODO for the "real" engine API: look at engine-service.hpp
namespace engine {

View file

@ -29,6 +29,10 @@
#include "lib/time/timevalue.hpp"
/////////////////////////////TODO 7/11 this is a piece of debris, left over from the first attempt to complete the render nodes network.
/////////////////////////////TODO Meanwhile the intention is to treat the render nodes network more like a data structure,
/////////////////////////////TODO consequently this will become some kind of root or anchor point for this network
namespace engine {

View file

@ -1785,10 +1785,10 @@ To make the intended use of the classes more clear, consider the following two e
* a video clip placed relatively, with an attached HUE effect &amp;rarr;[[Example2]]
</pre>
</div>
<div title="ExitNode" modifier="Ichthyostega" modified="201106210120" created="200706220322" tags="def" changecount="3">
<div title="ExitNode" modifier="Ichthyostega" modified="201108140105" created="200706220322" tags="def" changecount="4">
<pre>a special ProcNode which is used to pull the finished output of one Render Pipeline (Tree or Graph). This term is already used in the Cinelerra2 codebase. I am unsure at the moment if it is a distinct subclass or rather a specially configured ProcNode (a general design rule tells us to err in favour of the latter if in doubt).
The render nodes network is always buils separate for each [[timeline segment|Segmentation]], which is //constant in wiring configuration.// Thus, while exit node(s) are per segment, the corresponding exit nodes of consecutive segments together belong to a ModelPort, which in turn corresponds to a global pipe (master bus not connected any further). These relations guide the possible configuration for an exit node: It may still provide multiple channels -- but all those channels are bound to belong to a single logical stream -- same StreamPrototype, always handled as bundle, connected and routed in one step. For example, when there is an 5.1 Audio master bus with a single fader, then &quot;5.1 Audio&quot; would be a prototype and these 6 channels will always be handled together; in such a case it makes perfectly sense to access these 6 audio channels through a single exit node, which is keyed (identified) by the same [[Pipe]]-ID as the corresponding ModelPort and the corresponding global pipe (&quot;5.1 Audio master bus&quot;)
The render nodes network is always built separately for each [[timeline segment|Segmentation]], which is //constant in wiring configuration.// Thus, while exit node(s) are per segment, the corresponding exit nodes of consecutive segments together belong to a ModelPort, which in turn corresponds to a global pipe (master bus not connected any further). These relations guide the possible configuration for an exit node: It may still provide multiple channels -- but all those channels are bound to belong to a single logical stream -- same StreamPrototype, always handled as bundle, connected and routed in one step. For example, when there is an 5.1 Audio master bus with a single fader, then &quot;5.1 Audio&quot; would be a prototype and these 6 channels will always be handled together; in such a case it makes perfect sense to access these 6 audio channels through a single exit node, which is keyed (identified) by the same [[Pipe]]-ID as the corresponding ModelPort and the corresponding global pipe (&quot;5.1 Audio master bus&quot;)
</pre>
</div>
<div title="ExplicitPlacement" modifier="Ichthyostega" modified="201012122052" created="200706220304" tags="def" changecount="6">
@ -3397,7 +3397,7 @@ Thus there are two serious problem situations
&amp;rarr; SchedulerRequirements
&amp;rarr; OutputSlotImpl</pre>
</div>
<div title="OutputSlotImpl" modifier="Ichthyostega" modified="201107110047" created="201107102343" tags="spec operational" changecount="25">
<div title="OutputSlotImpl" modifier="Ichthyostega" modified="201108112339" created="201107102343" tags="spec operational" changecount="26">
<pre>OutputSlot is an abstraction, allowing unified treatment of various physical output connections from within the render jobs. The actual output slot is a subclass object, created and managed from the &quot;driver code&quot; for a specific output connection. Moreover, each output slot is outfitted with a concrete BufferProvider to reflect the actual buffer handling policy applicable for this specific output connection. Some output connections might e.g. require delivery of the media data into a buffer residing on external hardware, while others work just fine when pointed to some arbitrary memory block holding generated data.
!operation steps
@ -3414,7 +3414,7 @@ Thus there are two serious problem situations
!buffer states
While the BufferProvider abstracts away the actual access to the output buffers and just hands out a ''buffer handle'', the client (here the concrete output slot) is allowed to associate and maintain a ''state flag'' with each buffer. The general assumption is that writing this state flag is atomic, and that other facilities will care for the necessary memory barriers (that is: the output slot and the buffer provider will just access this state flag without much ado). The generic part of the output slot implementation utilises this buffer state flag to implement a state machine, which -- together with the timing constraints established with the [[help of the scheduler|SchedulerRequirements]] -- ensures sane access to the buffer without collisions.
While the BufferProvider abstracts away the actual access to the output buffers and just hands out a ''buffer handle'', the server side (here the concrete output slot) is allowed to associate and maintain a ''state flag'' with each buffer. The general assumption is that writing this state flag is atomic, and that other facilities will care for the necessary memory barriers (that is: the output slot and the buffer provider will just access this state flag without much ado). The generic part of the output slot implementation utilises this buffer state flag to implement a state machine, which -- together with the timing constraints established with the [[help of the scheduler|SchedulerRequirements]] -- ensures sane access to the buffer without collisions.
| !state||&gt;| !lock() |&gt;| !transfer() |&gt;| !pushout() |
| {{{NIL}}}||↯| · |↯| | ↯ | |
| {{{FREE}}}||✔|↷ locked |✔|· (ignore) | · | · |