DONE: time anchor and latency handling for job planning

This commit is contained in:
Fischlurch 2013-01-12 12:38:33 +01:00
parent e40f3fe97f
commit a4411d00b1
9 changed files with 144 additions and 47 deletions

View file

@ -29,6 +29,7 @@
using boost::rational;
using lib::time::FrameRate;
using lib::time::FSecs;
namespace backend{
@ -39,8 +40,8 @@ namespace engine {
const rational<uint> ONE_THIRD(1,3);
const rational<uint> EIGHTY_PERCENT(8,10);
const Duration DEFAULT_ENGINE_LATENCY = EIGHTY_PERCENT * Duration(1, FrameRate::PAL);
const Duration DEFAULT_ENGINE_LATENCY = EIGHTY_PERCENT * Duration(1, FrameRate::PAL);
const Duration DEFAULT_JOB_PLANNING_TURNOVER(FSecs(3,2));
}//(End)hard wired settings
@ -70,6 +71,11 @@ namespace engine {
}
/** Time span to cover with each chunk of job planning.
 *  @return currently a hard wired default of 3/2 seconds
 *          (see DEFAULT_JOB_PLANNING_TURNOVER above)
 *  @todo presumably this will eventually be tuned dynamically — confirm */
Duration
EngineConfig::currentJobPlanningRhythm() const
{
return DEFAULT_JOB_PLANNING_TURNOVER;
}
}} // namespace backend::engine

View file

@ -85,6 +85,14 @@ namespace engine {
*/
Duration currentEngineLatency() const;
/** Time interval for ahead planning of render jobs.
* Frame calculation is broken down into individual jobs, and these
* jobs are prepared and scheduled chunk wise, while they are invoked
* as late as possible. This setting defines the time span to prepare
* and cover in a single planning chunk.
*/
Duration currentJobPlanningRhythm() const;
};
}} // namespace backend::engine

View file

@ -290,7 +290,7 @@ namespace lib {
IterStateWrapper (ST const& initialState)
: core_(initialState)
{
checkPoint (core_);
checkPoint (core_); // extension point: checkPoint
}
IterStateWrapper ()

View file

@ -712,8 +712,20 @@ namespace lib {
/**
* IterExplorer "state core" for progressively expanding
* an initial result set. This is a partially reduced and hard-coded
* variation on the #RecursiveExhaustingEvaluation in depth-first configuration.
* an initial result set. This initial set can be conceived to hold the seed
* or starting points of evaluation. Elements are consumed by an iterator, at
* the front. Each element is fed to the "explorer function". This exploration
* returns an expanded result sequence, which is immediately integrated into the
* overall result sequence, followed by further exploration of the then-to-be first
* element of the result sequence. All this exploration is driven on-demand, by
* consuming the result sequence. Exploration will proceed until exhaustion,
* in which case the exploration function will yield an empty result set.
*
* This strategy is intended for use with the IterExplorer -- most prominently
* in use for discovering render prerequisites and creating new render jobs for
* the engine. The RecursiveSelfIntegration strategy is a partially reduced
* and hard-coded variation on the #RecursiveExhaustingEvaluation in depth-first
* configuration.
* This setup works in conjunction with a <i>special result sequence</i> type,
* with the ability to re-integrate results yielded by partial evaluation.
* But the working pattern is more similar to the #CombinedIteratorEvaluation,
@ -728,7 +740,7 @@ namespace lib {
* hold the result set(s). This custom type together with the Explorer function
* are performing the actual expansion and re-integration steps. The latter is
* accessed through the free function \c build(sequence) -- which is expected
* to yield a "builder trait" for manipulating the element set yielded by
* to return a "builder trait" for manipulating the element set yielded by
* the custom iterator type returned by the Explorer function.
*
* @param SRC the initial result set sequence; this iterator needs to yield

View file

@ -25,7 +25,6 @@
#define PROC_ENGINE_DISPATCHER_H
#include "proc/common.hpp"
//#include "proc/state.hpp"
#include "proc/mobject/model-port.hpp"
#include "proc/engine/time-anchor.hpp"
#include "proc/engine/frame-coord.hpp"
@ -45,8 +44,7 @@ namespace engine {
using lib::time::TimeSpan;
using lib::time::FSecs;
using lib::time::Time;
//
// class ExitNode;
/**
* Internal abstraction: a service within the engine

View file

@ -30,8 +30,6 @@
#include "proc/play/timings.hpp"
#include "proc/engine/frame-coord.hpp"
#include <boost/rational.hpp>
namespace proc {
@ -39,6 +37,7 @@ namespace engine {
using backend::RealClock;
using lib::time::Offset;
using lib::time::Duration;
using lib::time::TimeVar;
using lib::time::Time;
@ -49,9 +48,13 @@ namespace engine {
* chunks of evaluation. Each of these continued partial evaluations establishes a distinct
* anchor or breaking point in time: everything before this point can be considered settled
and planned thus far. Effectively, this time point acts as an <i>evaluation closure</i>,
* to be picked up on the next partial evaluation. More specifically, the TimeAnchor closure
* is the definitive binding between the abstract logical time of the session timeline, and
* the real wall-clock time forming the deadline for rendering.
* to be picked up for the next partial evaluation. Each time anchor defines a span of the
* timeline, which will be covered with the next round of job planning; the successive next
* TimeAnchor will be located at the first frame \em after this time span, resulting in
* seamless coverage of the whole timeline. Whenever a TimeAnchor is created, a relation
between nominal time, current engine latency and wall clock time is established. This way,
* the TimeAnchor closure is the definitive binding between the abstract logical time of the
* session timeline, and the real wall-clock time forming the deadline for rendering.
*
* \par internals
* The time anchor associates a nominal time, defined on the implicit time grid
@ -63,10 +66,20 @@ namespace engine {
* does refer to a grid defined somewhere within the session)
* - the actual #anchorPoint_ is defined as frame number relative to this grid
* - this anchor point is scheduled to happen at a #relatedRealTime_, based on
* system's real time clock scale (typically milliseconds since 1970)
* system's real time clock scale (typically milliseconds since 1970).
* This schedule contains a compensation for engine and output latency.
*
* @todo 1/12 WIP-WIP-WIP just emerging as a concept
* /////////////////////////////////////////////////////////////////TODO: WIP needs to act as proxy for the grid, using the Timings
* @remarks please note that time anchors are set per CalcStream.
* Since different streams might use different frame grids, the rhythm
* of these planning operations is likely to be specific for a given stream.
* The relation to real time is established anew at each time anchor, so any
* adjustments to the engine latency will be reflected in the planned job's
* deadlines. Actually, the embedded Timings record is responsible for these
timing calculations and for fetching the current EngineConfig.
*
* @see Dispatcher
* @see DispatcherInterface_test
* @see Timings
*/
class TimeAnchor
{
@ -75,55 +88,65 @@ namespace engine {
Time relatedRealTime_;
Time
expectedTimeofArival (play::Timings const& timings, int64_t startFrame, Offset engineLatency)
expectedTimeofArival (play::Timings const& timings, int64_t startFrame, Offset startDelay)
{
Duration totalLatency = startDelay
+ timings.currentEngineLatency()
+ timings.outputLatency;
TimeVar deadline;
switch (timings.playbackUrgency)
{
case play::ASAP:
case play::NICE:
deadline = RealClock::now() + engineLatency;
deadline = RealClock::now() + totalLatency;
break;
case play::TIMEBOUND:
deadline = timings.getTimeDue(startFrame) - engineLatency;
deadline = timings.getTimeDue(startFrame) - totalLatency;
break;
}
return deadline - timings.outputLatency;
return deadline;
}
TimeAnchor (play::Timings timings, Offset engineLatency, int64_t startFrame)
TimeAnchor (play::Timings timings, int64_t startFrame, Offset startDelay =Offset::ZERO)
: timings_(timings)
, anchorPoint_(startFrame)
, relatedRealTime_(expectedTimeofArival(timings,startFrame,engineLatency))
, relatedRealTime_(expectedTimeofArival(timings,startFrame,startDelay))
{ }
public:
// using default copy operations
/** create a TimeAnchor for playback/rendering start
* at the given startFrame. Since no information is given
/** create a TimeAnchor for playback/rendering start at the given startFrame.
* For latency calculations, the EngineConfig will be queried behind the scenes.
* regarding the reaction latency required to get the engine
* to deliver at a given time, this "engine latency" is guessed
* to be 1/3 of the frame duration.
* @note using this function in case of "background" rendering
* doesn't make much sense; you should indeed retrieve the
* start delay from internals of the engine in this case.
* @note this builder function adds an additional, hard wired start margin
* of one frame duration, to compensate for first time effects.
*/
static TimeAnchor
build (play::Timings timings, int64_t startFrame)
{
const boost::rational<uint> DEFAULT_LATENCY_FACTOR (1,3);
Offset startDelay(timings.outputLatency
+ timings.getFrameDurationAt(startFrame) * DEFAULT_LATENCY_FACTOR
);
return TimeAnchor (timings,startDelay,startFrame);
Offset startDelay(timings.getFrameDurationAt(startFrame));
return TimeAnchor (timings,startFrame,startDelay);
}
//////////////////////////////////////////////////////////////////////////////////////////TODO: second builder function, relying on Engine timings
/** create a follow-up TimeAnchor.
* After planning a chunk of jobs, the dispatcher uses
* this function to set up a new breaking point (TimeAnchor)
* and places a continuation job to resume the planning activity.
@return new TimeAnchor which precisely satisfies the <i>planning
* chunk duration</i>: it will be anchored at the following
* grid point, resulting in seamless coverage of the timeline
*/
/** continue the planning rhythm: produce a follow-up TimeAnchor,
 *  located at the first frame after the current planning chunk,
 *  thereby covering the timeline seamlessly. */
TimeAnchor
buildNextAnchor() const
{
int64_t continuationFrame = timings_.establishNextPlanningChunkStart (anchorPoint_);
return TimeAnchor (timings_, continuationFrame);
}
/** @internal for debugging and diagnostics:
@ -132,7 +155,7 @@ namespace engine {
* playback or render process). */
operator lib::time::TimeValue() const
{
UNIMPLEMENTED ("representation of the Time Anchor closure");
return timings_.getFrameStartAt (anchorPoint_);
}

View file

@ -22,6 +22,7 @@
#include "proc/play/timings.hpp"
#include "backend/engine/engine-config.h"
#include "lib/time/formats.hpp"
#include "lib/time/timequant.hpp"
@ -30,9 +31,11 @@
namespace proc {
namespace play {
using backend::engine::EngineConfig;
using lib::time::PQuant;
using lib::time::Time;
using lib::time::TimeVar;
namespace { // hidden local details of the service implementation....
@ -42,10 +45,10 @@ namespace play {
return PQuant (new lib::time::FixedFrameQuantiser (fps));
} //////TODO maybe caching these quantisers? they are immutable and threadsafe
} // (End) hidden service impl details
/** Create a default initialised Timing constraint record.
* Using the standard optimistic settings for most values,
* no latency, no special requirements. The frame grid is
@ -66,7 +69,7 @@ namespace play {
ENSURE (grid_);
}
//////////////////////////////////////////////////////////////////TODO ctors for use in the real player/engine
//////////////////////////////////////////////////////////////////TODO ctors for use in the real player/engine?
@ -77,6 +80,13 @@ namespace play {
}
/** Nominal start time of the given frame.
 *  @param frameNr frame number, counted relative to the implicit time grid
 *  @return time point on that grid where this frame begins */
TimeValue
Timings::getFrameStartAt (int64_t frameNr) const
{
return grid_->timeOf(frameNr);
}
Offset
Timings::getFrameOffsetAt (TimeValue refPoint) const
{
@ -107,7 +117,7 @@ namespace play {
* @todo implement real support for variable frame rates
*/ ////////////////////////////////////////////////////////TICKET #236
Duration
Timings::constantFrameTimingsInterval (TimeValue startPoint) const
Timings::constantFrameTimingsInterval (TimeValue) const
{
return Duration (Time::ANYTIME);
}
@ -140,10 +150,35 @@ namespace play {
Duration
Timings::getPlanningChunkDuration() const
{
UNIMPLEMENTED ("how to control the engine evaluation chunk size");
return EngineConfig::get().currentJobPlanningRhythm();
}
/** determine where the next planning chunk has to start:
 *  advance by the planning chunk duration, then pick the first frame
 *  at or after that point — rounding up unless the chunk boundary
 *  coincides exactly with a grid point. */
int64_t
Timings::establishNextPlanningChunkStart(int64_t currentAnchorFrame) const
{
TimeVar chunkBoundary = grid_->timeOf(currentAnchorFrame);
chunkBoundary += getPlanningChunkDuration();
int64_t nextFrame = grid_->gridPoint (chunkBoundary);
ASSERT (chunkBoundary <= grid_->timeOf(nextFrame));
ASSERT (chunkBoundary > grid_->timeOf(nextFrame-1));
bool coincidesWithGridPoint = (grid_->timeOf(nextFrame) == chunkBoundary);
return coincidesWithGridPoint? nextFrame : nextFrame+1;
}
/** Guess of the render engine's current working delay,
 *  delegated to the (singleton) EngineConfig.
 *  Frame deadlines are adjusted by this value. */
Duration
Timings::currentEngineLatency() const
{
return EngineConfig::get().currentEngineLatency();
}
Timings
Timings::constrainedBy (Timings additionalConditions)
{

View file

@ -110,9 +110,10 @@ namespace play {
TimeValue getOrigin() const;
Offset getFrameOffsetAt (TimeValue refPoint) const;
Duration getFrameDurationAt (TimeValue refPoint) const;
Duration getFrameDurationAt (int64_t refFrameNr) const;
TimeValue getFrameStartAt (int64_t frameNr) const;
Offset getFrameOffsetAt (TimeValue refPoint) const;
Duration getFrameDurationAt (TimeValue refPoint) const;
Duration getFrameDurationAt (int64_t refFrameNr) const;
/** the frame spacing and duration remains constant for some time...
* @param startPoint looking from that time point into future
@ -152,6 +153,20 @@ namespace play {
*/
Duration getPlanningChunkDuration() const;
/** establish the time point to anchor the next planning chunk,
* in accordance with #getPlanningChunkDuration
* @param currentAnchorFrame frame number where the current planning started
* @return number of the first frame, which is located strictly more than
* the planning chunk duration into the future
* @remarks this value is used by the frame dispatcher to create a
* follow-up planning job */
int64_t establishNextPlanningChunkStart(int64_t currentAnchorFrame) const;
/** reasonable guess of the current engine working delay.
* Frame calculation deadlines will be readjusted by that value,
* to be able to deliver in time with sufficient probability. */
Duration currentEngineLatency() const;
bool isOriginalSpeed() const;

View file

@ -276,7 +276,7 @@ namespace test {
{
Timings timings (FrameRate::PAL);
uint startFrame(10);
uint nrJobs = timings.getPlanningChunkSize();
uint nrJobs = 0; /////////////TODO timings.getPlanningChunkSize();
Duration frameDuration (1, FrameRate::PAL);
CHECK (Time(nextRefPoint) == Time::ZERO + (startFrame + nrJobs) * frameDuration);