diff --git a/src/lib/time.c b/src/lib/time.c index 5af40d640..17831a77a 100644 --- a/src/lib/time.c +++ b/src/lib/time.c @@ -23,6 +23,10 @@ #include "lib/time.h" #include "lib/tmpbuf.h" +#include <math.h> +#include <limits.h> + + /* GAVL_TIME_SCALE is the correct factor or dividend when using gavl_time_t for * units of whole seconds from gavl_time_t. Since we want to use milliseconds, * we need to multiply or divide by 1000 to get correct results. */ @@ -57,6 +61,46 @@ lumiera_tmpbuf_print_time (gavl_time_t time) return buffer; } + +static double +calculate_quantisation (gavl_time_t time, double grid, gavl_time_t origin) +{ + double val = time; + val -= origin; + val /= grid; + return floor (val); +} + +static double +clip_to_64bit (double val) +{ + if (val > LLONG_MAX) + val = LLONG_MAX; + else + if (val < LLONG_MIN) + val = LLONG_MIN; + + return val; +} + + +int64_t +lumiera_quantise_frames (gavl_time_t time, double grid, gavl_time_t origin) +{ + double gridNr = calculate_quantisation (time, grid, origin); + gridNr = clip_to_64bit (gridNr); + return (int64_t) gridNr; +} + +gavl_time_t +lumiera_quantise_time (gavl_time_t time, double grid, gavl_time_t origin) +{ + double count = calculate_quantisation (time, grid, origin); + double alignedTime = clip_to_64bit (count * grid); + return (gavl_time_t) alignedTime; +} + + gavl_time_t lumiera_build_time(long millis, uint secs, uint mins, uint hours) { diff --git a/src/lib/time.h b/src/lib/time.h index f812c1b9d..eabc4d5f8 100644 --- a/src/lib/time.h +++ b/src/lib/time.h @@ -34,6 +34,30 @@ char* lumiera_tmpbuf_print_time (gavl_time_t time); +/** + * Quantise the given time into a fixed grid, relative to the origin. + * The time grid used for quantisation is comprised of equally spaced intervals, + * rooted at the given origin. The interval starting with the origin is numbered + * as zero. Each interval includes its lower bound, but excludes its upper bound. 
+ * @param grid spacing of the grid intervals, measured in GAVL_TIME_SCALE + * @return number of the grid interval containing the given time. + * @warning the resulting value is limited such as to fit into a 64bit long + */ +int64_t +lumiera_quantise_frames (gavl_time_t time, double grid, gavl_time_t origin); + +/** + * Similar to #lumiera_quantise_frames, but returns a grid aligned \em time value + * @return time of start of the grid interval containing the given time, + * but measured relative to the origin + * @warning because the resulting value needs to be limited to fit into a 64bit long, + * the addressable time range can be considerably reduced. For example, if + * origin = LLONG_MIN, then all original time values above zero will be + * clipped, because the result, relative to origin, needs to be <= LLONG_MAX + */ +gavl_time_t +lumiera_quantise_time (gavl_time_t time, double grid, gavl_time_t origin); + /** * Builds a time value by summing up the given components. */ diff --git a/wiki/renderengine.html b/wiki/renderengine.html index 63faf288a..27ecbe2da 100644 --- a/wiki/renderengine.html +++ b/wiki/renderengine.html @@ -4121,11 +4121,11 @@ Besides, they provide an __inward interface__ for the [[ProcNode]]s, enabling th -
+
The Render Engine is the part of the application doing the actual video calculations. Utilising system level services and retrieving raw audio and video data through [[Lumiera's Backend|backend.html]], its operations are guided by the objects and parameters edited by the user in [[the session|Session]]. The middle layer of the Lumiera architecture, known as the Proc-Layer, spans the area between these two extremes, providing the (abstract) edit operations available to the user, the representation of [["editable things"|MObjects]] and the translation of those into structures and facilities allowing to [[drive the rendering|Rendering]].
 
 !About this wiki page
-|background-color:#E3F3F1;width:96ex;padding:2ex; This TiddlyWiki is the central location for design, planning and documentation of the Lumiera Proc-Layer. Some parts are used as //extended brain// &mdash; collecting ideas, considerations and conclusions &mdash; while other tiddlers contain the decisions and document the planned or implemented facilities. The intention is to move over the more mature parts into the emerging technical documentation section on the [[Lumiera website|http://www.lumiera.org]] eventually. <br/><br/>Besides cross-references, content is largely organised through [[Tags|TabTags]], most notably <br/><<tag overview>> &middot; <<tag def>> &middot; <<tag decision>> &middot; <<tag Concepts>> <br/> <<tag Model>> &middot; <<tag SessionLogic>> &middot; <<tag GuiIntegration>> &middot; <<tag Builder>> &middot; <<tag Rendering>> &middot; <<tag Player>> &middot; <<tag Rules>> |
+|background-color:#E3F3F1;width:96ex;padding:2ex; This TiddlyWiki is the central location for design, planning and documentation of the Lumiera Proc-Layer. Some parts are used as //extended brain// &mdash; collecting ideas, considerations and conclusions &mdash; while other tiddlers contain the decisions and document the planned or implemented facilities. The intention is to move over the more mature parts into the emerging technical documentation section on the [[Lumiera website|http://www.lumiera.org]] eventually. <br/><br/>Besides cross-references, content is largely organised through [[Tags|TabTags]], most notably <br/><<tag overview>> &middot; <<tag def>> &middot; <<tag decision>> &middot; <<tag Concepts>> <br/> <<tag Model>> &middot; <<tag SessionLogic>> &middot; <<tag GuiIntegration>> &middot; <<tag Builder>> &middot; <<tag Rendering>> &middot; <<tag Player>> &middot; <<tag Rules>> &middot; <<tag Types>> |
 
 !~Proc-Layer Summary
 When editing, the user operates several kinds of //things,// organized as [[assets|Asset]] in the AssetManager, like media, clips, effects, codecs, configuration templates. Within the context of the [[Project or Session|Session]], we can use these as &raquo;[[Media Objects|MObjects]]&laquo; &mdash; especially, we can [[place|Placement]] them in various kinds within the session and relative to one another.
@@ -4192,6 +4192,19 @@ Viewed as a micro program, the processing patterns are ''weak typed'' &mdash
 
a given Render Engine configuration is a list of Processors. Each Processor in turn contains a Graph of ProcNode.s to do the acutal data processing. In order to cary out any calculations, the Processor needs to be called with a StateProxy containing the state information for this RenderProcess
 
+
+
The Quantiser implementation works by determining the grid interval containing a given raw time.
+
+!frame quantisation convention
+Within Lumiera, there is a fixed convention for how these frame intervals are to be defined (&rArr; time handling RfC)
+[img[Lumiera's frame quantisation convention|draw/FramePositions1.png]]
+
+!range limitation problems
+Because times are represented as 64bit integers, the time points addressable within a given scale grid can be limited, compared with time points addressable through raw (internal) time values. As an extreme example, consider a time scale with origin at Time::MIN -- such a scale is unable to represent any value above zero, because the resulting coordinates would exceed the range of the 64bit integer.
+
+To avoid problems with larger intermediate values, the actual calculations are performed with doubles, which are clipped to the allowed value range prior to casting back to the integral data type. In all practically relevant cases, there is no danger of imprecision or rounding errors, because the quantisation includes a floor operation. (Hypothetically, an imprecision could arise through cancellation (loss of significance) when calculating the offset from the origin; but in practice this is irrelevant, assuming that the conversion from 64bit integer to double yields reproducible double values)
+
+
{{red{WIP as of 10/09}}}...//brainstorming about the first ideas towards a query subsystem//
 
@@ -4689,9 +4702,10 @@ Later on we expect a distinct __query subsystem__ to emerge, presumably embeddin
 
A facility allowing the Proc-Layer to work with abstracted [[media stream types|StreamType]], linking (abstract or opaque) [[type tags|StreamTypeDescriptor]] to an [[library|MediaImplLib]], which provides functionality for acutally dealing with data of this media stream type. Thus, the stream type manager is a kind of registry of all the external libraries which can be bridged and accessed by Lumiera (for working with media data, that is). The most basic set of libraries is instelled here automatically at application start, most notably the [[GAVL]] library for working with uncompressed video and audio data. //Later on, when plugins will introduce further external libraries, these need to be registered here too.//
-
+
A scale grid controls the way of measuring and aligning a quantity the application has to deal with. The most prominent example is the way to handle time in fixed atomic chunks (''frames'') addressed through a fixed format (''timecode''): while internally the application uses time values of sufficiently fine grained resolution, the actually visible timing coordinates of objects within the session are ''quantised'' to some predefined and fixed time grid.
-
+ +&rarr; QuantiserImpl
A link to relate a compound of [[nested placement scopes|PlacementScope]] to the //current// session and the //current//&nbsp; [[focus for querying|QueryFocus]] and exploring the structure. ScopeLocator is a singleton service, allowing to ''explore'' a [[Placement]] as a scope, i.e. discover any other placements within this scope, and allowing to locate the position of this scope by navigating up the ScopePath finally to reach the root scope of the HighLevelModel.
@@ -6561,7 +6575,7 @@ Thus no server and no network connection is needed. Simply open the file in your
  * see [[Homepage|http://tiddlywiki.com]], [[Wiki-Markup|http://tiddlywiki.org/wiki/TiddlyWiki_Markup]]
 
-
+
The term &raquo;Time&laquo; spans a variety of vastly different entities. Within a NLE we get to deal with various //flavours of time values.//
 ;continuous time
 :without any additional assumptions, ''points in time'' can be specified with arbitrary precision.
@@ -6619,6 +6633,8 @@ For Lumiera, the static typing approach is of limited value -- it excels when va
 
 At the level of individual timecode formats, we're lacking a common denominator; thus it is preferrable to work with different concrete timecode classes through //generic programming.// This way, each timecode format can expose operations specific only to the given format. Especially, different timecode formats expose different //component fields,// modelled by the generic ''Digxel'' concept. There is a common baseclass ~TCode though, which can be used for //type erasure.//
 &rarr; more on [[usage situations|TimeUsage]]
+&rarr; Quantiser [[implementation details|QuantiserImpl]]
+