diff --git a/README b/README index 95f86c7c3..7f7519ea9 100644 --- a/README +++ b/README @@ -42,7 +42,7 @@ The Application can be installed and started to bring up an GTK GUI outline, but the GUI is very preliminary and not connected to core functionality. The video processing pipeline is still not complete. -See the http://issues.lumiera.org/roadmap[Project roadmap] +See http://issues.lumiera.org/roadmap[Project roadmap] Build Requirements diff --git a/admin/rfc.sh b/admin/rfc.sh index 43fc36132..bbec05e0c 100755 --- a/admin/rfc.sh +++ b/admin/rfc.sh @@ -2,6 +2,14 @@ shopt -s extglob +# +# RFC's are created from ./doc/devel/template/new_rfc.sh and stored in +# ./doc/devel/rfc/ . There are directories for the various states +# ./doc/devel/rfc_final, ./doc/devel/rfc_pending, ./doc/devel/rfc_parked, +# ./doc/devel/rfc_dropped . Which contain symlinks back to ../rfc/ +# + + function usage() { grep -v '^// ' <<"EOF" | less -F @@ -199,7 +207,7 @@ function find_rfc() local globstate=$(shopt -p nocasematch) shopt -s nocasematch - for file in $(find ./doc/devel/rfc* -name '*.txt'); + for file in $(find ./doc/devel/rfc/ -name '*.txt'); do local name="/${file##*/}" if [[ "$name" =~ $match ]]; then @@ -229,28 +237,38 @@ function process_file() local file="$1" local path="${1%/*}" local basename="${1##*/}" - local destpath="$path" + local linkdest="$path" local state=$(grep '^\*State\* *' "$file") case "$state" in *Final*) - destpath="./doc/devel/rfc" + linkdest="./doc/devel/rfc_final" ;; *Idea*|*Draft*) - destpath="./doc/devel/rfc_pending" + linkdest="./doc/devel/rfc_pending" ;; *Parked*) - destpath="./doc/devel/rfc_parked" + linkdest="./doc/devel/rfc_parked" ;; *Dropped*) - destpath="./doc/devel/rfc_dropped" + linkdest="./doc/devel/rfc_dropped" ;; + *) + echo "Unknown State: '$state'" >&2 + exit 1 esac - if [[ "$path" != "$destpath" ]]; then - git mv "$file" "$destpath" + local oldpath + for oldpath in ./doc/devel/rfc_*/$basename; do :; done + + if [[ -h "$oldpath" ]]; then 
+ if [[ "$oldpath" != "$linkdest/$basename" ]]; then + git mv "$oldpath" "$linkdest/$basename" + fi + elif [[ ! -s "$linkdest/$basename" ]]; then + ln -s "../rfc/$basename" "$linkdest/" + git add "$linkdest/$basename" fi - git add "$destpath/$basename" } @@ -336,7 +354,7 @@ function change_state() local state="$2" local nl=$'\n' - local comment=".State -> $state$nl//add reason$nl $(date +%c) $(git config --get user.name) <$(git config --get user.email)>$nl" + local comment=".State -> $state$nl//add reason$nl$nl$(git config --get user.name):: '$(date +%c)' ~<$(git config --get user.email)>~$nl" edit_state "$name" "$state" "$comment" edit "$name" -4 "endof_comments" process_file "$name" @@ -349,13 +367,16 @@ shift case "$command" in process) # for all rfc's - for file in $(find ./doc/devel/rfc* -name '*.txt'); + for file in $(find ./doc/devel/rfc -name '*.txt'); do echo "process $file" process_file "$file" done : ;; +search) + grep -r -C3 -n "$1" ./doc/devel/rfc | less -F + ;; find|list|ls) if [[ "$2" ]]; then find_rfc "$1" | xargs grep -i -C3 -n "$2" @@ -373,14 +394,13 @@ show|less|more) create) TITLE="$@" name=$(camel_case "$TITLE") - if [[ -f "./doc/devel/rfc/${name}.txt" || - -f "./doc/devel/rfc_pending/${name}.txt" || - -f "./doc/devel/rfc_dropped/${name}.txt" ]]; then + if [[ -f "./doc/devel/rfc/${name}.txt" ]]; then echo "$name.txt exists already" else - source ./doc/devel/template/new_rfc.sh >"./doc/devel/rfc_pending/${name}.txt" - edit "./doc/devel/rfc_pending/${name}.txt" 2 abstract - process_file "./doc/devel/rfc_pending/${name}.txt" + source ./doc/devel/template/new_rfc.sh >"./doc/devel/rfc/${name}.txt" + edit "./doc/devel/rfc/${name}.txt" 2 abstract + git add "./doc/devel/rfc/${name}.txt" + process_file "./doc/devel/rfc/${name}.txt" fi ;; edit) @@ -438,8 +458,15 @@ comment) ;; discard) name=$(unique_name "$1") + if [[ "$name" ]]; then - git rm "${name}" + for link in ./doc/devel/rfc_*/${name##*/}; do :; done + + if [[ -h "$link" ]]; then + git rm -f 
"${link}" || rm "${link}" + fi + + git rm -f "${name}" || rm "${name}" fi ;; wrap) diff --git a/doc/devel/rfc.txt b/doc/devel/design_process.txt similarity index 90% rename from doc/devel/rfc.txt rename to doc/devel/design_process.txt index 9879727e2..df982d575 100644 --- a/doc/devel/rfc.txt +++ b/doc/devel/design_process.txt @@ -2,17 +2,20 @@ Design Process ============== //Menu: include rfc -//Menu: include rfc_parked +//Menu: include rfc_final //Menu: include rfc_pending +//Menu: include rfc_parked //Menu: include rfc_dropped -//Menu: put child 'rfc_dropped' after 'rfc' +//Menu: put child 'rfc_dropped' after 'rfc_final' +//Menu: attach child 'rfc' + How it Works ------------ Design Process entries (Request for Comment) can be created by anyone. Each entry goes through several stages until it's accepted (or rejected). -All our RfC entries are filed here, either in the link:rfc/index.html[RfC accepted] section, +All our RfC entries are filed here, either in the link:rfc_final/index.html[RfC accepted] section, or as link:rfc_pending/index.html[pending RfC] or as link:rfc_dropped/index.html[RfC dropped]. * Every proposal starts out as _*Idea*_, allowing other people to review and comment on it, while still working out details. diff --git a/doc/devel/meeting_summary/2012-01-11.txt b/doc/devel/meeting_summary/2012-01-11.txt index cce9fbd6a..e128bbc0d 100644 --- a/doc/devel/meeting_summary/2012-01-11.txt +++ b/doc/devel/meeting_summary/2012-01-11.txt @@ -3,7 +3,7 @@ :Author: cehteh :Date: 2012-01-11 -Jan 11, 2011 on #lumiera 20:00 +Jan 11, 2011 on #lumiera 20:00 - 22:30 __Participants__ @@ -15,21 +15,23 @@ __Participants__ Conclusions ----------- -. ichthyo removed most of the tiddly wikis, and worked the content into the website -. cehteh reports that Lumiera got another donation (75Eur), arrangements with - the ffis to get access (view) about the donations account are under way. We'll - ask donors then if they want to be published or stay anonym and will set up +. 
_ichthyo_ removed most of the TiddlyWikis, and worked the content into the website +. _cehteh_ reports that Lumiera got another donation (75 €), arrangements with + the FFIS to get access (view) about the donations account are under way. We'll + ask donors then if they want to be published or stay anonymous and will set up a wiki page listing donations and expenses. -. ichthy rewrote the SCons build, as discussed last time -. cehteh writes a very short RfC, to document that we're using SCons for now. +. _ichthyo_ reworked the SCons build, as discussed last time +. _cehteh_ writes a very short RfC, to document that we're using SCons for now. . possibly no one going to LAC, too far away . we discussed a link checker / link resolver for the website. The idea is to have a semi automatic tool, which is used locally when authoring website content to find cross references. -. benn and ichthyo follow up on the libregraphics magazine and try to get into - discussion with them and see what can be done within our limited time. - ichthyo respond to the mail, and put you (benn and ct) on CC. +. _benny_ and _ichthyo_ follow up on the + link:http://libregraphicsmag.com/[Libre Graphics Magazine]'s offer to promote + graphics and media projects and try to get into discussion with them and see + what can be done within our limited time. . when it comes to have a working example for media file output, we stick to the - mainstream solutions ffmpeg and or gstreamer, but care not to lock ourselves - into a single solution. Concluded that we do this over plugin interfaces and - it mostly boils down to support ffmped .. and investigate something simpler too. + mainstream solutions *ffmpeg* and or *gstreamer*, but care not to lock ourselves + into a single solution. Concluded that we do this over plugin interfaces -- + it mostly boils down to support ffmpeg -- besides we'll investigate simple + alternatives for basic output. 
diff --git a/doc/devel/meeting_summary/index.txt b/doc/devel/meeting_summary/index.txt index 6a1d1cb06..06b15df36 100644 --- a/doc/devel/meeting_summary/index.txt +++ b/doc/devel/meeting_summary/index.txt @@ -12,6 +12,47 @@ Anyone interested in Lumiera development is also encouraged to read mailing list archives and other documentation. +************************ +During Summer 2011 again the regular monthly meethings +were mostly informal detail discussions plus some +organisational planning. Not much to account for. +************************ + + +11 Jan 2012 +----------- + +Topics +~~~~~~ + * Website status. Clean-up of TiddlyWikis + * cross-links and link checking for the website + * promotion: Libre Graphics Magazine + * rendering to file: library solutions + +Summary +^^^^^^^ + * link:2012-01-11.html[Summary (by cehteh)] + + + + +14 Dec 2011 +----------- + +Topics +~~~~~~ + * discussion about build systems. + * review of the SCons build scripts + * some details about the scheduler and invoking render jobs + +//// +Summary +^^^^^^^ + * link:2011-12-14.html[IRC Transcript (prepared by XXXX)] +//// + + +************************ During Summer 2011 we kept up our monthly IRC meetings -- rather casual exchange for the time being. @@ -21,7 +62,7 @@ for the time being. 
- 10 Aug 2011 - 13 Jul 2011 - 8 Jun 2011 - +************************ 11 May 2011 ----------- diff --git a/doc/devel/rfc/ApplicationInstall.txt b/doc/devel/rfc/ApplicationInstall.txt new file mode 100644 index 000000000..44ed4cdea --- /dev/null +++ b/doc/devel/rfc/ApplicationInstall.txt @@ -0,0 +1,225 @@ +ApplicationInstall +================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Draft_ +*Date* _Di 11 Jan 2011 17:00:55 CET_ +*Proposed by* Ichthyostega +------------------------------------- + +[abstract] +********************************************************************************* +Lumiera should be a _freely relocatable_ application bundle. +Relying only on the relative folder structure within this bundle, the application +will be fully functional at any location, provided that the external library +dependencies are resolvable using the standard mechanisms of the platform. +The setup mechanism must be obvious, self-explanatory and must not rely +on compiled in magic or buildsystem trickery. Yet packaging into a FSH conforming +installation location should be supported by the same mechanisms. +********************************************************************************* + +Description +----------- +//description: add a detailed description: +Lumiera is planned to become a large professional application bundle, relying +on several external resources for proper operation. An installed Lumiera +application will be more like Gimp, Blender, OpenOffice or Eclipse, +not like bash, autotools or emcas. + +Besides that, it can be expected that Lumiera frequently will be used in a +project or studio like setup, where the application isn't installed, but just +unZIPped / unTARed and used as-is. Thus, it should be sufficient to unpack +the application bundle and point it to the session file and maybe the +media storage. 
+ +The Eclipse platform can serve as a model for the setup of an modern +application of that style: It can be just unpacked, and when looking +into the folder structure, the meaning of the parts is obvious, and the +basic bootstrap is controlled by two short text based INI files. +While Lumiera presumably won't get _that_ heavyweight and is clearly +not intended to become a general business application platform like OSGi -- +the underlying principles can serve as a point of reference for modern +development standards. + +This leads to the following conclusions: + +- we need to define a standard folder layout for the bundle +- the application must not rely on any compiled-in absolute paths +- the executable should fetch the directly accompanying shared modules +- all other lib dependencies should be handled by the system mechanisms +- the bootstrap configuration likewise must not be compiled-in +- this configuration must be human readable and clearly exhibit its purpose +- the same system must be able to adapt to a FSH conforming installation layout + +Judging from our current planning and the existing codebase, Lumiera +is on a good way in that direction, yet some cleanup needs to be done, +especially removing convenience shortcuts from the early days of development +and catching up with the repair of some traits of sloppyness here and there. + +Library resolution +~~~~~~~~~~~~~~~~~~ +In former days, it was common habit to compile-in a hard wired absolute ++RPATH+. This can be considered obsolete practice; for example, the Debian +policy forbids doing so. This is the result from numerous maintainability +problems in the past. On the other hand, the GNU linker and other modern +linkers support a relative resolution of shared modules directly accompanying +an specific executable. The Debian policy allows this, if and only if these +shared modules are installed with the same binary package and only used by +this specific executable(s). 
Together, this is exactly what we need to +solve our requirement. + +Thus, the build process enables the new-style DT-tags in the Elf binary +and sets the +DT_RUNPATH+ with an value relative to +$ORIGIN+, which resolves +to the path of the currently executing binary. Moreover, it is _sufficient_ +to set this on the initial executable _only,_ because this creates a common +searchpath for all lib resolution events in the scope of that loaded executable. +Besides that, we need to care that our private libraries have a unique +SONAME+, +in this case all starting with the prefix +liblumiera*+. Note moreover that this +new-style +DT_RUNPATH+ indeed _can_ be overridden by an +LD_LIBRARY_PATH+ in the +environment, should there be the need for very special experiments. + +Bootstrap location +~~~~~~~~~~~~~~~~~~ +Thus, a single relative library folder becomes the only hard wired start +configuration. In our case, the folder +$ORIGIN/modules+ was chosen. The +root of the package then holds all the binaries depending on these common +internal libraries, that is the +lumiera+ executable and any accompanying +special tools. As usual with such large application bundles, these get +only _symlinked_ into the +/usr/bin+ folder on installation. + +For sake of clarity, after starting the executable, the _same location_ +is used to load the bootstrap configuration. This configuration in turn +defines all further locations like the extended configuration, project +templates, plugin search path, the GUI module to load, the search path +for icons and GUI resources, project templates and similar basics. + +Relative paths and the location of the executable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +According to the general requirements outlined above, most paths should +be given in a relative fashion. Unfortunately there is no portable solution +for self-discovering the currently running executable. But at least there +is a solution for all current major platforms. 
Under Linux, this information +can be retrieved from the kernel through the +/proc+ filesystem. + +Again for sake of clarity, the same token +$ORIGIN+ was chosen to denote +this executable location (note: this is _not_ the current working directory). +Moreover, due to the folder layout detailed above, this coincides with the +root of the application bundle, thus making for a self-explanatory convention. +Besides +$ORIGIN+, these search paths later on likely will contain locations +below the user's home directory, e.g. +~/.lumiera/themes+ + + +Tasks +~~~~~ +// List what needs to be done to implement this Proposal: +* identify what impedes such a modern setup procedure ([green]#✔ done#) +* rectify the folder structure created in the build target + directory ([green]#✔ done#) +* build the executables in a way to allow relative resolution of the + internal shared modules ([green]#✔ done#) +* replace the compiled-in path definitions for plugin loading by a + configurable bootstrap ([green]#✔#) +* add a working library implementation for a config loader ([green]#✔ done#) +* add a mechanism for establishing the path of the current executable. 
+ + This is _non-portable_ ([green]#✔ done#) +* wire the prepared API in the GUI to use this working config loader + for resolving GUI resources ([green]#✔ done#) +* try to extract the path search code from the existing config loader, + or build a new solution based on standard libraries ([green]#✔ done#) +* introduce an output root directory into the buildsystem, allowing + for package builds ([green]#✔#) +* define a _Debian packaging_ as proof-of-concept ([green]#✔ done#) + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +* self-contained +* self-explanatory +* based on _best practices_ +* conforming with FSH and Debian policy + + +Cons +^^^^ +* requires work +* raises the bar at the implementation side +* requires an bootstrap sequence to be explicitly performed + on application startup +* breaks with some beloved habits of the Unix community + + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: +I can think of two alternatives + +. dealing with all those problems _later_ +. not making an concept, rather sticking to UNIX habits + +The first alternative is indeed worth considering, because we're settling +some things to be really implemented way later, which bears some dangers. +But, on the other hand, it is a common practice known from extreme programming +to deliver early and regularly, which effectively means to set up the deploy +path of an application really early in the development cycle. The rationale +is that -- according to general experience -- the deployment always turns +up some very specific problems and constraints, which can be a serious +threat when discovered late in the development process. + +The second alternative isn't really applicable IMHO. The original UNIX philosophy +breeds on an academic setup and really excels with small nifty commandline utils +combined by pipes, each specialised to do a single thing very well. These utils +are more like the objects within our implementation. 
The concept of large +application software bundles and desktop software was always a bit alien +within the classic UNIX environment. + + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: + +This RfC can be seen as an commitment to an professional approach and as +clarification: Traditionally, the Unix community hailed a lot of _black magic_ +practices like compiled-in installation paths, macro magic, +sed+ and +awk+ +trickery, inline code compiled on-the-fly, relying on very specific and +un-obvious behaviour of some build script, configuration via environment +variables and a lot of similar idioms. These practices might be adequate +in a quickly moving Research & Development setup, but turned out to be +not so helpful when it comes to industrial strength development, +as they are known to lead to maintenance problems. + + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +.State -> Draft +There is now a complete implementation of this concept on my ``proc'' branch. + +Moreover, I was able to define an initial Debian packaging for Lumiera on top +of that implementation. + +During that work, I had opportunity to visit various areas of the existing codebase, +which reminded me of several small issues, which seem to become unhealthy when lying +around unfixed for such a long time. Probably I'll start a clean-up initiative and +try to bring these points to discussion separately. 
+ + So 13 Feb 2011 20:04:00 CET Ichthyostega + + +//endof_comments: diff --git a/doc/devel/rfc/ApplicationStructure.txt b/doc/devel/rfc/ApplicationStructure.txt new file mode 100644 index 000000000..3b18ab92b --- /dev/null +++ b/doc/devel/rfc/ApplicationStructure.txt @@ -0,0 +1,200 @@ +Design Process : Application Structure +====================================== + +[grid="all"]] +`------------`---------------------- +*State* _Dropped_ +*Date* _2008-11-05_ +*Proposed by* link:ct[] +------------------------------------ + +Application Structure +--------------------- +Here I am going to propose some more refined structure of the application and +its components. + + +Description +~~~~~~~~~~~ +So far we came up with a simplified BACKEND/PROC/GUI structure where each of +this entities defines its own sub subcomponents. We agreed to glue that all +together with some portable versioned interfaces system, but details where not +laid out yet. At the time of this writing the interface system and plugin +loader are reasonable finished to be usable (some small refinements to do). We +recently discussed some details on IRC on how to engage this without a +definitive decision. The topic of this proposal is to make a detailed +description towards how the application components being glued together. + +In the discussion mentioned above we concluded that we want a 'lumiera' binary +which in turn loads the optional parts as plugins. There was no consent what +this parts are actually be, except that the GUI should be optional for headless +operation. I suggested to make as much as possible pluginable to make it easier +to validate our interfaces and try different things out. 
+ +Now I introduce 'lumiera' here, this will become a new component in +./src/lumiera being the driver application for bootstraping all the rest: + +Then our application structure looks somewhat like (please refine): + + * the 'lumiera' loader + - commandline handling + - interface & plugin system + - session manager core + - configuration system + - lua scripting + * backend + - file and io handling + - caches + - streams + - threads + - scheduler + * proc + - asset management + - config rules system + - builder + - render graph management + * gui + - timelines + - viewers + - resources + - preferences + - ... + +Furthermore the interface&plugin system is flexible enough to provide things +independently of their origin (if it is build in or a plugin/dynamic library). +So deployment (where to link these things) is secondary. + +'lumiera' will then be the executable the user starts up, what exactly gets + initialized and booted up is then matter +of configuration and commmandline options (and maybe lua scripting?). + + + +Tasks +^^^^^ + + * create the 'lumiera' directory + - setup the build system + - move config, plugin and interfaces therein + - lua support can be done later + * write the main() part of the application + - start config system + - parse commandline opts + * librificate all other components (backend, proc gui) + - define their lumiera interfaces + - decide if they shall be statically linked, becoming shared libs or plugins + +This are rather distributed tasks, after the 'lumiera' being set up, all other +components have to be adapted to be loadable from it. + + +Pros +^^^^ + + * flexible plugin based architecture + - later: loads only things which are necessary for a given task + * very fast startup + * things which cant be used on a given environment can be left out (no gui on + a headless system, no $DISPLAY set) + * inter dependencies between interfaces and plugins are automatically tracked. 
+ + +Cons +^^^^ + +Ichthyo raised concerns that this kind of flexibility might attract other +people to write things which are not in our intention and break future design +and compatibility. We need to carefully document and define interfaces that +people don't abuse those! + + + +Alternatives +^^^^^^^^^^^^ + +We discussed the startup/main() through the GUI as it is currently done, it +would be also possible to produce some more executables (lumigui, luminode, +lumiserver, ....). But I think we agreed that a common loader is the best way +to go. + + +Rationale +~~~~~~~~~ + +I just think this is the best way to ensure a enduring design even for future +changes we can not forsee yet. + + + + + +Comments +-------- +We discussed this issue lately on IRC and I got the feeling we pretty much +agreed on it. + + * we don't want to build a bunch of specialized executables, rather we build + one core app which pulls up optional parts after parsing the config + * we change the GUI to be loaded via the module/interfaces system + +From reading the above text, this proposal seems to capture that. But I am +somewhat unsure if the purpose of this proposal isn't rather to load just a +micro kernel and the pull up components according to configuration. Because I +wouldn't accept such an architecture, and I clearly stated so right at the +beginning of our project. I accepted a very flexible and language neutral +plugin system on the condition the core remains in control, stays +''reasonable'' monolithic and componentization doesn't handicap us in creating +an architecture based on abstractions and exploiting the proven design +patterns. + +It has that flexibility, yes. But that means not that we have to abuse it in +any way. The main() there and thus the bootstrap of the application is under +our tight control, if we want to reject scriptable/highly configurable +bootstrapping there then we can just do so. Thats more a social than a +technical decision. 
I personally don't like if a design is 'nannying' and puts +too much constraints into unforeseen areas. If the computer can do some task +better than we, it shall do it. This still means that I want to stay very much +in control, it should only do some tedious, error-prone managing tasks for me. +For example the interfaces system already tracks inter-dependencies between +plugins and interfaces automatically, without the programmer needs to care or +define anything. The interface system gets it right and we wont need to care +for the order initialization. I added that because I consider such as +absolutely important for plugins which might be supplied by third parties where +we have no control over. But I now realized that we can nicely use that for our +own internal things too. Imo thats some very valuable service. + -- link:ct[] [[DateTime(2008-11-08T06:26:18Z)]] + +Some further minor details: We didn't finish the discussion about namespaces on +the last meeting. (I know I still have to write up a proposal showing the two +or three alternatives I see regarding namespace organisation). But probably, +"lumiera::" will be our top level interface namespace and then probably the +lumiera directory will be taken by that. I see no problem also putting some +startup facilities in there, but generally, it shouldn't contain implementation +code, only headers and abstract classes. If that's going to become a problem, +we should consider to use a separate package for the startup, e.g. "src/boot". + +Another point is, you need not write a main, because there is already one. +Please have a look at it, especially with regards to the +[wiki:self:../GlobalInitialization global initialisation]. Further, last year +I've investigated boost::program_options and think it's fine. I use it for my +test class runner since then. I don't think there is any reason why we should +bother with parsing options (most config is pulled up from the session). 
I +don't think we get much program options, maybe something to set a GUI skin. +Moreover, I've written last year a thin wrapper around the commandline and +integrated it with the boost options parser such that user code can receive the +remaining options as a vector of std::strings. Please have a look at +link:http://git.lumiera.org/gitweb?p=LUMIERA;a=blob;f=tests/common/mainsuite.cpp;h=455bfd98effd0b7dbe6597f712a1bdfa35232308;hb=80e1e382f42512ebf2e10a802f77e50327b8fb73[the test class runner main] +for an usage example. I really want our Lumiera main to be clean and expressive +in the way showed there. Probably the most important part of the startup is +pulling up the session core; because of that I think most of the startup +process falls into the realm of the Proc-Layer. Within Proc, I don't want any +significant string manipulations done with C-strings and I don't want raw +arrays when we can use std::vector. + -- link:Ichthyostega[] [[DateTime(2008-11-06T19:28:13Z)]] + +I 'dropped' this now because we do it somewhat differently now and I dont want +to document this here :P + -- link:ct[] [[DateTime(2009-02-03T17:28:28Z)]] + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/ClipCatalogingSystem.txt b/doc/devel/rfc/ClipCatalogingSystem.txt new file mode 100644 index 000000000..8149c23ac --- /dev/null +++ b/doc/devel/rfc/ClipCatalogingSystem.txt @@ -0,0 +1,128 @@ +Design Process : Clip Cataloging System +======================================= + +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-07-26_ +*Proposed by* link:JordanN[] +------------------------------------- + +Clip Cataloging System +----------------------- + +A system for storing, organizing, and retrieving assets, such as images and +videos. 
+ + +Description +~~~~~~~~~~~ + +Organizations that work with video, and even home users, tend to have massive +collections of stock videos and images that they will need to find and use in +their projects. A Linux-based system is needed to help them to organize, tag, +and retrieve assets from those collections. Being able to find the clips the +user needs and bring them into his timeline, will mean that the user will be +able to more rapidly complete his project. + +This could be implemented as a separate application, but integrated for use in +a Linux-based video workflow suite, including apps like Lumiera and Blender. + + +Tasks +~~~~~ + +* Identify ways in which existing groups organize their collections. +* Determine pros / cons of each method +* Implement a solution that will be modular enough for other content creation + projects to also use + + +Pros +~~~~ + +* Faster, more efficient workflow + + +Cons +~~~~ +Not directly a part of Lumiera. If not implemented separately, could cause +undue bloat. + + +Alternatives +~~~~~~~~~~~~ +Storage-based organization. User must remember where files are, and must store +them correctly. Not clip-based, so the entire video must be imported and the +desired portion selected. + + +Rationale +~~~~~~~~~ + + +Comments +-------- + +* Such is planned, but as you pointed out, this would be a rather standalone + application which needs a lot of efforts to be implemented. We don't have the + development power to do that now. If someone wants to work on that, please + contact me. General idea is to put all kinds of resources (Footage, Clips, + Effects, Subprojects, Sounds ....) into a database with then gets + tagged/attributed in different ways (implicit things like 'filename', 'type', + 'length'; automatic deduceable things like 'Exposure', 'Timecode', ...; And + manual tags like: who was on set, location, ....). 
Then present this all in a + *good* GUI (by default just showing filesysten like) but one can define + queries on this database and the generated views will then be storeable. +Back to Lumiera, for now we will likely just use 'normal' file open dialogs +until the above system becomes available. + -- link:ct[] [[DateTime(2008-07-26T08:31:42Z)]] +* Yes, it's indeed an important feature we should care for. But cehteh is + right, we have more important things to do first. But feel free to target it. +* Also, we'd need integration with production support systems, for example + http://celtx.com/[CELTX]. +* The interface to the Lumiera App would be to populate the asset manager with + the required assets + -- link:Ichthyostega[] [[DateTime(2008-07-27T22:19:38Z)]] + + +Videos, Audio, Clips and Resources Manager by using plugins for FOSS GPL +"Library & Collections Management" programs. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The video and audio raw material, clips, etc could be managed using code that +is already available in project that carry out the same tasks. For example as +library managers, or media (video, audio or CD) collections, Integrated +Library Systems (ILS). + +Examples of a library management program ; + +. Kete - http://kete.net.nz/[] +. Koha - http://www.koha.org/[] +. link:GreenStone[] - http://www.greenstone.org/[] +. Evergreen - http://open-ils.org/faq.php[] + +An additional benefit to using "library" managers, is that it can handle +interloans, referencing of "other" (people's/organization's) libraries, +numbering systems, descriptions, and classifications, thousands to millions of +items, search systems, review and comment systems, plus the benefits of open +source that allow the expansion of features easily. The use of task oriented +programs in this way, makes use of established code, that has been developed by +experts in their field. 
Any database system would be useful for managing all
+these media. But one that has been developed by the people that have been
+working with cataloging systems for a long time is likely to do well. Plus it
+can be readily improved, by people who do not have to know the first thing
+about how to design video editing programs. The program also gets improved
+because of its own community, which adds features or performance to Lumiera,
+without even having to "drive" the development.
+--link:Tree[][[DateTime(2008-08-27T20:38:00NZ)]].
+
+''''
+
+Parked until someone cares
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Decided on Developer meeting
+
+ Do 14 Apr 2011 02:52:30 CEST Christian Thaeter
+
+
+Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview]
diff --git a/doc/devel/rfc/DIR_INFO b/doc/devel/rfc/DIR_INFO
index 393ae51b8..205119027 100644
--- a/doc/devel/rfc/DIR_INFO
+++ b/doc/devel/rfc/DIR_INFO
@@ -1 +1,3 @@
-accepted design proposals
+RFC pool
+
+all other rfc dirs (sorted by state) link back to here
diff --git a/doc/devel/rfc/DelectusShotEvaluator.txt b/doc/devel/rfc/DelectusShotEvaluator.txt
new file mode 100644
index 000000000..0d5b55a88
--- /dev/null
+++ b/doc/devel/rfc/DelectusShotEvaluator.txt
@@ -0,0 +1,397 @@
+[grid="all"]
+`------------`-----------------------
+*State* _Parked_
+*Date* _2008-09-21_
+*Proposed by* link:nasa[]
+-------------------------------------
+
+
+Delectus Shot Evaluator
+-----------------------
+This is a brain dump about the shot evaluator subproject.
+
+
+Description
+~~~~~~~~~~~
+
+
+Brainstorm on Delectus
+~~~~~~~~~~~~~~~~~~~~~~
+Some (many) of the ideas presented herein come from the various parties
+involved in the Lumiera discussion list and IRC channel #lumiera.
+http://lists.lumiera.org/pipermail/lumiera/2008-September/000053.html[] -- the
+main discussion thread
+
+Additionally, a lot of great concepts for how to streamline the interface are
+derived in part from link:KPhotoAlbum[].
+ +I use tags, keywords, and metadata almost interchangeably, with the exception +that metadata includes computer generated metadata as well. These are not tags +in the conventional sense -- they don't have to be text. In fact the planned +support (please add more!) is: + + * Text -- both simple strings (tags) and blocks + * Audio -- on the fly (recorded from the application) or pregenerated + * Video -- same as audio + * Link -- back to a Celtx or other document resource, forward to a final cut, + URL, etc + * Still image -- inspiration image, on set details, etc + * ID -- such as the serial number of a camera used, the ISBN of a book to be + cited, etc + +As such, the tags themselves can have metadata. You can see where this is +going... + +Also, the tags are applied to "clips" -- which I use interchangeably between +source material imported into the application and slice of that material that +tags are applied to. Any section of a video or audio source can have tags +applied to it. + + +Two key functions: assign metadata and filter by metadata. + +clips are one thing; but in reality most clips are much longer than their +interesting parts. Especially for raw footage, the interesting sections of a +clip can be very slim compared to the total footage. Here is a typical workflow +for selecting footage: + +. Import footage. +. Remove all footage that is technically too flawed to be useful. +. Mark interesting sections of existing clips, possibly grouped into different + sections. +. Mark all other footage as uninteresting. +. Repeat 3-4 as many times as desired. + +Some key points: + + * Import and export should be as painless and fast as possible. + * Technically flawed footage can be both manual and computer classified. + * In some cases (e.g. documentaries, dialog) audio and video clips/footage can + follow different section processes. + It is possible to use video from footage with useless audio or use audio + from footage with useless video. 
+ * "Interesting" is designed to be broad and is explained below. + * steps 2-5 can be performed in parallel by numerous people and can span many + different individual clips. + +In simple editors like Kino or iMovie, the fundamental unit used to edit video +is the clip. This is great for a large number of uses, such as home videos or +quick Youtube postings, but it quickly limits the expressive power of more +experienced engineers in large scale productions (which are defined for the +purposes of this document to include more than 2 post-production crew members). +The clip in those editors is trimmed down to include only the desired footage, +and these segments are coalesced together into some sort of coherent mess. + +The key to adequate expressive power is as follows: + + * Well designed, fast metadata entry. Any data that can be included should by + default, and ideally the metadata entry process should run no less than + about 75% as fast as simple raw footage viewing. Powerful group commands + that act on sections of clips and also grouping commands that recognize the + differences between takes and angles (or individual mics) enhance and speed + up the process. + * Good tools to classify the metadata into categories that are actually + useful. Much of the metadata associated with a clip is not actively used in + any part of the footage generation. + * Merging and splicing capabilities. The application should be smart enough to + fill in audio if the existing source is missing. For example, in a recent + project I was working on a camera op accidently set the shotgun mike to test + mode, ruining about 10% of the audio for the gig. I was running sound, and + luckily I had a backup copy of the main audio being recorded. This + application should, when told that these two are of the same event at the + same time, seamlessly overlay the backup audio over the section of the old + audio that has been marked bad and not even play the bad audio. 
This is just + background noise, and streamlining the immense task of sorting through + footage needs to be simplified as much as possible. + * Connection to on site documentation and pre-production documentation. When + making decisions about what material to use and how to classify it, it is + essential to use any tools and resources available. The two most useful are + onsite documentation (what worked/didn't work, how the weather was, pictures + of the setup, etc all at the shoot) and pre-production (what the ideal scene + would be, what is intended, etc). Anything else that would be useful should + be supported as well. + * Be easily accessible when making the final cut. Lumiera is, if the + application gets up to speed, going to serve primarily to render effects, + finalize the cut, and fine tune what material best fits together. Any + metadata, and certainly any clipping decisions, should be very visible in + Lumiera. + * Notes, notes, notes! The application should support full multimedia notes. + These differ from (4) in that they are generated during the CLASSIFICATION + process, not before. This fits in with (5) as well -- Lumiera should display + these notes prominently on clip previews. The main way for multiple parties + to communicate and even for a single person to stay organized is to add in + notes about tough decisions made and rationale, questionable sections, etc. + These notes can be video, audio, text, etc from one of the clips, from the + machine used to edit (such as using a webcam or microphone), or over the + network (other people's input). + + +Too technically flawed +^^^^^^^^^^^^^^^^^^^^^^ +A clip is said to be too technically flawed if it has no chance of making it to +the final product whatsoever. 
This does not, however, preclude its use +throughout the post-production process; for example, part of a clip in which +the director describes his vision of the talent's facial expression in a +particular scene is never going to make it into the final product, but is +invaluable in classifying the scene. In this case, the most reasonable place to +put the clip would be as a multimedia note referenced by all takes/angles of +the scene it refers to. + +As mentioned above, flawed video doesn't necessarily mean flawed audio or +vice-versa. + + +Interesting +^^^^^^^^^^^ +An "interesting" clip is one that has potential -- either as a metadata piece +(multimedia note, talent briefing, etc) or footage (for the final product OR +intermediary step). The main goal of the application is to find and classify +interesting clips of various types as quickly as possible. + + +Parallel Processing +^^^^^^^^^^^^^^^^^^^ +Many people, accustomed to different interfaces and work styles, should be able +to work on the same project and add interactive metadata at the same time. + + +Classification interface +++++++++++++++++++++++++ +The classification interface is divided into two categories: technical and +effective. Technical classification is simply facts about a clip or part of a +clip: what weather there is, who is on set, how many frames are present, the +average audio level, etc. Effective classification allows the artist to express +their feelings of the subjective merits (or failures) of a clip. + + +DCMS +^^^^ +The project is organized around a distributed content management system which +allows access to all existing materials at all times. Content narrowing allows +for a more digestible amount of information to process, but everything is +non-destructive; every change to the clip structure and layout is recorded, +preferably with a reason as to why it was necessary or desired. 
+ + +Content narrowing +^^^^^^^^^^^^^^^^^ +With all of the information of an entire production available from a single +application, information overload is easy. Content narrowing is designed to fix +that by having parts of individual clips, metadata, or other files be specific +to one aspect of the overall design. This allows for much more successful use +of the related information and a cleaner, streamlined layout. As an example, +metadata involving file size has no effect whatsoever on the vast majority of +most major decisions -- the answer is almost always "whatever it takes." Thus, +it would not appear most of the time. Content narrowing means that it is easy +to add back footage -- "widen the view" one step, add it back, and "narrow the +view" again. + + +Multiple cuts +^^^^^^^^^^^^^ +There is no need to export a final cut from this application; it merely is the +first step in the post-production chain. It is the missing link between +receiving raw footage from the camera and adding the well executed scenes to +the timeline. What should come out of the application is a classification of + + +Situational, take, and instance tagging +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +This is VERY powerful. The first step to using the application is to mark which +scenes are the same in all source clips -- where same means that they contain +sections which would both not run. This can include multiple takes, different +microphones or camera angles, etc. The key to fast editing is that the +application can edit metadata for the situation (what is actually going on IN +THE SCENE), take (what is actually going on IN THIS SPECIFIC RUN), and instance +(what is actually going on IN THIS CLIP). If editing a situation, the other +referenced clips AUTOMATICALLY add metadata and relevant sections. 
This can be +as precise and nested as desired, though rough cuts for level one editing +(first watchthrough after technically well executed clips have been selected) +and more accurate ones for higher levels is the recommended method. + + +Subtitling +^^^^^^^^^^ +This came up on the discussion list for Lumiera, and it will be supported, +probably as a special tag. + + +nasa's Laws of Tagging +^^^^^^^^^^^^^^^^^^^^^^ +. There is always more variety in data than tags. There are always more + situations present in the data than can be adequately expressed with any + (reasonable) number of tags. This is OK. All that is needed is the minimum + set of unique tags to progress to the next cycle without losing editing + intent or the ability to rapidly evaluate many situations. +. Many tags are used many times. "Outdoors" will be a very, very common tag; so + will "redub." If conventional names are decided upon and stuck to, it is + significantly easier to map the complex interactions between different + content situations. +. Avoid compound tags. Do not have "conversation_jill_joe" as a tag; use + "conversation," "jill," and "joe" instead. It is very easy to search for + multiple tags and very hard to link data that doesn't use overlapping tags. + + + + + + + + + + +The interface -- random idea +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is not meant to be a final interface design, just something I wrote up to +get ideas out there. + +key commands + mutt/vim-style -- much faster than using a mouse, though GUI supported. + Easy to map to joystick, midi control surface, etc. +Space stop/start and tag enter Tab (auto pause) adds metadata special Tracks +have letters within scenes -- Audio[a-z], Video[a-z], Other[a-z] (these are not +limits) -- or names. Caps lock adds notes. This is really, really fast. It +works anywhere. This means that up to 26 different overlapping metadata +sections are allowed. + +Prompting Prompting for metadata is a laborious, time-consuming process. 
There +is no truly efficient way to do it. This application uses a method similar to +link:KPhotoAlbum[]. When the space key is held and a letter is pressed, the tag +that corresponds to that letter is assigned to the track for the duration of +the press. (If the space is pressed and no other key is pressed at the same +time, it stops the track.) For example, suppose that the following mapping is +present: +o = outside +x = extra +p = protagonist +c = closeup + +Then holding SPACE over a section and pressing one of these keys would assign +the tag to the audio AND video of the section over which the space was held. If +instead just the key is pressed (without space being held), that tag is +assigned to the section over which it is held. This is very fast and maps well +to e.g. PS3 controller or MIDI control. + +If LALT is held down instead of SPACE, the audio is effected instead. If RALT +is held, just the video is effected. + +In order to support scenario/take/clip tagging: + The default is situation. If the keybinding to x is: + x = t:extra ; effect only take + x = ts:extra ; effect take and scenario + x = c:extra ; extra only visible in this clip! + x = tc:extra ; this take and clip show the extra + etc + +Other keyargs (the part in front of the colon) can be added to account for +other uses (e.g. l = all taken on the same location). + +Tab is pressed to add metadata mappings. Tab is pressed to enter metadata edit +mode; this pauses video. Then press any key to map; and type the tag to +associate (with space, multiple tags can be added.). The following specials are +defined: + [:keyarg:]:TAG is special tagging for scenario/take/clip. + !TAG removes TAG if it is present. This is useful because it allows huge + sections of the clip to be defined as a certain tag, then have parts + removed later. + a:TAG applies TAG only to the audio. + v:TAG applies TAG only to the video. + p:PATH adds a link to PATH as a special tag. 
+ +(This will have a nice GUI as well, I just will always use the keyboard method + so I am describing it first. Mapping configurations can be stored in a + separate file, as a user config, or in the specific project.) + +If ESC is pressed, all currently ranged tags are ended. + +Finally, if single_quote is pressed without SPACE or {L,R}ALT down, it marks an +"interesting location." Pressing SHIFT+single_quote goes to the next +"interesting location" and pressing CNTRL+' goes to the previous "interesting +location." This allows for very quick review of footage. + + + + + + + + + + + + + + + +Comments +-------- + + +Rating - Quantitative Rating as well as Qualitative Tagging +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The importance/value of the video for various factors uses, can vary through +the video. It would be helpful to have the ability to create continuous ratings +over the entire track. Ratings would be numerical. Automatic clip +selection/suggestion could be generated by using algorithms to compute the +usefulness of video based on these ratings (aswell as "boolean +operations"/"binary decisions" done with tags). The ratings could be viewed +just like levels are - color coded and ovelayed on track thumbnails. + +- Tree 2008-10-25 + + +link:MultiView[] - useful for concurrent ratings input +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It would be convenient to have an ability to view the different tracks (of the +same scene/time sequence) at once, so the viewer can input their ratings of the +video "on the fly", including a priority parameter that helps decide which +video is better than what other video.See the GUI brainstorming for a viewer +widget, and key combinations that allow both right and left hand input, that +could be used for raising/lowing ratings for up to six tracks at once. + +- Tree 2008-10-25 + + +I like the idea of rating clips (or rather, takes) a lot. 
It would be cool to +include both "hard," "relative," and "fuzzy" rating. Hard is an exactly defined +value (scaled 0-1) that puts the clip in an exact location in the queue. +Relative means that one is higher or lower rated than another. Fuzzy is a +slider which is approximate value, and there is some randomness. The best part +is that these can be assigned to hardware sliders/faders. Pressure sensitive +buttons + fuzzy ratings = really easy entry interface. Just hit as hard as +needed! Multiple tracks at once also an astounding idea. I could image some +sort of heap (think binary heap, at least for the data structure) which +determines the priorities and decides which clips are played. Then the highest +rated clips are played first, down to the worst. + +- link:NicholasSA[] 2009-01-04 + + +Possible Collaboration with the people from Ardour? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +I guess if the thing can do all the things we talked about here, it would be +perfectly suitable for sound classification too, and maybe could fill another +gap in FOSS: Audio Archival Software, like this: +http://www.soundminer.com/SM_Site/Home.html[] (which is very expensive)... +maybe the Ardour people would be interested in a collaboration on this? + +I like the suggestion of sound classification with a similar (or, even better, +identical) evaluator. link:SoundMiner[] looks interesting, but like you say +very expensive. I'm a sound guy, so I feel your pain... + +- link:NicholasSA[] 2009-01-04 + + +Parked +~~~~~~ + +Decided on Developer meeting, until someone wants to investigate this further. 
+ + Do 14 Apr 2011 02:52:30 CEST Christian Thaeter + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/DesignParamAutomation.txt b/doc/devel/rfc/DesignParamAutomation.txt new file mode 100644 index 000000000..9ed2d9b4a --- /dev/null +++ b/doc/devel/rfc/DesignParamAutomation.txt @@ -0,0 +1,84 @@ +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _2008-03-06_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + +Design the handling of Parameters and Automation +------------------------------------------------ +Parameters of Plugin Components and/or Render Nodes play a role at various +levels of the application. + + +Thus it seems reasonable to do a formal requirements analysis and design prior +to coding. + + +Description +~~~~~~~~~~~ +Regarding components directly participating in the render (which may be +implemented by plugins), we distinguish between *configuration* (static) and +*parameters* (dynamic). The point of reference for this distinction is the +render process: a plugin configuration may well be variable in some manner, +e.g. the plugin may provide different flavours of the same algorithm. But this +choice has to be fixed prior to feeding the corresponding plugin asset to the +builder. Contrary to such fixed configuration setup, the _parameters_ are +considered to be _variable_ during the rendering process. They can be changed +on-the-fly from GUI, and they may be automated. Probably, each Render Node will +have at least one such _parameter_ -- namely a bypass switch. + + +Tasks +^^^^^ + + * we need to work out an introspection mechanism for parameters + - asses what different types of parameters we need + - find out how much structured parameters will be (do simple values + suffice?) 
+ - define how parameters can be discovered/enumerated + - define a naming scheme for parameters, so they can be addressed + unambiguously + * value parameters have a value range. Work out how to handle this + * parameters may need a specific presentation in the GUI + - linear/logarithmic scale, scale reference + - selecting the right widget + +So... + +. find out to which extend we need these properties +. find out what parts of the App will have what requirements? +. chose a best fitting implementation based on this information + +A closely related issue is the handling of *Automation*. The current draft +calls for an abstract interface "ParamProvider", which just allows the +link:Plugin/RenderComponent[] to pull a current value, without knowing if the +ParamProvider is a GUI widget or an automation data set with interpolation. The +component using the param value should not need to do any interpolation. We +should re-asses and refine this draft as needed. Note: Render Nodes are +stateless; this creates some tricky situations. + + + + + + + +Alternatives +^^^^^^^^^^^^ +?? (any ideas?) + + +Rationale +~~~~~~~~~ + + + + + + +Comments +-------- + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/DesignRenderNodesInterface.txt b/doc/devel/rfc/DesignRenderNodesInterface.txt new file mode 100644 index 000000000..201c34a72 --- /dev/null +++ b/doc/devel/rfc/DesignRenderNodesInterface.txt @@ -0,0 +1,128 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-03-06_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + +Design the Render Nodes interface +--------------------------------- +In the current design, the low-level model is comprised of "Render Nodes"; +Proc-Layer and Backend carry out some colaboration based on this node network. 
+ + +Three different interfaces can be identified + * the node wiring interface + * the node invocation interface + * the processing function interface + + +Description +~~~~~~~~~~~ +Render Nodes are created and wired by the Builder in the Proc-Layer. On the +other hand, the rendering process is controlled by the backend, which also +provides the implementation for the individual data processing tasks. To create +a result, output nodes are ''pulled'' via the invocation interface, resulting +in the affected nodes to recursively pull their predecessor(s). In the course +of this call sequence, the nodes activate their processing function to work on +a given set of buffers. Moreover, we plan to use the render network also for +gathering statistics. + +'''Note''': Render Node is an internal interface used by Proc-Layer and + activated by the Backend. Plugins are planned to be added via Adapter nodes. + Thus the Render Node interface needs ''not'' to be exported. + + +the wiring interface +^^^^^^^^^^^^^^^^^^^^ +This part of the design defines how nodes can be combined and wired up by the +builder to form a network usable for rendering. For this purpose, the +link:ProcNode[] is used as a shell / container, which is then configured by a +const WiringDescriptor. Thus, the node gets to know its predecessor(s) and is +preselected to use a combination of specific working modes: + + * participate in caching + * calculate in-place + * source reading + * (planned) use hardware acceleration + * (planned) remote dispatched calculation + +Most nodes will just have a single predecessor, but we can't limit nodes to a +single input, because there are some calculation algorithms which natively need +to work on several data streams simultaneously. This means, a single node can +be involved into the calculations for multiple streams (several pull calls on +the same frame number but for different channel, and in each case maybe a +different output node). 
I decided to rely solely on the cache for avoiding
+duplicate calculations caused by this complication, because I deem it to be a
+corner case.
+
+
+the invocation interface
+^^^^^^^^^^^^^^^^^^^^^^^^
+this is intended to be a rather simple "call-style" interface, without much
+possibilities to influence the way things are happening. You pull a node and
+will find the results in a provided buffer or the cache, but you can't even
+change the frame data type of the result. Besides the node invocation,
+functions for collecting statistics will be accessible here too (Probably these
+functions will be ''implemented'' in a classic-OO fashion by virtual functions,
+but that's another story)
+
+
+the processing interface
+^^^^^^^^^^^^^^^^^^^^^^^^
+the individual nodes are configured to call a plain-C {{{process()}}} function
+and provide an array of buffer pointers to be used within this function. For
+the purpose of invoking actual data processing, it is irrelevant if this
+function is implemented somewhere in the backend or provided by a plugin. At
+this point, no type- and other meta-information is passed, rather the
+processing function is supposed to do The Right Thing ^TM^
+
+
+
+Tasks
+^^^^^
+ * What services do we expect from Render Nodes. What do we plan to do with a
+   render node?
+ * What different kinds (if any) of Render Nodes can be foreseen?
+ * order the required functionality by Proc / Backend. Find out specific
+   implementation constraints.
+ * work out a design based on this information
+
+
+
+
+
+
+
+
+
+Rationale
+~~~~~~~~~
+The purpose of this Design Entry is to give a summary; the questions and the
+details of carrying out the operations are much more involved.
+ + +Please see the +http://www.lumiera.org/wiki/renderengine.html#Rendering[Proc-Layer impl +documentation (TiddlyWiki)] and the +http://www.lumiera.org/gitweb?p=lumiera/ichthyo;a=blob;f=src/proc/engine/procnod +.hpp;h=9cf3a2ea8c33091d0ee992ec0fc8f37bb5874d34;hb=refs/heads/proc[Source Code] +for details +(and/or contact Ichthyo for in-depth discussion of those technical details) + + + + + + +Comments +-------- + + +Parked +~~~~~~ +We park this until we have time to revisit the details. It is accepted that we +need to design this interfaces. + + Do 14 Apr 2011 03:06:42 CEST Christian Thaeter + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/DeveloperDocumentationStructure.txt b/doc/devel/rfc/DeveloperDocumentationStructure.txt new file mode 100644 index 000000000..1f88d130e --- /dev/null +++ b/doc/devel/rfc/DeveloperDocumentationStructure.txt @@ -0,0 +1,136 @@ +Developer Documentation Structure +================================= + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Mon Aug 2 18:03:25 2010_ +*Proposed by* Christian Thaeter +------------------------------------- + +[abstract] +******************************************************************************** +I describe here how to bring the Lumiera Developer Documentation into an simple +hierarchical structure. Previously we accumulated a lot Documentation which +ended in quite a few different places. This should be tidied up. +******************************************************************************** + +Description +----------- +//description: add a detailed description: + +I propose to reorganize the developer documentation in the following way: + + * make a 3 (or more, see below) level documentation structure: + 1. The entry level becomes the 'Lumiera: The inner Core' document which shall + not go into details but give a hint what everything is made for. 
This
+      will be the first introductory doc for new developers.
+   2. second level are the RFC's which describe the design as planned on a
+      general level, not going (except for some example snippets) into
+      implementation details.
+   3. the third level is the doxygen documentation which describes what
+      actually got implemented in detail. This can be further split into
+      an external reference and an internal part.
+
+We are using test-driven-development, our tests are our specifications. This leads
+to the idea that ideas, design and intentions for tests should be documented
+there too. In a higher level abstract human written form. I propose to use my
+pipadoc documentation extractor (that means, writing asciidoc within the code as
+special comments) for this.
+
+
+Tasks
+~~~~~
+// List what would need to be done to implement this Proposal in a few words:
+// * item ...
+
+ * Go over the old content of the asciidoced tiddlywikis, integrate it either in
+   the "Lumiera: The inner Core" document or write single RFC's for them.
+ * The 'proc' tiddlywiki is a bit special, we need a plan how to integrate this.
+   Possibly making an own document-dir for this, or refactor it into plenty of RFC's.
+   This is ichthyo's decision.
+ * Decide how to proceed with the UML model
+
+
+
+Pros
+^^^^
+// add just a fact list/enumeration which make this suitable:
+
+Much easier entry to the whole developer documentation. Reading the "Inner Core"
+document should be sufficient to get a good idea about the Lumiera design and
+layout. All details are linked from there and thus easily findable.
+
+
+Cons
+^^^^
+// fact list of the known/considered bad implications:
+
+There are some open ends yet, doxygen for example doesn't integrate nicely, we
+possibly can't link to single doxygen entities since these have no permanent
+link (to my understanding, to be investigated). Other parts like the UML model
+are not yet decided and moving the other existing content over needs some (not
+really much) work.
+ +Alternatives +------------ +//alternatives: explain alternatives and tell why they are not viable: + +Spring 2010 we discussed and decided an overall website and documentation structure. +We could just stick to that. + + +Rationale +--------- +//rationale: Describe why it should be done *this* way: + +This approach fits nicely into our overall infrastructure and the way we wanted +to do things. Using git and asciidoc mostly, making the developer documentation +part of the source tree and reasonable easy available/maintainable to +developers. + +//Conclusion +//---------- +//conclusion: When approbated (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +* The general idea of having three levels, with 'The Inner Core' as entry point, + looks OK for me. +* beyond that -- we had a detailed discussion about the overall website structure, + which includes the documentation. Why should we overthrow these results now and + re-start the discussion? Lets just stick to this agreed on structure! +* especially I don't like the way this proposal tries to squeeze everything into + an completely uniform structure. It is simply not true that the RFCs are just the + second level, and doxygen would cover the 3^rd^ level. Look at the existing + documentation to see why. + - RFCs are a 'kind' of document, not a 'hierarchy level.' Indeed, our existing + RFCs span all three hierarchy levels, and this is OK so and should remain this + way. (And yes, I like the RFCs much and want to retain them) + - RFCs are well suited to topics requiring discussion and agreement by the whole + core developer team. I see no point in 'pseudo-RFC-ing' the individual design + decisions only relevant for an isolated part of the application and without + any potential for discussion. 
+ - similarily, in the TiddlyWiki, besides just working notes (``extended brain'') + you'll find finished text pages belonging to all different levels, from very + high-level conceptual down to explanation of technical details, with + cross references and tags for categorisation (and this will be retained + when asciidocing the content). +* so my conclusion is rather having one overview text, and then the split into + *conceptual* and *technical* documentation, each of which has a separate sub + structure not necessarily congruent to the structure on the other half. RFCs, + UML model and doxygen are just separate and consistent bodies of documentation + and can be referred to from the main documentation. (I agree with the observation + regarding permanent links into doxygen. But I can't imagine there isn't some + existing solution to this problem) + -- link:Ichthyostega[] 2010-10-15 + +//endof_comments: diff --git a/doc/devel/rfc/EngineInterfaceOverview.txt b/doc/devel/rfc/EngineInterfaceOverview.txt new file mode 100644 index 000000000..e9e931661 --- /dev/null +++ b/doc/devel/rfc/EngineInterfaceOverview.txt @@ -0,0 +1,271 @@ +Engine Interface Overview +========================= +:Date: 2010 + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Draft_ +*Date* _2010-04-16_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + +Overview Engine Interface(s) +---------------------------- + +******************************************************************************** +At the Engine Interfaces, Lumiera's Backend and Session get connected and work +together to produce rendered output. This design proposal intends to give an +overview of the connection points and facilities involved, to define some terms +and concepts and to provide a foundation for discussion and working out the +APIs in detail. 
+********************************************************************************
+
+
+
+Participants
+~~~~~~~~~~~~
+ *Render Process*:: represents an ongoing calculation as a whole
+ *Engine Model*:: encloses the details of the current engine configuration and
+ wiring
+ *Dispatcher*:: translates a render process into the (planned) invocation of
+ individual nodes
+ *Scheduler*:: cares for calculations actually to happen, in the right order
+ and just in time, if at all
+ *Node*:: abstraction of a processing unit, supports planning by the
+ dispatcher, allows to pull data, thereby driving the actual calculation.
+
+Render Process
+~~~~~~~~~~~~~~
+The render process brackets an ongoing calculation as a whole. It is not to be
+confused with an operating system process or thread; rather it is a point of
+reference for the relevant entities in the GUI and Proc-Layer in need to
+connect to such a "rendering", and it holds the specific definitions for this
+calculation series. A render process
+_corresponds to a single data stream_ to be rendered. Thus, when the play
+ controller of some timeline in the model is
+in _playing_ or _paused_ state, typically multiple corresponding render
+processes exist.
+
+* there is a displayer- or output slot, which got allocated on creation
+ of the process
+* the process disposes calculated data frames "into" this slot
+* the process can be paused/started and stopped (aborted, halted).
+* some processes allow for changing parameters dynamically (e.g. speed,
+ direction)
+* each process has to ensure that the output/display slot gets closed or
+ released finally
+
+.Process parameters
+A process is linked to a single stream data format (a ->
+link:StreamTypeSystem.html[stream implementation type]).
+
+It is configured with _frame quantisation_ and _timings_, and a _model port_
+identifier and _channel selector_.
+
+ quantisation::
+ translates time values into frame numbers.
(In the most general
+ case this is a function, connected to the session)
+
+ timings::
+ a definition to translate global model time units in real clock time,
+ including _alignment_ to an external _time grid_.
+
+ model port::
+ a point in the (high level) model where output can be produced.
+
+ This might be a global pipe in one of the model's timelines, or
+ it might be a _probe point_.
+
+ channel::
+ within the session and high level model, details of the stream
+ implementation are abstracted. Typically, a global pipe (master bus
+ or subgroup) corresponds to a multichannel stream, and each of these
+ channels might be hooked up to an individual render process
+ (we have to work out if that's _always the case_ or just under
+ _some circumstances_)
+
+
+[NOTE]
+===================
+While certainly the port and channel definition is fixed, unfortunately the
+quantisation and the timings aren't. The timings may be changed in the middle
+of an ongoing render process, due to changed playback speed, shuffling or
+requirements forwarded from chase-and-lock synchronisation to an external
+source. We still need to discuss if Lumiera is going to support variable
+framerates (several media professionals I've talked to were rather positive we
+need to support that -- personally I'm still in doubt we do). Variable
+framerates force us to determine the frame numbers by an integration over time
+from a start position up to the time position in question. The relevant data to
+be integrated is located in the session / high-level model; probably we'll then
+create an excerpt of this data, but nonetheless quantisation will be a
+function of time. Anyway, it is the render process's job to translate all kinds
+of parameter changes into relevant internal API calls to reconfigure the
+calculation process to fit.
+===================
+
+
+
+Engine Model (low-level Model)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The low level model is a network of interconnected render nodes.
It is created +by the build process to embody any configuration, setup and further +parametrisation derived from the high-level description within the session. But +the data structure of this node network is _opaque_ and considered an +implementation detail. It is not intended to be inspected and processed by +outward entities (contrast this to the high-level model within the session, +which provides an extensive discovery API and can be manipulated by model +mutating commands). We just provide a set of _query and information retrieval +functions_ to suit the needs of the calculation process. The engine model is +_not persisted._ + +* the engine model is partitioned by a _segmentation_ of the time axis. + Individual segments can be hot-swapped. +* the engine has _exit nodes,_ corresponding to the model ports mentioned above +* each exit node provides a stream type definition plus quantisation and + alignment constraints. + +Thus, for any pair (port, time) it is possible to figure out a segment and an +exit node to serve this position. The segmentation(s) for multiple ports might +differ. To allow for effective dispatching, the model should provide +convenience functions to translate these informations into frame number ranges. +The mentioned quantisation and alignment constraints stem from the fact that +the underlying media source(s) are typically themselves quantised and the +timings might be manipulated within the processing chain. We might or might not +be able to shift the underlying media source +(it might be a live input or it might be tied to a fixed timecode) + + + +Processing Node +~~~~~~~~~~~~~~~ +In this context, a node is a conceptual entity: it is an elementary unit of +processing. 
It might indeed be a single invocation of a _processor_ (plugin or +similar processing function), or it might be a chain of nodes, a complete +subtree, it might _represent_ a data source (file, external input or peer in +case of distributed rendering), or it might stand for a pipeline implemented in +hardware. The actual decision about these possibilities happened during the +build process and can be configured by rules. Information about these decisions +is retained only insofar it is required for the processing, most of the +detailed type information is discarded after the wiring and configuration step. +As mentioned above, each node serves two distinct purposes, namely to assist +with the planning and dispatching, and to pull data by performing the +calculations. + +Nodes can be considered _stateless_ -- pulling a node has no effect outside the +invocation context. While a node _might_ actually be configured to drive a +whole chain or subtree and propagate the pull request +_within_ this tree or chain internally, the node _never propagates a pull + request beyond its realm._ The pull() +call expects to be provided with all prerequisite data, intermediary and output +buffers. + + +Dispatching Step +~~~~~~~~~~~~~~~~ +The dispatcher translates a render process (actually a _calculation stream_ as +part of a render process) into sequences of node invocations, +which then can be analysed further (including planning the invocation of +prerequisites) and scheduled. This mapping is assisted by the engine model API +(to find the right exit node in the right segment), the render process (for +quantisation) and the involved node's invocation API (to find the +prerequisites) + + +Node Invocation API +~~~~~~~~~~~~~~~~~~~ +As nodes are stateless, they need to be embedded into an invocation context in +order to be of any use. 
The node invocation has two distinct stages and thus +the invocation API can be partitioned in two groups + +Planning +^^^^^^^^ +During the planning phase, the dispatcher retrieves various informations +necessary to _schedule_ the following pull call. These informations include + + * reproducible invocation identifier, usable to label frames for caching + * opaque source identifier (owned by the backed) in case this node + represents a source + * prerequisite nodes + * index (channel) of the prerequisite's output to be fed as input buffer(s) + * number and size of the output buffers required + * additional memory required + * control data frame(s) + + +Node pull +^^^^^^^^^ + * the pull call expects to be provided with all the resources announced during + the planning step + * moreover, the pull call needs to know (or some way to figure out) the time + coordinates + * after retrieving automation, the control flow forwards to the actual + processing function + * there is an result/error code (assuming the scheduler prefers error codes + over exceptions) + + +'''' + +Tasks +~~~~~ + * find out if we need to support variable framerate + ([green]#-> yes, implementation deferred#) + * find out about the exact handling of multichannel data streams ([green]#✔ done#) + * design and prototypical implementation of frame quantisation ([green]#✔ done#) + * design a buffer descriptor ([green]#✔ done#) + * design a buffer designation scheme [red]#TODO# + * expand on the node identification scheme [red]#TODO# + * clarify how control data frames can be addressed [red]#TODO# + + +Discussion +~~~~~~~~~~ + +Pros/Cons/Alternatives +^^^^^^^^^^^^^^^^^^^^^^ +Currently we're focussing on how to implement _this_ concept, not on +evaluating alternatives. Especially the idea of scheduling individual frame jobs +is a core concept of Lumiera. This RfC tries to bridge from the session model to +an engine based on these concepts. 
It's the attempt to link two concepts already +defined and decided on.... + + +Rationale +^^^^^^^^^ +* allow for optimal resource use and avoid blocking of threads +* shift away complexity from the engine into the builder, which is by far not + so performance critical +* allow to adjust the actual behaviour of the engine in a wide range, based on + actual measurements +* create a code structure able to support the foreseeable extensions (hardware + and distributed rendering) without killing maintainability + + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +.State -> Draft +Requirements and details of the design are sufficiently clear meanwhile. +Ther seems to be not much room for alternative approaches, given our +general planning for the application + + Mi 11 Mai 2011 19:27:12 CEST Ichthyostega + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] + diff --git a/doc/devel/rfc/EngineInterfaceSpec.txt b/doc/devel/rfc/EngineInterfaceSpec.txt new file mode 100644 index 000000000..2b4be964e --- /dev/null +++ b/doc/devel/rfc/EngineInterfaceSpec.txt @@ -0,0 +1,234 @@ +Engine Interface Spec +===================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Mi 11 Mai 2011 17:53:16 CEST_ +*Proposed by* Ichthyostega +------------------------------------- + +[abstract] +******************************************************************************** +The main service of the Renderengine is to deliver a stream of calculations +bound by timing constraints. The parameters of this delivery can be reconfigured +******************************************************************************** + +Try to start the integration and overall design of the Lumiera Render Engine. 
+Point of reference is the functionality other parts of the application are relying on. + +Description +----------- +//description: add a detailed description: +The Engine is driven by the Player subsystem and draws on the low-level Model +(Render nodes network) for all local parameters and control data. The goal is +to deliver all the typical playback and rendering operations commonly found +in NLE applications (functional requirements). Moreover, this functionality +shall be delivered in a robust and reliable fashion, while making optimal +use of the available I/O bandwidth and computation power (non-functional +requirements). + +Requirements Specification +~~~~~~~~~~~~~~~~~~~~~~~~~~ +.functional +- simple calculation stream +- with or without defined end point +- deliver to fixed output buffer(s) with high timing precision +- ability to deliver individual data chunks (e.g. single frames) +- ``free wheeling'' operation for maximum calculation throughput +- throttled operation using idle calculation or bandwidth resources +- streams with regular stepping and arbitrary block sizes +- looping, seamless chaining of calculation streams with ongoing timing +- ability to pause and to change / combine any of the above any time +- ability to abort or change, providing reliable feedback on completion +- ability to relocate (shift in time) parts of an ongoing calculation stream +- support for chase-and-lock synchronisation + +.non-functional +- protection against overload and deadlocks +- gracious degradation in case of problems +- maintain a configurable quality-of-service level +- utilise precisely the resources actually available + +Functionality description in detail +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +-> see the link:EngineInterfaceOverview.html[Engine/Interface overview] for +a description of the involved entities and for definitions for common terms. 
+ +Definitions +^^^^^^^^^^^ +Calculation Stream:: + A series of similar but parametrised calculations, + bound to deliver results in sequence and in accordance to timing constraints +Timed Delivery:: + Calculation result data has to be delivered within a fixed time interval + immediately preceding the delivery deadline, so it can be consumed by + an (possibly external) output process without the need for further buffering +Output Slot:: + An abstracted facility receiving calculated data; including a scheme + to organise the output buffers, which get handed over to an independent + thread running in parallel, or maybe even to an external process + +Operation description +^^^^^^^^^^^^^^^^^^^^^ +When *creating a calculation stream*, the _exit node_ and an already opened +_output slot_ are provided, plus the _timing parameters_ (frame duration, +stepping interval length, optional start and/or endtime) + +A *free wheeling calculation stream* is created in a similar fashion, just +without timing constraints on the output delivery; i.e. just the output slot +is parametrised differently. This invocation is used to create a ``mixdown'' +or ``final render'' to be saved into an output file. + +A *background rendering mandate* is created analogous, but _without_ providing +an output slot. Rather, it is expected that the engine will cache the generated +data internally. + +When calculation stream definitions are *chained*, the follow-up calculation +stream is expected to be delivered seamlessly after the preceding stream, +without interrupting the output timings. + +*Looping* is a special kind of chained calculations, where the same segment +is delivered continuously. But note, the loop boundaries are not necessarily +aligned with the frame spacing or the output timing requirements. + +For supporting *chase-and-lock*, the engine needs a mechanism to follow an +externally provided synchronisation goal, _without_ altering the output +delivery timings. 
Obviously, we need to build in a strategy for handling
+this problem (because the solution is bound to be different for different
+kinds of media).
+
+The engine can expect the output slot to support *de-clicking* or *flicker
+protection* -- yet the engine needs to signal precisely when this is necessary
+
+Quality of service
+^^^^^^^^^^^^^^^^^^
+The Engine is expected to provide different quality-of-service classes,
+which are requested as part of the definition parameters for a calculation stream.
+
+- 'SYNC_PRIORITY' means to keep up to the delivery requirements,
+ even if this means failing to deliver data altogether.
+- 'PERFECT_RESULT' means to deliver data perfect up to the definition,
+ even if this means violating the timing constraints.
+- 'COMPROMISE' allows the engine to take some shortcuts in order to
+ deliver a roughly satisfactory behaviour. Likely there will be
+ multiple classes of compromise.
+
+The quality of service is partially implemented directly by the engine
+and partially passed on as parameter to the individual node invocations.
+For example, the engine might decide to switch down to proxy media, while
+actually the node network will perform the actual switch and reconfiguration.
+
+The quality of service could be implemented as a strategy, to be consulted at
+various decision points. The above cases would then be just some preconfigured
+default strategies.
+
+Reconfiguration and abort
+^^^^^^^^^^^^^^^^^^^^^^^^^
+There needs to be a separate ``control channel'' to cause various reconfigurations
+during an ongoing calculation process. With the exception of the output slot, all
+parameters defining a calculation stream might be changed on-the-fly -- including
+the possibility to abort calculation altogether.
+
+The engine is _not required_ to react on such change requests immediately or
+synchronously. The goal is rather to integrate such changes seamlessly.
+Yet we _require_...
+
+- a guarantee that the change request is observed within some tolerance interval
+ (i.e. we may block waiting on the change to happen, without risking a deadlock)
+- a reliable feedback _after_ the change has happened, by invoking a response signal
+ (functor/callback provided with the change request)
+- a guarantee not to proceed with the original setup after this signalling
+ (read: after receiving this feedback, resources required only by the initial
+ setup may be deallocated)
+
+Especially note that the following things might be changed in the middle of an
+ongoing calculation:
+
+- timing parameters of the calculation stream (frame durations, stepping interval)
+- start and end time
+- splitting and chaining of calculation streams (e.g. introducing jumps)
+- adjust the looping boundaries
+- toggle _paused_ state
+- change the exit node to use for pulling
+- relocate the nominal time position of parts of the calculation stream;
+ especially we expect already calculated and cached data to be re-labeled
+- invalidate parts of the (nominal) time axis, forcing recalculation
+- abort individual calculation streams without interfering with others.
+
+
+Tasks
+~~~~~
+// List what needs to be done to implement this Proposal:
+* analyse requirements of the player subsystem ([green]#✔ done#)
+* determine further information needed during calculation [,yellow]#WIP#
+* find out about timing requirements and constraints in detail [red]#TODO#
+* define the interface functions in detail [red]#TODO#
+* prepare a test fixture with mock-up calculations [red]#TODO#
+* implement the invocation backbone with stubbed functionality [red]#TODO#
+
+
+Discussion
+~~~~~~~~~~
+
+Pros
+^^^^
+// add a fact list/enumeration which make this suitable:
+// * foo
+// * bar ...
+
+
+
+Cons
+^^^^
+// fact list of the known/considered bad implications:
+The requirements placed on live changes are quite high
+
+
+
+Alternatives
+^^^^^^^^^^^^
+//alternatives: explain alternatives and tell why they are not viable:
+
+
+
+Rationale
+---------
+//rationale: Give a concise summary why it should be done *this* way:
+The expectations for the playback and render functionality of an NLE are
+pretty much set. There isn't much room for reducing functionality.
+So the goal for this RfC is to precisely define the inevitable
+and break it down into tangible functionality on the implementation level.
+
+
+
+//Conclusion
+//----------
+//conclusion: When approbated (this proposal becomes a Final)
+// write some conclusions about its process:
+
+
+
+
+Comments
+--------
+//comments: append below
+
+Discussed in the May developers meeting. Seems to be basically acceptable.
+_Cehteh_ proposed some small adjustments:
+
+- making the _QualityOfService_ rather a strategy to be queried
+- treating the rescheduling a bit separate from the other changes, because
+ that is very common and needs to be performant.
+- introducing a separate scheduler/queue for time scheduled tasks, like + with rater soft realtime requirements + + So 15 Mai 2011 00:55:24 CEST Ichthyostega + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/FeatureBundle_PluggableModules.txt b/doc/devel/rfc/FeatureBundle_PluggableModules.txt new file mode 100644 index 000000000..06b9fe814 --- /dev/null +++ b/doc/devel/rfc/FeatureBundle_PluggableModules.txt @@ -0,0 +1,195 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-09-03_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + + +Describe pluggable modules by a "Feature Bundle" +------------------------------------------------ +This proposal builds upon Cehteh's Plugin Loader, which is the fundamental +mechanism for integrating variable parts into the application. + +It targets the special situation when several layers have to cooperate in order +to provide some pluggable functionality. The most prominent example are the +"effects plugins" visible for the user. Because, in order to provide such an +effect + + * the engine needs a processing function + * the builder needs description data + * the gui may need a custom control plugin + * and all together need a deployment descriptor detailing how they are + related. + + + + +Description +~~~~~~~~~~~ +The Application has a fixed number of *Extension Points*. Lumiera deliberately +by design does _not build upon a component architecture_ -- which means that +plugins can not themselves create new extension points and mechanisms. New +extension points are created by the developers solely, by changing the code +base. Each extension point can be addressed by a fixed textual ID, e.g. +"Effect", "Transition", .... 
+ +Now, to provide a pluggable extension for such an Extension Point, we use a +*Feature Bundle* Such a Feature Bundle is comprised of + + * a Deployment Descriptor (provided as "structured data" -- TODO: define the + actual data format) + * the corresponding resources mentioned by this Deployment Descriptor + +The Deployment Descriptor contains + + * Metadata describing the Feature Bundle + - ID of the Extension point + - ID of the Bundle (textual ID) + - ID of origin / provider (could be a domain name) + - Category (textual, tree-like) + - Version number (major, minor) + - required Extension point version number (or Lumiera version no.?) + - Author name (utf8) + - Support email (utf8) + - textual description in a single line (utf8) + * A List of Resources, each with: + - ResourceID + - SubID + - Type of Resource, which may be + . Plugin + . Properties + . Script + . ...? + + - one of: + . the Resource provided inline in suitable quoted form (for textual + resources only) + . an URL or path or similar locator for accessing the Resource (TODO: + define) + - Additional Metadata depending on Type of Resource (e.g. the language of a + script) + + + +We do _not_ provide a meta-language for defining requirements of an Extension +Point, rather, each extension point has hard wired requirements for a Feature +Bundle targeted at this extension point. There is an API which allows code +within lumiera to access the data found in the Feature Bundle's Deployment +Descriptor. Using this API, the code operating and utilizing the Extension +Point has to check if a given feature bundle is usable. + +It is assumed that these Feature Bundles are created / maintained by a third +party, which we call a *Packager*. This packager may use other resources from +different sources and assemble them as a Feature Bundle loadable by Lumiera. Of +course, Lumiera will come with some basic Feature Bundles (e.g. for colour +correction, sound panning,....) which are maintained by the core dev team. 
+(please don't confuse the "packager" mentioned here with the packager creating +RPMs or DEBs or tarballs for installation in a specific distro). Additionally, +we may allow for the auto-generation of Feature Bundles for some simple cases, +if feasible (e.g. for LADSPA plugins). + + +The individual resources +^^^^^^^^^^^^^^^^^^^^^^^^ +In most cases, the resources referred by a Feature Bundle will be Lumiera +Plugins. Which means, there is an Interface (with version number), which can be +used by the code within lumiera for accessing the functionality. Besides, we +allow for a number of further plugin architectures which can be loaded by +specialized loader code found in the core application. E.g. Lumiera will +probably provide a LADSPA host and a GStreamer host. If such an adapter is +applicable depends on the specific Extension point. + +The ResourceID is the identifyer by which an Extension point tries to find +required resources. For example, the Extension Point "Effect" will try to find +an ResourceID called "ProcFunction". There may be several Entries for the same +ResourceID, but with distinct SubID. This can be used to provide several +implementations for different platforms. It is up to the individual Extension +Pont to impose additional semantic requirements to this SubID datafield. (Which +means: define it as we go). Similarly, it is up to the code driving the +individual Extension point to define when a Feature Bundle is fully usable, +partially usable or to be rejected. For example, an +"Effect" Feature Bundle may be partially usable, even if we can't load any + "ProcFunction" for +the current platform, but it will be unusable (rejected) if the proc layer +can't access the properties describing the media stream type this effect is +supposed to handle. 
+ +Besides binary plugins, other types of resources include: + * a set of properties (key/value pairs) + * a script, which is executed by the core code using the Extension Point and + which in turn may access certain interfaces provided by the core for "doing + things" + +Probably there will be some discovery mechanism for finding (new) Feature +Bundles similar to what we are planning for the bare plugins. It would be a +good idea to store the metadata of Feature Bundles in the same manner as we +plan to store the metadata of bare plugins in a plugin registry. + + + + +Tasks +^^^^^ + + +Pros +^^^^ + + + +Cons +^^^^ + + + +Alternatives +^^^^^^^^^^^^ +Use or adapt one of the existing component systems or invent a new one. + + + +Rationale +~~~~~~~~~ +The purpose of this framework is to decouple the core application code from the +details of accessing external functionality, while providing a clean +implementation with a basic set of sanity checks. Moreover, it allows us to +create an unique internal description for each loaded module, and this +description data e.g. is what is stored as an "Asset" into the user session. + +Today it is well understood what is necessary to make a real component +architecture work. This design proposal deliberately avoids to create a +component architecture and confines itself to the bare minimum needed to avoid +the common maintenance problems. As a guideline, for each flexibility available +to the user or packager, we should provide clearly specified bounds which can +be checked and enforced automatically. Because our main goal isn't to create a +new platform, framework or programming language, it is sufficient to allow the +user to _customize_ things, while structural and systematic changes can be done +by the lumiera developers only. + + + + + + +Comments +-------- + +From a fast reading, I like this, some things might get refined. 
For example
+I'd strongly suggest to make the Deployment Descriptor itself an Interface
+which is offered by a plugin, all data will then be queried by functions on
+this interface, not by some 'dataformat'. Also Resource ID's and a lot of other
+metadata can be boiled down to interfaces: names, versions, uuid of these
+instead of reinventing another system for storing metadata. My Idea is to make the
+link:Plugin/Interface[] system self-describing; this will also be used to
+bootstrap a session on itself (by the serializer which is tightly integrated)
+ -- link:ct[] [[DateTime(2008-09-04T09:28:37Z)]] 2008-09-04 09:28:37
+
+Parked
+~~~~~~
+Needs to be reviewed some time later.
+
+ Do 14 Apr 2011 03:06:42 CEST Christian Thaeter
+
+Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview]
diff --git a/doc/devel/rfc/GitCommitMessageFormat.txt b/doc/devel/rfc/GitCommitMessageFormat.txt
new file mode 100644
index 000000000..3446ff87d
--- /dev/null
+++ b/doc/devel/rfc/GitCommitMessageFormat.txt
@@ -0,0 +1,208 @@
+Git Commit Message Format
+=========================
+
+// please don't remove the //word: comments
+
+[grid="all"]
+`------------`-----------------------
+*State* _Final_
+*Date* _Fr 31 Aug 2012 03:54:14 CEST_
+*Proposed by* Christian Thaeter
+-------------------------------------
+
+********************************************************************************
+.Abstract
+This RFC describes the format of commit messages, based on already used
+practice.
+********************************************************************************
+
+Description
+-----------
+//description: add a detailed description:
+
+Git commit messages are almost free form text, the only exception is that git
+treats the first line in a commit message specially to form the header one
+sees in various outputs.
+
+Since we aim to automate some processing we refine the format for commit
+messages slightly more.
+
+
+.General
+
+Commit Messages will be shown in space limited areas (lists on webpages,
+emails, tooltips). Unlike source code where we are quite lax about line
+lengths commit messages should not exceed 80 characters per line.
+
+The first line is treated as Header as described below, followed by an empty
+line and then the Body of the commit message. The Body is optional but
+recommended.
+
+These formalized keywords for headers are optional, if in doubt then don't use any.
+But if one uses them, then use only ones defined here since automatic processing
+only knows about these.
+
+
+.Header
+
+The Header is free form text explaining the purpose of the commit in a few
+words. It may start with one uppercased keyword and a colon if appropriate directly
+followed by some (optional, defined elsewhere) metadata. These keywords are
+optional but recommended since automatic processing acts upon them. Normal commits
+need no keywords and are just free form text.
+
+To be exact, here is a regex matching valid Headers:
+
+ ^[[:upper:]]+:[[:punct:]]?[[:alnum:][:punct:]]*
+
+Legal headers are for example:
+
+ DONE: some feature
+ FIX:#1234 Segv when starting
+ RELEASE:1.0 Party
+
+
+.List of Defined headers:
+
+'WIP:'::
+ 'Work in Progress', commits marked as this may be incomplete and fail
+ in various ways. For example automatic git-bisecting will skip them.
+ WIP should take precedence, if for example one needs multiple commits
+ to fix a bug, then only the final commit is tagged as FIX: and the
+ leading commits are WIP:
+
+'FIX:'::
+ Bugfix. The Text should explain what error got fixed. A reference to
+ a bug number is optional and not required.
+
+'RFC:'::
+ This commit adds or modifies an RFC but doesn't touch the codebase
+ (tests/examples are permitted).
+
+'DOC:'::
+ This commit only alters documentation but no code.
+
+'STASH:'::
+ When 'git-stash' is not enough (for example one wants to move
+ unfinished changes to another repository or commit as backup and amend
+ things later). Normally such commits should not remain in a published
+ repository and should not become merged.
+
+'DRAFT:'::
+ Much like WIP: but will not break compilation and is sane to use. For
+ example mockups, documentation and skeleton code for new facilities
+ may use this.
+
+'DONE:'::
+ Final commit/merge when some noteworthy part is done. The idea here is
+ that finished things could be easily filtered out for our quarterly reports.
+
+'TODO:'::
+ This commit adds documentation, comments or tests about something to
+ be done. Same rules as NoBug's TODO apply.
+
+'FIXME:'::
+ This commit adds documentation, comments or tests about something to
+ be fixed. Aka a known bug which can not be fixed instantly for some
+ reason. Same rules as NoBug's FIXME apply.
+
+'PLANNED:'::
+ This commit adds documentation, comments or tests about something
+ planned. Same rules as NoBug's PLANNED apply.
+
+'ALPHA:'::
+ Notifies the CI system that this commit defines an ALPHA release, the CI
+ may take this and build a package accordingly
+
+'BETA:'::
+ Notifies the CI system that this commit defines a BETA release, the CI
+ may take this and build a package accordingly
+
+'RELEASE:'::
+ Notifies the CI system that this commit defines a production release, the CI
+ may take this and build a package accordingly
+
+
+Note: This list will be updated as need arises
+
+
+.Body
+
+Adding a body is optional but recommended except for the most simple changes.
+A body, if present, should be separated from the header by one empty line.
+
+It is suggested not to make any lines longer than 80 characters and use
+asciidoc formatting. In most cases this means just free form text and maybe
+use of bulleted lists.
+
+Care should be taken to write clean understandable commit messages.
To some
+extent they may repeat the comments and documentation from the committed code
+in a short form. Think that anyone else reading only the commit message should
+understand what's going on.
+
+
+Tasks
+~~~~~
+// List what needs to be done to implement this Proposal:
+// * first step ([green]#✔ done#)
+// * second step [,yellow]#WIP#
+
+
+Discussion
+~~~~~~~~~~
+
+Pros
+^^^^
+// add a fact list/enumeration which make this suitable:
+// * foo
+// * bar ...
+
+
+
+Cons
+^^^^
+// fact list of the known/considered bad implications:
+
+
+
+Alternatives
+^^^^^^^^^^^^
+//alternatives: explain alternatives and tell why they are not viable:
+
+
+
+Rationale
+---------
+//rationale: Give a concise summary why it should be done *this* way:
+
+This RFC is based on existing practice, we have almost always done it this way. Some minor
+glitches are present in the project history (no colon after keywords,
+lowercase keywords). Automatic processing becomes simpler when we formalize
+these things in an unambiguous way. Commits failing these definitions might
+confuse the toolchain (builddrone) but these failures shall not be critical.
+
+
+//Conclusion
+//----------
+//conclusion: When approbate (this proposal becomes a Final)
+// write some conclusions about its process:
+
+
+
+
+Comments
+--------
+//comments: append below
+
+.State -> Final
+//add reason
+
+We decided on the Sept. 2012 devel meeting to finalize this RFC.
+ +Christian Thaeter:: 'Do 13 Sep 2012 03:57:23 CEST' ~~ + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/GitSubmoduleTransistion.txt b/doc/devel/rfc/GitSubmoduleTransistion.txt new file mode 100644 index 000000000..fdbcd69db --- /dev/null +++ b/doc/devel/rfc/GitSubmoduleTransistion.txt @@ -0,0 +1,87 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-04-09_ +*Proposed by* link:ct[] +------------------------------------- + + +Use Git Submodules to organize the project +------------------------------------------ +We planned this long time ago when the project started, this proposal is for to +work out the details and define a turnover point in time. + + +Description +~~~~~~~~~~~ +There is a git-filter-branch command which helps in doing the dirty work +isolating commits which touch certain dirs. This can moderately easily be used +to create a new repository with a rewritten history containing only sub parts +of the original history. + +The basic idea is that one developer who wants to works on a certain subsystem +clones the 'official' master and then updates and tracks only the development +state of a certain subsystem. + + +Tasks +^^^^^ + * what shall be in the master repository? + * boilerplate files, license, build infrastructure + * the _admin_ dir with supplemental scripts + * define which submodules shall be defined? + * _doc/devel_ + * _doc/user_ + * _wiki_ + * _uml_ + * _src/backend_ + * _src/proc_ + * _src/gui_ + * _src/lib_ + +Not yet decided: + * _tests_ move them into the _src/$subsystem_ as symlink? 
+ * _src/tool_ + + +Pros +^^^^ + * better isolation of single subprojects + * one who is interested on one subproject can track a master and only + following certain subproject updates + * smaller/faster updates/downloads + + +Cons +^^^^ + * needs some more git-fu to be used by the developers + * we will host considerably more git repositories (bigger list in gitweb), + this is not a problem but might look more confusing + + + +Alternatives +^^^^^^^^^^^^ +Go as we do currently with one big repository per developer. The decision to +use submodules is not urgend and it can be transfered at any time. The +turnaround should just be planned and be scheduled to one day to minimize the +confusion and merging issues. + + +Rationale +~~~~~~~~~ +When all people get used to it it allows a cleaner more sane work flow and well +isolated, less conflicting commits. + + + + + +Comments +-------- + +We concluded that that submodules are not yet needed with exception for the +./doc folder. Parked for now. + -- ct 2008-07-26 09:09:57 + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/MakeSconsTheOfficialBuildSystem.txt b/doc/devel/rfc/MakeSconsTheOfficialBuildSystem.txt index 53958c986..61be3df3c 100644 --- a/doc/devel/rfc/MakeSconsTheOfficialBuildSystem.txt +++ b/doc/devel/rfc/MakeSconsTheOfficialBuildSystem.txt @@ -12,7 +12,7 @@ Make Scons the official build System ******************************************************************************** .Abstract -_Bless Scons the default build system for Lumiera._ +_Bless SCons the default build system for Lumiera._ ******************************************************************************** Description @@ -20,7 +20,7 @@ Description //description: add a detailed description: So far we using autotools and scons in parallel. Over time the need arose to have one -reliable supported build system. This shall be scons. +reliable supported build system. This shall be SCons. 
Tasks @@ -28,10 +28,11 @@ Tasks // List what needs to be done to implement this Proposal: // * first step ([green]#✔ done#) // * second step [,yellow]#WIP# -Nothing to do except for releases scons *must* be working and all non functional -build systems will be stripped from releases (branches?). +Nothing to do except for releases SCons _must be working_ and all non functional +build systems will be stripped on the release branches. +//// Discussion ~~~~~~~~~~ @@ -59,6 +60,7 @@ Rationale --------- //rationale: Give a concise summary why it should be done *this* way: +//// Conclusion @@ -66,6 +68,7 @@ Conclusion //conclusion: When approbate (this proposal becomes a Final) // write some conclusions about its process: +result of discussions and evaluation during the last years Comments @@ -75,9 +78,11 @@ Comments .State -> Final //add reason Decided on the December 2011 Developer meeting. - Mi 11 Jan 2012 22:28:36 CET Christian Thaeter + +Christian Thaeter:: 'Wed 11 Jan 2012 22:28:36 CET' ~~ //endof_comments: '''' Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] + diff --git a/doc/devel/rfc/MistakestoAvoid.txt b/doc/devel/rfc/MistakestoAvoid.txt new file mode 100644 index 000000000..4c671b8c8 --- /dev/null +++ b/doc/devel/rfc/MistakestoAvoid.txt @@ -0,0 +1,427 @@ +Design Process : Mistakes to avoid +================================== + +[grid="all"] +`------------`----------------------- +*State* _Dropped_ +*Date* _2008-04-21_ +*Proposed by* link:rick_777[] +------------------------------------- + + +Mistakes to avoid in the Lumiera design +--------------------------------------- + + +As a multimedia user and experienced programmer, I've found various flaws +present in open source Non Linear Video editors. Here I will list the problems +and their proposed (or mandatory) solutions. Please forgive me if some of the +ideas here have already been approved, I wrote this text before reaching this +wiki. 
+ + +Description +~~~~~~~~~~~ + + +As a multimedia user and experienced programmer, I've found the following flaws +present in open source Non Linear Video editors (your mileage may vary) : + +. Frequent crashes (which most of the time make you lose your work) +. Reinventing the wheel for every new project +. Lack of a user-friendly (and extensible) UI +. Lack of support for certain video formats or codecs +. Lack of documentation +. Lack of cross-platform support +. Dependency on scripted languages like Python, which make installation a mess + +I will expand on the problems and their proposed (or mandatory) solutions. + + +1. Frequent crashes +~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Frequent Crashes and unsaved work. +*Severity* CRITICAL. +*Solution* Isolating the UI from the rendering and data handling (also + improves the extensibility) +*Required* Yes +*Workarounds* Auto-save (however it's not a real solution for the problem) +-------------------------------------------------------------------- + +Working with multimedia (video / audio) editing is a magnet for segfaults +(crashes) due to the handling of pointers and compression algorithms. A bug in +a plugin (like in Audacity's low-pass filter) will crash and you suddenly +realize you lost your work - unless you have an auto-save feature, but that +doesn't go to the root of the problem. + +My proposal is to move the low-level handling of video to a separate process, +which then will do the processing - if it crashes, the UI will only report an +error with a dialog (i.e. "the process crashed. Try again?"), but you work will +stay safe. I'm not sure of the implementation difficulties that arise from +having a shared memory buffer for rendering / processing, but one thing is +certain: Whenever you move the cursor or rewind a part of a clip in your +resources, the application isn't supposed to crash. 
Just moving the cursor
+isn't a time-critical task, so perhaps we can use temporary files for this.
+It's safer if you're not doing the final rendering.
+
+
+Comments
+^^^^^^^^
+
+I am not sure yet about separating things into processes, generally it is clear
+that this would be more robust but there are some performance impacts and
+programming problems (massive amounts of data in shared memory). But most
+importantly, when a subprocess gets a job and crashes on it, it won't complete
+the job, we don't have a choice except to gracefully abort it. From a user
+perspective "It doesn't work!" there is not much difference to a complete crash.
+Well and yes we aim to make it crash proof rather, crashes are bugs and have to
+be fixed, period.
+
+Lumiera will never ever lose work, we don't plan to make a project file,
+autosave way. Lumiera will keep projects in an internal database like format
+which consists of a Dumpfile and a continuously written logfile. After a
+crash/powerdown whatever, this log just gets replayed. The advantages are
+countless, imagine persistent, selective undo and so on. Any other format
+(cinelerra2 XML, MXF, ...) will be realized by importer/exporter plugins.
+ -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]]
+
+
+
+2. Reinventing the wheel for every new project
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[grid="all"]
+`------------`------------------------------------------------------
+*Problem* Various projects compete and reinvent the wheel
+*Severity* Serious (Slows down development time. A lot)
+*Solution* Multi-tier design, turn the data handling into a backend and use
+ whatever UI you prefer
+*Required* Yes. Better now that the project hasn't started
+---------------------------------------------------------------------
+
+Imagine the Linux kernel was tied to the window manager. You would have to
+stick with KDE or GNOME and you couldn't improve it! Fortunately it's not like
+that for Linux, but it is for some projects.
If you want to change the user +interface from QT to wxWidgets or GTK you'll need to rewrite every module. + +If you separate the UI from the project handling engine, you can simply fork +the project and change the UI to one that supports skinning, without having to +do the complicated video-processing stuff. + +Separating the processes has an equivalent for web programming, it's called +"separation of concerns", or multi-tier design. When you suddenly change the +database engine, you don't need to change the whole program, just the database +module. Same goes for changing the UI from HTML to XML or Flash. If they're +separate modules that only communicate through a clearly defined API. + +Example case 1: The Code::Blocks IDE. The compiling engine supports various +compilers, and the engine itself is only a plugin for the main editor. If the +compiler crashes, you only get an error, but the IDE doesn't crash (unless it's +the UI part that's doing something wrong). + +Example case 2: Chessmaster. The user interface and speech synthesis stuff only +call the chess engine, called "theking.exe". Linux chess games also depend on +an engine to do the thinking. + +So I suggest to split the project into four separate tiers (not necessarily +processes): + +. User interface - communicates with the "project" tier, handles the user + events and does the calls. +. The project tier - the main part of the video editor. This one invokes the + renderer and decides which effects to apply, saving them as mere parameters + for later processing. It also tells you where the current pointer for the + track view is. Also calls the rendering engine for the current frame, or for + previews of a certain special effect. Note that if this process keeps running + even if the GUI crashes, later we can restart the GUI and keep working. +. The rendering engine - This one must be a separate process for the reasons + stated in problem #1. 
This also gives us the advantage that it can work on + the background while we keep working on the project (after all the project is + just a set of data stating which effects to apply to which tracks, and which + files are used for the tracks) - instead of just having a window saying + "Rendering, please wait". Even Adobe Premiere Pro suffered from this problem. + This means that if we put enough effort, we can surpass commercial software + in certain areas. Note that the rendering engine uses the same API than the + project tier, as it works on a copy of the project when doing the final + rendering. +. The video processing wrapper, which has interfaces for different video + processing toolkits (DirectX, GStreamer, etc). This also makes the project + cross-platform. Tiers 1 and 2 can go in one process, and the 3 and 4 in + another (this would make tier 2 a library which defines a C++ Class, and tier + 4 would also be a library which is used by the rendering engine). + +By separating the tiers, these can later become their own projects and overall +the community would receive great benefits. + + +Comments +^^^^^^^^ + +Please look at our design drafts, things will be separated (little different +than you describe here). We reuse things which are benefitful (gavl, ffmpeg, +..) but we are also aware that we reinvent the wheel for some things by +intention. Lumieras goal is not just to glue some existing libraries together +under a new gui, there are already a lot projects trying this way. We rather +aim for a ''Professional'' high performance Video editing solution which does +some things in a different (maybe more complex) way. We do not use existing +frameworks like MLT or gstreamer because we believe that these do not fit our +goals (gstreamer will be supported through plugins). We do not produce yet +another multimedia framework library (this only happen by coincidence) to be +used by others. + -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + +3. 
Lack of a user-friendly and extensible UI. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Often, editors provide a very poor and buggy interface. + Examples: Jahshaka doesn't even provide tooltips for the various tools, and + the documentation is poor; In Cinelerra I've noticed some bugs when using the + open dialog, I'd rather have the KDE one, thanks. +*Severity* From Annoying to Serious. +*Solution 1* Use a library that allows you to use different widget + libraries, like wxWidgets. +*Required* Recommended, but not obligatory. +*Solution 2* Write different user interfaces, but they'd be hard to maintain. +*Required*, No. +--------------------------------------------------------------------- + +This problem is complicated, we need a good framework for handling the tracks. +Perhaps this could become a separate project. Ideas are welcome. + + +Comments +^^^^^^^^ + +Joel started working on a GUI recently and making good progress. The UI should +finally be quite flexible as it mostly provides a skeletion where plugins +render to. We have quite a lot ideas about the UI and user input is welcome. +The UI is currently the most separate tier in the design, i'd like to make it a +plugin itself which is loaded when lumiera is started in a gui mode, but it is +to early to say how exactlly it will be integrated, except that we all agree +that GUI is optional and Lumiera can also run headless, script driven. + -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + +4. Lack of support for certain video formats or codecs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Lack of support for certain video formats or codecs. +*Severity* Critical. +*Workarounds* 1. Give a help page for the user to do his own conversion, but + this is very inelegant, annoying, and a waste of time. 2. 
Provide conversion + on the fly, and keep a separate "preprocessed" copy of the imported clip in a + separate directory. This is a nice middle ground, IMHO. +*Solution* Use a wrapper library as stated in problem # 2, having a + plugin-based design is recommended. +*Required* Yes. +--------------------------------------------------------------------- + +Some editors like Cinelerra are hardwired into using one format, or have a +phobia to certain formats / codecs (i.e. DivX AVI's). If we separate the +project editing engine from the video handling libraries, we can use unknown +formats by simply providing an input/output plugin. This would allows us to use +files encoded with lossless codecs like +http://lags.leetcode.net/codec.html[Lagarith]. This also provides forward +compatibility for future formats. + + +Comments +^^^^^^^^ + +Lumiera is a video editor we don't care (*cough*, not really true) about video +formats. Everything which comes In and goes Out is defined in plugins which +handle video formats. We currently decided to use 'gavl' because it is a nice +small library which does exactly what we want. Later on gstreamer and other +such kinds of decoder/encoder/processing-pipe libs will be realized. + -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + +5. Lack of documentation +~~~~~~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Some video editors have very poor documentation (and that's an + understatement *cough* Jahshaka *cough* ) +*Severity* Critical. +*Solution* Have a team for the documentation. +*Required* Yes. +--------------------------------------------------------------------- + +Nuff said. + + +Comments +^^^^^^^^ + +Quote from Ohloh.net: (http://www.ohloh.net/projects/lumiera)[] + +------------------------------------------------------------ +Extremely well-commented source code + +Lumiera is written mostly in C++. 
Across all C++ projects on Ohloh, 23% of all +source code lines are comments. For Lumiera, this figure is 46%. This very +impressive number of comments puts Lumiera among the best 10% of all C++ +projects on Ohloh. +------------------------------------------------------------ + + +Nuff saied... Oh well, about user docs we like to get that impressive ratings +there too, any helpers? + -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + +6. Lack of cross-platform support +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Where's my Windows version? +*Severity* Blocker +*Solution* Use a cross-platform toolkit for the UI. +*Required* Depends, do you plan to make it Cross-Platform? +-------------------------------------------------------------------- + +A good example for this is the Code::Blocks IDE, which was thought of being +cross-platform from the beginning. Curiously, at first the project was +Windows-only, and its only F/OSS alternative was Dev-C++ from Bloodshed (eew). +Otherwise you'd have to stick with proprietary applications like Visual C++. + +In Linux there were various IDE's, but they were Linux-only. Since Code::Blocks +uses a cross-platform toolkit (wxWidgets), it can be compiled either in Windows +and Linux. There are RPM's for various distros now that the first public +version (8.02) got out. I've heard that QT is also cross-platform, but I +haven't tried it yet. + +Of course - if you separate the UI from the project engine, someone could make +his own Windows UI for the project. Now what needs to be taken care of, is that +the rendering libraries are cross-platform too. + + +Comments +^^^^^^^^ + +We refuse to make it cross platform intentionally. Most things are written +portable, POSIX compatible, some might need platform specific fixes. 
But our +target is primary Linux (because thats what we use) secondary any other Free OS +(hopefully we find some testers/maintainers for that). Lumiera ''might'' run on +OSX and patches will be accepted, but it is not a free platform so we don't +care by ourself. Windows due its diffrent system interfaces will be hard to +port, if someone wants to do that, have fun, we will accept patches to, but we +do not support it in *any* way by ourself. + -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + +7. Dependency on scripted languages like Python, which make installation a mess +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +[grid="all"] +`------------`------------------------------------------------------ +*Problem* Installation can be a mess if we depend on scripted languages. +*Severity* Annoying, the end user might just conform with another project + that "just works". +*Solution* Make it in C++ or other easily-compilable language. +*Required* VERY recommended. +--------------------------------------------------------------------- + +I've had to install several packages for my distro (whose repository is not as +large as others like Ubuntu's) from source. Some of them depend on very +esoteric scripted languages which I also need to install. And then the +libraries, etc. My suggestion is to free the end user from this burden, and +work on a common language, like C++. + + +Comments +^^^^^^^^ + +At some point a scripting language ''will'' be required, yet to drive the +testsuite, make headless rendering work and so on. We need to provide +installation instructions and/or even bundle this language with Lumiera. This +will likely become a small embedded language like Lua or some kind of forth (or +maybe some scheme?) it should not depend on strange modules which are not part +of the core scripting language distribution (or we shall provide them too), +needs to be worked out. 
+ -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] + + + +Author's comments +^^^^^^^^^^^^^^^^^ + +Some of the measures stated in this document are optional, but separating the +processes for the rendering engine, editor and User Interface are the optimal +solution and required to avoid common problems. + + +Discussion +---------- + +Mostly we agree with the general statements in this Design Entry. But there are +some points which don't stand the test of a detailed technical discussion. For +example, you simply can't state it's a 'mistake' not to write code which +similarly runs on windows and *nix. Well. You could try to write it in Java. +See my point? While today it's quite feasible to write office stuff or banking +applications in a cross-platform manner, a video editor still is a different +kind of a beast. + +A similar argumentation holds true for the question, wether or not to use +separate processes and IPC. While it certainly is a good idea to have the X +server or a database running in a separate process, the situation is really +quite different for editing video. Hopefully it's clear why. + +Could you please rework this Design Entry in a way that we can finalize +(accept) it? + +* Please remove the section about windows +* Please separate out things needing technical discussion and are not just + "mistakes", thus retaining only the big picture statements (on which we all + agree) +* How to secure the application against crashes +* If it is viable/desirable to run the gui in a separate process really needs + in-depth technical discussion (create a new Design Entry for this) +* How to deal with the dependencies problem in combination with + plugins/extensions and script languages + -- link:Ichthyostega[] [[DateTime(2008-10-05T01:51:50Z)]] + + +Conclusion +---------- +The October.2008 dev meeting decided to 'drop' this design proposal as is. 
+ +Basically, this text just tells us "to make Lumiera good", and especially it +contains a mixture of topics + +* We fully agree to 80% of the statements made there, but we think those + statements are so very basic and self-evident as to be considered off-topic + here. We are aware of the recurring problems with open source video editing. + That's why we are here. +* The proposal draws conclusions on two technically substantial points, at + which we don't agree. And it fails to provide sufficient (technically sound) + arguments to prove these statements. + +While it is certainly 'desirable' to be cross-platform as much as possible and +especially '''target Microsoft Windows''', we don't see much possibilities with +today's mainstream technology to build an application which is as +technologically demanding as a video editor is. We would end up developing two +or even three sister applications, or we are forced to sacrifice performance +for portability. When put up to face such options, we have a clear preference +to concentrate on a really free and open platform. + +While it is certainly 'desirable' to make the application as robust as +possible, we don't see how '''using multiple separate processes''' could help +us with this goal ''without creating major scalability or performance +problems'' due to the use of shared memory. And, yet more important: we don't +share the basic assumption made in the proposal, namely that video processing +is inherently dangerous. We think the basic algorithms involved are +sufficiently well-known and understandable to implement them in a sound manner. 
+ +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/NormalizedDeviceCoordinates.txt b/doc/devel/rfc/NormalizedDeviceCoordinates.txt new file mode 100644 index 000000000..b594cc3b2 --- /dev/null +++ b/doc/devel/rfc/NormalizedDeviceCoordinates.txt @@ -0,0 +1,112 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2009-01-14_ +*Proposed by* link:ct[] +------------------------------------- + + +Normalized Device Coordinates +----------------------------- + +AkhIL pointed me out to some blender problem and how renderman fixes that. We +should use this too. + + + + +Description +~~~~~~~~~~~ + +Just snippet from IRC log: + +------------------------------------------------------------ +[15:09] and I hope lumiera will use some resolution independend + measuring for all parameters +[15:09] one can rotate where the node actually sits +[15:09] like NDC +[15:09] or pass transistions through the renderpipe, make all effects + transisition aware and apply them at the end +[15:10] the later is better but needs more efforts and some rethinking +[15:10] we will prolly support both in lumiera :) +[15:11] in renderman's NDC for horizontal image with 4:3 aspect ration + (-1.33,-1) is lower-left corner and (1.33,1) upper-right +[15:11] ah +[15:11] so moving to different resolutions and different aspect ratios + in renderman makes no problems +[15:11] well good point, we will measure in pixel but need to convert + between them . using a float would be good to address pixels +[15:12] yes +[15:12] what stands NDC for? +[15:13] Normalized Device Coordinates +[15:14] ok +[15:14] so from -1 to 1 is a range by smallest image size +[15:15] yes sounds reasonable +[15:15] * cehteh adds a note to the lumiera design docs +[15:15] so far we dont do anything where it matters .. 
but that will + come +[15:16] when you move some logo to (0.8,-0.8) it will stay on screen + even when you chenge resolution and image aspect ratio +[15:17] all input images should be scaled to this range (-1,1) by + smalles side + +------------------------------------------------------------ + + + +Tasks +^^^^^ + + + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ + + + +Cons +^^^^ + + + +Alternatives +^^^^^^^^^^^^ + + + +Rationale +~~~~~~~~~ +TBD + + + + + +Comments +-------- + +One issue where I always assumed we'd need to define something of this sort is +for proxy editing. Especially this is a problem in conjunction with masks. +Basically, this means a bit more of "vector graphics". With film/video editing, +this was rather unusual, but with the advent of more and new digital video/film +formats it gets more and more important. Also, our considerations regarding +time handling and quantisation to single frames somewhat fit into this line of +thought. Up to now, rather the standard way of thinkin was to use a "project +framerate" and a fixed resolution in pixels. But we certainly can do better. + + -- Ichthyostega 18:09:50 + + +Parked +~~~~~~ +deferred for later, generally accepted. + + Do 14 Apr 2011 03:06:42 CEST Christian Thaeter + + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/OfficialAssemblyLanguage.txt b/doc/devel/rfc/OfficialAssemblyLanguage.txt new file mode 100644 index 000000000..a60444951 --- /dev/null +++ b/doc/devel/rfc/OfficialAssemblyLanguage.txt @@ -0,0 +1,161 @@ +Design Process : Official Assembly Language +=========================================== + +[grid="all"] +`------------`----------------------- +*State* _Dropped_ +*Date* _2008-08-01_ +*Proposed by* link:PercivalTiglao[] +------------------------------------- + + +Official Assembly Language +-------------------------- + +I describe here an optimization that might have to be be taken into account at +the design level. 
At very least, we should design our code with +auto-vectorization in mind. At the most, we can choose to manually write parts +of our code in assembly language and manually vectorize it using x86 SSE +Instructions or !PowerPC !AltiVec instructions. By keeping these instructions +in mind, we can easily achieve a large increase in speed. + + +Description +~~~~~~~~~~~ + +While the C / C++ core should be designed efficiently and as portable as +possible, nominating an official assembly language or an official platform can +create new routes for optimization. For example, the x86 SSE instruction set +can add / subtract 16 bytes in parallel (interpreted as 8-bit, 16-bit, 32-bit, +or 64-bit integers, or 32-bit/64-bit floats), with some instructions supporting +masks, blending, dot products, and other various instructions specifically +designed for media processing. While the specific assembly level optimizations +should be ignored for now, structuring our code in such a way to encourage a +style of programming suitable for SSE Optimization would make Lumiera +significantly faster in the long run. At very least, we should structure our +innermost loop in such a way that it is suitable for gcc's auto-vectorization. + +The problem is that we will be splitting up our code. Bugs may appear on some +platforms where assembly-specific commands are, or perhaps the C/C++ code would +have bugs that the assembly code does not. We will be maintaining one more +codebase for the same set of code. Remember though, we don't have to do +assembly language now, we just leave enough room in the design to add +assembly-level libraries somewhere in our code. + + +Tasks +~~~~~ + +* Choose an "Official" assembly language / platform. +* Review the SIMD instructions avaliable for that assembly language. +* For example, the Pentium 2 supports MMX instructions. Pentium 3 supports MMX + and SSE Instructions. Early Pentium4s support MMX, SSE, and SSE2 + instructions. 
Core Duo supports upto SSE4 instructions. AMD announced SSE5 + instructions to come in 2009. +* Consider SIMD instructions while designing the Render Nodes and Effects + architecture. +* Write the whole application in C/C++ / Lua while leaving sections to optimize + in assembly later. (Probably simple tasks or a library written in C) +* Rewrite these sections in Assembly using only instructions we agreed upon. + + +Pros +~~~~ + +Assuming we go all the way with an official assembly language / platform... + +* Significantly faster render and previews. (Even when using a high-level + library like http://www.pixelglow.com/macstl/valarray/[macstl valarray], we + can get 3.6x -- 16.2x the speed in our inner loop. We can probably expect + greater if we hand-optimize the assembly) + + +Cons +~~~~ + +* Earlier architectures of that family will be significantly slower or + unsupported +* Other architectures will rely on C / C++ port instead of optimized assembly +* Redundant Code + + +Alternatives +^^^^^^^^^^^^ + +* We only consider auto-vectorization -- GCC is attempting to convert trivial + loops into common SSE patterns. Newer or Higher level instructions may not be + supported by GCC. This is turned on + http://gcc.gnu.org/projects/tree-ssa/vectorization.html[in GCC4.3 with + specific compiler flags] +* We can consider assembly but we don't officially support it -- We leave the + holes there for people to patch up later. Unofficial ports may come up, and + maybe a few years down the line we can reconsider assembly and start to + reimplement it down the road. +* Find a SIMD library for C/C++ -- Intel's ICC and + http://gcc.gnu.org/onlinedocs/gcc-3.4.6/gcc/Vector-Extensions.html[GCC] both + have non-standard extensions to C that roughly translate to these + instructions. There is also the + http://www.pixelglow.com/macstl/valarray/[macstl valarray library] mentioned + earlier. Depending on the library, the extensions can be platform specific. 
+
* Write in a language suitable for auto-vectorization -- Maybe there exists
+  some vector-based languages? Fortran might be one, but I don't really know.
+
+
+Rationale
+~~~~~~~~~
+
+I think this is one of those few cases where the design can evolve in a way
+that makes this kind of optimization impossible. As long as we try to keep this
+optimization available in the future, then we should be good.
+
+
+Comments
+--------
+
+* I have to admit that I don't know too much about SSE instructions aside from
+  the fact that they can operate on 128-bits at once in parallel and there are
+  some cache tricks involved when using them. (you can move data in from memory
+  without bringing in the whole cache line). Nonetheless, keeping these
+  assembly level instructions in mind will ease optimization of this Video
+  Editor. Some of the instructions are high-level enough that they may affect
+  design decisions. Considering them now while we are still in early stages of
+  development might prove to be advantageous. Optimize early? Definitely not.
+  However, if we don't consider this means of optimization, we may design
+  ourselves into a situation where this kind of optimization becomes
+  impossible.
+
+* I don't think we should change any major design decisions to allow for
+  vectorization. At most, we design a utility library that can be easily
+  optimized using SIMD instructions. Render Nodes and Effects can use this
+  library. When this library is optimized, then all Render Nodes and Effects
+  can be optimized as well. -- link:PercivalTiglao[]
+  [[DateTime(2008-08-01T16:12:11Z)]]
+
+* Uhm, the Lumiera core (backend, proc, gui) doesn't do any number crunching.
+  This is all delegated to plugins (libgavl, effects, encoders). I think we
+  don't need any highly assembler/vector optimized code in the core (well, let's
+  see). These plugins and libraries are somewhat out of our scope and that's a
+  good thing; the people working on them know better than we do how to optimize this stuff. 
+ It might be even worthwile to try if when we leave all vectorization out, if + then the plugins can use the vector registers better and we gain overall + performance! + -- link:ct[] [[DateTime(2008-08-03T02:27:14Z)]] + +* Another idea about a probably worthwhile optimization: gcc can instumentate + code for profileing and then do arc profileing and build it a second time + with feedback what it learnd from the profile runs, this mostly affects + branch prediction and can give a reasonable performance boost. If somone + likes challenges, prepare the build system to do this: +. build it with -fprofile-arcs +. profile it by running ''carefully'' selected benchmarks and tests. +. rebuild it again this time with -fbranch-probabilities +. PROFIT + -- link:ct[] [[DateTime(2008-08-03T02:27:14Z)]] + +* I've discussed general ideas around, and I agree now that "core Lumiera" is + not the place to think of these kinds of optimizations. So I'll just move + this over to dropped. -- link:PercivalTiglao[] + [[DateTime(2008-08-04T18:33:58Z)]] + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/RefactorLiblumieraOut.txt b/doc/devel/rfc/RefactorLiblumieraOut.txt new file mode 100644 index 000000000..c31fb06a2 --- /dev/null +++ b/doc/devel/rfc/RefactorLiblumieraOut.txt @@ -0,0 +1,240 @@ +Refactor Liblumiera Out +======================= + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Fr 22 Apr 2011 10:46:50 CEST_ +*Proposed by* Christian Thaeter +------------------------------------- + +[abstract] +******************************************************************************** +liblumiera contains alot useful and reuseable code which is already in use by +other projects +******************************************************************************** + +Description +----------- +//description: add a detailed description: +Over the time we've put 
some efforts into the liblumiera. I've added +some from my code which predates the lumiera project which I am using +on many other projects. This now caused that I maintain this sources in +different unrelated projects and have to cross merge and update stuff +when I do updates and fixes somewhere. I think its time to factor the +reuseable parts out into a independent library (like glib does for +gtk), in fact I had this plan long ago. + + +.What parts are eligible for a standalone library + +Anything which is something tool alike and useful for other projects and not +tied to Lumiera only. This are the algorithms/datastructures, allocators, tool +macros. Additionally some of the src/common things should be moved into the +library. I give some lists below. + +.How to name it + +Long time ago my plan was to name it 'ctlib' or 'cehlib' but meanwhile there is +enough code done by others. So I'd propose a more neutral name, still +'lumieralib' or 'lulib' would be approbiate. The only thing we have to account +for is that some parts which are too specific for Lumiera and should not be +integrated into this spinoff need either to stay in a lumiera-internal lib +(src/lib/) as currently or being moved to the respective subsystems using them +(src/backend, src/proc, src/common, ...), so the names should not clash. + +.C, C++ ... + +For myself I need the C parts, while there is C++ code which interfaces to the +C implementations and also a lot code which does nice C++ things on its own. +This possibly means that we should in fact make 2 packages out of this, one C +and one C++ library (where the C++ part is more than just the wrappers, but +also the tools and tricks which are currently in src/lib/ and reuseable). + +.Who maintains it + +Despite a spin of I think we don't want to change anything from our current +practice and maintain it by the Lumiera developers. 
For many parts I feel
+responsible for it, but it's really a part of the Lumiera codebase, despite
+independently useable.
+
+.How to maintain it
+
+We need to decide about build system and documentation system. As build system
+we may right away start using scons. For documentation the situation is a bit
+different since some of my code uses pipadoc/asciidoc and other parts use doxygen.
+
+.What not to do
+
+Some of the code is currently quite specific to Lumiera while it could be made
+more generic. This is *NOT* the subject of this RFC; we may or may not do such a
+refactoring, but this RFC and any work resulting from this should only restrict
+to simple things like necessary namespace and variable renaming and integration
+in the build system.
+
+
+C Parts
+-------
+
+Library
+~~~~~~~
+What belongs to the library
+
+Containers
+^^^^^^^^^^
+ * cuckoo hashing (cuckoo.c|h)
+ * linked lists (llist.h slist.h)
+ * cache lists (mrucache.c|h)
+ * splay trees (psplay.c|h)
+ * priority queues (not done yet)
+
+Runtime tools
+^^^^^^^^^^^^^
+ * error handling (error.h error.c) used by the other facilities too
+ * clib convenience wrappers (safeclib.c|h) needs better name, maybe refactor
+   into new facilities
+
+Multithreading
+^^^^^^^^^^^^^^
+ * locking, condition variables etc. (condition.c|h (rec)mutex.c|h, rwlock ...)
+
+Memory management
+^^^^^^^^^^^^^^^^^
+ * Memory pools (mpool.c|h)
+ * Temporary buffers (tmpbuf.c|h)
+
+Metaprogramming
+^^^^^^^^^^^^^^^
+ * preprocessor tools (ppmpl.h) move common preprocessor macros here
+ * polymorphic call helper for C (vcall.h)
+
+Interface system and module loader
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+except for some hardcoded references to 'lumiera_org' and '.lum' plugin names
+this is quite generic, possibly moving this over could be postponed, but might
+eventually be done. 
+ +From 'src/common' +------ +interface.c interfacedescriptor.h interface.h interfaceproxy.cpp +interfaceregistry.c interfaceregistry.h plugin.c plugin_dynlib.c plugin.h +------ + + +The 'config' system could become a candidate too if it ever gets finished and +proved useful, but for the time being its better kept in Lumiera. + + +Not Library +~~~~~~~~~~~ +Too specific to Lumiera: +----- +luid.c luid.h time.h +----- + + +C++ Parts +--------- + +For most of the C++ parts I am not sure, ichthyo should decided upon these +(please edit this here) + +Library +~~~~~~~ +These look 'generic' or wrap the C parts: +------ +singleton-factory.hpp singleton.hpp singleton-policies.hpp +singleton-preconfigure.hpp singleton-ref.hpp singleton-subclass.hpp +sync-classlock.hpp sync.cpp sync.hpp thread-local.hpp +typed-allocation-manager.hpp typed-counter.hpp util.cpp util-foreach.hpp +util.hpp variant.hpp +------ + +Not Sure +~~~~~~~~ +------ +access-casted.hpp advice advice.hpp allocation-cluster.cpp +allocation-cluster.hpp bool-checkable.hpp cmdline.cpp cmdline.hpp del-stash.hpp +diagnostic-context.hpp element-tracker.hpp error.hpp (currently too +lumiera specific) exception.cpp (as before) factory.hpp format.hpp +frameid.hpp functor-util.hpp handle.hpp hash-indexed.hpp iter-adapter.hpp +iter-adapter-stl.hpp iter-source.hpp itertools.hpp iter-type-binding.hpp +lifecycle.cpp lifecycleregistry.hpp lumitime-fmt.hpp lumitime.hpp +multifact-arg.hpp multifact.hpp meta/* null-value.hpp observable-list.hpp +opaque-holder.hpp optional-ref.hpp p.hpp query.cpp query.hpp ref-array.hpp +ref-array-impl.hpp result.hpp scoped-holder.hpp scoped-holder-transfer.hpp +scoped-ptrvect.hpp searchpath.cpp searchpath.hpp sub-id.hpp symbol.hpp +symbol-impl.cpp visitor-dispatcher.hpp visitor.hpp visitor-policies.hpp +wrapper.hpp wrapperptr.hpp appstate.cpp appstate.hpp basic-setup.cpp +basic-setup.hpp DIR_INFO external guifacade.cpp instancehandle.hpp option.cpp +option.hpp query subsys.cpp subsys.hpp 
subsystem-runner.hpp +---- + + +Not Library +~~~~~~~~~~~ +------ +logging.cpp nobug-init.cpp nobug-init.hpp streamtype.cpp streamtype.hpp test/* +time/* time.cpp tree.hpp +----- + +Tasks +~~~~~ +// List what needs to be done to implement this Proposal: +// * first step ([green]#✔ done#) + + * decide on name, namespaces [,yellow]#WIP# + * create git repository, setup boilerplace (build system, legalese) + [,yellow]#WIP# + * move all code over into the git repos, refactor (namespace renames) () + [,yellow]#WIP# + * make Lumiera use the new lib [,yellow]#WIP# + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +// add a fact list/enumeration which make this suitable: + + * I am already reuse much of its code, making it independent makes maintaining + it less burden + + +Cons +^^^^ +// fact list of the known/considered bad implications: + + * new packages, new dependencies for Lumiera instead 'batteries included' + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/RenderOptimizer.txt b/doc/devel/rfc/RenderOptimizer.txt new file mode 100644 index 000000000..74604e77e --- /dev/null +++ b/doc/devel/rfc/RenderOptimizer.txt @@ -0,0 +1,100 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2007-06-07_ +*Proposed by* link:ct[] +------------------------------------- + +Render Optimizer +---------------- +Render only parts of a frame which are necessary for the Output; Optimize +render pipeline for efficiency + + +Description +~~~~~~~~~~~ +This Idea is just stored here for later 
reference/implementation. + +Effects give some information on which data their output depends (like +transitions, temporal dependencies, color/alpha etc) and what the operation +costs. Based on this information we optimize the render pipeline, for example +if the output is a zoom, then we only need to calculate the parts of a frame +which will be viewable in the output (plus some more dependencies, like blur +has radius and so on). Further in some cases it might be favorable to reorder +some effects for the actual render process, as long it would produce the same +output as the original sequence of effects. + + + + + +Tasks +^^^^^ + + + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ + + + +Cons +^^^^ + + + +Alternatives +^^^^^^^^^^^^ + + + +Rationale +~~~~~~~~~ + + + + + + +Comments +-------- + +Possible classification for video filters: + 1. The filter only changes the color of each pixel in the same way + 2. The filter deforms the image but leaves the color + 3. The filter makes complex things. The only additional hint it can export is + the + number of referenced past frames, if such a limit exists (sometimes it + doesn't). + +Filters of type 1 and type 2 never use any previous frames, and are strictly +one frame in - one frame out. Filters of type 1 can always be swapped with +filters of type 2, the output is the same. All other filters cannot be swapped +in general. + +The good news is, that: + + 1. All commonly used filters are either type 1 or type 2 + (type 3 are more the fun effects) + 2. Filters of type 2 are colormodel agnostic + 3. If a filter of type 1 makes only linear transformations of the color + vectors (new_color = matrix * old_color), + the matrix can be transformed from e.g. RGB to YUV, so these filters can + always work in both colorspaces directly + + +Parked +~~~~~~ +Generally this is accepted but needs some more polishing when we go over it. 
+ + Do 14 Apr 2011 03:06:42 CEST Christian Thaeter + + + + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/ResourceManagementBudgeting.txt b/doc/devel/rfc/ResourceManagementBudgeting.txt new file mode 100644 index 000000000..ffa488e5c --- /dev/null +++ b/doc/devel/rfc/ResourceManagementBudgeting.txt @@ -0,0 +1,121 @@ +Resource Management: Budgeting +============================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Fri Jul 23 20:33:32 2010_ +*Proposed by* Christian Thaeter +------------------------------------- + +[abstract] +****************************************************************************** +The Profiler will give some Idea about how much Resources can me used to +optimally utilize the system. Knowing this number leads to the next challenge, +distributing the resources to different subsystems, jobs and objects. I here +introduce a budgeting system which takes care for this. +****************************************************************************** + + +Description +----------- +//description: add a detailed description: + +The idea is quite simple, for each kind of resource we have a global budget +manager which accounts for the available and used amounts of this resource. + +Each user of a resource has its own account managing his share on the resource. + +The system is completely voluntary giving only hints how much of a resource is +available for anyone. + + + +[source,C] +------------------------------------------------------------------------------ +typedef ssize_t budget_count; + +struct budgetmanager +{ + rwlock lock; + + void (*callback)(); // called on resource shortage + (resource collector) + + int sum_priorities; // sum of all client budgets .. 
each + client is granted available/(sum_priorities/own_priority) of the resource + + budget_count available_prefs; // configuration from preferences + budget_count available_profile; // tuned by profiler + int available_factor; // how much % from prefs vs profile + + budget_count available; // caclulated from above + budget_count allocated; // actively in use +}; + +struct budget +{ + BudgetManager manager; + int priority; + + budget_count allocated; +}; +------------------------------------------------------------------------------ + + + + +Tasks +~~~~~ +// List what would need to be done to implement this Proposal in a few words: +// * item ... + + + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +// add just a fact list/enumeration which make this suitable: +// * foo +// * bar ... + + + +Cons +^^^^ +// fact list of the known/considered bad implications: + + + +Alternatives +^^^^^^^^^^^^ +//alternatives: if possible explain/link alternatives and tell why they are not + viable: + + + +Rationale +--------- +//rationale: Describe why it should be done *this* way: + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) write some + conclusions about its process: + + + + +Comments +-------- +//comments: append below + + +//endof_comments: diff --git a/doc/devel/rfc/ResourceManagementProfiling.txt b/doc/devel/rfc/ResourceManagementProfiling.txt new file mode 100644 index 000000000..bfd1b2731 --- /dev/null +++ b/doc/devel/rfc/ResourceManagementProfiling.txt @@ -0,0 +1,182 @@ +Resource Management: Profiling +============================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Fri Jul 23 19:34:29 2010_ +*Proposed by* Christian Thaeter +------------------------------------- + +[abstract] +****************************************************************************** +From the beginning on we planned some kind of 'profiling' to adapt dynamically +to 
workload and machine capabilities. I describe here how statistic data can be
+gathered in a generic way. This will later work together with other components
+tuning the system automatically.
+****************************************************************************** 
+
+
+Description
+-----------
+//description: add a detailed description:
+
+I just introduce some ideas about the planned profiling framework here, nothing
+is defined/matured yet; this is certainly subject for further discussion and
+refinement.
+
+.Requirements/Evaluation generic::
+	Profiling should be sufficiently abstracted to have a single set of
+	datastructures and algorithms to work on a broad range of subjects
+	being profiled. Moreover the profiling core just offers unitless
+	counters, semantics will be added on top of that on a higher level.
+
+ least possible overhead::
+	Profiling itself must not cost much, it must not block and should avoid
+	expensive operations. Simple integer arithmetic without divisions is
+	suggested.
+
+ accurate::
+	We may sample data in a stochastic way to reduce the overhead;
+	nevertheless data which gets sampled must be accurately stored and
+	processed without rounding losses and drifts.
+
+ transient values::
+	It's quite common that some values can be far off either in maximum or
+	in minimum direction; the system should adapt to this and recover from
+	such false alarms. Workload also changes over time; we need to find some
+	way to measure the current/recent workload, as a grand total over the
+	whole application runtime is rather uninteresting. While it is also
+	important that we adapt slowly enough not to get into some oscillating
+	cycle.
+
+ active or passive system::
+	Profiling can be only passive, collecting data and letting it be analyzed by
+	some other component, or active, triggering some action when some limits
+	are reached. I am yet a bit undecided and keep it open for both. 
+ + + + + + + +.Brainstorming in Code +[source,C] +------------------------------------------------------------------------------ + +typedef int64_t profile_value; + +struct profile +{ + ProfileVTable vtable; + + /* + Using trylock for sampling makes it never contend on the lock but some + samples are lost. Should be ok. + */ + mutex_t lock; /* with trylock? */ + + + /* statistics / running averages */ + + /* n being a small number 2-5 or so */ + profile_value max[n]; /* n maximum values seen so far, + decreased by recovery */ + profile_value min[n]; /* n minimum values seen so far, + increased by recovery */ + + /* store sum & count, but average calculation implies a division and will be + only done on demand */ + profile_value count; /* count profile calls */ + profile_value sum; /* sum up all calls, average = + sum/count */ + + /* current is the sampled value to be integrated */ + + /* trend is caclulated before theb new run_average */ + profile_value trend; /* trend = (trend + + (run_average-current))>>1 */ + + /* we may need some slower diverging formula for running average */ + profile_value run_average; /* run_average = (run_average + + current)>>1) */ + + + /* active limits, define whats good and whats bad, calls back to vtable when + limit is hit */ + profile_value max_limit; + profile_value min_limit; + /* do we want limits for trends too? */ + + /* we count how often we hit limits, a hit/miss ratio will give a good value + for optimization */ + profile_value hit_cnt; + profile_value high_miss_cnt; + profile_value low_miss_cnt; + + /* recovery state */ + int rec_init; + int rec_current; + int rec_percent; + + + void* extra; +}; +------------------------------------------------------------------------------ + + +Tasks +~~~~~ +// List what would need to be done to implement this Proposal in a few words: +// * item ... + + + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +// add just a fact list/enumeration which make this suitable: +// * foo +// * bar ... 
+ + + +Cons +^^^^ +// fact list of the known/considered bad implications: + + + +Alternatives +^^^^^^^^^^^^ +//alternatives: if possible explain/link alternatives and tell why they are not + viable: + + + +Rationale +--------- +//rationale: Describe why it should be done *this* way: + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) write some + conclusions about its process: + + + + +Comments +-------- +//comments: append below + + +//endof_comments: diff --git a/doc/devel/rfc/SemanticTags.txt b/doc/devel/rfc/SemanticTags.txt new file mode 100644 index 000000000..1816d7bed --- /dev/null +++ b/doc/devel/rfc/SemanticTags.txt @@ -0,0 +1,199 @@ +Semantic tags +============= + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Do 30 Aug 2012 21:06:54 CEST_ +*Proposed by* Christian Thaeter +------------------------------------- + +******************************************************************************** +.Abstract +We have a lot documentation which needs to be cross referenced. Adding well the +known 'tags' concept and extend it slightly with some semantics will aid future +automatic processing. +******************************************************************************** + +Description +----------- +//description: add a detailed description: + +Every document (including sourcecode) could extended with some metadata, aka tags +which are then used to build automatic crossreferences. + +Commonly tags are just 'words' which are picked up and crossreferenceds. I propose +to extend this scheme slightly. + +Overall this scheme must be very natual and easy to use. A user should not need to +know about the underlying machinery and a tag as in a single lowercase 'word' should +be sufficient in almost all cases. Moreover Tags should be optional. 
+
+
+.Ontology
+
+To give tags some semantics we introduce a simple ontology:
+
+- Tags can have namespaces, delimited by a dot 'foo.bar.baz'.
+  Tags are looked up from right to left; 'baz' would suffice as long as it is unique.
+  Non-unique cases will be handled in context (sometimes non-uniqueness is desired)
+- We introduce simple "Is a" and "Has a" relationships. These are defined by the
+  casing of the tag: 'ALL_UPPERCASE' means "Is a" and anything else (including
+  mixed case) means "Has a". Note that for most cases the "Is a" relation will be
+  defined implicitly; in normal cases one doesn't need to care.
+- define some tag algebra for lookups (group tags by comma and semicolons, where
+  comma means 'and' and semicolon means 'or'). Used to query the tag database.
+  regex/globbing might become handy too.
+
+.Implicit Tags
+
+Tags can be implicit by generating them from the document:
+
+- Derive tags from the type and location of the Document.
+  RFC's are 'RFC', source files are 'SOURCE.C' and so on.
+
+- Derive Tags from the content of the document.
+  Asciidoc titles will be used here. A simple preprocessor
+  generates a tag from a title (make it CamelCase, simplify etc.)
+  The resulting tag is only used iff it is unique
+
+
+.Use these tags
+
+Tags are collected/discovered by some script which creates a tag-database
+(possibly plaintext asciidoc files) as big project index linking back to the content,
+details need to be worked out.
+
+We create special asciidoc macros for crossreferencing tags for example: 'RFC:foobar'
+'SOURCE:builder', details need to be worked out later.
+
+Note: this Proposal is about including tags in the first place, processing them is only
+suggested and left out for later.
+
+
+Tasks
+~~~~~
+// List what needs to be done to implement this Proposal:
+// * first step ([green]#✔ done#)
+// * second step [,yellow]#WIP#
+
+We need to define how to integrate tags in different documents syntactically. 
+For RFC's these will likely become a part of the initial table. in other Asciidoc +documents they could be a special comment or header. For Source files special comments +will be used. + +Tags themself will be added lazily on demand (unless we find someone with the patience +to go over all documents and tag them properly). + +Creating the infrastructure handling this tags (cross indexing etc) is not part of +this proposal, nevertheless we planning this since some time and it will be defined in +other RFC's. + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +// add a fact list/enumeration which make this suitable: +// * foo +// * bar ... + + * Gives a simple graspable way to build a cross reference over the whole project + + +Cons +^^^^ +// fact list of the known/considered bad implications: + + * adding tags and developing the tools manging them will take some time + + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: + +We have the ht/dig search function over the Website which give a much simpler way to +find documents. + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: + +It is very urgent and important that we make our content much easier accessible. + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +//edit comment +You may recall this proposal created some heated debate at the last developer meeting. +After thinking it over some time, I can see now more clearly what irritated me. + +. for me, the proposal seems somewhat to lack focus. Right now we have some shortcomings at + rather basic operations when *authoring content* at the website. This proposal tends to be more + interested in some kind of automated content discovery. +. the term ``tag'' in this proposal is overlayed with different meanings. 
For one it means an attached + textual property of some document, but also it denotes to some kind of inferred categorisation. + I'd rather propose to stick to the former meaning (which is common place) and treat the latter + as one _source for data_ within an categorisation algorithm. This way, such categorisation + sources can remain an implementation detail and don't need to be fixed in an universal way. +. I have serious concerns against the _ontology_ part of the proposal. Not only is the syntax + unintuitive, but more importantly, this ontology is not well aligned with real world usage. ++ +To underpin the last diagnosis, just look at the existing tags in our Wiki: + + * automation (3) + * Builder (20) + * classes (6) + * Concepts (9) + * decision (19) + * def (90) + * design (43) + * discuss (19) + * draft (55) + * example (3) + * excludeMissing (6) + * GuiIntegration (6) + * img (40) + * impl (36) + * Model (22) + * operational (19) + * overview (20) + * Player (12) + * plugin (2) + * Rendering (24) + * rewrite (3) + * Rules (8) + * SessionLogic (30) + * spec (76) + * systemConfig (9) + * Types (4) + +The absolute majority of these are neither _is-a_ nor _has-a_. The great thing with +tags, why everyone seems to love them, is exactly that they are *not formalized*. +You can just throw in some tags and keywords and use them for a plethora of +unrelated and unstructured purposes and generally just assume that your +reader will somehow ``get it''. 
+ +Ichthyostega:: 'Mi 10 Okt 2012 05:36:35 CEST' ~~ + + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/SkillsCollection.txt b/doc/devel/rfc/SkillsCollection.txt new file mode 100644 index 000000000..c2c4f3d68 --- /dev/null +++ b/doc/devel/rfc/SkillsCollection.txt @@ -0,0 +1,80 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2007-06-13_ +*Proposed by* link:ct[] +--------------------------------- + + +Skills Collection +----------------- +Make a page where people can tell in which areas they are willing to support +others. + + +Description +~~~~~~~~~~~ +Some Page should list different things needed for working on the project and +users should attach themself when they offer support for it. This is meant that +people who run into problems know who to ask. In contrast this is not meant +like these Skill pages on Sourceforge or such. I don't like this rating and +posing system. We let people assing themself to skill and not skills to people +and there is no rating. + +Skills shall be anything which is needed like the tools we use, the code we +create etc. + + + +Example +^^^^^^^ + +.Git +* ct +* j6t + +.autotools +* ct + +.lumiera/renderpipe +* ichthyo + +... shall this contain emails? + + +Tasks +^^^^^ + * just set this page up .. either on this wiki or in a tiddlywiki which + becomes checked into the repo + + +Pros +^^^^ + * inter developer support and help network + + +Cons +^^^^ + * privacy concerns, people might not publish what they know or better what + they ''not'' know + + +Alternatives +^^^^^^^^^^^^ +...urgs + + +Rationale +~~~~~~~~~ +This only announces where people offer support within the lumiera developer +community and is absolutely voluntary. 
+ + + + + +Comments +-------- + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/StreamTypeSystem.txt b/doc/devel/rfc/StreamTypeSystem.txt new file mode 100644 index 000000000..12323606f --- /dev/null +++ b/doc/devel/rfc/StreamTypeSystem.txt @@ -0,0 +1,268 @@ +Stream Type System +================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Draft_ +*Date* _2008-10-05_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + + +******************************************************************************** +.Abstract +Especially in the Proc-Layer, within the Builder and at the interface to the +Engine we need sort of a framework to deal with different »kinds« of +media streams. + +This is the foundation to be able to define what can be connected and to +separate out generic parts and isolate specific parts. +******************************************************************************** + + +Description +----------- +//description: add a detailed description: +The general idea is that we need meta information, and -- more precisely -- +that _we_ need to control the structure of this metadata. Because it has +immediate consequences on the way the code can test and select the appropriate +path to deal with some data or a given case. This brings us in a difficult +situation: + + * almost everything regarding media data and media handling is notoriously + convoluted + * because we can't hope ever to find a general umbrella, we need an extensible + solution + * we want to build on existing libraries rather then re-inventing media + processing. + * a library well suited for some processing task not necessarily has a type + classification system which fits our needs. 
+ +The proposed solution is to create an internal Stream Type System which acts as +a bridge to the detailed (implementation type) classification provided by the +library(s). Moreover, the approach was chosen especially in a way as to play +well with the rule based configuration, which is envisioned to play a central +role for some of the more advanced things possible within the session. + + +Terminology +~~~~~~~~~~~ + * *Media* is comprised of a set of streams or channels + * *Stream* denotes a homogeneous flow of media data of a single kind + * *Channel* denotes an elementary stream, which -- _in the given context_ -- + can't be decomposed any further + * all of these are delivered and processed in a smallest unit called *Frame*. + Each frame corresponds to a time interval. + * a *Buffer* is a data structure capable of holding one or multiple Frames of media data. + * the *Stream Type* describes the kind of media data contained in the stream + + +Concept of a Stream Type +~~~~~~~~~~~~~~~~~~~~~~~~ + +The Goal of our Stream Type system is to provide a framework for precisely +describing the ``kind'' of a media stream at hand. The central idea is to +structure the description/classification of streams into several levels. +A complete stream type (implemented by a stream type descriptor) contains +a tag or selection regarding each of these levels. + +Levels of classification +^^^^^^^^^^^^^^^^^^^^^^^^ + + * Each media belongs to a fundamental *kind of media*, examples being _Video, + Image, Audio, MIDI, Text,..._ This is a simple Enum. + * Below the level of distinct kinds of media streams, within every kind we + have an open ended collection of *Prototypes*, which, within the high-level + model and for the purpose of wiring, act like the "overall type" of the + media stream. Everything belonging to a given Prototype is considered to be + roughly equivalent and can be linked together by automatic, lossless + conversions. 
Examples for Prototypes are: stereoscopic (3D) video versus the + common flat video lacking depth information, spatial audio systems + (Ambisonics, Wave Field Synthesis), panorama simulating sound systems (5.1, + 7.1,...), binaural, stereophonic and monaural audio. + * Besides the distinction by prototypes, there are the various *media + implementation types*. This classification is not necessarily hierarchically + related to the prototype classification, while in practice commonly there + will be some sort of dependency. For example, both stereophonic and monaural + audio may be implemented as 96kHz 24bit PCM with just a different number of + channel streams, but we may as well have a dedicated stereo audio stream + with two channels multiplexed into a single stream. + + +Working with media stream implementations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +For dealing with media streams of various implementation type, we need +_library_ routines, which also yield a _type classification system_ suitable +for their intended use. Most notably, for raw sound and video data we use the +http://gmerlin.sourceforge.net/[GAVL] library, which defines a fairly complete +classification system for buffers and streams. For the relevant operations in +the Proc-Layer, we access each such library by means of a Façade; it may sound +surprising, but actually we just need to access a very limited set of +operations, like allocating a buffer. _Within_ the Proc-Layer, the actual +implementation type is mostly opaque; all we need to know is if we can connect +two streams and get an conversion plugin. 
+ +Thus, to integrate an external library into Lumiera, we need explicitly to +implement such a Lib Façade for this specific case, but the intention is to be +able to add this Lib Façade implementation as a plugin (more precisely as a +"Feature Bundle", because it probably includes several plugins and some +additional rules) + + +Link between implementation type and prototype +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +At this point the rules based configuration comes into play. Mostly, to start +with, determining a suitable prototype for a given implementation type is sort +of a tagging operation. But it can be supported by heuristic rules and a +flexible configuration of defaults. For example, if confronted with a media +with 6 sound channels, we simply can't tell if it's a 5.1 sound source, or if +it's a pre mixed orchestra music arrangement to be routed to the final balance +mixing or if it's a prepared set of spot pick-ups and overdubbed dialogue. But a +heuristic rule defaulting to 5.1 would be a good starting point, while +individual projects should be able to set up very specific additional rules +(probably based on some internal tags, conventions on the source folder or the +like) to get a smooth workflow. + +Moreover, the set of prototypes is deliberately kept open ended. Because some +projects need much more fine grained control than others. For example, it may +be sufficient to subsume any video under a single prototype and just rely on +automatic conversions, while other projects may want to distinguish between +digitized film and video NTSC and PAL. Meaning they would be kept in separate +pipes and couldn't be mixed automatically without manual intervention. + + +connections and conversions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * It is _impossible to connect_ media streams of different kind. Under some + circumstances there may be the possibility of a _transformation_ though.
For + example, sound may be visualized, MIDI may control a sound synthesizer, + subtitle text may be rendered to a video overlay. Anyway, this includes some + degree of manual intervention. + * Streams subsumed by the same prototype may be _converted_ lossless and + automatically. Streams tagged with differing prototypes may be _rendered_ + into each other. + * Conversions and judging the possibility of making connections at the level + of implementation types is coupled tightly to the used library; indeed, most + of the work to provide a Lib Façade consists of coming up with a generic + scheme to decide this question for media streams implemented by this + library. + + +Tasks +~~~~~ +// List what needs to be done to implement this Proposal: + * draft the interfaces ([green]#✔ done#) + * define a fall-back and some basic behaviour for the relation between + implementation type and prototypes [,yellow]#WIP# + * find out if it is necessary to refer to types in a symbolic manner, or if it + is sufficient to have a ref to a descriptor record or Façade object. + * provide a Lib Façade for GAVL [,yellow]#WIP# + * evaluate if it's a good idea to handle (still) images as a separate distinct + kind of media + + + +Discussion +~~~~~~~~~~ + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: +Instead of representing types by metadata, leave the distinction implicit and +instead implement the different behaviour directly in code. Have video tracks +and audio tracks. Make video clip objects and audio clip objects, each +utilising some specific flags, like sound is mono or stereo. Then either +switch, switch-on-type or scatter out the code into a bunch of virtual +functions. See the Cinelerra source code for details. + +In short, following this route, Lumiera would be plagued by the same notorious +problems as most existing video/sound editing software. Which is, implicitly +assuming ``everyone'' just does ``normal'' things. 
Of course, users always were +and always will be clever enough to work around this assumption, but the problem +is, all those efforts will mostly stay isolated and can't crystallise into a +reusable extension. Users will do manual tricks, use some scripting or rely on +project organisation and conventions, which in turn creates more and more +coercion for the ``average'' user to just do ``normal'' things. + +To make it clear: both approaches discussed here do work in practice, and it's +more a cultural issue, not a question guided by technical necessities to select +the one or the other. + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: + + * use type metadata to factor out generic behaviour and make variations in + behaviour configurable. + * don't use a single classification scheme, because we deal with distinctions + and decisions on different levels of abstraction + * don't try to create an universal classification of media implementation type + properties, rather rely on the implementation libraries to provide already a + classification scheme well suited for _their_ needs. + * decouple the part of the classification guiding the decisions on the level + of the high level model from the raw implementation types, reduce the former + to a tagging operation. + * provide the possibility to incorporate very project specific knowledge as + rules. + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below +As usual, see the +http://www.lumiera.org/wiki/renderengine.html#StreamType[Proc-Layer impl doku] +for more information and implementation details. 
+ +Practical implementation related note: I found I was blocked by this one in +further working out the details of the processing nodes wiring, and thus make +any advance on the builder and thus to know more precisely how to organize the +objects in the link:EDL/Session[]. Because I need a way to define a viable +abstraction for getting a buffer and working on frames. The reason is not +immediately obvious (because initially you could just use an opaque type). The +problem is related to the question what kind of structures I can assume for the +builder to work on for deciding on connections. Because at this point, the +high-level view (pipes) and the low level view (processing functions with a +number of inputs and outputs) need in some way to be connected. + +The fact that we don't have a rule based system for deciding queries currently +is not much of a problem. A table with some pre configured default answers for +a small number of common query cases is enough to get the first clip rendered. +(Such a solution is already in place and working.) + + -- link:Ichthyostega[] 2008-10-05 + +Woops fast note, I didn't read this proposal completely yet. Stream types could +or maybe should be cooperatively handled together with the backend. Basically +the backend offers one to access regions of a file in a continuous block, these +regions are addressed as "frames" (these are not necessarily video frames). The +backend will keep indices which associate this memory management with the frame +number, plus adding the capability of per frame metadata. These indices get +abstracted by "indexing engines" it will be possible to have different kinds of +indices over one file (for example, one enumerating single frames, one +enumerating keyframes or gops). Such an indexing engine would be also the place +to attach per media metadata.
From the proc layer it can then look like `struct +frameinfo* get_frame(unsigned num)` where `struct frameinfo` (not yet defined) +is something like `{ void* data; size_t size; struct metadata* meta; ...}` + + -- link:ct[] 2008-10-06 + +Needs Work +~~~~~~~~~~ +There are a lot details to be worked out for an actual implementation but we +agreed that we want this concept as proposed here. + + Do 14 Apr 2011 03:06:42 CEST Christian Thaeter + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/SystematicMetadata.txt b/doc/devel/rfc/SystematicMetadata.txt new file mode 100644 index 000000000..ab407416c --- /dev/null +++ b/doc/devel/rfc/SystematicMetadata.txt @@ -0,0 +1,155 @@ +SystematicMetadata +================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Mo 08 Okt 2012 04:39:16 CEST_ +*Proposed by* Ichthyostega +------------------------------------- + +******************************************************************************** +.Abstract +_give a short summary of this proposal_ +******************************************************************************** + +Lumiera is a metadata processing application: _Data_ is _media data_, and everything +else is _metadata_. Since our basic decision is to rely on existing libraries for +handling data, the ``metadata part'' is what _we are building anew._ + +This RfC describes a fundamental approach towards metadata handling. + + +Description +----------- +//description: add a detailed description: +Metadata is conceived as a huge uniform tree. This tree is conceptual -- it is +never represented as a whole. In the implemented system, we only ever see parts +of this virtual tree being cast into concrete data representations. 
These parts +are like islands of explicitly defined and typed structure, yet they never need +to span the whole virtual model, and thus there never needs to be an universal +model data structure definition. Data structure becomes implementation detail. + +Parts of the system talk to each other by _describing_ some subtree of metadata. +This description is _always pushed:_ the receiver implements an API allowing the +sender to navigate to some path-like scope and populate it with values, similar +to populating a filesystem. It is up to the receiver to assemble these information +into a suitable representation. Some receiver might invoke an object factory, while +another serialises data into an external textual or binary representation. + + +Abstract Metadata Model +~~~~~~~~~~~~~~~~~~~~~~~ +The conceptual model for metadata is close to what the JSON format uses: + +There are primitive values as +null+, string, number and boolean. Compund values +can be arrays or records, the latter being a sub-scope populated with key-value pairs. + +We might consider some extensions + + * having data values similar to BSON of MongoDB: integrals, floats, timestamps + * introducing two _special magic keys_ for records: `"type"` and `"id"` + + +Sources and Overlays +~~~~~~~~~~~~~~~~~~~~ +Metadata is delivered from _sources_, which can be _layered_. Similarly, on the +receiving side, there can be multiple _writeable layers_, with a routing strategy +to decide which writeable layer receives a given metadata element. This routing +is implemented within a pipeline connecting sender and receiver; if the default +routing strategy isn't sufficient, we can control the routing by introducing a +a meta-tree in some separate branch, this way making the metadata self-referential. + + +Some points to note +~~~~~~~~~~~~~~~~~~~ +- this concept doesn't say anything about the actual meaning of the metadata elements, + since that is always determined by the receiver, based on the current context. 
+- likewise, this concept doesn't state anything about the actual interactions, the + involved parts and how the interaction is initiated and configured; this is considered + an external topic, which needs to be solved within the applicable context (e.g. the + session has a specific protocol how to retrieve a persisted session snapshot) +- there is no separate _system configuration_ -- configuration appears just as a + local record of key-value pairs, which is interpreted according to the context. +- in a similar vein, this concept deliberately doesn't state anything regarding the + handling of _defaults_, since these are so highly dependent on the actual context. + + +Tasks +~~~~~ +// List what needs to be done to implement this Proposal: +// * first step ([green]#✔ done#) + * define the interaction API [yellow-background]#WIP# + * scrutinise this concept to find the pitfalls [yellow-background]#WIP# + * build a demonstration prototype, where the receiver fabricates an object [red yellow-background]#TBD# + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ +- the basic implementation is strikingly simple, much simpler than building + a huge data structure or any kind of serialisation/deserialisation scheme +- parts can be combined in an open fashion, we don't need a final concept up-front +- even complex routing and overlaying strategies become manageable, since they can be + treated in isolation, local for a given scope and apart from the storage representation +- library implementations for textual representations can be integrated. 
+ + + +Cons +^^^^ +- the theoretical view is challenging and rather uncommon +- a naive implementation holds the whole data tree in memory twice +- how the coherent ``islands'' are combined is only a matter of invocation order + and thus dangerously flexible + + + + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: +The classical alternative is to define a common core data structure, which +needs to be finalised quickly. Isolated functional modules will then be written +to work on that common data set, which leads to a high degree of coupling. +Since this approach effectively doesn't scale well, what happens in practice is +that several independent storage and exchange systems start to exist in parallel, +e.g. system configuration, persisted object model, plug-in parameters, presentation +state. + + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: +Basically common (meta) data could take on a lot of shapes between two extremes: + +- the _precise typed structure_, which also is a contract +- the _open dynamic structure_, which leaves the contract implicit + +The concept detailed in this RfC tries to reconcile those extremes by avoiding +a global concrete representation; + +this way the actual interaction -- with the necessity +of defining a contract -- is turned into a local problem. 
+ + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/TagCloudsOnResources.txt b/doc/devel/rfc/TagCloudsOnResources.txt new file mode 100644 index 000000000..2c36d76dc --- /dev/null +++ b/doc/devel/rfc/TagCloudsOnResources.txt @@ -0,0 +1,81 @@ +Design Process : Tag Clouds for Resources +========================================= + +[grid="all"] +`------------`----------------------- +*State* _Dropped_ +*Date* _2008-07-15_ +*Proposed by* link:PercivalTiglao[] +------------------------------------- + + +Tag Clouds for Resources +------------------------ + +Perhaps a Cloud of tags is unnecessary, but tagging resources similar to +Youtube or like Tag Clouds allows for efficient searching and filtering. Anyone +who uses the web would know how to use them. If a "Cloud of Tags" approach is +used, then organizing the tags by some sort of frequency would be useful. IE: +the more a specific tag is used, the larger it gets, or perhaps the more often +that tag is searched on. + + +Description +~~~~~~~~~~~ + + + +Tasks +~~~~~ + + +Pros +~~~~ + +* Simple GUI Concept +* Eases management of resources with Search +* Orthogonal to other resource management schemes like Folders + + +Cons +~~~~ + + +Alternatives +~~~~~~~~~~~~ + + + +Rationale +~~~~~~~~~ + + + + + +Comments +-------- + +* Note: I was inspired with this idea during an email conversation with + Rick777. -- link:PercivalTiglao[] [[DateTime(2008-07-17T14:29:57Z)]] + +* Agreed, this is usefull. Also, more advanced config rules can make use of + such tags and wiring can depend on them, for example to route your dialogue + audio to another global bus than the music or ambiance. 
+ -- link:Ichthyostega[] [[DateTime(2008-07-27T22:23:38Z)]] + + + +Conclusion +---------- + +This Design Proposal is 'superseded' by a much more advanced proposal: +link:DelectusShotEvaluator[Delectus] + +(Dropping it doesn't mean disapproval) + + +'''' + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/TodoLists.txt b/doc/devel/rfc/TodoLists.txt new file mode 100644 index 000000000..1d3b987ac --- /dev/null +++ b/doc/devel/rfc/TodoLists.txt @@ -0,0 +1,49 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-03-05_ +*Proposed by* link:ct[] +------------------------------------- + +Todo Lists +---------- +We need some way to organize tasks to be done (tiddlywiki, testsuite, ...?) + + +Description +~~~~~~~~~~~ + + +Tasks +^^^^^ + + +Pros +^^^^ + + + +Cons +^^^^ + + + +Alternatives +^^^^^^^^^^^^ + + + +Rationale +^^^^^^^^^ + + + + + + +Comments +-------- +We decided to use a Tiddlywiki for now until this is further worked out + -- link:ct[] [[DateTime(2008-03-08T03:38:50Z)]] + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/UnitTests_Python.txt b/doc/devel/rfc/UnitTests_Python.txt new file mode 100644 index 000000000..9d71205e6 --- /dev/null +++ b/doc/devel/rfc/UnitTests_Python.txt @@ -0,0 +1,82 @@ +Design Process : Unit Tests Python +================================== + + +[grid="all"] +`------------`----------------------- +*State* _Dropped_ +*Date* _2007-06-17_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + +UnitTests in Python +------------------- + +Use the Python scripting language for the actual Unit Tests and access the +Cinelerra Code via SWIG + + +Description +~~~~~~~~~~~ + +Define Test classes in Python, using e.g. the link:PyUnit[] framework of the +Python Standard lib. 
The SWIG compiler can generate wrapper code automatically, +so we can access the C++ Classes and Facilities of Cinelerra as Python Modules +and Classes. The Classes to be tested in Cinelerra need to provide some +Interface for carrying out these tests (and this is one of the main benefits of +the whole Test driven approach). + + +Tasks +~~~~~ + + * Find out how the SWIG generated wrappers play together with Python's List + and Map types. Without the ability to use the latter in the tests, this + whole proposal is rather pointless. + * Think on how we can test video data processing (at least in its basics, e.g. + does additive overlay work) + + +Pros +~~~~ + +Programming Unit and Self tests in a Scripting language facilitates this task. +The X-Language bindings are quite usable today. As a side effect, it helps to +get a clean program structure, because the tests need some Interface and/or +some object factories to create the test candidates. Python is proposed, +because it is fairly mainstream, has a flat learning curve but is +moderately modern and functional-style at the same time. + +Cons +~~~~ + + * Adds to the complexity + * Some old-style hackers have a quite distinct aversion against Python + + +Alternatives +~~~~~~~~~~~~ + +Rationale +~~~~~~~~~ + +Why am I proposing this? Out of laziness. Python is there, many devs (on linux) +have some Python skills, SWIG is not overly complicated to use. + +And last but not least: just to get the discussion going... ;-) + + +Comments +-------- + +* I'd rather consider to use some embedded language in cinelerra which we can + use to drive tests, should be something smaller and more sane than python. + Needs certainly more discussion. For simple unit tests some C/C++ harness and + bit shell scripting would suffice, I really want to integrate this with + link:NoBug[].
+ -- link:ct[] [[DateTime(2007-06-17T17:32:27Z)]] + + +'''' +Back to link:../DesignProcess[] diff --git a/doc/devel/rfc/UseCases.txt b/doc/devel/rfc/UseCases.txt new file mode 100644 index 000000000..7f1b96d5c --- /dev/null +++ b/doc/devel/rfc/UseCases.txt @@ -0,0 +1,310 @@ +[grid="all"] +`------------`----------------------- +*State* _Parked_ +*Date* _2008-10-31_ +*Proposed by* link:Ichthyostega[] +------------------------------------- + + +Use Case analysis +----------------- + +The only way to defeat "featuritis" is to build upon a coherent design -- + + +which in turn relies upon a more or less explicit understanding what the +application should be like, and the way the prospective user is thought to work +with the program. Today, a generally accepted 'method' for building up such +an understanding is to do a *use case analysis*. Such a formal analysis would +require to identify all usage scenarios with the involved actors and parts of +the system, and then to refine them in detail and break them down into distinct +use cases. Here, I'll try a rather informal variant of such an analysis. I'll +restrain myself to describing the most important usage situations. + +'please participate in the discussion. It well may be that everything detailed + here is self-evident, but I doubt so. At least the grouping and the omissions + kind-of reflect a certain focus of the project' + + +Describing basic Lumiera usage situations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The fundamental assumption is that the user works on a project, which is +reflected in the fact that the user is working on a single session over an +extended period of time (several hours to several years). External media will +be imported and incorporated into this session, additional media will be +created within this session, and finally there is at least one render or export +procedure to harvest the results of this work. 
+ + +Scenario (1) : Exploring Media +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Various external media files are opened. You play, cue and examine the media. +Tagging, labeling and adding notes. Marking of interesting points and ranges. +Possibly breaking down into clips, or at least extract some ranges as clips. +Draft arranging the clips, applying some effects to check the result and thus +to find out about the viability of the footage. Playback of several media at +the same time (several videos, but also video and music). Grouping of assets +(media, clips, effects, markers) into folders. + + +Scenario (2) : Simple assembly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You build up a simple linear cut sequence. Either by + + - using a single source media, trimming it and cutting away (a small number + of) unwanted parts + - playing source media and spilling over (insert, overwrite) some parts into + the final assembly + - dragging over the pre-organised clips from clip folders to build up the + assembly. + +Sound is either used immediately as-is (the soundtrack attached to the media), +or there is a similarly simple, linear music bed. Some people prefer to switch +sound off entirely for this kind of work. In any case, the link is either +automatic, or rather vague and soft (as music being vaguely correlated) + + +Scenario (3) : Augmenting an assembly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Without the intention to rework it from scratch, an already existing simple +assembly is augmented, beautified and polished, maybe to conform with +professional standards. This includes the ``rescue'' of a somewhat questionable +assembly by repairing localized technical problems, but also shortening and +re-arranging, and in extreme cases even changing the narrative structure. 
A +distinctive property of this usage scenario is that work happens rather in the +context of 'tasks' (passes) -- not so much isolated operations: + + - the task may be to get the rhythm or overall tempo right, and thus you go + over the sequence and do trim, roll, shuffle or slide edits. + - you may want to ``fold-out'' parts of the sound, thus interweaving o-sound + and music + - there may be a sound overdubbing and replacing pass + - you may want to walk certain automation curves and adjust levels (sound + volume or tone, fade, brightness/contrast/colour) + - general polishing may include adding title overlays, fading in and out, + adding (typically a single type of) transition(s) in a coherent manner + + +Scenario (4) : Compositional work +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here I define *compositional work* as a situation where you deal with +multiple more or less independent sequences going on in parallel, similar to a +musical score. Frequently, we encounter compositional parts embedded in a +otherwise linear work, and often those parts evolve when Scenario (3) is driven +to the extreme. + + - the most common situation is that o-sound, sound design and music work + together with the temporal structure created in the image edits. + - a movie with a complex narrative structure may induce compositional work on + a very large scale (and existing applications frequently fall short on + supporting such) + - _compositing_ often leads to compositional work. Special FX, masked objects + being arranged, artificial elements to be integrated. + - similarly any collage-like or heavily layered arrangements lead themselves + to requiring compositional work. + +The common distinctive property of all those situations is: objects are +embedded into a primary context and have to obey the rules of this context, and +at the same time have a close correlation to other objects which are embedded +in a completely different (``orthogonal'') context. 
(To give a catchy example: +assume, a CG monster has to be integrated. Besides the masked monster object, +you have several colouring and blurring layers at completely different levels +in the layering order, and at the same time you have correlated sound objects, +which need to be integrated into the general sound-scape. And now your primary +job is to get the movement and timings of the monster right in relation to the +primary timing grid established by the existing edit) + +The working style and thus the tool support necessary for compositional work is +completely different to Scenario (3). After an initial build-up (which often is +very systematic), the working profile can be characterized by tweaks to various +parameters to be done in-sync at widely separated sites within the session, +together with repeated cycles of ``do it'', ``assess the result'', ``undo all and +do some small detail differently''. Typically there is the need for much navigation +(contrast this to Scenario (3) where you work in _tasks_ or _passes_) + + +Scenario (5) : Working with Sound +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The degree of integrating sound work is worth discussing. Often, due to +limitations in existing tools, sound work is done in separate applications to a +large extent. Which in turn forces the whole production into a sequential +organisation scheme. First the edit has to be roughly final, and then the sound +people can step in. (Of course this is an simplification). 
To list the common +operations: + + - cleaning and preparing original sound + - fitting sound library elements or separately produced sound + - overdubbing + - playing or building music to match the rhythm of the edit or the original + footage + - montage of dialogue and/or noise correlated to the primary content of the + sequence + - sound design, shaping the pace and the feel of a sequence + - final balance mix + +While clearly some of those tasks are always better done within a dedicated +application, the ability to carry out this work partially within the main +session and even while the basic edit is still in flux -- may open new artistic +possibilities. + + +Scenario (6) : Large Projects +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +At first sight, the operations and the work to be done in large projects is the +same as in small ones. But large projects tend to create sort of an additional +``layer'' on top of the usage scenarios described thus far, which will ``kick in'' +at various places. + + - work may be divided upon several editors, working on separate parts + (sequences) which then need to be re-integrated + - there may be a global asset organisation (naming scheme), which will be + extended locally, resulting in nested naming scopes. + - some quite basic stuff needs to be done in a coherent fashion, e.g. titles, + a certain transition (template), the way fade-outs are done, a certain + colour profile. Possibly, this stuff needs to be adjusted all over the + project. + - there will be a general (large scale) timing grid with distinct ``check points'' + and probably there is the need to navigate to the different parts of the + whole project. + - there may be the necessity to build several versions of the same project in + parallel (e.g. 
a short version and an extended director's cut) + - you may have to care for such nasty and tedious things as keeping sub-titles + in-sync while the edit is still in flux + - you may want to do integration builds, where you add placeholders just for + the purpose to get an impression of the work as a whole. + + +Scenario (7) : Teamwork +^^^^^^^^^^^^^^^^^^^^^^^ + +Several people work on a project. + + - A longer sequence might be split up into parts, each one edited by another + person. The parts will be collected and assembled by the chief editor. Edits + to the parts will still be possible, but a system of permissions allows to + lock down access to parts of the edit, so to prevent unexpected interferences. + - Arrangements based on the same resources can be branched, tagged and merged. + - Edits are logged with usernames + - Markers can be shown/hidden on a per-creator basis. + - Team members need ways to share and store notes and suggestions for each + other's work. Annotations can be added to clips, markers or arrangements + - A pen tool could allow to scribble on top of frames or arrangements. An + expressive and fast way to leave suggestions about deletions, movements and + all other kinds of edits. + + +Scenario (8) : Script driven +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The application is started ``headless'' (without GUI) and controlled via an +API. Either an existing session is loaded, or a new session is created and +populated. Then, some operations have to be done in a systematic manner, +requiring a way to address parts of the session both unambiguously and in a way +easy to access and control from a programming environment (you can't just +``see'' the right clip, it needs to be tagged). Finally, there might be an +export or render step. A variation of this scenario is the automatic extraction +of some informations from an existing project. 
+ + +Discussion +~~~~~~~~~~ + +.Pros +* describing such scenarios, even if hypothetical, create an anchor or point of + referral for feature/GUI design work to be done in detail +* relating features to working situations helps to see what is really important + and what is rather of technical merit +* compiling and discussing this list helps shaping the character of the + application as a whole +* the above compilation relates individual features to a general production + process. +* the goal of this compilation is to be _fairly complete_ + + +.Cons +* any of those descriptions is artificial +* sometimes it is better to develop an application technology driven, + especially when it is technologically challenging to get it to work properly. +* having such a large-scale vision may freak away people which otherwise + might jump in and implement some crazy but valuable new feature +* the listed usage scenarios intend to be _fairly complete_, which can be a + limitation or even self-deception. Better have an open ended list. +* the above compilation seems quite conventional and explicitly leaves out some + scenarios + - networked, distributed scenarios, compound applications + - television, life video, VeeJay-ing + - cartoons, animations, game design + + + +.Alternatives +* avoiding a general plan, just sharing a vague general vision +* just start out with one scenario directly at hand (e.g. the simple assembly) + and not worrying about the rest +* rather then defining those scenarios (which are necessarily hypothetical), + rather stick to the operation level. E.g. a use case would be rather + on the level of ``triming a clip'' +* doing a complete state-of-the art UML use case analysis. +* after having created the foundation, rather stick to an XP approach, i.e. 
+ implement, integrate and release small ``usage stories'' + + + +Rationale +^^^^^^^^^ + +Well, after having considered, compiled and written such a concept, altogether +avoiding a big picture view of the application is no longer an option. To the +other extreme, we neither have the resources, nor the circumstances for doing a +rigid and formal analysis. Finally, the XP approach really sounds promising, +and it should be clear that it is in no way ruled out. Nothing hinders us to +have a detailed vision, but then to implement small usage stories which fit +into this vision. + +Besides, another consideration. The above compilation builds upon the notion, +that there is a common denominator of film making craft, a core editing art, +which has been shaped in the first 100 years of cinema, and which won't go away +within the next generation, even if the technological and practical +circumstances of production change quite dramatically. + + + + + + + + +Comments +-------- +//comments: append below + +.Template e.g. for regular TV series +Constraints to fit all contents within fixed timeline, cover topic, select +collage of iconic scenes from archived and collected footage. Update intro and +credit roll for each episode. Add in stopmotion, and 3D model animations with +vocal commentaries. Gather together separate items from "outworkers". + +Tree:: '2008-12-27 08:36:36' + + +//endof_comments: + + + +Parked +~~~~~~ +We have to revisit this, possibly someone (or a group) who wants to work on +the workflow. For now it's parked until revisited. 
+ + Do 14 Apr 2011 03:06:42 CEST Christian Thaeter + + + + +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/WebsiteNavigation.txt b/doc/devel/rfc/WebsiteNavigation.txt new file mode 100644 index 000000000..144c41a79 --- /dev/null +++ b/doc/devel/rfc/WebsiteNavigation.txt @@ -0,0 +1,153 @@ +WebsiteNavigation +================= + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Draft_ +*Date* _Mi 08 Dez 2010 11:32:32 CET_ +*Proposed by* Ichthyostega +------------------------------------- + +[abstract] +******************************************************************************** +The Lumiera website is assumed to accumulate a lot of content. Thus we need +to care about making that content accessible, to help finding the relevant +topics and to keep the overall structure intact. This RfC is to collect, +discuss and agree upon the guidelines and requirements. +******************************************************************************** + +Description +----------- +//description: add a detailed description: + +Issues to care +~~~~~~~~~~~~~~ + +Navigation:: + The page hierarchy becomes at least 5 levels deep, likely even deeper. + When reading a page, the current subtree leading down to this page should + be right at hand; especially access to the siblings and the parent's siblings + is important. For re-accessing content, it is necessary to be able to drill + down to an known location (``within the design docs, detailing the application, + I need the configuration section'') + + -> we need an *auto generated navigation* and an embedded *menu tree widget* in the web pages. + +Tagging:: + There should be an easy way to categorise individual pages *by keyword(s)* + and an automatically generated indexing by tags, possibly with an per tag + overview page. + +Search:: + The usual *site search*. It should include the contents of the issue tracker. 
+ Even today such a scoped search is valuable and even necessary for working + with the informations collected within the Lumiera project + +Sanity:: + Each relevant page needs to be reachable. There are some additional pages and + especially subdirectories which should not be linked into the website navigation. + Moreover, all (internal) links on the pages should be valid. + + -> this could be addressed by a **sanity checker script** + +Usage situations +~~~~~~~~~~~~~~~~ + +(a) working on content +^^^^^^^^^^^^^^^^^^^^^^ +Working on content should be readily accessible for _everyone_. One time contributions +are especially encouraged. This leads to the following usage scenario: + +A contributor has some informations to share or wants to do some additions or modifications. +(S)he locates somehow the place where relevant informations are stored, adds some text, +possibly adds a new page or splits another page in two. + +_Note_: no awareness of the issues of navigation can be assumed. The occasional contributor +won't notice any concern which isn't right at hand. + +(b) maintaining a subsystem +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Some person(s) will be responsible for a subsystem or some segment of the informations +on the website. This responsibility is content centric. It might include frequent rearranging, +regrouping and reordering of pages to accommodate the increasing scope of informations. + +_Note_: while here some awareness of website organisational issues can be assumed, +any requirement to care for external organisational issues is a burden and distracts +from the actual work to be done -- thus it is likely to be short circuited or postponed +``for later''. Note especially, reorganising content in a subsection *must not* incur +the burden of re-doing the same reorganisation steps mirrored in some central navigation +configuration or table of contents. 
(this is a knock out criterion) + +(c) maintaining the website +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The website maintainer is responsible for the overall sanity of the website, without +being familiar with all details of ongoing work in some part or section of the information. +Another concern here is the outward perception of the website, which might incur changes +on the top level navigation or some rewording of overview pages. + +_Note_: this kind of work is rather unrewarding. There is the danger of collisions with the +work of the subsystem maintainer + + +Conclusion: Requirements for any navigation solution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * ability to pick up a nested page structure + * ability to cope with any additions and changes in the lower levels automatically, without help by the user + * ability to override: + + - not including some subdirectories + - including links-to-external at arbitrary positions + +optional/additional features +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The following features would be handy, but can be considered optional + + * ability to change the displayed title of a page in the navigation + * ability to control the ordering of pages in the navigation + * complete manual override of the visible content of a specific subdirectory + + + +Tasks +~~~~~ + * define markup for the various features ([green]#✔ done#) + * get a technical solution for the menu to work ([green]#✔ done#) + * write a script to traverse contents and generate the menu ([green]#✔ done#) + * test and integrate it into the website ([green]#✔ done#) + + + +Rationale +--------- +//rationale: Describe why it should be done *this* way: +Maintaining the navigation within a website beyond just some pages is a daunting task. +When frequent rearrangements of pages are to be expected, the only viable solution is +to automate this task. Moreover, there needs to be a secondary path to each page, +aside from the direct links which might or might not be provided. 
A automatically +generated navigation menu separate of the actual page content helps to address +these issues. + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +.State -> Draft +//add reason +A Menu generator script based on these principles is deployed and working +since a looooong time now. We still need to build the tagging facility though. +This is covered by another RfC. + +Ichthyostega:: 'So 07 Okt 2012 07:30:17 CEST' ~~ + + +//endof_comments: diff --git a/doc/devel/rfc/WebsiteSupportMarkup.txt b/doc/devel/rfc/WebsiteSupportMarkup.txt new file mode 100644 index 000000000..2b4c6ad28 --- /dev/null +++ b/doc/devel/rfc/WebsiteSupportMarkup.txt @@ -0,0 +1,245 @@ +WebsiteSupportMarkup +==================== + +// please don't remove the //word: comments + +[grid="all"] +`------------`----------------------- +*State* _Idea_ +*Date* _Sa 06 Okt 2012 16:47:44 CEST_ +*Proposed by* Ichthyostega +------------------------------------- + +******************************************************************************** +.Abstract +The Lumiera Website and Documentation uses a lightweight, plaintext based +Infrastructure built on top of Asciidoc and Git. To help with everyday +authoring and editorial tasks, a set of support facilities is proposed, +to be integrated seamlessly into the existing infrastructure. The front-end +to these tools is specific markup, allowing for *cross-linking*, *tag*-based +*link lists* and semi-automatic building of *glossary pages*. +******************************************************************************** + +Description +----------- +//description: add a detailed description: +Some time ago, the Lumiera core developer team _decided against_ using a Content +Management System for the Website and Documentation. 
While the rationale backing those +decisions still remains valid, CMS do exist for a reason. The everyday task of authoring +and editing a large body of text poses some specific challenges -- this RfC proposes a +set of rather simple support tools to help coping with these. + + +Use Cases +~~~~~~~~~ + +Authoring:: + For now, the _authoring of new content_ is mostly a responsibility of the core developers. + It needs to be done close to the actual coding activities, typically once a new facility + is roughly finished. It is crucial for this activity not to impose a _context switch_ + on the writer. It must be ``just writing down the obvious'' -- otherwise developers tend + to defer this activity ``for later''. This situation creates some unique problems... + + - during the write-up, the coder-author realises several external presuppositions, which, + during the act of coding, seemed to be ``obvious'' and ``self evident''. + - since expanding on all of those secondary topics is out of question (the author will rather + abandon the task of documentation altogether), the only solution is to allow for *cross linking*, + even if the created links are dangling for the moment. + - the author can't afford to locate other documents and determine URLs; he won't even be willing + to consult a markup syntax reference. Because, what he's about to write is essentially hard to + put into words and thus requires his full attention. + +Integrating Content:: + This task is often prompted by some kind of external cause: it might be someone asking for + explanations and while trying to respond, it was determined that ``this should be in the documentation''. + Thus, some existing markup is extracted from an external system and pasted into some location, + ``just for the moment''. Of course, this content will be forgotten and rest there for + years to come. To deal with this situation... + + - adapting the structural cross-references of the integrated markup needs to be an easy task. 
+ - we need a way to hook the new content somehow into our existing categorisation + - the person integrating the content wont't be willing to visit a lot of other locations, + or to read a syntax reference for some kind of advanced markup. + +Editorial work:: + The _editor_ is reviewing existing content. He'll try to emulate an assumed user's point of view + to judge the adequacy of the presentation. This leads to rearranging and re-locating pages and whole + trees of pages, even splitting some of them, and adding introductory paragraphs and pages here and + there -- all without the ability to read and understand any of the reviewed material in-depth. + + - this work will be performed on the primary content using its ``natural'' representation: that is, + the editor will copy and move files in the file system and edit text. There is no room for using + any external system (unless such an external system is a fully integrated authoring environment). + - the editor needs an easy way for creating thematic groupings and overview pages. + - rearranging and splitting of pages must not break any meta-markup. + + +Tools for the task at hand +~~~~~~~~~~~~~~~~~~~~~~~~~~ +This RfC proposes a set of tools to cope with those problems. More specifically, this proposal details +a possible _front end_ for these tools, in a way which blends well with the existing Git / Asciidoc +website infrastructure. + +Cross-linking by textual ID +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Some kind of markup allowing the author _at his own discretion_ (read: not automagically) to create +a *cross-link* to another piece of information, identified just by a textual short-hand or ID (read: not +requiring any kind of URL). The markup must be very lightweight and should be very similar, if not identical +to the markup for setting an external link, e.g. `link:SomeTopic[see the explanation of Some Topic]` + +Variations and extensions + +- we might consider detecting _CamelCaseWords_. This has some Pros and Cons. 
If we do so, we need some + easy to use escape mechanism, and this CamelCase detection must not trigger within code examples and + other kinds of literal quotes. +- specifying the displayed link text should be optional +- we might consider adding some domain prefixes, e.g. +link:rfc:SomeTopic+ or +link:ticket:SomeTopic+ + or +link:code:SomeTopic+ + +Obviously, these cross-links need to be resilient towards content reorganisation after-the-fact. +Effectively this mandates introducing some kind of indirection, since we can't afford to regenerate the +whole website's HTML pages after each localised change. A possible solution could be to make the rendered +cross link invoke a JavaScript function, which in turn consults some kind of index table. Another solution +would be to let the cross link point to a server sided script. + +Tag extractor and Index +^^^^^^^^^^^^^^^^^^^^^^^ +Define suitable ways for attaching *tags* to various kinds of content. The syntax will be tailored to the +kind of content in question, typically placing the tags in some kind of comment or specific header. For +larger documents, it would be desirable to attach tags in a more fine-grained manner, e.g. tag only one +paragraph or sub-section (but this is a nice-to-have, since it is anything but trivial to implement). + +Based on these tags, there should be a mechanism to integrate a list of links into some Asciidoc page. +Obviously this list needs to be dynamic, e.g. by using JavaScript or by including pre-fabricated HTML +fragments into an IFrame, since it is impossible to re-generate all overview pages whenever some new +resource gets tagged. + +Tags should optionally support a key-value structure, allowing for additional structures and +functionality to be built on top. E.g. the cross-linking facility detailed above could rely on +additional tags +id:SomeTopic+ for disambiguation. 
The values in such a key-value definition +should be an ordered list, allowing to use all, or alternatively for the first-one or last-one +to take precedence. + +Definition entries +^^^^^^^^^^^^^^^^^^ +Define a suitable format to promote an existing piece of information into a *definition*. While not +interfering with the presentation at the textual location of this definition, this mechanism should +allow to extract this definition and present it within a glossary of terms. It would be nice if +such a generated glossary could provide an automatic back-link to the location where the definition +was picked up initially. + +Of course, defining such a markup includes some reasoning about the suitable format of a glossary +and definition-of-terms. (Scope? Just a sentence? Just a paragraph? How to utilise existing headings?) + +Additionally, this term-definition facility could be integrated with the other facilities described above: + +- cross links could pick up the ID of term definitions +- tags could be used to create focussed definition lists. + + +Constraints +~~~~~~~~~~~ +Please consider that the user of these facilities, i.e. the author or documentation editor, is in no way +interested in using them. He will not recall any fancy syntax, and he won't stick to any rules for sure. +So, anything not outright obvious is out of question. + +- since we don't want fully dynamic page generation and we can't afford regenerating the whole website + for each small update, all of these facilities need some way to adapt after-the-fact. +- we need to build leeway into the system at various places. E.g. the cross-link facility needs a + strategy to generate and match the IDs and order possible matches in a sensible way. What initially + links to some doxygen comment might later on point to a glossary if applicable. +- since content will be re-arranged just by editing text, each markup needs to be close to the + related content text, to increase the chances of keeping it intact. 
+ + + +Tasks +~~~~~ +// List what needs to be done to implement this Proposal: + * identify the actual use case(s) ([green]#✔ done#) + * define the required facilities ([green]#✔ done#) + * consider an implementation strategy [yellow-background]#WIP# + * define a suitable markup [yellow-background]#WIP# + * write the necessary scripts [red yellow-background]#TBD# + * test and integrate it into the website [red yellow-background]#TBD# + + +Discussion +~~~~~~~~~~ + +Pros +^^^^ + * in line with the general spirit of our Website infrastructure + * can be adopted gradually + + + +Cons +^^^^ + * the required scripts are non-trivial + * added complexity to the page template and website framework + * running multiple scripts on git push might become a performance bottleneck + + + +Alternatives +^^^^^^^^^^^^ +//alternatives: explain alternatives and tell why they are not viable: +. _status quo_: not doing anything to address this issues won't hurt us much right now, + + but increasingly works against building a well structured body of information + +. _using a mature CMS_: this is what most people do, so it can't be uttermost wrong. + + Yet still, the benefits of running a CMS need to outweigh the known problems, especially + + * lock-in, ``insular'' ecosystem, being tied to the evolution of a platform ``not invented here'' + * separation from the code tree, lack of seamless SCM integration + * the general penalties of using a database backed system + +. _writing our own integrated authoring framework_: obviously, this would be the perfect solution... + Anyone(TM) to volunteer? + + +Rationale +--------- +//rationale: Give a concise summary why it should be done *this* way: +Since we still want to run our website based on very lightweight infrastructure, we need to +amend some of the shortcomings and provide a minimal set of support tools. The primary purpose +of these tools is to reduce the burden of providing structured access to the documentation content. 
+Using some special markup and a preprocessor/extractor script allows for gradual adoption and seamless +integration with the existing content. The proposed markup is deliberately kept simple and self-evident +for the user; the price to pay for that ease of use comes in terms of script complexity -- the latter +can be considered a one-time investment. + + + +//Conclusion +//---------- +//conclusion: When approbate (this proposal becomes a Final) +// write some conclusions about its process: + + + + +Comments +-------- +//comments: append below + +To put this RfC into perspective, I'd like to add that Benny and myself reworked several +of the introductory pages during our last meeting at FrOSCon 2012. We had some discussions +about what needs to be done in order to make the existing content more readily available. + +In the previous years, I've written a good deal of the existing content, so I might claim +some knowledge about the real world usage situation. This RfC is an attempt to share my +understanding about the inherent impediments of our setup and infrastructure. Especially, +when compared with a full-featured wiki or CMS, a list of the most lacking features +can be distilled; I am in no way against fancy stuff, but if we're about to dedicate +some effort to our infrastructure, it should be directed foremost towards fixing +those stuff which matters in practice. 
+ +Ichthyostega:: 'So 07 Okt 2012 07:31:25 CEST' ~~ + + +//endof_comments: + +'''' +Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc/index.txt b/doc/devel/rfc/index.txt index c9e8a4515..7c0ca2c92 100644 --- a/doc/devel/rfc/index.txt +++ b/doc/devel/rfc/index.txt @@ -1,11 +1,19 @@ -Accepted Design Proposals -========================= +Design Proposal Pool +==================== -//Menu: label accepted +//Menu: label (all) //Menu: sort children -> read link:../rfc.html[more about Lumiera RfC] and the Design Process -The RfC entries listed here where discussed, maybe modified and finally agreed upon -during some developers meeting in the past. So they represent design decisions taken. +This is the pool of all RfC Design Proposals. + +Please use the navigation menu to access individual entries. +++++ +


+++++ +'''' +[small]#technically, all the RfC documents are stored within this pool directory; + +symlinks are used to link them into the subgroupings, according to the current state +(pending / accepted / dropped / parked)# + diff --git a/doc/devel/rfc_dropped/ApplicationStructure.txt b/doc/devel/rfc_dropped/ApplicationStructure.txt deleted file mode 100644 index 3b18ab92b..000000000 --- a/doc/devel/rfc_dropped/ApplicationStructure.txt +++ /dev/null @@ -1,200 +0,0 @@ -Design Process : Application Structure -====================================== - -[grid="all"]] -`------------`---------------------- -*State* _Dropped_ -*Date* _2008-11-05_ -*Proposed by* link:ct[] ------------------------------------- - -Application Structure ---------------------- -Here I am going to propose some more refined structure of the application and -its components. - - -Description -~~~~~~~~~~~ -So far we came up with a simplified BACKEND/PROC/GUI structure where each of -this entities defines its own sub subcomponents. We agreed to glue that all -together with some portable versioned interfaces system, but details where not -laid out yet. At the time of this writing the interface system and plugin -loader are reasonable finished to be usable (some small refinements to do). We -recently discussed some details on IRC on how to engage this without a -definitive decision. The topic of this proposal is to make a detailed -description towards how the application components being glued together. - -In the discussion mentioned above we concluded that we want a 'lumiera' binary -which in turn loads the optional parts as plugins. There was no consent what -this parts are actually be, except that the GUI should be optional for headless -operation. I suggested to make as much as possible pluginable to make it easier -to validate our interfaces and try different things out. 
- -Now I introduce 'lumiera' here, this will become a new component in -./src/lumiera being the driver application for bootstraping all the rest: - -Then our application structure looks somewhat like (please refine): - - * the 'lumiera' loader - - commandline handling - - interface & plugin system - - session manager core - - configuration system - - lua scripting - * backend - - file and io handling - - caches - - streams - - threads - - scheduler - * proc - - asset management - - config rules system - - builder - - render graph management - * gui - - timelines - - viewers - - resources - - preferences - - ... - -Furthermore the interface&plugin system is flexible enough to provide things -independently of their origin (if it is build in or a plugin/dynamic library). -So deployment (where to link these things) is secondary. - -'lumiera' will then be the executable the user starts up, what exactly gets - initialized and booted up is then matter -of configuration and commmandline options (and maybe lua scripting?). - - - -Tasks -^^^^^ - - * create the 'lumiera' directory - - setup the build system - - move config, plugin and interfaces therein - - lua support can be done later - * write the main() part of the application - - start config system - - parse commandline opts - * librificate all other components (backend, proc gui) - - define their lumiera interfaces - - decide if they shall be statically linked, becoming shared libs or plugins - -This are rather distributed tasks, after the 'lumiera' being set up, all other -components have to be adapted to be loadable from it. - - -Pros -^^^^ - - * flexible plugin based architecture - - later: loads only things which are necessary for a given task - * very fast startup - * things which cant be used on a given environment can be left out (no gui on - a headless system, no $DISPLAY set) - * inter dependencies between interfaces and plugins are automatically tracked. 
- - -Cons -^^^^ - -Ichthyo raised concerns that this kind of flexibility might attract other -people to write things which are not in our intention and break future design -and compatibility. We need to carefully document and define interfaces that -people don't abuse those! - - - -Alternatives -^^^^^^^^^^^^ - -We discussed the startup/main() through the GUI as it is currently done, it -would be also possible to produce some more executables (lumigui, luminode, -lumiserver, ....). But I think we agreed that a common loader is the best way -to go. - - -Rationale -~~~~~~~~~ - -I just think this is the best way to ensure a enduring design even for future -changes we can not forsee yet. - - - - - -Comments --------- -We discussed this issue lately on IRC and I got the feeling we pretty much -agreed on it. - - * we don't want to build a bunch of specialized executables, rather we build - one core app which pulls up optional parts after parsing the config - * we change the GUI to be loaded via the module/interfaces system - -From reading the above text, this proposal seems to capture that. But I am -somewhat unsure if the purpose of this proposal isn't rather to load just a -micro kernel and the pull up components according to configuration. Because I -wouldn't accept such an architecture, and I clearly stated so right at the -beginning of our project. I accepted a very flexible and language neutral -plugin system on the condition the core remains in control, stays -''reasonable'' monolithic and componentization doesn't handicap us in creating -an architecture based on abstractions and exploiting the proven design -patterns. - -It has that flexibility, yes. But that means not that we have to abuse it in -any way. The main() there and thus the bootstrap of the application is under -our tight control, if we want to reject scriptable/highly configurable -bootstrapping there then we can just do so. Thats more a social than a -technical decision. 
I personally don't like if a design is 'nannying' and puts -too much constraints into unforeseen areas. If the computer can do some task -better than we, it shall do it. This still means that I want to stay very much -in control, it should only do some tedious, error-prone managing tasks for me. -For example the interfaces system already tracks inter-dependencies between -plugins and interfaces automatically, without the programmer needs to care or -define anything. The interface system gets it right and we wont need to care -for the order initialization. I added that because I consider such as -absolutely important for plugins which might be supplied by third parties where -we have no control over. But I now realized that we can nicely use that for our -own internal things too. Imo thats some very valuable service. - -- link:ct[] [[DateTime(2008-11-08T06:26:18Z)]] - -Some further minor details: We didn't finish the discussion about namespaces on -the last meeting. (I know I still have to write up a proposal showing the two -or three alternatives I see regarding namespace organisation). But probably, -"lumiera::" will be our top level interface namespace and then probably the -lumiera directory will be taken by that. I see no problem also putting some -startup facilities in there, but generally, it shouldn't contain implementation -code, only headers and abstract classes. If that's going to become a problem, -we should consider to use a separate package for the startup, e.g. "src/boot". - -Another point is, you need not write a main, because there is already one. -Please have a look at it, especially with regards to the -[wiki:self:../GlobalInitialization global initialisation]. Further, last year -I've investigated boost::program_options and think it's fine. I use it for my -test class runner since then. I don't think there is any reason why we should -bother with parsing options (most config is pulled up from the session). 
I -don't think we get much program options, maybe something to set a GUI skin. -Moreover, I've written last year a thin wrapper around the commandline and -integrated it with the boost options parser such that user code can receive the -remaining options as a vector of std::strings. Please have a look at -link:http://git.lumiera.org/gitweb?p=LUMIERA;a=blob;f=tests/common/mainsuite.cpp;h=455bfd98effd0b7dbe6597f712a1bdfa35232308;hb=80e1e382f42512ebf2e10a802f77e50327b8fb73[the test class runner main] -for an usage example. I really want our Lumiera main to be clean and expressive -in the way showed there. Probably the most important part of the startup is -pulling up the session core; because of that I think most of the startup -process falls into the realm of the Proc-Layer. Within Proc, I don't want any -significant string manipulations done with C-strings and I don't want raw -arrays when we can use std::vector. - -- link:Ichthyostega[] [[DateTime(2008-11-06T19:28:13Z)]] - -I 'dropped' this now because we do it somewhat differently now and I dont want -to document this here :P - -- link:ct[] [[DateTime(2009-02-03T17:28:28Z)]] - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_dropped/ApplicationStructure.txt b/doc/devel/rfc_dropped/ApplicationStructure.txt new file mode 120000 index 000000000..0bba2a8db --- /dev/null +++ b/doc/devel/rfc_dropped/ApplicationStructure.txt @@ -0,0 +1 @@ +../rfc/ApplicationStructure.txt \ No newline at end of file diff --git a/doc/devel/rfc_dropped/MistakestoAvoid.txt b/doc/devel/rfc_dropped/MistakestoAvoid.txt deleted file mode 100644 index 4c671b8c8..000000000 --- a/doc/devel/rfc_dropped/MistakestoAvoid.txt +++ /dev/null @@ -1,427 +0,0 @@ -Design Process : Mistakes to avoid -================================== - -[grid="all"] -`------------`----------------------- -*State* _Dropped_ -*Date* _2008-04-21_ -*Proposed by* link:rick_777[] -------------------------------------- - - 
-Mistakes to avoid in the Lumiera design ---------------------------------------- - - -As a multimedia user and experienced programmer, I've found various flaws -present in open source Non Linear Video editors. Here I will list the problems -and their proposed (or mandatory) solutions. Please forgive me if some of the -ideas here have already been approved, I wrote this text before reaching this -wiki. - - -Description -~~~~~~~~~~~ - - -As a multimedia user and experienced programmer, I've found the following flaws -present in open source Non Linear Video editors (your mileage may vary) : - -. Frequent crashes (which most of the time make you lose your work) -. Reinventing the wheel for every new project -. Lack of a user-friendly (and extensible) UI -. Lack of support for certain video formats or codecs -. Lack of documentation -. Lack of cross-platform support -. Dependency on scripted languages like Python, which make installation a mess - -I will expand on the problems and their proposed (or mandatory) solutions. - - -1. Frequent crashes -~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Frequent Crashes and unsaved work. -*Severity* CRITICAL. -*Solution* Isolating the UI from the rendering and data handling (also - improves the extensibility) -*Required* Yes -*Workarounds* Auto-save (however it's not a real solution for the problem) --------------------------------------------------------------------- - -Working with multimedia (video / audio) editing is a magnet for segfaults -(crashes) due to the handling of pointers and compression algorithms. A bug in -a plugin (like in Audacity's low-pass filter) will crash and you suddenly -realize you lost your work - unless you have an auto-save feature, but that -doesn't go to the root of the problem. 
- -My proposal is to move the low-level handling of video to a separate process, -which then will do the processing - if it crashes, the UI will only report an -error with a dialog (i.e. "the process crashed. Try again?"), but you work will -stay safe. I'm not sure of the implementation difficulties that arise from -having a shared memory buffer for rendering / processing, but one thing is -certain: Whenever you move the cursor or rewind a part of a clip in your -resources, the application isn't supposed to crash. Just moving the cursor -isn't a time-critical task, so perhaps we can use temporary files for this. -It's safer if you're not doing the final rendering. - - -Comments -^^^^^^^^ - -I am not sure yet about separating things into processes, generally it is clear -that this would be more robust but there are some performance impacts and -programming problems (massisve amounts of data in shared memory). But most -importantly, when a subprocess gets a job and crashes on it, it won't complete -the job, we don't have a choice except gracefully abort it. From a user -perspective "It doesn't work!" there is no much difference to a complete crash. -Well and yes we aim to make it crash proof rather, crashes a bugs and have to -be fixed, point. - -Lumiera will never ever loose work, we don't plan to make a project file, -autosafe way. Lumiera will keep projects in an internal database like format -which consists of a Dumpfile and a contingous written logfile. After a -crash/powerdown whatever, this log just gets replayed. The advantages are -countless, imagine persistent, selective undo and so on. Any other format -(cinelerra2 XML, MXF, ...) will be realized by importer/exporter plugins. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - - -2. 
Reinventing the wheel for every new project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Various projects compete and reinvent the wheel -*Severity* Serious (Slows down development time. A lot) -*Solution* Multi-tier design, turn the data handling into a backend and use - whatever UI you prefer -*Required* Yes. Better now that the project hasn't started ---------------------------------------------------------------------- - -Imagine the Linux kernel was tied to the window manager. You would have to -stick with KDE or GNOME and you couldn't improve it! Fortunately it's not like -that for Linux, but it is for some projects. If you want to change the user -interface from QT to wxWidgets or GTK you'll need to rewrite every module. - -If you separate the UI from the project handling engine, you can simply fork -the project and change the UI to one that supports skinning, without having to -do the complicated video-processing stuff. - -Separating the processes has an equivalent for web programming, it's called -"separation of concerns", or multi-tier design. When you suddenly change the -database engine, you don't need to change the whole program, just the database -module. Same goes for changing the UI from HTML to XML or Flash. If they're -separate modules that only communicate through a clearly defined API. - -Example case 1: The Code::Blocks IDE. The compiling engine supports various -compilers, and the engine itself is only a plugin for the main editor. If the -compiler crashes, you only get an error, but the IDE doesn't crash (unless it's -the UI part that's doing something wrong). - -Example case 2: Chessmaster. The user interface and speech synthesis stuff only -call the chess engine, called "theking.exe". Linux chess games also depend on -an engine to do the thinking. - -So I suggest to split the project into four separate tiers (not necessarily -processes): - -. 
User interface - communicates with the "project" tier, handles the user - events and does the calls. -. The project tier - the main part of the video editor. This one invokes the - renderer and decides which effects to apply, saving them as mere parameters - for later processing. It also tells you where the current pointer for the - track view is. Also calls the rendering engine for the current frame, or for - previews of a certain special effect. Note that if this process keeps running - even if the GUI crashes, later we can restart the GUI and keep working. -. The rendering engine - This one must be a separate process for the reasons - stated in problem #1. This also gives us the advantage that it can work on - the background while we keep working on the project (after all the project is - just a set of data stating which effects to apply to which tracks, and which - files are used for the tracks) - instead of just having a window saying - "Rendering, please wait". Even Adobe Premiere Pro suffered from this problem. - This means that if we put enough effort, we can surpass commercial software - in certain areas. Note that the rendering engine uses the same API than the - project tier, as it works on a copy of the project when doing the final - rendering. -. The video processing wrapper, which has interfaces for different video - processing toolkits (DirectX, GStreamer, etc). This also makes the project - cross-platform. Tiers 1 and 2 can go in one process, and the 3 and 4 in - another (this would make tier 2 a library which defines a C++ Class, and tier - 4 would also be a library which is used by the rendering engine). - -By separating the tiers, these can later become their own projects and overall -the community would receive great benefits. - - -Comments -^^^^^^^^ - -Please look at our design drafts, things will be separated (little different -than you describe here). We reuse things which are benefitful (gavl, ffmpeg, -..) 
but we are also aware that we reinvent the wheel for some things by -intention. Lumieras goal is not just to glue some existing libraries together -under a new gui, there are already a lot projects trying this way. We rather -aim for a ''Professional'' high performance Video editing solution which does -some things in a different (maybe more complex) way. We do not use existing -frameworks like MLT or gstreamer because we believe that these do not fit our -goals (gstreamer will be supported through plugins). We do not produce yet -another multimedia framework library (this only happen by coincidence) to be -used by others. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - -3. Lack of a user-friendly and extensible UI. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Often, editors provide a very poor and buggy interface. - Examples: Jahshaka doesn't even provide tooltips for the various tools, and - the documentation is poor; In Cinelerra I've noticed some bugs when using the - open dialog, I'd rather have the KDE one, thanks. -*Severity* From Annoying to Serious. -*Solution 1* Use a library that allows you to use different widget - libraries, like wxWidgets. -*Required* Recommended, but not obligatory. -*Solution 2* Write different user interfaces, but they'd be hard to maintain. -*Required*, No. ---------------------------------------------------------------------- - -This problem is complicated, we need a good framework for handling the tracks. -Perhaps this could become a separate project. Ideas are welcome. - - -Comments -^^^^^^^^ - -Joel started working on a GUI recently and making good progress. The UI should -finally be quite flexible as it mostly provides a skeletion where plugins -render to. We have quite a lot ideas about the UI and user input is welcome. 
-The UI is currently the most separate tier in the design, i'd like to make it a -plugin itself which is loaded when lumiera is started in a gui mode, but it is -to early to say how exactlly it will be integrated, except that we all agree -that GUI is optional and Lumiera can also run headless, script driven. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - -4. Lack of support for certain video formats or codecs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Lack of support for certain video formats or codecs. -*Severity* Critical. -*Workarounds* 1. Give a help page for the user to do his own conversion, but - this is very inelegant, annoying, and a waste of time. 2. Provide conversion - on the fly, and keep a separate "preprocessed" copy of the imported clip in a - separate directory. This is a nice middle ground, IMHO. -*Solution* Use a wrapper library as stated in problem # 2, having a - plugin-based design is recommended. -*Required* Yes. ---------------------------------------------------------------------- - -Some editors like Cinelerra are hardwired into using one format, or have a -phobia to certain formats / codecs (i.e. DivX AVI's). If we separate the -project editing engine from the video handling libraries, we can use unknown -formats by simply providing an input/output plugin. This would allows us to use -files encoded with lossless codecs like -http://lags.leetcode.net/codec.html[Lagarith]. This also provides forward -compatibility for future formats. - - -Comments -^^^^^^^^ - -Lumiera is a video editor we don't care (*cough*, not really true) about video -formats. Everything which comes In and goes Out is defined in plugins which -handle video formats. We currently decided to use 'gavl' because it is a nice -small library which does exactly what we want. 
Later on gstreamer and other -such kinds of decoder/encoder/processing-pipe libs will be realized. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - -5. Lack of documentation -~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Some video editors have very poor documentation (and that's an - understatement *cough* Jahshaka *cough* ) -*Severity* Critical. -*Solution* Have a team for the documentation. -*Required* Yes. ---------------------------------------------------------------------- - -Nuff said. - - -Comments -^^^^^^^^ - -Quote from Ohloh.net: (http://www.ohloh.net/projects/lumiera)[] - ------------------------------------------------------------- -Extremely well-commented source code - -Lumiera is written mostly in C++. Across all C++ projects on Ohloh, 23% of all -source code lines are comments. For Lumiera, this figure is 46%. This very -impressive number of comments puts Lumiera among the best 10% of all C++ -projects on Ohloh. ------------------------------------------------------------- - - -Nuff saied... Oh well, about user docs we like to get that impressive ratings -there too, any helpers? - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - -6. Lack of cross-platform support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Where's my Windows version? -*Severity* Blocker -*Solution* Use a cross-platform toolkit for the UI. -*Required* Depends, do you plan to make it Cross-Platform? --------------------------------------------------------------------- - -A good example for this is the Code::Blocks IDE, which was thought of being -cross-platform from the beginning. Curiously, at first the project was -Windows-only, and its only F/OSS alternative was Dev-C++ from Bloodshed (eew). -Otherwise you'd have to stick with proprietary applications like Visual C++. 
- -In Linux there were various IDE's, but they were Linux-only. Since Code::Blocks -uses a cross-platform toolkit (wxWidgets), it can be compiled either in Windows -and Linux. There are RPM's for various distros now that the first public -version (8.02) got out. I've heard that QT is also cross-platform, but I -haven't tried it yet. - -Of course - if you separate the UI from the project engine, someone could make -his own Windows UI for the project. Now what needs to be taken care of, is that -the rendering libraries are cross-platform too. - - -Comments -^^^^^^^^ - -We refuse to make it cross platform intentionally. Most things are written -portable, POSIX compatible, some might need platform specific fixes. But our -target is primary Linux (because thats what we use) secondary any other Free OS -(hopefully we find some testers/maintainers for that). Lumiera ''might'' run on -OSX and patches will be accepted, but it is not a free platform so we don't -care by ourself. Windows due its diffrent system interfaces will be hard to -port, if someone wants to do that, have fun, we will accept patches to, but we -do not support it in *any* way by ourself. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - -7. Dependency on scripted languages like Python, which make installation a mess -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -[grid="all"] -`------------`------------------------------------------------------ -*Problem* Installation can be a mess if we depend on scripted languages. -*Severity* Annoying, the end user might just conform with another project - that "just works". -*Solution* Make it in C++ or other easily-compilable language. -*Required* VERY recommended. ---------------------------------------------------------------------- - -I've had to install several packages for my distro (whose repository is not as -large as others like Ubuntu's) from source. 
Some of them depend on very -esoteric scripted languages which I also need to install. And then the -libraries, etc. My suggestion is to free the end user from this burden, and -work on a common language, like C++. - - -Comments -^^^^^^^^ - -At some point a scripting language ''will'' be required, yet to drive the -testsuite, make headless rendering work and so on. We need to provide -installation instructions and/or even bundle this language with Lumiera. This -will likely become a small embedded language like Lua or some kind of forth (or -maybe some scheme?) it should not depend on strange modules which are not part -of the core scripting language distribution (or we shall provide them too), -needs to be worked out. - -- link:ct[] [[DateTime(2008-04-21T11:27:23Z)]] - - - -Author's comments -^^^^^^^^^^^^^^^^^ - -Some of the measures stated in this document are optional, but separating the -processes for the rendering engine, editor and User Interface are the optimal -solution and required to avoid common problems. - - -Discussion ----------- - -Mostly we agree with the general statements in this Design Entry. But there are -some points which don't stand the test of a detailed technical discussion. For -example, you simply can't state it's a 'mistake' not to write code which -similarly runs on windows and *nix. Well. You could try to write it in Java. -See my point? While today it's quite feasible to write office stuff or banking -applications in a cross-platform manner, a video editor still is a different -kind of a beast. - -A similar argumentation holds true for the question, wether or not to use -separate processes and IPC. While it certainly is a good idea to have the X -server or a database running in a separate process, the situation is really -quite different for editing video. Hopefully it's clear why. - -Could you please rework this Design Entry in a way that we can finalize -(accept) it? 
- -* Please remove the section about windows -* Please separate out things needing technical discussion and are not just - "mistakes", thus retaining only the big picture statements (on which we all - agree) -* How to secure the application against crashes -* If it is viable/desirable to run the gui in a separate process really needs - in-depth technical discussion (create a new Design Entry for this) -* How to deal with the dependencies problem in combination with - plugins/extensions and script languages - -- link:Ichthyostega[] [[DateTime(2008-10-05T01:51:50Z)]] - - -Conclusion ----------- -The October.2008 dev meeting decided to 'drop' this design proposal as is. - -Basically, this text just tells us "to make Lumiera good", and especially it -contains a mixture of topics - -* We fully agree to 80% of the statements made there, but we think those - statements are so very basic and self-evident as to be considered off-topic - here. We are aware of the recurring problems with open source video editing. - That's why we are here. -* The proposal draws conclusions on two technically substantial points, at - which we don't agree. And it fails to provide sufficient (technically sound) - arguments to prove these statements. - -While it is certainly 'desirable' to be cross-platform as much as possible and -especially '''target Microsoft Windows''', we don't see much possibilities with -today's mainstream technology to build an application which is as -technologically demanding as a video editor is. We would end up developing two -or even three sister applications, or we are forced to sacrifice performance -for portability. When put up to face such options, we have a clear preference -to concentrate on a really free and open platform. 
- -While it is certainly 'desirable' to make the application as robust as -possible, we don't see how '''using multiple separate processes''' could help -us with this goal ''without creating major scalability or performance -problems'' due to the use of shared memory. And, yet more important: we don't -share the basic assumption made in the proposal, namely that video processing -is inherently dangerous. We think the basic algorithms involved are -sufficiently well-known and understandable to implement them in a sound manner. - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_dropped/MistakestoAvoid.txt b/doc/devel/rfc_dropped/MistakestoAvoid.txt new file mode 120000 index 000000000..5931c6693 --- /dev/null +++ b/doc/devel/rfc_dropped/MistakestoAvoid.txt @@ -0,0 +1 @@ +../rfc/MistakestoAvoid.txt \ No newline at end of file diff --git a/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt b/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt deleted file mode 100644 index a60444951..000000000 --- a/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt +++ /dev/null @@ -1,161 +0,0 @@ -Design Process : Official Assembly Language -=========================================== - -[grid="all"] -`------------`----------------------- -*State* _Dropped_ -*Date* _2008-08-01_ -*Proposed by* link:PercivalTiglao[] -------------------------------------- - - -Official Assembly Language --------------------------- - -I describe here an optimization that might have to be be taken into account at -the design level. At very least, we should design our code with -auto-vectorization in mind. At the most, we can choose to manually write parts -of our code in assembly language and manually vectorize it using x86 SSE -Instructions or !PowerPC !AltiVec instructions. By keeping these instructions -in mind, we can easily achieve a large increase in speed. 
- - -Description -~~~~~~~~~~~ - -While the C / C++ core should be designed efficiently and as portable as -possible, nominating an official assembly language or an official platform can -create new routes for optimization. For example, the x86 SSE instruction set -can add / subtract 16 bytes in parallel (interpreted as 8-bit, 16-bit, 32-bit, -or 64-bit integers, or 32-bit/64-bit floats), with some instructions supporting -masks, blending, dot products, and other various instructions specifically -designed for media processing. While the specific assembly level optimizations -should be ignored for now, structuring our code in such a way to encourage a -style of programming suitable for SSE Optimization would make Lumiera -significantly faster in the long run. At very least, we should structure our -innermost loop in such a way that it is suitable for gcc's auto-vectorization. - -The problem is that we will be splitting up our code. Bugs may appear on some -platforms where assembly-specific commands are, or perhaps the C/C++ code would -have bugs that the assembly code does not. We will be maintaining one more -codebase for the same set of code. Remember though, we don't have to do -assembly language now, we just leave enough room in the design to add -assembly-level libraries somewhere in our code. - - -Tasks -~~~~~ - -* Choose an "Official" assembly language / platform. -* Review the SIMD instructions avaliable for that assembly language. -* For example, the Pentium 2 supports MMX instructions. Pentium 3 supports MMX - and SSE Instructions. Early Pentium4s support MMX, SSE, and SSE2 - instructions. Core Duo supports upto SSE4 instructions. AMD announced SSE5 - instructions to come in 2009. -* Consider SIMD instructions while designing the Render Nodes and Effects - architecture. -* Write the whole application in C/C++ / Lua while leaving sections to optimize - in assembly later. 
(Probably simple tasks or a library written in C) -* Rewrite these sections in Assembly using only instructions we agreed upon. - - -Pros -~~~~ - -Assuming we go all the way with an official assembly language / platform... - -* Significantly faster render and previews. (Even when using a high-level - library like http://www.pixelglow.com/macstl/valarray/[macstl valarray], we - can get 3.6x -- 16.2x the speed in our inner loop. We can probably expect - greater if we hand-optimize the assembly) - - -Cons -~~~~ - -* Earlier architectures of that family will be significantly slower or - unsupported -* Other architectures will rely on C / C++ port instead of optimized assembly -* Redundant Code - - -Alternatives -^^^^^^^^^^^^ - -* We only consider auto-vectorization -- GCC is attempting to convert trivial - loops into common SSE patterns. Newer or Higher level instructions may not be - supported by GCC. This is turned on - http://gcc.gnu.org/projects/tree-ssa/vectorization.html[in GCC4.3 with - specific compiler flags] -* We can consider assembly but we don't officially support it -- We leave the - holes there for people to patch up later. Unofficial ports may come up, and - maybe a few years down the line we can reconsider assembly and start to - reimplement it down the road. -* Find a SIMD library for C/C++ -- Intel's ICC and - http://gcc.gnu.org/onlinedocs/gcc-3.4.6/gcc/Vector-Extensions.html[GCC] both - have non-standard extensions to C that roughly translate to these - instructions. There is also the - http://www.pixelglow.com/macstl/valarray/[macstl valarray library] mentioned - earlier. Depending on the library, the extensions can be platform specific. -* Write in a language suitable for auto-vectorization -- Maybe there exists - some vector-based languages? Fortran might be one, but I don't really know. - - -Rationale -~~~~~~~~~ - -I think this is one of those few cases where the design can evolve in a way -that makes this kind of optimization impossible. 
As long as we try to keep this -optimization avaliable in the future, then we should be good. - - -Comments --------- - -* I have to admit that I don't know too much about SSE instructions aside from - the fact that they can operate on 128-bits at once in parallel and there are - some cache tricks involved when using them. (you can move data in from memory - without bringing in the whole cache line). Nonetheless, keeping these - assembly level instructions in mind will ease optimization of this Video - Editor. Some of the instructions are high-level enough that they may effect - design decisions. Considering them now while we are still in early stages of - development might prove to be advantagous. Optimize early? Definitely not. - However, if we don't consider this means of optimization, we may design - ourselves into a situation where this kind of optimization becomes - impossible. - -* I don't think we should change any major design decisions to allow for - vectorization. At most, we design a utility library that can be easily - optimized using SIMD instructions. Render Nodes and Effects can use this - library. When this library is optimized, then all Render Nodes and Effects - can be optimized as well. -- link:PercivalTiglao[] - [[DateTime(2008-08-01T16:12:11Z)]] - -* Uhm, the Lumiera core (backend, proc, gui) doesn't do any numbercrunching. - This is all delegated to plugins (libgavl, effects, encoders). I think we - don't need any highly assembler/vector optimized code in the core (well, lets - see). This plugins and libraries are somewhat out of our scope and thats good - so, the people working on it know better than we how to optimize this stuff. - It might be even worthwile to try if when we leave all vectorization out, if - then the plugins can use the vector registers better and we gain overall - performance! 
- -- link:ct[] [[DateTime(2008-08-03T02:27:14Z)]] - -* Another idea about a probably worthwhile optimization: gcc can instumentate - code for profileing and then do arc profileing and build it a second time - with feedback what it learnd from the profile runs, this mostly affects - branch prediction and can give a reasonable performance boost. If somone - likes challenges, prepare the build system to do this: -. build it with -fprofile-arcs -. profile it by running ''carefully'' selected benchmarks and tests. -. rebuild it again this time with -fbranch-probabilities -. PROFIT - -- link:ct[] [[DateTime(2008-08-03T02:27:14Z)]] - -* I've discussed general ideas around, and I agree now that "core Lumiera" is - not the place to think of these kinds of optimizations. So I'll just move - this over to dropped. -- link:PercivalTiglao[] - [[DateTime(2008-08-04T18:33:58Z)]] - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt b/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt new file mode 120000 index 000000000..75b5afb1d --- /dev/null +++ b/doc/devel/rfc_dropped/OfficialAssemblyLanguage.txt @@ -0,0 +1 @@ +../rfc/OfficialAssemblyLanguage.txt \ No newline at end of file diff --git a/doc/devel/rfc_dropped/TagCloudsOnResources.txt b/doc/devel/rfc_dropped/TagCloudsOnResources.txt deleted file mode 100644 index 2c36d76dc..000000000 --- a/doc/devel/rfc_dropped/TagCloudsOnResources.txt +++ /dev/null @@ -1,81 +0,0 @@ -Design Process : Tag Clouds for Resources -========================================= - -[grid="all"] -`------------`----------------------- -*State* _Dropped_ -*Date* _2008-07-15_ -*Proposed by* link:PercivalTiglao[] -------------------------------------- - - -Tag Clouds for Resources ------------------------- - -Perhaps a Cloud of tags is unnecessary, but tagging resources similar to -Youtube or like Tag Clouds allows for efficient searching and filtering. 
Anyone -who uses the web would know how to use them. If a "Cloud of Tags" approach is -used, then organizing the tags by some sort of frequency would be useful. IE: -the more a specific tag is used, the larger it gets, or perhaps the more often -that tag is searched on. - - -Description -~~~~~~~~~~~ - - - -Tasks -~~~~~ - - -Pros -~~~~ - -* Simple GUI Concept -* Eases management of resources with Search -* Orthogonal to other resource management schemes like Folders - - -Cons -~~~~ - - -Alternatives -~~~~~~~~~~~~ - - - -Rationale -~~~~~~~~~ - - - - - -Comments --------- - -* Note: I was inspired with this idea during an email conversation with - Rick777. -- link:PercivalTiglao[] [[DateTime(2008-07-17T14:29:57Z)]] - -* Agreed, this is usefull. Also, more advanced config rules can make use of - such tags and wiring can depend on them, for example to route your dialogue - audio to another global bus than the music or ambiance. - -- link:Ichthyostega[] [[DateTime(2008-07-27T22:23:38Z)]] - - - -Conclusion ----------- - -This Design Proposal is 'superseded' by a much more advanced proposal: -link:DelectusShotEvaluator[Delectus] - -(Dropping it doesn't mean disapproval) - - -'''' - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_dropped/TagCloudsOnResources.txt b/doc/devel/rfc_dropped/TagCloudsOnResources.txt new file mode 120000 index 000000000..c3d9d0035 --- /dev/null +++ b/doc/devel/rfc_dropped/TagCloudsOnResources.txt @@ -0,0 +1 @@ +../rfc/TagCloudsOnResources.txt \ No newline at end of file diff --git a/doc/devel/rfc_dropped/UnitTests_Python.txt b/doc/devel/rfc_dropped/UnitTests_Python.txt deleted file mode 100644 index 9d71205e6..000000000 --- a/doc/devel/rfc_dropped/UnitTests_Python.txt +++ /dev/null @@ -1,82 +0,0 @@ -Design Process : Unit Tests Python -================================== - - -[grid="all"] -`------------`----------------------- -*State* _Dropped_ -*Date* _2007-06-17_ -*Proposed by* 
link:Ichthyostega[] -------------------------------------- - - -UnitTests in Python -------------------- - -Use the Python scripting language for the actual Unit Tests and access the -Cinelerra Code via SWIG - - -Description -~~~~~~~~~~~ - -Define Test classes in Python, using e.g. the link:PyUnit[] framework of the -Python Standard lib. The SWIG compiler can generate wrapper code automatically, -so we can access the C++ Classes and Facilities of Cinelerra as Python Modules -and Classes. The Classes to be tested in Cinelerra need to provide some -Interface for carrying out these tests (and this is one of the main benefits of -the whole Test driven aproach). - - -Tasks -~~~~~ - - * Find out how the SWIG generated wrappers play together with Python's List - and Map types. Without the ability to use the latter in the tests, this - whole proposal is rather pointless. - * Think on how we can test video data processing (at least in its basics, e.g. - does additive overlay work) - - -Pros -~~~~ - -Programming Unit and Self tests in a Scripting language facillates this task. -The X-Language bindings are quite usable today. As a side effect, it helps to -get a clean program structure, because the tests need some Interface and/or -some object factories to create the test candidates. Python is proposed, -because it is fairly mainstream, has a flat learning curve and but is -moderately modern and functional-style at the same time. - -Cons -~~~~ - - * Adds to the complexity - * Some old-style hackers have a quite distinct aversion against Python - - -Alternatives -~~~~~~~~~~~~ - -Rationale -~~~~~~~~~ - -Why am I proposing this? Out of lazyness. Python is there, many devs (on linux) -have some Python skills, SWIG is not overly complicated to use. - -And last but not least: just to get the discussion going... 
;-) - - -Comments --------- - -* I'd rather consider to use some embedded language in cinelerra which we can - use to drive tests, should be something smaller and more sane than python. - Needs certainly more discussion. For simple unit tests some C/C++ harness and - bit shell scripting would suffice, I really want to integrate this with - link:NoBug[]. - -- link:ct[] [[DateTime(2007-06-17T17:32:27Z)]] - - -'''' -Back to link:../DesignProcess[] diff --git a/doc/devel/rfc_dropped/UnitTests_Python.txt b/doc/devel/rfc_dropped/UnitTests_Python.txt new file mode 120000 index 000000000..8e20f915a --- /dev/null +++ b/doc/devel/rfc_dropped/UnitTests_Python.txt @@ -0,0 +1 @@ +../rfc/UnitTests_Python.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/AllPluginInterfacesAreC.txt b/doc/devel/rfc_final/AllPluginInterfacesAreC.txt new file mode 120000 index 000000000..e7c654b8f --- /dev/null +++ b/doc/devel/rfc_final/AllPluginInterfacesAreC.txt @@ -0,0 +1 @@ +../rfc/AllPluginInterfacesAreC.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ArchitectureOverview.txt b/doc/devel/rfc_final/ArchitectureOverview.txt new file mode 120000 index 000000000..381b9c79f --- /dev/null +++ b/doc/devel/rfc_final/ArchitectureOverview.txt @@ -0,0 +1 @@ +../rfc/ArchitectureOverview.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/CCodingStyleGuide.txt b/doc/devel/rfc_final/CCodingStyleGuide.txt new file mode 120000 index 000000000..17dc2f359 --- /dev/null +++ b/doc/devel/rfc_final/CCodingStyleGuide.txt @@ -0,0 +1 @@ +../rfc/CCodingStyleGuide.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/CodingStyle.txt b/doc/devel/rfc_final/CodingStyle.txt new file mode 120000 index 000000000..3542b0cd7 --- /dev/null +++ b/doc/devel/rfc_final/CodingStyle.txt @@ -0,0 +1 @@ +../rfc/CodingStyle.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/DIR_INFO b/doc/devel/rfc_final/DIR_INFO new file mode 100644 index 000000000..393ae51b8 --- 
/dev/null +++ b/doc/devel/rfc_final/DIR_INFO @@ -0,0 +1 @@ +accepted design proposals diff --git a/doc/devel/rfc_final/DataBackend.txt b/doc/devel/rfc_final/DataBackend.txt new file mode 120000 index 000000000..72d42844e --- /dev/null +++ b/doc/devel/rfc_final/DataBackend.txt @@ -0,0 +1 @@ +../rfc/DataBackend.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/DevelopmentFramework.txt b/doc/devel/rfc_final/DevelopmentFramework.txt new file mode 120000 index 000000000..0f6ff716e --- /dev/null +++ b/doc/devel/rfc_final/DevelopmentFramework.txt @@ -0,0 +1 @@ +../rfc/DevelopmentFramework.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/DistributedDevelopmentFramework.txt b/doc/devel/rfc_final/DistributedDevelopmentFramework.txt new file mode 120000 index 000000000..ea5c1dcf9 --- /dev/null +++ b/doc/devel/rfc_final/DistributedDevelopmentFramework.txt @@ -0,0 +1 @@ +../rfc/DistributedDevelopmentFramework.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/EDLsAreMetaClips.txt b/doc/devel/rfc_final/EDLsAreMetaClips.txt new file mode 120000 index 000000000..3a2ad62ed --- /dev/null +++ b/doc/devel/rfc_final/EDLsAreMetaClips.txt @@ -0,0 +1 @@ +../rfc/EDLsAreMetaClips.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/GitCommitMessageFormat.txt b/doc/devel/rfc_final/GitCommitMessageFormat.txt new file mode 120000 index 000000000..1de41b3f6 --- /dev/null +++ b/doc/devel/rfc_final/GitCommitMessageFormat.txt @@ -0,0 +1 @@ +../rfc/GitCommitMessageFormat.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/GloballInitialization.txt b/doc/devel/rfc_final/GloballInitialization.txt new file mode 120000 index 000000000..cca5f8b26 --- /dev/null +++ b/doc/devel/rfc_final/GloballInitialization.txt @@ -0,0 +1 @@ +../rfc/GloballInitialization.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/HowToProceed.txt b/doc/devel/rfc_final/HowToProceed.txt new file mode 120000 index 000000000..d282ce97b --- /dev/null 
+++ b/doc/devel/rfc_final/HowToProceed.txt @@ -0,0 +1 @@ +../rfc/HowToProceed.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/InterfaceNamespaces.txt b/doc/devel/rfc_final/InterfaceNamespaces.txt new file mode 120000 index 000000000..fe26b9f57 --- /dev/null +++ b/doc/devel/rfc_final/InterfaceNamespaces.txt @@ -0,0 +1 @@ +../rfc/InterfaceNamespaces.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/LumieraDesignProcess.txt b/doc/devel/rfc_final/LumieraDesignProcess.txt new file mode 120000 index 000000000..d17096282 --- /dev/null +++ b/doc/devel/rfc_final/LumieraDesignProcess.txt @@ -0,0 +1 @@ +../rfc/LumieraDesignProcess.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/LumieraForwardIterator.txt b/doc/devel/rfc_final/LumieraForwardIterator.txt new file mode 120000 index 000000000..ea1de15fe --- /dev/null +++ b/doc/devel/rfc_final/LumieraForwardIterator.txt @@ -0,0 +1 @@ +../rfc/LumieraForwardIterator.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/MakeSconsTheOfficialBuildSystem.txt b/doc/devel/rfc_final/MakeSconsTheOfficialBuildSystem.txt new file mode 120000 index 000000000..0f912dab7 --- /dev/null +++ b/doc/devel/rfc_final/MakeSconsTheOfficialBuildSystem.txt @@ -0,0 +1 @@ +../rfc/MakeSconsTheOfficialBuildSystem.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/Manifest.txt b/doc/devel/rfc_final/Manifest.txt new file mode 120000 index 000000000..79f57e208 --- /dev/null +++ b/doc/devel/rfc_final/Manifest.txt @@ -0,0 +1 @@ +../rfc/Manifest.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/MarbleMode.txt b/doc/devel/rfc_final/MarbleMode.txt new file mode 120000 index 000000000..e0ae6aa60 --- /dev/null +++ b/doc/devel/rfc_final/MarbleMode.txt @@ -0,0 +1 @@ +../rfc/MarbleMode.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/MasterRepositorySetup.txt b/doc/devel/rfc_final/MasterRepositorySetup.txt new file mode 120000 index 000000000..62979ff09 --- /dev/null 
+++ b/doc/devel/rfc_final/MasterRepositorySetup.txt @@ -0,0 +1 @@ +../rfc/MasterRepositorySetup.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/MonthlyDeveloperMeetingOnIRC.txt b/doc/devel/rfc_final/MonthlyDeveloperMeetingOnIRC.txt new file mode 120000 index 000000000..40a5b74c4 --- /dev/null +++ b/doc/devel/rfc_final/MonthlyDeveloperMeetingOnIRC.txt @@ -0,0 +1 @@ +../rfc/MonthlyDeveloperMeetingOnIRC.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/NoBugFlags.txt b/doc/devel/rfc_final/NoBugFlags.txt new file mode 120000 index 000000000..e1c41a2b5 --- /dev/null +++ b/doc/devel/rfc_final/NoBugFlags.txt @@ -0,0 +1 @@ +../rfc/NoBugFlags.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ProcBuilder.txt b/doc/devel/rfc_final/ProcBuilder.txt new file mode 120000 index 000000000..05c489045 --- /dev/null +++ b/doc/devel/rfc_final/ProcBuilder.txt @@ -0,0 +1 @@ +../rfc/ProcBuilder.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ProcHighLevelModel.txt b/doc/devel/rfc_final/ProcHighLevelModel.txt new file mode 120000 index 000000000..5b8ffe111 --- /dev/null +++ b/doc/devel/rfc_final/ProcHighLevelModel.txt @@ -0,0 +1 @@ +../rfc/ProcHighLevelModel.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ProcPlacementMetaphor.txt b/doc/devel/rfc_final/ProcPlacementMetaphor.txt new file mode 120000 index 000000000..0c27e6afc --- /dev/null +++ b/doc/devel/rfc_final/ProcPlacementMetaphor.txt @@ -0,0 +1 @@ +../rfc/ProcPlacementMetaphor.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/RepositorySetup.txt b/doc/devel/rfc_final/RepositorySetup.txt new file mode 120000 index 000000000..beb22c4bc --- /dev/null +++ b/doc/devel/rfc_final/RepositorySetup.txt @@ -0,0 +1 @@ +../rfc/RepositorySetup.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/Roadmap-first.txt b/doc/devel/rfc_final/Roadmap-first.txt new file mode 120000 index 000000000..eb67764c2 --- /dev/null +++ 
b/doc/devel/rfc_final/Roadmap-first.txt @@ -0,0 +1 @@ +../rfc/Roadmap-first.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ScriptingLanguage.txt b/doc/devel/rfc_final/ScriptingLanguage.txt new file mode 120000 index 000000000..085085137 --- /dev/null +++ b/doc/devel/rfc_final/ScriptingLanguage.txt @@ -0,0 +1 @@ +../rfc/ScriptingLanguage.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/ThreadsSignalsAndImportantManagementTasks.txt b/doc/devel/rfc_final/ThreadsSignalsAndImportantManagementTasks.txt new file mode 120000 index 000000000..239615881 --- /dev/null +++ b/doc/devel/rfc_final/ThreadsSignalsAndImportantManagementTasks.txt @@ -0,0 +1 @@ +../rfc/ThreadsSignalsAndImportantManagementTasks.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/TimeHandling.txt b/doc/devel/rfc_final/TimeHandling.txt new file mode 120000 index 000000000..b0bbe82a4 --- /dev/null +++ b/doc/devel/rfc_final/TimeHandling.txt @@ -0,0 +1 @@ +../rfc/TimeHandling.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/TimelineSequenceOutput.txt b/doc/devel/rfc_final/TimelineSequenceOutput.txt new file mode 120000 index 000000000..c16a20c51 --- /dev/null +++ b/doc/devel/rfc_final/TimelineSequenceOutput.txt @@ -0,0 +1 @@ +../rfc/TimelineSequenceOutput.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/VersionNumberScheme.txt b/doc/devel/rfc_final/VersionNumberScheme.txt new file mode 120000 index 000000000..40cf75236 --- /dev/null +++ b/doc/devel/rfc_final/VersionNumberScheme.txt @@ -0,0 +1 @@ +../rfc/VersionNumberScheme.txt \ No newline at end of file diff --git a/doc/devel/rfc_final/index.txt b/doc/devel/rfc_final/index.txt new file mode 100644 index 000000000..a29cff26d --- /dev/null +++ b/doc/devel/rfc_final/index.txt @@ -0,0 +1,13 @@ +Accepted Design Proposals +========================= + +//Menu: label accepted +//Menu: sort children + +-> read link:../rfc.html[more about Lumiera RfC] and the Design Process + +The RfC 
entries listed here where discussed, modified and finally agreed upon +during some developers meeting in the past. + +So they represent design decisions taken and can be considered binding. + diff --git a/doc/devel/rfc_parked/ClipCatalogingSystem.txt b/doc/devel/rfc_parked/ClipCatalogingSystem.txt deleted file mode 100644 index 8149c23ac..000000000 --- a/doc/devel/rfc_parked/ClipCatalogingSystem.txt +++ /dev/null @@ -1,128 +0,0 @@ -Design Process : Clip Cataloging System -======================================= - -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-07-26_ -*Proposed by* link:JordanN[] -------------------------------------- - -Clip Cataloging System ------------------------ - -A system for storing, organizing, and retrieving assets, such as images and -videos. - - -Description -~~~~~~~~~~~ - -Organizations that work with video, and even home users, tend to have massive -collections of stock videos and images that they will need to find and use in -their projects. A Linux-based system is needed to help them to organize, tag, -and retrieve assets from those collections. Being able to find the clips the -user needs and bring them into his timeline, will mean that the user will be -able to more rapidly complete his project. - -This could be implemented as a separate application, but integrated for use in -a Linux-based video workflow suite, including apps like Lumiera and Blender. - - -Tasks -~~~~~ - -* Identify ways in which existing groups organize their collections. -* Determine pros / cons of each method -* Implement a solution that will be modular enough for other content creation - projects to also use - - -Pros -~~~~ - -* Faster, more efficient workflow - - -Cons -~~~~ -Not directly a part of Lumiera. If not implemented separately, could cause -undue bloat. - - -Alternatives -~~~~~~~~~~~~ -Storage-based organization. User must remember where files are, and must store -them correctly. 
Not clip-based, so the entire video must be imported and the -desired portion selected. - - -Rationale -~~~~~~~~~ - - -Comments --------- - -* Such is planned, but as you pointed out, this would be a rather standalone - application which needs a lot of efforts to be implemented. We don't have the - development power to do that now. If someone wants to work on that, please - contact me. General idea is to put all kinds of resources (Footage, Clips, - Effects, Subprojects, Sounds ....) into a database with then gets - tagged/attributed in different ways (implicit things like 'filename', 'type', - 'length'; automatic deduceable things like 'Exposure', 'Timecode', ...; And - manual tags like: who was on set, location, ....). Then present this all in a - *good* GUI (by default just showing filesysten like) but one can define - queries on this database and the generated views will then be storeable. -Back to Lumiera, for now we will likely just use 'normal' file open dialogs -until the above system becomes available. - -- link:ct[] [[DateTime(2008-07-26T08:31:42Z)]] -* Yes, it's indeed an important feature we should care for. But cehteh is - right, we have more important things to do first. But feel free to target it. -* Also, we'd need integration with production support systems, for example - http://celtx.com/[CELTX]. -* The interface to the Lumiera App would be to populate the asset manager with - the required assets - -- link:Ichthyostega[] [[DateTime(2008-07-27T22:19:38Z)]] - - -Videos, Audio, Clips and Resources Manager by using plugins for FOSS GPL -"Library & Collections Management" programs. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The video and audio raw material, clips, etc could be managed using code that -is already available in project that carry out the same tasks. For example as -library managers, or media (video, audio or CD) collections, Integrated -Library Systems (ILS). 
- -Examples of a library management program ; - -. Kete - http://kete.net.nz/[] -. Koha - http://www.koha.org/[] -. link:GreenStone[] - http://www.greenstone.org/[] -. Evergreen - http://open-ils.org/faq.php[] - -An additional benefit to using "library" managers, is that it can handle -interloans, referencing of "other" (people's/organization's) libraries, -numbering systems, descriptions, and classifications, thousands to millions of -items, search systems, review and comment systems, plus the benefits of open -source that allow the expansion of features easily. The use of task oriented -programs in this way, makes use of established code, that has been developed by -experts in their field. Any database system would be useful for managing all -these media. But one that has been developed by the people that have been -working with cataloging systems for a long time is likely to do well. Plus it -can be readily improved, by people who do not have to know the first thing -about how to design video editing programs. The program also gets improved -because of it own community, which adds features or performance to Lumiera, -without even having to "drive" the development.. ---link:Tree[][[DateTime(2008-08-27T20:38:00NZ)]]. 
- -'''' - -Parked until someone cares -~~~~~~~~~~~~~~~~~~~~~~~~~~ -Decided on Developer meeting - - Do 14 Apr 2011 02:52:30 CEST Christian Thaeter - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/ClipCatalogingSystem.txt b/doc/devel/rfc_parked/ClipCatalogingSystem.txt new file mode 120000 index 000000000..1f0872d0b --- /dev/null +++ b/doc/devel/rfc_parked/ClipCatalogingSystem.txt @@ -0,0 +1 @@ +../rfc/ClipCatalogingSystem.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/DelectusShotEvaluator.txt b/doc/devel/rfc_parked/DelectusShotEvaluator.txt deleted file mode 100644 index 0d5b55a88..000000000 --- a/doc/devel/rfc_parked/DelectusShotEvaluator.txt +++ /dev/null @@ -1,397 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-09-21_ -*Proposed by* link:nasa[] -------------------------------------- - - -Delectus Shot Evaluator ------------------------ -This is a brain dump about the shot evaluator subproject. - - -Description -~~~~~~~~~~~ - - -Brainstorm on Delectus -~~~~~~~~~~~~~~~~~~~~~~ -Some (many) of the ideas presented herein come from the various parties -involved in the Lumiera discussion list and IRC channel #lumiera. -http://lists.lumiera.org/pipermail/lumiera/2008-September/000053.html[] -- the -main discussion thread - -Additionally, a lot of great concepts for how to streamline the interface are -derived in part from link:KPhotoAlbum[]. - -I use tags, keywords, and metadata almost interchangeably, with the exception -that metadata includes computer generated metadata as well. These are not tags -in the conventional sense -- they don't have to be text. In fact the planned -support (please add more!) 
is: - - * Text -- both simple strings (tags) and blocks - * Audio -- on the fly (recorded from the application) or pregenerated - * Video -- same as audio - * Link -- back to a Celtx or other document resource, forward to a final cut, - URL, etc - * Still image -- inspiration image, on set details, etc - * ID -- such as the serial number of a camera used, the ISBN of a book to be - cited, etc - -As such, the tags themselves can have metadata. You can see where this is -going... - -Also, the tags are applied to "clips" -- which I use interchangeably between -source material imported into the application and slice of that material that -tags are applied to. Any section of a video or audio source can have tags -applied to it. - - -Two key functions: assign metadata and filter by metadata. - -clips are one thing; but in reality most clips are much longer than their -interesting parts. Especially for raw footage, the interesting sections of a -clip can be very slim compared to the total footage. Here is a typical workflow -for selecting footage: - -. Import footage. -. Remove all footage that is technically too flawed to be useful. -. Mark interesting sections of existing clips, possibly grouped into different - sections. -. Mark all other footage as uninteresting. -. Repeat 3-4 as many times as desired. - -Some key points: - - * Import and export should be as painless and fast as possible. - * Technically flawed footage can be both manual and computer classified. - * In some cases (e.g. documentaries, dialog) audio and video clips/footage can - follow different section processes. - It is possible to use video from footage with useless audio or use audio - from footage with useless video. - * "Interesting" is designed to be broad and is explained below. - * steps 2-5 can be performed in parallel by numerous people and can span many - different individual clips. - -In simple editors like Kino or iMovie, the fundamental unit used to edit video -is the clip. 
This is great for a large number of uses, such as home videos or -quick Youtube postings, but it quickly limits the expressive power of more -experienced engineers in large scale productions (which are defined for the -purposes of this document to include more than 2 post-production crew members). -The clip in those editors is trimmed down to include only the desired footage, -and these segments are coalesced together into some sort of coherent mess. - -The key to adequate expressive power is as follows: - - * Well designed, fast metadata entry. Any data that can be included should by - default, and ideally the metadata entry process should run no less than - about 75% as fast as simple raw footage viewing. Powerful group commands - that act on sections of clips and also grouping commands that recognize the - differences between takes and angles (or individual mics) enhance and speed - up the process. - * Good tools to classify the metadata into categories that are actually - useful. Much of the metadata associated with a clip is not actively used in - any part of the footage generation. - * Merging and splicing capabilities. The application should be smart enough to - fill in audio if the existing source is missing. For example, in a recent - project I was working on a camera op accidently set the shotgun mike to test - mode, ruining about 10% of the audio for the gig. I was running sound, and - luckily I had a backup copy of the main audio being recorded. This - application should, when told that these two are of the same event at the - same time, seamlessly overlay the backup audio over the section of the old - audio that has been marked bad and not even play the bad audio. This is just - background noise, and streamlining the immense task of sorting through - footage needs to be simplified as much as possible. - * Connection to on site documentation and pre-production documentation. 
When - making decisions about what material to use and how to classify it, it is - essential to use any tools and resources available. The two most useful are - onsite documentation (what worked/didn't work, how the weather was, pictures - of the setup, etc all at the shoot) and pre-production (what the ideal scene - would be, what is intended, etc). Anything else that would be useful should - be supported as well. - * Be easily accessible when making the final cut. Lumiera is, if the - application gets up to speed, going to serve primarily to render effects, - finalize the cut, and fine tune what material best fits together. Any - metadata, and certainly any clipping decisions, should be very visible in - Lumiera. - * Notes, notes, notes! The application should support full multimedia notes. - These differ from (4) in that they are generated during the CLASSIFICATION - process, not before. This fits in with (5) as well -- Lumiera should display - these notes prominently on clip previews. The main way for multiple parties - to communicate and even for a single person to stay organized is to add in - notes about tough decisions made and rationale, questionable sections, etc. - These notes can be video, audio, text, etc from one of the clips, from the - machine used to edit (such as using a webcam or microphone), or over the - network (other people's input). - - -Too technically flawed -^^^^^^^^^^^^^^^^^^^^^^ -A clip is said to be too technically flawed if it has no chance of making it to -the final product whatsoever. This does not, however, preclude its use -throughout the post-production process; for example, part of a clip in which -the director describes his vision of the talent's facial expression in a -particular scene is never going to make it into the final product, but is -invaluable in classifying the scene. In this case, the most reasonable place to -put the clip would be as a multimedia note referenced by all takes/angles of -the scene it refers to. 
- -As mentioned above, flawed video doesn't necessarily mean flawed audio or -vice-versa. - - -Interesting -^^^^^^^^^^^ -An "interesting" clip is one that has potential -- either as a metadata piece -(multimedia note, talent briefing, etc) or footage (for the final product OR -intermediary step). The main goal of the application is to find and classify -interesting clips of various types as quickly as possible. - - -Parallel Processing -^^^^^^^^^^^^^^^^^^^ -Many people, accustomed to different interfaces and work styles, should be able -to work on the same project and add interactive metadata at the same time. - - -Classification interface -++++++++++++++++++++++++ -The classification interface is divided into two categories: technical and -effective. Technical classification is simply facts about a clip or part of a -clip: what weather there is, who is on set, how many frames are present, the -average audio level, etc. Effective classification allows the artist to express -their feelings of the subjective merits (or failures) of a clip. - - -DCMS -^^^^ -The project is organized around a distributed content management system which -allows access to all existing materials at all times. Content narrowing allows -for a more digestible amount of information to process, but everything is -non-destructive; every change to the clip structure and layout is recorded, -preferably with a reason as to why it was necessary or desired. - - -Content narrowing -^^^^^^^^^^^^^^^^^ -With all of the information of an entire production available from a single -application, information overload is easy. Content narrowing is designed to fix -that by having parts of individual clips, metadata, or other files be specific -to one aspect of the overall design. This allows for much more successful use -of the related information and a cleaner, streamlined layout. 
As an example, -metadata involving file size has no effect whatsoever on the vast majority of -most major decisions -- the answer is almost always "whatever it takes." Thus, -it would not appear most of the time. Content narrowing means that it is easy -to add back footage -- "widen the view" one step, add it back, and "narrow the -view" again. - - -Multiple cuts -^^^^^^^^^^^^^ -There is no need to export a final cut from this application; it merely is the -first step in the post-production chain. It is the missing link between -receiving raw footage from the camera and adding the well executed scenes to -the timeline. What should come out of the application is a classification of - - -Situational, take, and instance tagging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This is VERY powerful. The first step to using the application is to mark which -scenes are the same in all source clips -- where same means that they contain -sections which would both not run. This can include multiple takes, different -microphones or camera angles, etc. The key to fast editing is that the -application can edit metadata for the situation (what is actually going on IN -THE SCENE), take (what is actually going on IN THIS SPECIFIC RUN), and instance -(what is actually going on IN THIS CLIP). If editing a situation, the other -referenced clips AUTOMATICALLY add metadata and relevant sections. This can be -as precise and nested as desired, though rough cuts for level one editing -(first watchthrough after technically well executed clips have been selected) -and more accurate ones for higher levels is the recommended method. - - -Subtitling -^^^^^^^^^^ -This came up on the discussion list for Lumiera, and it will be supported, -probably as a special tag. - - -nasa's Laws of Tagging -^^^^^^^^^^^^^^^^^^^^^^ -. There is always more variety in data than tags. There are always more - situations present in the data than can be adequately expressed with any - (reasonable) number of tags. 
This is OK. All that is needed is the minimum - set of unique tags to progress to the next cycle without losing editing - intent or the ability to rapidly evaluate many situations. -. Many tags are used many times. "Outdoors" will be a very, very common tag; so - will "redub." If conventional names are decided upon and stuck to, it is - significantly easier to map the complex interactions between different - content situations. -. Avoid compound tags. Do not have "conversation_jill_joe" as a tag; use - "conversation," "jill," and "joe" instead. It is very easy to search for - multiple tags and very hard to link data that doesn't use overlapping tags. - - - - - - - - - - -The interface -- random idea -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This is not meant to be a final interface design, just something I wrote up to -get ideas out there. - -key commands - mutt/vim-style -- much faster than using a mouse, though GUI supported. - Easy to map to joystick, midi control surface, etc. -Space stop/start and tag enter Tab (auto pause) adds metadata special Tracks -have letters within scenes -- Audio[a-z], Video[a-z], Other[a-z] (these are not -limits) -- or names. Caps lock adds notes. This is really, really fast. It -works anywhere. This means that up to 26 different overlapping metadata -sections are allowed. - -Prompting Prompting for metadata is a laborious, time-consuming process. There -is no truly efficient way to do it. This application uses a method similar to -link:KPhotoAlbum[]. When the space key is held and a letter is pressed, the tag -that corresponds to that letter is assigned to the track for the duration of -the press. (If the space is pressed and no other key is pressed at the same -time, it stops the track.) 
For example, suppose that the following mapping is -present: -o = outside -x = extra -p = protagonist -c = closeup - -Then holding SPACE over a section and pressing one of these keys would assign -the tag to the audio AND video of the section over which the space was held. If -instead just the key is pressed (without space being held), that tag is -assigned to the section over which it is held. This is very fast and maps well -to e.g. PS3 controller or MIDI control. - -If LALT is held down instead of SPACE, the audio is effected instead. If RALT -is held, just the video is effected. - -In order to support scenario/take/clip tagging: - The default is situation. If the keybinding to x is: - x = t:extra ; effect only take - x = ts:extra ; effect take and scenario - x = c:extra ; extra only visible in this clip! - x = tc:extra ; this take and clip show the extra - etc - -Other keyargs (the part in front of the colon) can be added to account for -other uses (e.g. l = all taken on the same location). - -Tab is pressed to add metadata mappings. Tab is pressed to enter metadata edit -mode; this pauses video. Then press any key to map; and type the tag to -associate (with space, multiple tags can be added.). The following specials are -defined: - [:keyarg:]:TAG is special tagging for scenario/take/clip. - !TAG removes TAG if it is present. This is useful because it allows huge - sections of the clip to be defined as a certain tag, then have parts - removed later. - a:TAG applies TAG only to the audio. - v:TAG applies TAG only to the video. - p:PATH adds a link to PATH as a special tag. - -(This will have a nice GUI as well, I just will always use the keyboard method - so I am describing it first. Mapping configurations can be stored in a - separate file, as a user config, or in the specific project.) - -If ESC is pressed, all currently ranged tags are ended. - -Finally, if single_quote is pressed without SPACE or {L,R}ALT down, it marks an -"interesting location." 
Pressing SHIFT+single_quote goes to the next -"interesting location" and pressing CNTRL+' goes to the previous "interesting -location." This allows for very quick review of footage. - - - - - - - - - - - - - - - -Comments --------- - - -Rating - Quantitative Rating as well as Qualitative Tagging -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The importance/value of the video for various factors uses, can vary through -the video. It would be helpful to have the ability to create continuous ratings -over the entire track. Ratings would be numerical. Automatic clip -selection/suggestion could be generated by using algorithms to compute the -usefulness of video based on these ratings (aswell as "boolean -operations"/"binary decisions" done with tags). The ratings could be viewed -just like levels are - color coded and ovelayed on track thumbnails. - -- Tree 2008-10-25 - - -link:MultiView[] - useful for concurrent ratings input -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It would be convenient to have an ability to view the different tracks (of the -same scene/time sequence) at once, so the viewer can input their ratings of the -video "on the fly", including a priority parameter that helps decide which -video is better than what other video.See the GUI brainstorming for a viewer -widget, and key combinations that allow both right and left hand input, that -could be used for raising/lowing ratings for up to six tracks at once. - -- Tree 2008-10-25 - - -I like the idea of rating clips (or rather, takes) a lot. It would be cool to -include both "hard," "relative," and "fuzzy" rating. Hard is an exactly defined -value (scaled 0-1) that puts the clip in an exact location in the queue. -Relative means that one is higher or lower rated than another. Fuzzy is a -slider which is approximate value, and there is some randomness. The best part -is that these can be assigned to hardware sliders/faders. 
Pressure sensitive -buttons + fuzzy ratings = really easy entry interface. Just hit as hard as -needed! Multiple tracks at once also an astounding idea. I could image some -sort of heap (think binary heap, at least for the data structure) which -determines the priorities and decides which clips are played. Then the highest -rated clips are played first, down to the worst. - -- link:NicholasSA[] 2009-01-04 - - -Possible Collaboration with the people from Ardour? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -I guess if the thing can do all the things we talked about here, it would be -perfectly suitable for sound classification too, and maybe could fill another -gap in FOSS: Audio Archival Software, like this: -http://www.soundminer.com/SM_Site/Home.html[] (which is very expensive)... -maybe the Ardour people would be interested in a collaboration on this? - -I like the suggestion of sound classification with a similar (or, even better, -identical) evaluator. link:SoundMiner[] looks interesting, but like you say -very expensive. I'm a sound guy, so I feel your pain... - -- link:NicholasSA[] 2009-01-04 - - -Parked -~~~~~~ - -Decided on Developer meeting, until someone wants to investigate this further. 
- - Do 14 Apr 2011 02:52:30 CEST Christian Thaeter - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/DelectusShotEvaluator.txt b/doc/devel/rfc_parked/DelectusShotEvaluator.txt new file mode 120000 index 000000000..d6de4100e --- /dev/null +++ b/doc/devel/rfc_parked/DelectusShotEvaluator.txt @@ -0,0 +1 @@ +../rfc/DelectusShotEvaluator.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/DesignRenderNodesInterface.txt b/doc/devel/rfc_parked/DesignRenderNodesInterface.txt deleted file mode 100644 index 201c34a72..000000000 --- a/doc/devel/rfc_parked/DesignRenderNodesInterface.txt +++ /dev/null @@ -1,128 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-03-06_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - -Design the Render Nodes interface ---------------------------------- -In the current design, the low-level model is comprised of "Render Nodes"; -Proc-Layer and Backend carry out some colaboration based on this node network. - + -Three different interfaces can be identified - * the node wiring interface - * the node invocation interface - * the processing function interface - - -Description -~~~~~~~~~~~ -Render Nodes are created and wired by the Builder in the Proc-Layer. On the -other hand, the rendering process is controlled by the backend, which also -provides the implementation for the individual data processing tasks. To create -a result, output nodes are ''pulled'' via the invocation interface, resulting -in the affected nodes to recursively pull their predecessor(s). In the course -of this call sequence, the nodes activate their processing function to work on -a given set of buffers. Moreover, we plan to use the render network also for -gathering statistics. - -'''Note''': Render Node is an internal interface used by Proc-Layer and - activated by the Backend. Plugins are planned to be added via Adapter nodes. 
- Thus the Render Node interface needs ''not'' to be exported. - - -the wiring interface -^^^^^^^^^^^^^^^^^^^^ -This part of the design defines how nodes can be combined and wired up by the -builder to form a network usable for rendering. For this purpose, the -link:ProcNode[] is used as a shell / container, which is then configured by a -const WiringDescriptor. Thus, the node gets to know its predecessor(s) and is -preselected to use a combination of specific working modes: - - * participate in caching - * calculate in-place - * source reading - * (planned) use hardware acceleration - * (planned) remote dispatched calculation - -Most nodes will just have a single predecessor, but we can't limit nodes to a -single input, because there are some calculation algorithms which natively need -to work on several data streams simultaneously. This means, a single node can -be involved into the calculations for multiple streams (several pull calls on -the same frame number but for different channel, and in each case maybe a -different output node). I decided to rely solely on the cache for avoiding -duplicate calculations caused by this complication, because I deem it to be an -corner case. - - -the invocation interface -^^^^^^^^^^^^^^^^^^^^^^^^ -this is intended to be a rather simple "call-style" interface, without much -possibilites to influence the way things are happening. You pull a node and -will find the results in a provided buffer or the cache, but you can't even -change the frame data type type of the result. Besides the node invocation, -functions for collecting statistics will be accessible here too (Probably these -functions will be ''implemented'' in a classic-OO fashion by virtual functions, -but that's another story) - - -the processing interface -^^^^^^^^^^^^^^^^^^^^^^^^ -the individual nodes are configured to call a plain-C {{{process()}}} function -and provide an array of buffer pointers to be used within this function. 
For -the purpose of invoking actual data processing, it is irrelevant if this -function is implemented somewhere in the backend or provided by a plugin. At -this point, no type- and other meta-information is passed, rather the -processing function is supposed to do The Right Thing ^TM^ - - - -Tasks -^^^^^ - * What services do we expect from Render Nodes. What do we plan to do with a - render node? - * What different kinds (if any) of Render Nodes can be foreseen? - * order the required functionality by Proc / Backend. Find out specific - implementation constraints. - * work out a design based on this informations - - - - - - - - - -Rationale -~~~~~~~~~ -The purpose of this Design Entry is to give a summary; the questions and the -details of carrying out the operations are much more involved. - + -Please see the -http://www.lumiera.org/wiki/renderengine.html#Rendering[Proc-Layer impl -documentation (TiddlyWiki)] and the -http://www.lumiera.org/gitweb?p=lumiera/ichthyo;a=blob;f=src/proc/engine/procnod -.hpp;h=9cf3a2ea8c33091d0ee992ec0fc8f37bb5874d34;hb=refs/heads/proc[Source Code] -for details -(and/or contact Ichthyo for in-depth discussion of those technical details) - - - - - - -Comments --------- - - -Parked -~~~~~~ -We park this until we have time to revisit the details. It is accepted that we -need to design this interfaces. 
- - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/DesignRenderNodesInterface.txt b/doc/devel/rfc_parked/DesignRenderNodesInterface.txt new file mode 120000 index 000000000..bb91c30fe --- /dev/null +++ b/doc/devel/rfc_parked/DesignRenderNodesInterface.txt @@ -0,0 +1 @@ +../rfc/DesignRenderNodesInterface.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt b/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt deleted file mode 100644 index 06b9fe814..000000000 --- a/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt +++ /dev/null @@ -1,195 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-09-03_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - - -Describe pluggable modules by a "Feature Bundle" ------------------------------------------------- -This proposal builds upon Cehteh's Plugin Loader, which is the fundamental -mechanism for integrating variable parts into the application. - -It targets the special situation when several layers have to cooperate in order -to provide some pluggable functionality. The most prominent example are the -"effects plugins" visible for the user. Because, in order to provide such an -effect - - * the engine needs a processing function - * the builder needs description data - * the gui may need a custom control plugin - * and all together need a deployment descriptor detailing how they are - related. - - - - -Description -~~~~~~~~~~~ -The Application has a fixed number of *Extension Points*. Lumiera deliberately -by design does _not build upon a component architecture_ -- which means that -plugins can not themselves create new extension points and mechanisms. New -extension points are created by the developers solely, by changing the code -base. 
Each extension point can be addressed by a fixed textual ID, e.g. -"Effect", "Transition", .... - -Now, to provide a pluggable extension for such an Extension Point, we use a -*Feature Bundle* Such a Feature Bundle is comprised of - - * a Deployment Descriptor (provided as "structured data" -- TODO: define the - actual data format) - * the corresponding resources mentioned by this Deployment Descriptor - -The Deployment Descriptor contains - - * Metadata describing the Feature Bundle - - ID of the Extension point - - ID of the Bundle (textual ID) - - ID of origin / provider (could be a domain name) - - Category (textual, tree-like) - - Version number (major, minor) - - required Extension point version number (or Lumiera version no.?) - - Author name (utf8) - - Support email (utf8) - - textual description in a single line (utf8) - * A List of Resources, each with: - - ResourceID - - SubID - - Type of Resource, which may be - . Plugin - . Properties - . Script - . ...? - - - one of: - . the Resource provided inline in suitable quoted form (for textual - resources only) - . an URL or path or similar locator for accessing the Resource (TODO: - define) - - Additional Metadata depending on Type of Resource (e.g. the language of a - script) - - - -We do _not_ provide a meta-language for defining requirements of an Extension -Point, rather, each extension point has hard wired requirements for a Feature -Bundle targeted at this extension point. There is an API which allows code -within lumiera to access the data found in the Feature Bundle's Deployment -Descriptor. Using this API, the code operating and utilizing the Extension -Point has to check if a given feature bundle is usable. - -It is assumed that these Feature Bundles are created / maintained by a third -party, which we call a *Packager*. This packager may use other resources from -different sources and assemble them as a Feature Bundle loadable by Lumiera. 
Of -course, Lumiera will come with some basic Feature Bundles (e.g. for colour -correction, sound panning,....) which are maintained by the core dev team. -(please don't confuse the "packager" mentioned here with the packager creating -RPMs or DEBs or tarballs for installation in a specific distro). Additionally, -we may allow for the auto-generation of Feature Bundles for some simple cases, -if feasible (e.g. for LADSPA plugins). - - -The individual resources -^^^^^^^^^^^^^^^^^^^^^^^^ -In most cases, the resources referred by a Feature Bundle will be Lumiera -Plugins. Which means, there is an Interface (with version number), which can be -used by the code within lumiera for accessing the functionality. Besides, we -allow for a number of further plugin architectures which can be loaded by -specialized loader code found in the core application. E.g. Lumiera will -probably provide a LADSPA host and a GStreamer host. If such an adapter is -applicable depends on the specific Extension point. - -The ResourceID is the identifyer by which an Extension point tries to find -required resources. For example, the Extension Point "Effect" will try to find -an ResourceID called "ProcFunction". There may be several Entries for the same -ResourceID, but with distinct SubID. This can be used to provide several -implementations for different platforms. It is up to the individual Extension -Pont to impose additional semantic requirements to this SubID datafield. (Which -means: define it as we go). Similarly, it is up to the code driving the -individual Extension point to define when a Feature Bundle is fully usable, -partially usable or to be rejected. For example, an -"Effect" Feature Bundle may be partially usable, even if we can't load any - "ProcFunction" for -the current platform, but it will be unusable (rejected) if the proc layer -can't access the properties describing the media stream type this effect is -supposed to handle. 
- -Besides binary plugins, other types of resources include: - * a set of properties (key/value pairs) - * a script, which is executed by the core code using the Extension Point and - which in turn may access certain interfaces provided by the core for "doing - things" - -Probably there will be some discovery mechanism for finding (new) Feature -Bundles similar to what we are planning for the bare plugins. It would be a -good idea to store the metadata of Feature Bundles in the same manner as we -plan to store the metadata of bare plugins in a plugin registry. - - - - -Tasks -^^^^^ - - -Pros -^^^^ - - - -Cons -^^^^ - - - -Alternatives -^^^^^^^^^^^^ -Use or adapt one of the existing component systems or invent a new one. - - - -Rationale -~~~~~~~~~ -The purpose of this framework is to decouple the core application code from the -details of accessing external functionality, while providing a clean -implementation with a basic set of sanity checks. Moreover, it allows us to -create an unique internal description for each loaded module, and this -description data e.g. is what is stored as an "Asset" into the user session. - -Today it is well understood what is necessary to make a real component -architecture work. This design proposal deliberately avoids to create a -component architecture and confines itself to the bare minimum needed to avoid -the common maintenance problems. As a guideline, for each flexibility available -to the user or packager, we should provide clearly specified bounds which can -be checked and enforced automatically. Because our main goal isn't to create a -new platform, framework or programming language, it is sufficient to allow the -user to _customize_ things, while structural and systematic changes can be done -by the lumiera developers only. - - - - - - -Comments --------- - -From a fast reading, I like this, some things might get refined. 
For example -I'd strongly suggest to make the Deployment Descriptor itself an Interface -which is offered by a plugin, all data will then be queried by functions on -this interface, not by some 'dataformat'. Also Resource ID's and a lot other -metadata can be boiled down to interfaces: names, versions, uuid of these -instead reiventing another system for storing metadata. My Idea is to make the -link:Plugin/Interface[] system self-describing this will also be used to -bootstrap a session on itself (by the serializer which is tightly integrated) - -- link:ct[] [[DateTime(2008-09-04T09:28:37Z)]] 2008-09-04 09:28:37 - -Parked -~~~~~~ -Needs to ne reviewed some time later. - - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt b/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt new file mode 120000 index 000000000..fb7a2d2c2 --- /dev/null +++ b/doc/devel/rfc_parked/FeatureBundle_PluggableModules.txt @@ -0,0 +1 @@ +../rfc/FeatureBundle_PluggableModules.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/GitSubmoduleTransistion.txt b/doc/devel/rfc_parked/GitSubmoduleTransistion.txt deleted file mode 100644 index fdbcd69db..000000000 --- a/doc/devel/rfc_parked/GitSubmoduleTransistion.txt +++ /dev/null @@ -1,87 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-04-09_ -*Proposed by* link:ct[] -------------------------------------- - - -Use Git Submodules to organize the project ------------------------------------------- -We planned this long time ago when the project started, this proposal is for to -work out the details and define a turnover point in time. - - -Description -~~~~~~~~~~~ -There is a git-filter-branch command which helps in doing the dirty work -isolating commits which touch certain dirs. 
This can moderately easily be used -to create a new repository with a rewritten history containing only sub parts -of the original history. - -The basic idea is that one developer who wants to works on a certain subsystem -clones the 'official' master and then updates and tracks only the development -state of a certain subsystem. - - -Tasks -^^^^^ - * what shall be in the master repository? - * boilerplate files, license, build infrastructure - * the _admin_ dir with supplemental scripts - * define which submodules shall be defined? - * _doc/devel_ - * _doc/user_ - * _wiki_ - * _uml_ - * _src/backend_ - * _src/proc_ - * _src/gui_ - * _src/lib_ - -Not yet decided: - * _tests_ move them into the _src/$subsystem_ as symlink? - * _src/tool_ - - -Pros -^^^^ - * better isolation of single subprojects - * one who is interested on one subproject can track a master and only - following certain subproject updates - * smaller/faster updates/downloads - - -Cons -^^^^ - * needs some more git-fu to be used by the developers - * we will host considerably more git repositories (bigger list in gitweb), - this is not a problem but might look more confusing - - - -Alternatives -^^^^^^^^^^^^ -Go as we do currently with one big repository per developer. The decision to -use submodules is not urgend and it can be transfered at any time. The -turnaround should just be planned and be scheduled to one day to minimize the -confusion and merging issues. - - -Rationale -~~~~~~~~~ -When all people get used to it it allows a cleaner more sane work flow and well -isolated, less conflicting commits. - - - - - -Comments --------- - -We concluded that that submodules are not yet needed with exception for the -./doc folder. Parked for now. 
- -- ct 2008-07-26 09:09:57 - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/GitSubmoduleTransistion.txt b/doc/devel/rfc_parked/GitSubmoduleTransistion.txt new file mode 120000 index 000000000..1205cf6ba --- /dev/null +++ b/doc/devel/rfc_parked/GitSubmoduleTransistion.txt @@ -0,0 +1 @@ +../rfc/GitSubmoduleTransistion.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt b/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt deleted file mode 100644 index b594cc3b2..000000000 --- a/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt +++ /dev/null @@ -1,112 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2009-01-14_ -*Proposed by* link:ct[] -------------------------------------- - - -Normalized Device Coordinates ------------------------------ - -AkhIL pointed me out to some blender problem and how renderman fixes that. We -should use this too. - - - - -Description -~~~~~~~~~~~ - -Just snippet from IRC log: - ------------------------------------------------------------- -[15:09] and I hope lumiera will use some resolution independend - measuring for all parameters -[15:09] one can rotate where the node actually sits -[15:09] like NDC -[15:09] or pass transistions through the renderpipe, make all effects - transisition aware and apply them at the end -[15:10] the later is better but needs more efforts and some rethinking -[15:10] we will prolly support both in lumiera :) -[15:11] in renderman's NDC for horizontal image with 4:3 aspect ration - (-1.33,-1) is lower-left corner and (1.33,1) upper-right -[15:11] ah -[15:11] so moving to different resolutions and different aspect ratios - in renderman makes no problems -[15:11] well good point, we will measure in pixel but need to convert - between them . using a float would be good to address pixels -[15:12] yes -[15:12] what stands NDC for? 
-[15:13] Normalized Device Coordinates -[15:14] ok -[15:14] so from -1 to 1 is a range by smallest image size -[15:15] yes sounds reasonable -[15:15] * cehteh adds a note to the lumiera design docs -[15:15] so far we dont do anything where it matters .. but that will - come -[15:16] when you move some logo to (0.8,-0.8) it will stay on screen - even when you chenge resolution and image aspect ratio -[15:17] all input images should be scaled to this range (-1,1) by - smalles side - ------------------------------------------------------------- - - - -Tasks -^^^^^ - - - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ - - - -Cons -^^^^ - - - -Alternatives -^^^^^^^^^^^^ - - - -Rationale -~~~~~~~~~ -TBD - - - - - -Comments --------- - -One issue where I always assumed we'd need to define something of this sort is -for proxy editing. Especially this is a problem in conjunction with masks. -Basically, this means a bit more of "vector graphics". With film/video editing, -this was rather unusual, but with the advent of more and new digital video/film -formats it gets more and more important. Also, our considerations regarding -time handling and quantisation to single frames somewhat fit into this line of -thought. Up to now, rather the standard way of thinkin was to use a "project -framerate" and a fixed resolution in pixels. But we certainly can do better. - - -- Ichthyostega 18:09:50 - - -Parked -~~~~~~ -deferred for later, generally accepted. 
- - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt b/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt new file mode 120000 index 000000000..97174abca --- /dev/null +++ b/doc/devel/rfc_parked/NormalizedDeviceCoordinates.txt @@ -0,0 +1 @@ +../rfc/NormalizedDeviceCoordinates.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/RenderOptimizer.txt b/doc/devel/rfc_parked/RenderOptimizer.txt deleted file mode 100644 index 74604e77e..000000000 --- a/doc/devel/rfc_parked/RenderOptimizer.txt +++ /dev/null @@ -1,100 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2007-06-07_ -*Proposed by* link:ct[] -------------------------------------- - -Render Optimizer ----------------- -Render only parts of a frame which are necessary for the Output; Optimize -render pipeline for efficiency - - -Description -~~~~~~~~~~~ -This Idea is just stored here for later reference/implementation. - -Effects give some information on which data their output depends (like -transitions, temporal dependencies, color/alpha etc) and what the operation -costs. Based on this information we optimize the render pipeline, for example -if the output is a zoom, then we only need to calculate the parts of a frame -which will be viewable in the output (plus some more dependencies, like blur -has radius and so on). Further in some cases it might be favorable to reorder -some effects for the actual render process, as long it would produce the same -output as the original sequence of effects. - - - - - -Tasks -^^^^^ - - - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ - - - -Cons -^^^^ - - - -Alternatives -^^^^^^^^^^^^ - - - -Rationale -~~~~~~~~~ - - - - - - -Comments --------- - -Possible classification for video filters: - 1. The filter only changes the color of each pixel in the same way - 2. 
The filter deforms the image but leaves the color - 3. The filter makes complex things. The only additional hint it can export is - the - number of referenced past frames, if such a limit exists (sometimes it - doesn't). - -Filters of type 1 and type 2 never use any previous frames, and are strictly -one frame in - one frame out. Filters of type 1 can always be swapped with -filters of type 2, the output is the same. All other filters cannot be swapped -in general. - -The good news is, that: - - 1. All commonly used filters are either type 1 or type 2 - (type 3 are more the fun effects) - 2. Filters of type 2 are colormodel agnostic - 3. If a filter of type 1 makes only linear transformations of the color - vectors (new_color = matrix * old_color), - the matrix can be transformed from e.g. RGB to YUV, so these filters can - always work in both colorspaces directly - - -Parked -~~~~~~ -Generally this is accepted but needs some more polishing when we go over it. - - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - - - - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/RenderOptimizer.txt b/doc/devel/rfc_parked/RenderOptimizer.txt new file mode 120000 index 000000000..6c80c340f --- /dev/null +++ b/doc/devel/rfc_parked/RenderOptimizer.txt @@ -0,0 +1 @@ +../rfc/RenderOptimizer.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/SkillsCollection.txt b/doc/devel/rfc_parked/SkillsCollection.txt deleted file mode 100644 index c2c4f3d68..000000000 --- a/doc/devel/rfc_parked/SkillsCollection.txt +++ /dev/null @@ -1,80 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2007-06-13_ -*Proposed by* link:ct[] ---------------------------------- - - -Skills Collection ------------------ -Make a page where people can tell in which areas they are willing to support -others. 
- - -Description -~~~~~~~~~~~ -Some Page should list different things needed for working on the project and -users should attach themself when they offer support for it. This is meant that -people who run into problems know who to ask. In contrast this is not meant -like these Skill pages on Sourceforge or such. I don't like this rating and -posing system. We let people assing themself to skill and not skills to people -and there is no rating. - -Skills shall be anything which is needed like the tools we use, the code we -create etc. - - - -Example -^^^^^^^ - -.Git -* ct -* j6t - -.autotools -* ct - -.lumiera/renderpipe -* ichthyo - -... shall this contain emails? - - -Tasks -^^^^^ - * just set this page up .. either on this wiki or in a tiddlywiki which - becomes checked into the repo - - -Pros -^^^^ - * inter developer support and help network - - -Cons -^^^^ - * privacy concerns, people might not publish what they know or better what - they ''not'' know - - -Alternatives -^^^^^^^^^^^^ -...urgs - - -Rationale -~~~~~~~~~ -This only announces where people offer support within the lumiera developer -community and is absolutely voluntary. 
- - - - - -Comments --------- - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/SkillsCollection.txt b/doc/devel/rfc_parked/SkillsCollection.txt new file mode 120000 index 000000000..b0d00d674 --- /dev/null +++ b/doc/devel/rfc_parked/SkillsCollection.txt @@ -0,0 +1 @@ +../rfc/SkillsCollection.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/TodoLists.txt b/doc/devel/rfc_parked/TodoLists.txt deleted file mode 100644 index 1d3b987ac..000000000 --- a/doc/devel/rfc_parked/TodoLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-03-05_ -*Proposed by* link:ct[] -------------------------------------- - -Todo Lists ----------- -We need some way to organize tasks to be done (tiddlywiki, testsuite, ...?) - - -Description -~~~~~~~~~~~ - - -Tasks -^^^^^ - - -Pros -^^^^ - - - -Cons -^^^^ - - - -Alternatives -^^^^^^^^^^^^ - - - -Rationale -^^^^^^^^^ - - - - - - -Comments --------- -We decided to use a Tiddlywiki for now until this is further worked out - -- link:ct[] [[DateTime(2008-03-08T03:38:50Z)]] - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/TodoLists.txt b/doc/devel/rfc_parked/TodoLists.txt new file mode 120000 index 000000000..89793b79c --- /dev/null +++ b/doc/devel/rfc_parked/TodoLists.txt @@ -0,0 +1 @@ +../rfc/TodoLists.txt \ No newline at end of file diff --git a/doc/devel/rfc_parked/UseCases.txt b/doc/devel/rfc_parked/UseCases.txt deleted file mode 100644 index 7f1b96d5c..000000000 --- a/doc/devel/rfc_parked/UseCases.txt +++ /dev/null @@ -1,310 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Parked_ -*Date* _2008-10-31_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - -Use Case analysis ------------------ - -The only way to defeat "featuritis" is to build upon a coherent design -- - + -which in turn 
relies upon a more or less explicit understanding what the -application should be like, and the way the prospective user is thought to work -with the program. Today, a generally accepted 'method' for building up such -an understanding is to do a *use case analysis*. Such a formal analysis would -require to identify all usage scenarios with the involved actors and parts of -the system, and then to refine them in detail and break them down into distinct -use cases. Here, I'll try a rather informal variant of such an analysis. I'll -restrain myself to describing the most important usage situations. - -'please participate in the discussion. It well may be that everything detailed - here is self-evident, but I doubt so. At least the grouping and the omissions - kind-of reflect a certain focus of the project' - - -Describing basic Lumiera usage situations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The fundamental assumption is that the user works on a project, which is -reflected in the fact that the user is working on a single session over an -extended period of time (several hours to several years). External media will -be imported and incorporated into this session, additional media will be -created within this session, and finally there is at least one render or export -procedure to harvest the results of this work. - - -Scenario (1) : Exploring Media -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Various external media files are opened. You play, cue and examine the media. -Tagging, labeling and adding notes. Marking of interesting points and ranges. -Possibly breaking down into clips, or at least extract some ranges as clips. -Draft arranging the clips, applying some effects to check the result and thus -to find out about the viability of the footage. Playback of several media at -the same time (several videos, but also video and music). Grouping of assets -(media, clips, effects, markers) into folders. 
- - -Scenario (2) : Simple assembly -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You build up a simple linear cut sequence. Either by - - - using a single source media, trimming it and cutting away (a small number - of) unwanted parts - - playing source media and spilling over (insert, overwrite) some parts into - the final assembly - - dragging over the pre-organised clips from clip folders to build up the - assembly. - -Sound is either used immediately as-is (the soundtrack attached to the media), -or there is a similarly simple, linear music bed. Some people prefer to switch -sound off entirely for this kind of work. In any case, the link is either -automatic, or rather vague and soft (as music being vaguely correlated) - - -Scenario (3) : Augmenting an assembly -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Without the intention to rework it from scratch, an already existing simple -assembly is augmented, beautified and polished, maybe to conform with -professional standards. This includes the ``rescue'' of a somewhat questionable -assembly by repairing localized technical problems, but also shortening and -re-arranging, and in extreme cases even changing the narrative structure. A -distinctive property of this usage scenario is that work happens rather in the -context of 'tasks' (passes) -- not so much isolated operations: - - - the task may be to get the rhythm or overall tempo right, and thus you go - over the sequence and do trim, roll, shuffle or slide edits. 
- - you may want to ``fold-out'' parts of the sound, thus interweaving o-sound - and music - - there may be a sound overdubbing and replacing pass - - you may want to walk certain automation curves and adjust levels (sound - volume or tone, fade, brightness/contrast/colour) - - general polishing may include adding title overlays, fading in and out, - adding (typically a single type of) transition(s) in a coherent manner - - -Scenario (4) : Compositional work -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Here I define *compositional work* as a situation where you deal with -multiple more or less independent sequences going on in parallel, similar to a -musical score. Frequently, we encounter compositional parts embedded in a -otherwise linear work, and often those parts evolve when Scenario (3) is driven -to the extreme. - - - the most common situation is that o-sound, sound design and music work - together with the temporal structure created in the image edits. - - a movie with a complex narrative structure may induce compositional work on - a very large scale (and existing applications frequently fall short on - supporting such) - - _compositing_ often leads to compositional work. Special FX, masked objects - being arranged, artificial elements to be integrated. - - similarly any collage-like or heavily layered arrangements lead themselves - to requiring compositional work. - -The common distinctive property of all those situations is: objects are -embedded into a primary context and have to obey the rules of this context, and -at the same time have a close correlation to other objects which are embedded -in a completely different (``orthogonal'') context. (To give a catchy example: -assume, a CG monster has to be integrated. 
Besides the masked monster object, -you have several colouring and blurring layers at completely different levels -in the layering order, and at the same time you have correlated sound objects, -which need to be integrated into the general sound-scape. And now your primary -job is to get the movement and timings of the monster right in relation to the -primary timing grid established by the existing edit) - -The working style and thus the tool support necessary for compositional work is -completely different to Scenario (3). After an initial build-up (which often is -very systematic), the working profile can be characterized by tweaks to various -parameters to be done in-sync at widely separated sites within the session, -together with repeated cycles of ``do it'', ``assess the result'', ``undo all and -do some small detail differently''. Typically there is the need for much navigation -(contrast this to Scenario (3) where you work in _tasks_ or _passes_) - - -Scenario (5) : Working with Sound -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The degree of integrating sound work is worth discussing. Often, due to -limitations in existing tools, sound work is done in separate applications to a -large extent. Which in turn forces the whole production into a sequential -organisation scheme. First the edit has to be roughly final, and then the sound -people can step in. (Of course this is an simplification). 
To list the common -operations: - - - cleaning and preparing original sound - - fitting sound library elements or separately produced sound - - overdubbing - - playing or building music to match the rhythm of the edit or the original - footage - - montage of dialogue and/or noise correlated to the primary content of the - sequence - - sound design, shaping the pace and the feel of a sequence - - final balance mix - -While clearly some of those tasks are always better done within a dedicated -application, the ability to carry out this work partially within the main -session and even while the basic edit is still in flux -- may open new artistic -possibilities. - - -Scenario (6) : Large Projects -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -At first sight, the operations and the work to be done in large projects is the -same as in small ones. But large projects tend to create sort of an additional -``layer'' on top of the usage scenarios described thus far, which will ``kick in'' -at various places. - - - work may be divided upon several editors, working on separate parts - (sequences) which then need to be re-integrated - - there may be a global asset organisation (naming scheme), which will be - extended locally, resulting in nested naming scopes. - - some quite basic stuff needs to be done in a coherent fashion, e.g. titles, - a certain transition (template), the way fade-outs are done, a certain - colour profile. Possibly, this stuff needs to be adjusted all over the - project. - - there will be a general (large scale) timing grid with distinct ``check points'' - and probably there is the need to navigate to the different parts of the - whole project. - - there may be the necessity to build several versions of the same project in - parallel (e.g. 
a short version and a extended director's cut) - - you may have to care for such nasty and tedious things as keeping sub-titles - in-sync while the edit is still in flux - - you may want to do integration builds, where you add placeholders just for - the purpose to get an impression of the work as a whole. - - -Scenario (7) : Teamwork -^^^^^^^^^^^^^^^^^^^^^^^ - -Several people work on a project. - - - A longer sequence might be split up into parts, each one edited by another - person. The parts will be collected and assembled by the chief editor. Edits - to the parts will still be possible, but a system of permissions allows to - lock down access to parts of the edit, so to prevent unexpected interferences. - - Arrangements based on the same resources can be branched, tagged and merged. - - Edits are logged with usernames - - Markers can be shown/hidden on a per creator base. - - Team members need ways to share and store notes and suggestion for each - other work. Annotations can be added to clips, markers or arrangements - - A pen tool could allow to scribble on top of frames or arrangements. An - expressive and fast way to leave suggestions about deletions, movements and - all other kinds of edits. - - -Scenario (8) : Script driven -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The application is started ``headless'' (without GUI) and controlled via an -API. Either an existing session is loaded, or a new session is created and -populated. Then, some operations have to be done in a systematic manner, -requiring a way to address parts of the session both unambiguously and in a way -easy to access and control from a programming environment (you can't just -``see'' the right clip, it needs to be tagged). Finally, there might be an -export or render step. A variation of this scenario is the automatic extraction -of some informations from an existing project. 
- - -Discussion -~~~~~~~~~~ - -.Pros -* describing such scenarios, even if hypothetical, create an anchor or point of - referral for feature/GUI design work to be done in detail -* relating features to working situations helps to see what is really important - and what is rather of technical merit -* compiling and discussing this list helps shaping the character of the - application as a whole -* the above compilation relates individual features to a general production - process. -* the goal of this compilation is to be _fairly complete_ - - -.Cons -* any of those descriptions is artificial -* sometimes it is better to develop an application technology driven, - especially when it is technologically challenging to get it to work properly. -* having such a large-scale vision may freak away people which otherwise - might jump in and implement some crazy but valuable new feature -* the listed usage scenarios intend to be _fairly complete_, which can be a - limitation or even self-deception. Better have an open ended list. -* the above compilation seems quite conventional and explicitly leaves out some - scenarios - - networked, distributed scenarios, compound applications - - television, life video, VeeJay-ing - - cartoons, animations, game design - - - -.Alternatives -* avoiding a general plan, just sharing a vague general vision -* just start out with one scenario directly at hand (e.g. the simple assembly) - and not worrying about the rest -* rather then defining those scenarios (which are necessarily hypothetical), - rather stick to the operation level. E.g. a use case would be rather - on the level of ``triming a clip'' -* doing a complete state-of-the art UML use case analysis. -* after having created the foundation, rather stick to an XP approach, i.e. 
- implement, integrate and release small ``usage stories'' - - - -Rationale -^^^^^^^^^ - -Well, after having considered, compiled and written such an concept, altogether -avoiding a big picture view of the application is not longer an option. To the -other extreme, we neither have the resources, nor the circumstances for doing a -rigid and formal analysis. Finally, the XP approach really sounds promising, -and it should be clear that it is in no way ruled out. Nothing hinders us to -have a detailed vision, but then to implement small usage stories which fit -into this vision. - -Besides, another consideration. The above compilation builds upon the notion, -that there is a common denominator of film making craft, a core editing art, -which has been shaped in the first 100 years of cinema, and which won't go away -within the next generation, even if the technological and practical -circumstances of production change quite dramatically. - - - - - - - - -Comments --------- -//comments: append below - -.Template e.g. for regular TV series -Constraints to fit all contents within fixed timeline, cover topic, select -collage of iconic scenes from archived and collected footage. Update intro and -credit roll for each episode. Add in stopmotion, and 3D model animations with -vocal commentaries. Gather together separate items from "outworkers". - -Tree:: '2008-12-27 08:36:36' - - -//endof_comments: - - - -Parked -~~~~~~ -We have to revisit this, possibly someone (or a group) who wants to work on -the workflow. For now its parked until revisited. 
- - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - - - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_parked/UseCases.txt b/doc/devel/rfc_parked/UseCases.txt new file mode 120000 index 000000000..f0e56ff79 --- /dev/null +++ b/doc/devel/rfc_parked/UseCases.txt @@ -0,0 +1 @@ +../rfc/UseCases.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/ApplicationInstall.txt b/doc/devel/rfc_pending/ApplicationInstall.txt deleted file mode 100644 index 44ed4cdea..000000000 --- a/doc/devel/rfc_pending/ApplicationInstall.txt +++ /dev/null @@ -1,225 +0,0 @@ -ApplicationInstall -================== - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Draft_ -*Date* _Di 11 Jan 2011 17:00:55 CET_ -*Proposed by* Ichthyostega -------------------------------------- - -[abstract] -********************************************************************************* -Lumiera should be a _freely relocatable_ application bundle. -Relying only on the relative folder structure within this bundle, the application -will be fully functional at any location, provided that the external library -dependencies are resolvable using the standard mechanisms of the platform. -The setup mechanism must be obvious, self-explanatory and must not rely -on compiled in magic or buildsystem trickery. Yet packaging into a FSH conforming -installation location should be supported by the same mechanisms. -********************************************************************************* - -Description ------------ -//description: add a detailed description: -Lumiera is planned to become a large professional application bundle, relying -on several external resources for proper operation. An installed Lumiera -application will be more like Gimp, Blender, OpenOffice or Eclipse, -not like bash, autotools or emcas. 
- -Besides that, it can be expected that Lumiera frequently will be used in a -project or studio like setup, where the application isn't installed, but just -unZIPped / unTARed and used as-is. Thus, it should be sufficient to unpack -the application bundle and point it to the session file and maybe the -media storage. - -The Eclipse platform can serve as a model for the setup of an modern -application of that style: It can be just unpacked, and when looking -into the folder structure, the meaning of the parts is obvious, and the -basic bootstrap is controlled by two short text based INI files. -While Lumiera presumably won't get _that_ heavyweight and is clearly -not intended to become a general business application platform like OSGi -- -the underlying principles can serve as a point of reference for modern -development standards. - -This leads to the following conclusions: - -- we need to define a standard folder layout for the bundle -- the application must not rely on any compiled-in absolute paths -- the executable should fetch the directly accompanying shared modules -- all other lib dependencies should be handled by the system mechanisms -- the bootstrap configuration likewise must not be compiled-in -- this configuration must be human readable and clearly exhibit its purpose -- the same system must be able to adapt to a FSH conforming installation layout - -Judging from our current planning and the existing codebase, Lumiera -is on a good way in that direction, yet some cleanup needs to be done, -especially removing convenience shortcuts from the early days of development -and catching up with the repair of some traits of sloppyness here and there. - -Library resolution -~~~~~~~~~~~~~~~~~~ -In former days, it was common habit to compile-in a hard wired absolute -+RPATH+. This can be considered obsolete practice; for example, the Debian -policy forbids doing so. This is the result from numerous maintainability -problems in the past. 
On the other hand, the GNU linker and other modern -linkers support a relative resolution of shared modules directly accompanying -an specific executable. The Debian policy allows this, if and only if these -shared modules are installed with the same binary package and only used by -this specific executable(s). Together, this is exactly what we need to -solve our requirement. - -Thus, the build process enables the new-style DT-tags in the Elf binary -and sets the +DT_RUNPATH+ with an value relative to +$ORIGIN+, which resolves -to the path of the currently executing binary. Moreover, it is _sufficient_ -to set this on the initial executable _only,_ because this creates a common -searchpath for all lib resolution events in the scope of that loaded executable. -Besides that, we need to care that our private libraries have a unique +SONAME+, -in this case all starting with the prefix +liblumiera*+. Note moreover that this -new-style +DT_RUNPATH+ indeed _can_ be overridden by an +LD_LIBRARY_PATH+ in the -environment, should there be the need for very special experiments. - -Bootstrap location -~~~~~~~~~~~~~~~~~~ -Thus, a single relative library folder becomes the only hard wired start -configuration. In our case, the folder +$ORIGIN/modules+ was chosen. The -root of the package then holds all the binaries depending on these common -internal libraries, that is the +lumiera+ executable and any accompanying -special tools. As usual with such large application bundles, these get -only _symlinked_ into the +/usr/bin+ folder on installation. - -For sake of clarity, after starting the executable, the _same location_ -is used to load the bootstrap configuration. This configuration in turn -defines all further locations like the extended configuration, project -templates, plugin search path, the GUI module to load, the search path -for icons and GUI resources, project templates and similar basics. 
- -Relative paths and the location of the executable -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -According to the general requirements outlined above, most paths should -be given in a relative fashion. Unfortunately there is no portable solution -for self-discovering the currently running executable. But at least there -is a solution for all current major platforms. Under Linux, this information -can be retrieved from the kernel through the +/proc+ filesystem. - -Again for sake of clarity, the same token +$ORIGIN+ was chosen to denote -this executable location (note: this is _not_ the current working directory). -Moreover, due to the folder layout detailed above, this coincides with the -root of the application bundle, thus making for a self-explanatory convention. -Besides +$ORIGIN+, these search paths later on likely will contain locations -below the user's home directory, e.g. +~/.lumiera/themes+ - - -Tasks -~~~~~ -// List what needs to be done to implement this Proposal: -* identify what impedes such a modern setup procedure ([green]#✔ done#) -* rectify the folder structure created in the build target - directory ([green]#✔ done#) -* build the executables in a way to allow relative resolution of the - internal shared modules ([green]#✔ done#) -* replace the compiled-in path definitions for plugin loading by a - configurable bootstrap ([green]#✔#) -* add an working library implementation for a config loader ([green]#✔ done#) -* add a mechanism for establishing the path of the current execubable. 
+ - This is _non-portable_ ([green]#✔ done#) -* wire the prepared API in the GUI to use this working config loader - for resolving GUI resources ([green]#✔ done#) -* try to extract the path search code from the existing config loader, - or build a new solution based on standard libraries ([green]#✔ done#) -* introduce an output root directory into the buildsystem, allowing - for package builds ([green]#✔#) -* define a _Debian packaging_ as proof-of-concept ([green]#✔ done#) - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -* self-contained -* self-explanatory -* based on _best practices_ -* conforming with FSH and Debian policy - - -Cons -^^^^ -* requires work -* raises the bar at the implementation side -* requires an bootstrap sequence to be explicitly performed - on application startup -* breaks with some beloved habits of the Unix community - - -Alternatives -^^^^^^^^^^^^ -//alternatives: explain alternatives and tell why they are not viable: -I can think of two alternatives - -. dealing with all those problems _later_ -. not making an concept, rather sticking to UNIX habits - -The first alternative is indeed worth considering, because we're settling -some things to be really implemented way later, which bears some dangers. -But, on the other hand, it is a common practice known from extreme programming -to deliver early and regularly, which effectively means to set up the deploy -path of an application really early in the development cycle. The rationale -is that -- according to general experience -- the deployment always turns -up some very specific problems and constraints, which can be a serious -threat when discovered late in the development process. - -The second alternative isn't really applicable IMHO. The original UNIX philosophy -breeds on an academic setup and really excels with small nifty commandline utils -combined by pipes, each specialised to do a single thing very well. These utils -are more like the objects within our implementation. 
The concept of large -application software bundles and desktop software was always a bit alien -within the classic UNIX environment. - - - -Rationale ---------- -//rationale: Give a concise summary why it should be done *this* way: - -This RfC can be seen as an commitment to an professional approach and as -clarification: Traditionally, the Unix community hailed a lot of _black magic_ -practices like compiled-in installation paths, macro magic, +sed+ and +awk+ -trickery, inline code compiled on-the-fly, relying on very specific and -un-obvious behaviour of some build script, configuration via environment -variables and a lot of similar idioms. These practices might be adequate -in a quickly moving Research & Development setup, but turned out to be -not so helpful when it comes to industrial strength development, -as they are known to lead to maintenance problems. - - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - -.State -> Draft -There is now a complete implementation of this concept on my ``proc'' branch. + -Moreover, I was able to define an initial Debian packaging for Lumiera on top -of that implementation. - -During that work, I had opportunity to visit various areas of the existing codebase, -which reminded me of several small issues, which seem to become unhealthy when lying -around unfixed for such a long time. Probably I'll start a clean-up initiative and -try to bring these points to discussion separately. 
- - So 13 Feb 2011 20:04:00 CET Ichthyostega - - -//endof_comments: diff --git a/doc/devel/rfc_pending/ApplicationInstall.txt b/doc/devel/rfc_pending/ApplicationInstall.txt new file mode 120000 index 000000000..46e568659 --- /dev/null +++ b/doc/devel/rfc_pending/ApplicationInstall.txt @@ -0,0 +1 @@ +../rfc/ApplicationInstall.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/DesignParamAutomation.txt b/doc/devel/rfc_pending/DesignParamAutomation.txt deleted file mode 100644 index 9ed2d9b4a..000000000 --- a/doc/devel/rfc_pending/DesignParamAutomation.txt +++ /dev/null @@ -1,84 +0,0 @@ -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _2008-03-06_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - -Design the handling of Parameters and Automation ------------------------------------------------- -Parameters of Plugin Components and/or Render Nodes play a role at various -levels of the application. - + -Thus it seems reasonable to do a formal requirements analysis and design prior -to coding. - - -Description -~~~~~~~~~~~ -Regarding components directly participating in the render (which may be -implemented by plugins), we distinguish between *configuration* (static) and -*parameters* (dynamic). The point of reference for this distinction is the -render process: a plugin configuration may well be variable in some manner, -e.g. the plugin may provide different flavours of the same algorithm. But this -choice has to be fixed prior to feeding the corresponding plugin asset to the -builder. Contrary to such fixed configuration setup, the _parameters_ are -considered to be _variable_ during the rendering process. They can be changed -on-the-fly from GUI, and they may be automated. Probably, each Render Node will -have at least one such _parameter_ -- namely a bypass switch. 
- - -Tasks -^^^^^ - - * we need to work out an introspection mechanism for parameters - - asses what different types of parameters we need - - find out how much structured parameters will be (do simple values - suffice?) - - define how parameters can be discovered/enumerated - - define a naming scheme for parameters, so they can be addressed - unambiguously - * value parameters have a value range. Work out how to handle this - * parameters may need a specific presentation in the GUI - - linear/logarithmic scale, scale reference - - selecting the right widget - -So... - -. find out to which extend we need these properties -. find out what parts of the App will have what requirements? -. chose a best fitting implementation based on this information - -A closely related issue is the handling of *Automation*. The current draft -calls for an abstract interface "ParamProvider", which just allows the -link:Plugin/RenderComponent[] to pull a current value, without knowing if the -ParamProvider is a GUI widget or an automation data set with interpolation. The -component using the param value should not need to do any interpolation. We -should re-asses and refine this draft as needed. Note: Render Nodes are -stateless; this creates some tricky situations. - - - - - - - -Alternatives -^^^^^^^^^^^^ -?? (any ideas?) 
- - -Rationale -~~~~~~~~~ - - - - - - -Comments --------- - - -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_pending/DesignParamAutomation.txt b/doc/devel/rfc_pending/DesignParamAutomation.txt new file mode 120000 index 000000000..bec0a6722 --- /dev/null +++ b/doc/devel/rfc_pending/DesignParamAutomation.txt @@ -0,0 +1 @@ +../rfc/DesignParamAutomation.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt b/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt deleted file mode 100644 index 1f88d130e..000000000 --- a/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt +++ /dev/null @@ -1,136 +0,0 @@ -Developer Documentation Structure -================================= - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Mon Aug 2 18:03:25 2010_ -*Proposed by* Christian Thaeter -------------------------------------- - -[abstract] -******************************************************************************** -I describe here how to bring the Lumiera Developer Documentation into an simple -hierarchical structure. Previously we accumulated a lot Documentation which -ended in quite a few different places. This should be tidied up. -******************************************************************************** - -Description ------------ -//description: add a detailed description: - -I propose to reorganize the developer documentation in the following way: - - * make a 3 (or more, see below) level documentation structure: - 1. The entry level becomes the 'Lumiera: The inner Core' document which shall - not go into details but give a hint what everything is made for. This - will be the first introductory doc for new developers. - 2. 
second level are the RFC's which descibe the design as planned on a - general level, not going (except for some example snippets) into - implementation details. - 3. the third level is the doxygen documentation which describes what - actually got implemented in detail. This can be further split into - an external reference and a internal part. - -We using test-driven-development, our tests are our specifications. This leads -to the idea that ideas, design and intentions for tests should be documented -there too. In a higher level abstract human written form. I propose to use my -pipadoc documentation extractor (that means, writing asciidoc within the code as -special comments) for this. - - -Tasks -~~~~~ -// List what would need to be done to implement this Proposal in a few words: -// * item ... - - * Go over the old content of the asciidoced tiddlywikis, integrate it either in - the "Lumiera: The inner Core" document or write single RFC's for them. - * The 'proc' tiddlywiki is a bit special, we need a plan how to integrate this. - Possibly making a own document-dir for this, or refactor it in plenty RFC's. - This is ichthyos decision. - * Decide how to proceed with the UML model - - - -Pros -^^^^ -// add just a fact list/enumeration which make this suitable: - -Much easier entry to the whole developer documentation. Reading the "Inner Core" -document should be sufficient to get a good idea about the Lumiera design and -layout. All details are linked from there and thus easily findable. - - -Cons -^^^^ -// fact list of the known/considered bad implications: - -There are some open ends yet, doxygen for example doesn't integrate nicely, we -possibly can't link to single doxygen entities since these have no permanent -link (to my understanding, to be investigated). Other parts like the UML model -are not yet decided and moving the other existing content over needs some (not -really much) work. 
- -Alternatives ------------- -//alternatives: explain alternatives and tell why they are not viable: - -Spring 2010 we discussed and decided an overall website and documentation structure. -We could just stick to that. - - -Rationale ---------- -//rationale: Describe why it should be done *this* way: - -This approach fits nicely into our overall infrastructure and the way we wanted -to do things. Using git and asciidoc mostly, making the developer documentation -part of the source tree and reasonable easy available/maintainable to -developers. - -//Conclusion -//---------- -//conclusion: When approbated (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - -* The general idea of having three levels, with 'The Inner Core' as entry point, - looks OK for me. -* beyond that -- we had a detailed discussion about the overall website structure, - which includes the documentation. Why should we overthrow these results now and - re-start the discussion? Lets just stick to this agreed on structure! -* especially I don't like the way this proposal tries to squeeze everything into - an completely uniform structure. It is simply not true that the RFCs are just the - second level, and doxygen would cover the 3^rd^ level. Look at the existing - documentation to see why. - - RFCs are a 'kind' of document, not a 'hierarchy level.' Indeed, our existing - RFCs span all three hierarchy levels, and this is OK so and should remain this - way. (And yes, I like the RFCs much and want to retain them) - - RFCs are well suited to topics requiring discussion and agreement by the whole - core developer team. I see no point in 'pseudo-RFC-ing' the individual design - decisions only relevant for an isolated part of the application and without - any potential for discussion. 
- - similarily, in the TiddlyWiki, besides just working notes (``extended brain'') - you'll find finished text pages belonging to all different levels, from very - high-level conceptual down to explanation of technical details, with - cross references and tags for categorisation (and this will be retained - when asciidocing the content). -* so my conclusion is rather having one overview text, and then the split into - *conceptual* and *technical* documentation, each of which has a separate sub - structure not necessarily congruent to the structure on the other half. RFCs, - UML model and doxygen are just separate and consistent bodies of documentation - and can be referred to from the main documentation. (I agree with the observation - regarding permanent links into doxygen. But I can't imagine there isn't some - existing solution to this problem) - -- link:Ichthyostega[] 2010-10-15 - -//endof_comments: diff --git a/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt b/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt new file mode 120000 index 000000000..e95a5fda2 --- /dev/null +++ b/doc/devel/rfc_pending/DeveloperDocumentationStructure.txt @@ -0,0 +1 @@ +../rfc/DeveloperDocumentationStructure.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/EngineInterfaceOverview.txt b/doc/devel/rfc_pending/EngineInterfaceOverview.txt deleted file mode 100644 index e9e931661..000000000 --- a/doc/devel/rfc_pending/EngineInterfaceOverview.txt +++ /dev/null @@ -1,271 +0,0 @@ -Engine Interface Overview -========================= -:Date: 2010 - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Draft_ -*Date* _2010-04-16_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - -Overview Engine Interface(s) ----------------------------- - -******************************************************************************** -At the Engine Interfaces, Lumiera's Backend and 
Session get connected and work -together to produce rendered output. This design proposal intends to give an -overview of the connection points and facilities involved, to define some terms -and concepts and to provide a foundation for discussion and working out the -APIs in detail. -******************************************************************************** - - - -Participants -~~~~~~~~~~~~ - *Render Process*:: represents an ongoing calculation as a whole - *Engine Model*:: encloses the details of the current engine configuration and - wiring - *Dispatcher*:: translates a render process into the (planned) invocation of - individual nodes - *Scheduler*:: cares for calculations actually to happen, in the right order - and just in time, if at all - *Node*:: abstraction of an processing unit, supports planning by the - dispatcher, allows to pull data, thereby driving the actual calculation. - -Render Process -~~~~~~~~~~~~~~ -The render process brackets an ongoing calculation as a whole. It is not to be -confused with a operating system process or thread; rather it is a point of -reference for the relevant entities in the GUI and Proc-Layer in need to -connect to such a "rendering", and it holds the specific definitions for this -calculation series. A render process -_corresponds to a single data stream_ to be rendered. Thus, when the play - controller of some timeline in the model is -in _playing_ or _paused_ state, typically multiple corresponding render -processes exist. - -* there is an displayer- or output slot, which got allocated on creation - of the process -* the process disposes calculated data frames "into" this slot -* the process can be paused/started and stopped (aborted, halted). -* some processes allow for changing parameters dynamically (e.g. 
speed, - direction) -* each process has to ensure that the output/display slot gets closed or - released finally - -.Process parameters -A process is linked to a single stream data format (a -> -link:StreamTypeSystem.html[stream implementation type]). + -It is configured with _frame quantisation_ and _timings_, and a _model port_ -identifier and _channel selector_. - - quantisation:: - translates time values into frame numbers. (In the most general - case this is a function, connected to the session) - - timings:: - a definition to translate global model time units in real clock time, - including _alignment_ to an external _time grid_. - - model port:: - a point in the (high level) model where output can be produced. + - This might be a global pipe in one of the model's timelines, or - it might be a _probe point_. - - channel:: - within the session and high level model, details of the stream - implementation are abstracted. Typically, a global pipe (master bus - or subgroup) corresponds to a multichannel stream, and each of these - channels might be hooked up to an individual render process - (we have to work out if that's _always the case_ or just under - _some circumstances_) - - -[NOTE] -=================== -While certainly the port and channel definition is fixed, unfortunately the -quantisation and the timings are'nt. The timings may be changed in the middle -of an ongoing render process, due to changed playback speed, shuffling or -requirements forwarded from chase-and-lock synchronisation to an external -source. We still need to discuss if Lumiera is going to support variable -framerates (several media professionals I've talked to were rather positive we -need to support that -- personally I'm still in doubt we do). Variable -framerates force us to determine the frame numbers by an integration over time -from a start position up to the time position in question. 
The relevant data to -be integrated is located in the session / high-level model; probably we'll then -create an excerpt of this data, but still the less quantisation will be a -function of time. Anyway, it is the render processes job to translate all kinds -of parameter changes into relevant internal API calls to reconfigure the -calculation process to fit. -=================== - - - -Engine Model (low-level Model) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The low level model is a network of interconnected render nodes. It is created -by the build process to embody any configuration, setup and further -parametrisation derived from the high-level description within the session. But -the data structure of this node network is _opaque_ and considered an -implementation detail. It is not intended to be inspected and processed by -outward entities (contrast this to the high-level model within the session, -which provides an extensive discovery API and can be manipulated by model -mutating commands). We just provide a set of _query and information retrieval -functions_ to suit the needs of the calculation process. The engine model is -_not persisted._ - -* the engine model is partitioned by a _segmentation_ of the time axis. - Individual segments can be hot-swapped. -* the engine has _exit nodes,_ corresponding to the model ports mentioned above -* each exit node provides a stream type definition plus quantisation and - alignment constraints. - -Thus, for any pair (port, time) it is possible to figure out a segment and an -exit node to serve this position. The segmentation(s) for multiple ports might -differ. To allow for effective dispatching, the model should provide -convenience functions to translate these informations into frame number ranges. -The mentioned quantisation and alignment constraints stem from the fact that -the underlying media source(s) are typically themselves quantised and the -timings might be manipulated within the processing chain. 
We might or might not -be able to shift the underlying media source -(it might be a live input or it might be tied to a fixed timecode) - - - -Processing Node -~~~~~~~~~~~~~~~ -In this context, a node is a conceptual entity: it is an elementary unit of -processing. It might indeed be a single invocation of a _processor_ (plugin or -similar processing function), or it might be a chain of nodes, a complete -subtree, it might _represent_ a data source (file, external input or peer in -case of distributed rendering), or it might stand for a pipeline implemented in -hardware. The actual decision about these possibilities happened during the -build process and can be configured by rules. Information about these decisions -is retained only insofar it is required for the processing, most of the -detailed type information is discarded after the wiring and configuration step. -As mentioned above, each node serves two distinct purposes, namely to assist -with the planning and dispatching, and to pull data by performing the -calculations. - -Nodes can be considered _stateless_ -- pulling a node has no effect outside the -invocation context. While a node _might_ actually be configured to drive a -whole chain or subtree and propagate the pull request -_within_ this tree or chain internally, the node _never propagates a pull - request beyond its realm._ The pull() -call expects to be provided with all prerequisite data, intermediary and output -buffers. - - -Dispatching Step -~~~~~~~~~~~~~~~~ -The dispatcher translates a render process (actually a _calculation stream_ as -part of a render process) into sequences of node invocations, -which then can be analysed further (including planning the invocation of -prerequisites) and scheduled. 
This mapping is assisted by the engine model API -(to find the right exit node in the right segment), the render process (for -quantisation) and the involved node's invocation API (to find the -prerequisites) - - -Node Invocation API -~~~~~~~~~~~~~~~~~~~ -As nodes are stateless, they need to be embedded into an invocation context in -order to be of any use. The node invocation has two distinct stages and thus -the invocation API can be partitioned in two groups - -Planning -^^^^^^^^ -During the planning phase, the dispatcher retrieves various informations -necessary to _schedule_ the following pull call. These informations include - - * reproducible invocation identifier, usable to label frames for caching - * opaque source identifier (owned by the backed) in case this node - represents a source - * prerequisite nodes - * index (channel) of the prerequisite's output to be fed as input buffer(s) - * number and size of the output buffers required - * additional memory required - * control data frame(s) - - -Node pull -^^^^^^^^^ - * the pull call expects to be provided with all the resources announced during - the planning step - * moreover, the pull call needs to know (or some way to figure out) the time - coordinates - * after retrieving automation, the control flow forwards to the actual - processing function - * there is an result/error code (assuming the scheduler prefers error codes - over exceptions) - - -'''' - -Tasks -~~~~~ - * find out if we need to support variable framerate - ([green]#-> yes, implementation deferred#) - * find out about the exact handling of multichannel data streams ([green]#✔ done#) - * design and prototypical implementation of frame quantisation ([green]#✔ done#) - * design a buffer descriptor ([green]#✔ done#) - * design a buffer designation scheme [red]#TODO# - * expand on the node identification scheme [red]#TODO# - * clarify how control data frames can be addressed [red]#TODO# - - -Discussion -~~~~~~~~~~ - -Pros/Cons/Alternatives 
-^^^^^^^^^^^^^^^^^^^^^^ -Currently we're focussing on how to implement _this_ concept, not on -evaluating alternatives. Especially the idea of scheduling individual frame jobs -is a core concept of Lumiera. This RfC tries to bridge from the session model to -an engine based on these concepts. It's the attempt to link two concepts already -defined and decided on.... - - -Rationale -^^^^^^^^^ -* allow for optimal resource use and avoid blocking of threads -* shift away complexity from the engine into the builder, which is by far not - so performance critical -* allow to adjust the actual behaviour of the engine in a wide range, based on - actual measurements -* create a code structure able to support the foreseeable extensions (hardware - and distributed rendering) without killing maintainability - - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - -.State -> Draft -Requirements and details of the design are sufficiently clear meanwhile. 
-Ther seems to be not much room for alternative approaches, given our -general planning for the application - - Mi 11 Mai 2011 19:27:12 CEST Ichthyostega - - -//endof_comments: - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] - diff --git a/doc/devel/rfc_pending/EngineInterfaceOverview.txt b/doc/devel/rfc_pending/EngineInterfaceOverview.txt new file mode 120000 index 000000000..9fe44386a --- /dev/null +++ b/doc/devel/rfc_pending/EngineInterfaceOverview.txt @@ -0,0 +1 @@ +../rfc/EngineInterfaceOverview.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/EngineInterfaceSpec.txt b/doc/devel/rfc_pending/EngineInterfaceSpec.txt deleted file mode 100644 index 2b4be964e..000000000 --- a/doc/devel/rfc_pending/EngineInterfaceSpec.txt +++ /dev/null @@ -1,234 +0,0 @@ -Engine Interface Spec -===================== - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Mi 11 Mai 2011 17:53:16 CEST_ -*Proposed by* Ichthyostega -------------------------------------- - -[abstract] -******************************************************************************** -The main service of the Renderengine is to deliver a stream of calculations -bound by timing constraints. The parameters of this delivery can be reconfigured -******************************************************************************** - -Try to start the integration and overall design of the Lumiera Render Engine. -Point of reference is the functionality other parts of the application are relying on. - -Description ------------ -//description: add a detailed description: -The Engine is driven by the Player subsystem and draws on the low-level Model -(Render nodes network) for all local parameters and control data. The goal is -to deliver all the typical playback and rendering operations commonly found -in NLE applications (functional requirements). 
Moreover, this functionality -shall be delivered in a robust and reliable fashion, while making optimal -use of the available I/O bandwidth and computation power (non-functional -requirements). - -Requirements Specification -~~~~~~~~~~~~~~~~~~~~~~~~~~ -.functional -- simple calculation stream -- with or without defined end point -- deliver to fixed output buffer(s) with high timing precision -- ability to deliver individual data chunks (e.g. single frames) -- ``free wheeling'' operation for maximum calculation throughput -- throttled operation using idle calculation or bandwidth resources -- streams with regular stepping and arbitrary block sizes -- looping, seamless chaining of calculation streams with ongoing timing -- ability to pause and to change / combine any of the above any time -- ability to abort or change, providing reliable feedback on completion -- ability to relocate (shift in time) parts of an ongoing calculation stream -- support for chase-and-lock synchronisation - -.non-functional -- protection against overload and deadlocks -- gracious degradation in case of problems -- maintain a configurable quality-of-service level -- utilise precisely the resources actually available - -Functionality description in detail -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> see the link:EngineInterfaceOverview.html[Engine/Interface overview] for -a description of the involved entities and for definitions for common terms. 
- -Definitions -^^^^^^^^^^^ -Calculation Stream:: - A series of similar but parametrised calculations, - bound to deliver results in sequence and in accordance to timing constraints -Timed Delivery:: - Calculation result data has to be delivered within a fixed time interval - immediately preceding the delivery deadline, so it can be consumed by - an (possibly external) output process without the need for further buffering -Output Slot:: - An abstracted facility receiving calculated data; including a scheme - to organise the output buffers, which get handed over to an independent - thread running in parallel, or maybe even to an external process - -Operation description -^^^^^^^^^^^^^^^^^^^^^ -When *creating a calculation stream*, the _exit node_ and an already opened -_output slot_ are provided, plus the _timing parameters_ (frame duration, -stepping interval length, optional start and/or endtime) - -A *free wheeling calculation stream* is created in a similar fashion, just -without timing constraints on the output delivery; i.e. just the output slot -is parametrised differently. This invocation is used to create a ``mixdown'' -or ``final render'' to be saved into an output file. - -A *background rendering mandate* is created analogous, but _without_ providing -an output slot. Rather, it is expected that the engine will cache the generated -data internally. - -When calculation stream definitions are *chained*, the follow-up calculation -stream is expected to be delivered seamlessly after the preceding stream, -without interrupting the output timings. - -*Looping* is a special kind of chained calculations, where the same segment -is delivered continuously. But note, the loop boundaries are not necessarily -aligned with the frame spacing or the output timing requirements. - -For supporting *chase-and-lock*, the engine needs a mechanism to follow an -externally provided synchronisation goal, _without_ altering the output -delivery timings. 
Obviously, we need to build in a strategy for handling -this problem (because the solution is bound to be different for different -kinds of media). - -The engine can expect the output slot to support *de-clicking* or *flicker -protection* -- yet the engine needs to signal precisely when this is necessary - -Quality of service -^^^^^^^^^^^^^^^^^^ -The Engine is expected to provide different quality-of-service classes, -which are requested as part of the definition parameters for a calculation stream. - -- 'SYNC_PRIORITY' means to keep up to the delivery requirements, - even if this means failing to deliver data altogether. -- 'PERFECT_RESULT' means to deliver data perfect up to the definition, - even if this means violating the timing constraints. -- 'COMPROMISE' allows the engine to take some shortcuts in order to - deliver an roughly satisfactory behaviour. Likely there will be - multiple classes of compromise. - -The quality of service is partially implemented directly by the engine -and partially passed on as parameter to the individual node invocations. -For example, the engine might decide to switch down to proxy media, while -actually the node network will perform the actual switch and reconfiguration. - -The quality of service could be implemented as a strategy, to be consulted at -various decision points. The above cases would then be just some preconfigured -default strategies. - -Reconfiguration and abort -^^^^^^^^^^^^^^^^^^^^^^^^^ -There needs to be a separate ``control channel'' to cause various reconfigurations -during an ongoing calculation process. With the exception of the output slot, all -parameters defining an calculation stream might be changed on-the-fly -- including -the possibility to abort calculation altogether. - -The engine is _not required_ to react on such change requests immediately or -synchronously. The goal is rather to integrate such changes seamlessly. -Yet we _require_... 
- -- a guarantee that the change request is observed within some tolerance interval - (i.e. we may block waiting on the change to happen, without risking a deadlock) -- a reliable feedback _after_ the change has happened, by invoking a response signal - (functor/callback provided with the change request) -- a guarantee not to proceed with the original setup after this signalling - (read: after receiving this feedback, resources required only by the initial - setup may be deallocated) - -Especially note that the following things might be changed in the middle of an -ongoing calculation: - -- timing parameters of the calculation stream (frame durations, stepping interval) -- start and end time -- splitting and chaining of calculation streams (e.g introducing jumps) -- adjust the looping boundaries -- toggle _paused_ state -- change the exit node to use for pulling -- relocate the nominal time position of parts of the calculation stream; - especially we expect already calculated and cached data to be re-labeled -- invalidate parts of the (nominal) time axis, forcing recalculation -- abort individual calculation streams without interfering with others. - - -Tasks -~~~~~ -// List what needs to be done to implement this Proposal: -* analyse requirements of the player subsystem ([green]#✔ done#) -* determine further informations needed during calculation [,yellow]#WIP# -* find out about timing requirements and constraints in detaill [red]#TODO# -* define the interface functions in detail [red]#TODO# -* prepare a test fixture with mock-up calculations [red]#TODO# -* implement the invocation backbone with stubbed functionality [red]#TODO# - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -// add a fact list/enumeration which make this suitable: -// * foo -// * bar ... 
- - - -Cons -^^^^ -// fact list of the known/considered bad implications: -The requirements placed on life changes are quite high - - - -Alternatives -^^^^^^^^^^^^ -//alternatives: explain alternatives and tell why they are not viable: - - - -Rationale ---------- -//rationale: Give a concise summary why it should be done *this* way: -The expectations for the playback and render functionality of a NLE are -pretty much set. There isn't much room for reducing functionality. -So the goal for this RfC is to precisely define the inevitable -and break it down into tangible functionality on the implementation level. - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - -Discussed in the May developers meeting. Seems to be basically acceptable. -_Cehteh_ proposed some small adjustments: - -- making the _QualityOfService_ rather a strategy to be queried -- treating the rescheduling a bit separate from the other changes, because - that is very common and needs to be performant. 
-- introducing a separate scheduler/queue for time scheduled tasks, like - with rater soft realtime requirements - - So 15 Mai 2011 00:55:24 CEST Ichthyostega - - -//endof_comments: - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_pending/EngineInterfaceSpec.txt b/doc/devel/rfc_pending/EngineInterfaceSpec.txt new file mode 120000 index 000000000..a0c8a7ce9 --- /dev/null +++ b/doc/devel/rfc_pending/EngineInterfaceSpec.txt @@ -0,0 +1 @@ +../rfc/EngineInterfaceSpec.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/RefactorLiblumieraOut.txt b/doc/devel/rfc_pending/RefactorLiblumieraOut.txt deleted file mode 100644 index c31fb06a2..000000000 --- a/doc/devel/rfc_pending/RefactorLiblumieraOut.txt +++ /dev/null @@ -1,240 +0,0 @@ -Refactor Liblumiera Out -======================= - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Fr 22 Apr 2011 10:46:50 CEST_ -*Proposed by* Christian Thaeter -------------------------------------- - -[abstract] -******************************************************************************** -liblumiera contains alot useful and reuseable code which is already in use by -other projects -******************************************************************************** - -Description ------------ -//description: add a detailed description: -Over the time we've put some efforts into the liblumiera. I've added -some from my code which predates the lumiera project which I am using -on many other projects. This now caused that I maintain this sources in -different unrelated projects and have to cross merge and update stuff -when I do updates and fixes somewhere. I think its time to factor the -reuseable parts out into a independent library (like glib does for -gtk), in fact I had this plan long ago. 
- - -.What parts are eligible for a standalone library - -Anything which is something tool alike and useful for other projects and not -tied to Lumiera only. This are the algorithms/datastructures, allocators, tool -macros. Additionally some of the src/common things should be moved into the -library. I give some lists below. - -.How to name it - -Long time ago my plan was to name it 'ctlib' or 'cehlib' but meanwhile there is -enough code done by others. So I'd propose a more neutral name, still -'lumieralib' or 'lulib' would be approbiate. The only thing we have to account -for is that some parts which are too specific for Lumiera and should not be -integrated into this spinoff need either to stay in a lumiera-internal lib -(src/lib/) as currently or being moved to the respective subsystems using them -(src/backend, src/proc, src/common, ...), so the names should not clash. - -.C, C++ ... - -For myself I need the C parts, while there is C++ code which interfaces to the -C implementations and also a lot code which does nice C++ things on its own. -This possibly means that we should in fact make 2 packages out of this, one C -and one C++ library (where the C++ part is more than just the wrappers, but -also the tools and tricks which are currently in src/lib/ and reuseable). - -.Who maintains it - -Despite a spin of I think we don't want to change anything from our current -practice and maintain it by the Lumiera developers. For many parts I feel -responsible for it, but its really a part of the Lumiera codebase, despite -independently useable. - -.How to maintain it - -We need to decide about build system and documentation system. As build system -we may right start using scons. For documentation the situation is a but -different since some of my code uses pipadoc/asciidoc and other uses doxygen. - -.What not to do - -Some of the code is currently quite specific to Lumiera while it could be made -more generic. 
This is *NOT* subject of this RFC we may or may not do such a -refactoring but this RFC and any work resulting from this should only restrict -to simple things like necessary namespace and variable renaming and integration -in the build system. - - -C Parts -------- - -Library -~~~~~~~ -What belongs to the library - -Containers -^^^^^^^^^^ - * cuckoo hashing (cuckoo.c|h) - * linked lists (llist.h slist.h) - * cache lists (mrucache.c|h) - * splay trees (psplay.c|h) - * priority queues (not done yet) - -Runtime tools -^^^^^^^^^^^^^ - * error handling (error.h error.c) used by the other facilities too - * clib convinience wrapers (safeclib.c|h) needs better name, maybe refactor - into new facilities - -Multithreading -^^^^^^^^^^^^^^ - * locking, condition variables etc. (condition.c|h (rec)mutex.c|h, rwlock ...) - -Memory management -^^^^^^^^^^^^^^^^^ - * Memory pools (mpool.c|h) - * Temporary buffers (tmpbuf.c|h) - -Metaprogramming -^^^^^^^^^^^^^^^ - * preprecessor tools (ppmpl.h) move common preprocessor macros here - * polymorphic call helper for C (vcall.h) - -Interface system and module loader -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -except for some hardcoded references to 'lumiera_org' and '.lum' plugin names -this is quite generic, possibly moving this over could be postponed, but might -eventually be done. - -From 'src/common' ------- -interface.c interfacedescriptor.h interface.h interfaceproxy.cpp -interfaceregistry.c interfaceregistry.h plugin.c plugin_dynlib.c plugin.h ------- - - -The 'config' system could become a candidate too if it ever gets finished and -proved useful, but for the time being its better kept in Lumiera. 
- - -Not Library -~~~~~~~~~~~ -Too specific to Lumiera: ------ -luid.c luid.h time.h ------ - - -C++ Parts ---------- - -For most of the C++ parts I am not sure, ichthyo should decided upon these -(please edit this here) - -Library -~~~~~~~ -These look 'generic' or wrap the C parts: ------- -singleton-factory.hpp singleton.hpp singleton-policies.hpp -singleton-preconfigure.hpp singleton-ref.hpp singleton-subclass.hpp -sync-classlock.hpp sync.cpp sync.hpp thread-local.hpp -typed-allocation-manager.hpp typed-counter.hpp util.cpp util-foreach.hpp -util.hpp variant.hpp ------- - -Not Sure -~~~~~~~~ ------- -access-casted.hpp advice advice.hpp allocation-cluster.cpp -allocation-cluster.hpp bool-checkable.hpp cmdline.cpp cmdline.hpp del-stash.hpp -diagnostic-context.hpp element-tracker.hpp error.hpp (currently too -lumiera specific) exception.cpp (as before) factory.hpp format.hpp -frameid.hpp functor-util.hpp handle.hpp hash-indexed.hpp iter-adapter.hpp -iter-adapter-stl.hpp iter-source.hpp itertools.hpp iter-type-binding.hpp -lifecycle.cpp lifecycleregistry.hpp lumitime-fmt.hpp lumitime.hpp -multifact-arg.hpp multifact.hpp meta/* null-value.hpp observable-list.hpp -opaque-holder.hpp optional-ref.hpp p.hpp query.cpp query.hpp ref-array.hpp -ref-array-impl.hpp result.hpp scoped-holder.hpp scoped-holder-transfer.hpp -scoped-ptrvect.hpp searchpath.cpp searchpath.hpp sub-id.hpp symbol.hpp -symbol-impl.cpp visitor-dispatcher.hpp visitor.hpp visitor-policies.hpp -wrapper.hpp wrapperptr.hpp appstate.cpp appstate.hpp basic-setup.cpp -basic-setup.hpp DIR_INFO external guifacade.cpp instancehandle.hpp option.cpp -option.hpp query subsys.cpp subsys.hpp subsystem-runner.hpp ----- - - -Not Library -~~~~~~~~~~~ ------- -logging.cpp nobug-init.cpp nobug-init.hpp streamtype.cpp streamtype.hpp test/* -time/* time.cpp tree.hpp ------ - -Tasks -~~~~~ -// List what needs to be done to implement this Proposal: -// * first step ([green]#✔ done#) - - * decide on name, namespaces 
[,yellow]#WIP# - * create git repository, setup boilerplace (build system, legalese) - [,yellow]#WIP# - * move all code over into the git repos, refactor (namespace renames) () - [,yellow]#WIP# - * make Lumiera use the new lib [,yellow]#WIP# - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -// add a fact list/enumeration which make this suitable: - - * I am already reuse much of its code, making it independent makes maintaining - it less burden - - -Cons -^^^^ -// fact list of the known/considered bad implications: - - * new packages, new dependencies for Lumiera instead 'batteries included' - -Alternatives -^^^^^^^^^^^^ -//alternatives: explain alternatives and tell why they are not viable: - - -Rationale ---------- -//rationale: Give a concise summary why it should be done *this* way: - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - - -//endof_comments: - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_pending/RefactorLiblumieraOut.txt b/doc/devel/rfc_pending/RefactorLiblumieraOut.txt new file mode 120000 index 000000000..20241682f --- /dev/null +++ b/doc/devel/rfc_pending/RefactorLiblumieraOut.txt @@ -0,0 +1 @@ +../rfc/RefactorLiblumieraOut.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/ResourceManagementBudgeting.txt b/doc/devel/rfc_pending/ResourceManagementBudgeting.txt deleted file mode 100644 index ffa488e5c..000000000 --- a/doc/devel/rfc_pending/ResourceManagementBudgeting.txt +++ /dev/null @@ -1,121 +0,0 @@ -Resource Management: Budgeting -============================== - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Fri Jul 23 20:33:32 2010_ -*Proposed by* Christian Thaeter -------------------------------------- - -[abstract] 
-****************************************************************************** -The Profiler will give some Idea about how much Resources can me used to -optimally utilize the system. Knowing this number leads to the next challenge, -distributing the resources to different subsystems, jobs and objects. I here -introduce a budgeting system which takes care for this. -****************************************************************************** - - -Description ------------ -//description: add a detailed description: - -The idea is quite simple, for each kind of resource we have a global budget -manager which accounts for the available and used amounts of this resource. - -Each user of a resource has its own account managing his share on the resource. - -The system is completely voluntary giving only hints how much of a resource is -available for anyone. - - - -[source,C] ------------------------------------------------------------------------------- -typedef ssize_t budget_count; - -struct budgetmanager -{ - rwlock lock; - - void (*callback)(); // called on resource shortage - (resource collector) - - int sum_priorities; // sum of all client budgets .. each - client is granted available/(sum_priorities/own_priority) of the resource - - budget_count available_prefs; // configuration from preferences - budget_count available_profile; // tuned by profiler - int available_factor; // how much % from prefs vs profile - - budget_count available; // caclulated from above - budget_count allocated; // actively in use -}; - -struct budget -{ - BudgetManager manager; - int priority; - - budget_count allocated; -}; ------------------------------------------------------------------------------- - - - - -Tasks -~~~~~ -// List what would need to be done to implement this Proposal in a few words: -// * item ... - - - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -// add just a fact list/enumeration which make this suitable: -// * foo -// * bar ... 
- - - -Cons -^^^^ -// fact list of the known/considered bad implications: - - - -Alternatives -^^^^^^^^^^^^ -//alternatives: if possible explain/link alternatives and tell why they are not - viable: - - - -Rationale ---------- -//rationale: Describe why it should be done *this* way: - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) write some - conclusions about its process: - - - - -Comments --------- -//comments: append below - - -//endof_comments: diff --git a/doc/devel/rfc_pending/ResourceManagementBudgeting.txt b/doc/devel/rfc_pending/ResourceManagementBudgeting.txt new file mode 120000 index 000000000..bff328cb6 --- /dev/null +++ b/doc/devel/rfc_pending/ResourceManagementBudgeting.txt @@ -0,0 +1 @@ +../rfc/ResourceManagementBudgeting.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/ResourceManagementProfiling.txt b/doc/devel/rfc_pending/ResourceManagementProfiling.txt deleted file mode 100644 index bfd1b2731..000000000 --- a/doc/devel/rfc_pending/ResourceManagementProfiling.txt +++ /dev/null @@ -1,182 +0,0 @@ -Resource Management: Profiling -============================== - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Fri Jul 23 19:34:29 2010_ -*Proposed by* Christian Thaeter -------------------------------------- - -[abstract] -****************************************************************************** -From the beginning on we planned some kind of 'profiling' to adapt dynamically -to workload and machine capabilities. I describe here how statistic data can be -gathered in a generic way. This will later work together with other components -tuning the system automatically. 
-****************************************************************************** - - -Description ------------ -//description: add a detailed description: - -I just introduce some ideas about the planned profiling framework here, nothing -is defined/matured yet this is certainly subject for futher discussion and -refinement. - -.Requirements/Evaluation generic:: - Profiling should be sufficiently abstracted to have a single set of - datastructures and algorithms to work on a broad range of subjects - being profiled. Moreover the profiling core just offers unitless - counters, semantic will be added on top of that on a higher level. - - least possible overhead:: - Profiling itself must not cost much, it must not block and should avoid - expensive operations. Simple integer arithmetic without divisions is - suggested. - - accurate:: - We may sample data on in stochastic way to reduce the overhead, - nevertheless data which gets sampled must be accurately stored and - processed without rounding losses and drifts. - - transient values:: - It's quite common that some values can be far off either in maximum or - in minimum direction, the system should adapt to this and recover from - such false alarms. Workload also changes over time we need to find some - way to measure the current/recent workload an grand total over the - whole application runtime is rather uninteresting. While it is also - important that we adapt slow enough not to get into some osccilating - cycle. - - active or passive system:: - Profiling can be only passive collecting data and let it be analyzed by - some other component or active triggering some action when some limits - are reached. I am yet a bit undecided and keep it open for both. 
- - - - - - - -.Brainstorming in Code -[source,C] ------------------------------------------------------------------------------- - -typedef int64_t profile_value; - -struct profile -{ - ProfileVTable vtable; - - /* - Using trylock for sampling makes it never contend on the lock but some - samples are lost. Should be ok. - */ - mutex_t lock; /* with trylock? */ - - - /* statistics / running averages */ - - /* n being a small number 2-5 or so */ - profile_value max[n]; /* n maximum values seen so far, - decreased by recovery */ - profile_value min[n]; /* n minimum values seen so far, - increased by recovery */ - - /* store sum & count, but average calculation implies a division and will be - only done on demand */ - profile_value count; /* count profile calls */ - profile_value sum; /* sum up all calls, average = - sum/count */ - - /* current is the sampled value to be integrated */ - - /* trend is caclulated before theb new run_average */ - profile_value trend; /* trend = (trend + - (run_average-current))>>1 */ - - /* we may need some slower diverging formula for running average */ - profile_value run_average; /* run_average = (run_average + - current)>>1) */ - - - /* active limits, define whats good and whats bad, calls back to vtable when - limit is hit */ - profile_value max_limit; - profile_value min_limit; - /* do we want limits for trends too? */ - - /* we count how often we hit limits, a hit/miss ratio will give a good value - for optimization */ - profile_value hit_cnt; - profile_value high_miss_cnt; - profile_value low_miss_cnt; - - /* recovery state */ - int rec_init; - int rec_current; - int rec_percent; - - - void* extra; -}; ------------------------------------------------------------------------------- - - -Tasks -~~~~~ -// List what would need to be done to implement this Proposal in a few words: -// * item ... - - - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -// add just a fact list/enumeration which make this suitable: -// * foo -// * bar ... 
- - - -Cons -^^^^ -// fact list of the known/considered bad implications: - - - -Alternatives -^^^^^^^^^^^^ -//alternatives: if possible explain/link alternatives and tell why they are not - viable: - - - -Rationale ---------- -//rationale: Describe why it should be done *this* way: - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) write some - conclusions about its process: - - - - -Comments --------- -//comments: append below - - -//endof_comments: diff --git a/doc/devel/rfc_pending/ResourceManagementProfiling.txt b/doc/devel/rfc_pending/ResourceManagementProfiling.txt new file mode 120000 index 000000000..022da4af2 --- /dev/null +++ b/doc/devel/rfc_pending/ResourceManagementProfiling.txt @@ -0,0 +1 @@ +../rfc/ResourceManagementProfiling.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/SemanticTags.txt b/doc/devel/rfc_pending/SemanticTags.txt new file mode 120000 index 000000000..661e961b3 --- /dev/null +++ b/doc/devel/rfc_pending/SemanticTags.txt @@ -0,0 +1 @@ +../rfc/SemanticTags.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/StreamTypeSystem.txt b/doc/devel/rfc_pending/StreamTypeSystem.txt deleted file mode 100644 index 12323606f..000000000 --- a/doc/devel/rfc_pending/StreamTypeSystem.txt +++ /dev/null @@ -1,268 +0,0 @@ -Stream Type System -================== - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Draft_ -*Date* _2008-10-05_ -*Proposed by* link:Ichthyostega[] -------------------------------------- - - - -******************************************************************************** -.Abstract -Especially in the Proc-Layer, within the Builder and at the interface to the -Engine we need sort of a framework to deal with different »kinds« of -media streams. + -This is the foundation to be able to define what can be connected and to -separate out generic parts and isolate specific parts. 
-******************************************************************************** - - -Description ------------ -//description: add a detailed description: -The general idea is that we need meta information, and -- more precisely -- -that _we_ need to control the structure of this metadata. Because it has -immediate consequences on the way the code can test and select the appropriate -path to deal with some data or a given case. This brings us in a difficult -situation: - - * almost everything regarding media data and media handling is notoriously - convoluted - * because we can't hope ever to find a general umbrella, we need an extensible - solution - * we want to build on existing libraries rather then re-inventing media - processing. - * a library well suited for some processing task not necessarily has a type - classification system which fits our needs. - -The proposed solution is to create an internal Stream Type System which acts as -a bridge to the detailed (implementation type) classification provided by the -library(s). Moreover, the approach was chosen especially in a way as to play -well with the rule based configuration, which is envisioned to play a central -role for some of the more advanced things possible within the session. - - -Terminology -~~~~~~~~~~~ - * *Media* is comprised of a set of streams or channels - * *Stream* denotes a homogeneous flow of media data of a single kind - * *Channel* denotes an elementary stream, which -- _in the given context_ -- - can't be decomposed any further - * all of these are delivered and processed in a smallest unit called *Frame*. - Each frame corresponds to a time interval. - * a *Buffer* is a data structure capable of holding one or multiple Frames of media data. 
- * the *Stream Type* describes the kind of media data contained in the stream - - -Concept of a Stream Type -~~~~~~~~~~~~~~~~~~~~~~~~ - -The Goal of our Stream Type system is to provide a framework for precisely -describing the ``kind'' of a media stream at hand. The central idea is to -structure the description/classification of streams into several levels. -A complete stream type (implemented by a stream type descriptor) contains -a tag or selection regarding each of these levels. - -Levels of classification -^^^^^^^^^^^^^^^^^^^^^^^^ - - * Each media belongs to a fundamental *kind of media*, examples being _Video, - Image, Audio, MIDI, Text,..._ This is a simple Enum. - * Below the level of distinct kinds of media streams, within every kind we - have an open ended collection of *Prototypes*, which, within the high-level - model and for the purpose of wiring, act like the "overall type" of the - media stream. Everything belonging to a given Prototype is considered to be - roughly equivalent and can be linked together by automatic, lossless - conversions. Examples for Prototypes are: stereoscopic (3D) video versus the - common flat video lacking depth information, spatial audio systems - (Ambisonics, Wave Field Synthesis), panorama simulating sound systems (5.1, - 7.1,...), binaural, stereophonic and monaural audio. - * Besides the distinction by prototypes, there are the various *media - implementation types*. This classification is not necessarily hierarchically - related to the prototype classification, while in practice commonly there - will be some sort of dependency. For example, both stereophonic and monaural - audio may be implemented as 96kHz 24bit PCM with just a different number of - channel streams, but we may as well have a dedicated stereo audio stream - with two channels multiplexed into a single stream. 
- - -Working with media stream implementations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For dealing with media streams of various implementation type, we need -_library_ routines, which also yield a _type classification system_ suitable -for their intended use. Most notably, for raw sound and video data we use the -http://gmerlin.sourceforge.net/[GAVL] library, which defines a fairly complete -classification system for buffers and streams. For the relevant operations in -the Proc-Layer, we access each such library by means of a Façade; it may sound -surprising, but actually we just need to access a very limited set of -operations, like allocating a buffer. _Within_ the Proc-Layer, the actual -implementation type is mostly opaque; all we need to know is if we can connect -two streams and get an conversion plugin. - -Thus, to integrate an external library into Lumiera, we need explicitly to -implement such a Lib Façade for this specific case, but the intention is to be -able to add this Lib Façade implementation as a plugin (more precisely as a -"Feature Bundle", because it probably includes several plugins and some -additional rules) - - -Link between implementation type and prototype -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -At this point the rules based configuration comes into play. Mostly, to start -with, determining a suitable prototype for a given implementation type is sort -of a tagging operation. But it can be supported by heuristic rules and an -flexible configuration of defaults. For example, if confronted with a media -with 6 sound channels, we simply can't tell if it's a 5.1 sound source, or if -it's a pre mixed orchestra music arrangement to be routed to the final balance -mixing or if it's a prepared set of spot pick-ups and overdubbed dialogue. 
But a -heuristic rule defaulting to 5.1 would be a good starting point, while -individual projects should be able to set up very specific additional rules -(probably based on some internal tags, conventions on the source folder or the -like) to get a smooth workflow. - -Moreover, the set of prototypes is deliberately kept open ended. Because some -projects need much more fine grained control than others. For example, it may -be sufficient to subsume any video under a single prototype and just rely on -automatic conversions, while other projects may want to distinguish between -digitized film and video NTSC and PAL. Meaning they would be kept in separate -pipes an couldn't be mixed automatically without manual intervention. - - -connections and conversions -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * It is _impossible to connect_ media streams of different kind. Under some - circumstances there may be the possibility of a _transformation_ though. For - example, sound may be visualized, MIDI may control a sound synthesizer, - subtitle text may be rendered to a video overlay. Anyway, this includes some - degree of manual intervention. - * Streams subsumed by the same prototype may be _converted_ lossless and - automatically. Streams tagged with differing prototypes may be _rendered_ - into each other. - * Conversions and judging the possibility of making connections at the level - of implementation types is coupled tightly to the used library; indeed, most - of the work to provide a Lib Façade consists of coming up with a generic - scheme to decide this question for media streams implemented by this - library. 
- - -Tasks -~~~~~ -// List what needs to be done to implement this Proposal: - * draft the interfaces ([green]#✔ done#) - * define a fall-back and some basic behaviour for the relation between - implementation type and prototypes [,yellow]#WIP# - * find out if it is necessary to refer to types in a symbolic manner, or if it - is sufficient to have a ref to a descriptor record or Façade object. - * provide a Lib Façade for GAVL [,yellow]#WIP# - * evaluate if it's a good idea to handle (still) images as a separate distinct - kind of media - - - -Discussion -~~~~~~~~~~ - -Alternatives -^^^^^^^^^^^^ -//alternatives: explain alternatives and tell why they are not viable: -Instead of representing types by metadata, leave the distinction implicit and -instead implement the different behaviour directly in code. Have video tracks -and audio tracks. Make video clip objects and audio clip objects, each -utilising some specific flags, like sound is mono or stereo. Then either -switch, switch-on-type or scatter out the code into a bunch of virtual -functions. See the Cinelerra source code for details. - -In short, following this route, Lumiera would be plagued by the same notorious -problems as most existing video/sound editing software. Which is, implicitly -assuming ``everyone'' just does ``normal'' things. Of course, users always were -and always will be clever enough to work around this assumption, but the problem -is, all those efforts will mostly stay isolated and can't crystallise into a -reusable extension. Users will do manual tricks, use some scripting or rely on -project organisation and conventions, which in turn creates more and more -coercion for the ``average'' user to just do ``normal'' things. - -To make it clear: both approaches discussed here do work in practice, and it's -more a cultural issue, not a question guided by technical necessities to select -the one or the other. 
- - -Rationale ---------- -//rationale: Give a concise summary why it should be done *this* way: - - * use type metadata to factor out generic behaviour and make variations in - behaviour configurable. - * don't use a single classification scheme, because we deal with distinctions - and decisions on different levels of abstraction - * don't try to create an universal classification of media implementation type - properties, rather rely on the implementation libraries to provide already a - classification scheme well suited for _their_ needs. - * decouple the part of the classification guiding the decisions on the level - of the high level model from the raw implementation types, reduce the former - to a tagging operation. - * provide the possibility to incorporate very project specific knowledge as - rules. - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below -As usual, see the -http://www.lumiera.org/wiki/renderengine.html#StreamType[Proc-Layer impl doku] -for more information and implementation details. - -Practical implementation related note: I found I was blocked by this one in -further working out the details of the processing nodes wiring, and thus make -any advance on the builder and thus to know more precisely how to organize the -objects in the link:EDL/Session[]. Because I need a way to define a viable -abstraction for getting a buffer and working on frames. The reason is not -immediately obvious (because initially you could just use an opaque type). The -problem is related to the question what kind of structures I can assume for the -builder to work on for deciding on connections. Because at this point, the -high-level view (pipes) and the low level view (processing functions with a -number of inputs and outputs) need in some way to be connected. 
- -The fact that we don't have a rule based system for deciding queries currently -is not much of a problem. A table with some pre configured default answers for -a small number of common query cases is enough to get the first clip rendered. -(Such a solution is already in place and working.) + - -- link:Ichthyostega[] 2008-10-05 - -Woops fast note, I didn't read this proposal completely yet. Stream types could -or maybe should be coopertatively handled together with the backend. Basically -the backend offers one to access regions of a file in a continous block, this -regions are addressed as "frames" (this are not necessary video frames). The -backend will keep indices which associate this memory management with the frame -number, plus adding the capabilitiy of per frame metadata. This indices get -abstracted by "indexing engines" it will be possible to have different kinds of -indices over one file (for example, one enumerating single frames, one -enumerating keyframes or gops). Such a indexing engine would be also the place -to attach per media metadata. From the proc layer it can then look like `struct -frameinfo* get_frame(unsigned num)` where `struct frameinfo` (not yet defined) -is something like `{ void* data; size_t size; struct metadata* meta; ...}` + - -- link:ct[] 2008-10-06 - -Needs Work -~~~~~~~~~~ -There are a lot details to be worked out for an actual implementation but we -agreed that we want this concept as proposed here. 
- - Do 14 Apr 2011 03:06:42 CEST Christian Thaeter - - -//endof_comments: - -'''' -Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview] diff --git a/doc/devel/rfc_pending/StreamTypeSystem.txt b/doc/devel/rfc_pending/StreamTypeSystem.txt new file mode 120000 index 000000000..8b989a554 --- /dev/null +++ b/doc/devel/rfc_pending/StreamTypeSystem.txt @@ -0,0 +1 @@ +../rfc/StreamTypeSystem.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/SystematicMetadata.txt b/doc/devel/rfc_pending/SystematicMetadata.txt new file mode 120000 index 000000000..e4e30fde0 --- /dev/null +++ b/doc/devel/rfc_pending/SystematicMetadata.txt @@ -0,0 +1 @@ +../rfc/SystematicMetadata.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/WebsiteNavigation.txt b/doc/devel/rfc_pending/WebsiteNavigation.txt deleted file mode 100644 index 172b6543f..000000000 --- a/doc/devel/rfc_pending/WebsiteNavigation.txt +++ /dev/null @@ -1,160 +0,0 @@ -WebsiteNavigation -================= - -// please don't remove the //word: comments - -[grid="all"] -`------------`----------------------- -*State* _Idea_ -*Date* _Mi 08 Dez 2010 11:32:32 CET_ -*Proposed by* Ichthyostega -------------------------------------- - -[abstract] -******************************************************************************** -The Lumiera website is assumed to accumulate a lot of content. Thus we need -to care about making that content accessible, to help finding the relevant -topics and to keep the overall structure intact. This RfC is to collect, -discuss and agree upon the guidelines and requirements. -******************************************************************************** - -Description ------------ -//description: add a detailed description: - -Issues to care -~~~~~~~~~~~~~~ - -Navigation:: - The page hierarchy becomes at least 5 levels deep, likely even deeper. 
- When reading a page, the current subtree leading down to this page should - be right at hand; especially access to the siblings and the parent's siblings - is important. For re-accessing content, it is necessary to be able to drill - down to an known location (``within the design docs, detailing the application, - I need the configuration section'') + - -> we need an *auto generated navigation* and an embedded *menu tree widget* in the web pages. - -Tagging:: - There should be an easy way to categorise individual pages *by keyword(s)* - and an automatically generated indexing by tags, possibly with an per tag - overview page. - -Search:: - The usual *site search*. It should include the contents of the issue tracker. - Even today such a scoped search is valuable and even necessary for working - with the informations collected within the Lumiera project - -Sanity:: - Each relevant page needs to be reachable. There are some additional pages and - especially subdirectories which should not be linked into the website navigation. - Moreover, all (internal) links on the pages should be valid. + - -> this could be addressed by a **sanity checker script** - -Usage situations -~~~~~~~~~~~~~~~~ - -(a) working on content -^^^^^^^^^^^^^^^^^^^^^^ -Working on content should be readily accessible for _everyone_. One time contributions -are especially encouraged. This leads to the following usage scenario: - -A contributor has some informations to share or wants to do some additions or modifications. -(S)he locates somehow the place where relevant informations are stored, adds some text, -possibly adds a new page or splits another page in two. - -_Note_: no awareness of the issues of navigation can be assumed. The occasional contributor -won't notice any concern which isn't right at hand. - -(b) maintaining a subsystem -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Some person(s) will be responsible for a subsystem or some segment of the informations -on the website. 
This responsibility is content centric. It might include frequent rearranging, -regrouping and reordering of pages to accommodate the increasing scope of informations. - -_Note_: while here some awareness of website organisational issues can be assumed, -any requirement to care for external organisational issues is a burden and distracts -from the actual work to be done -- thus it is likely to be short circuited or postponed -``for later''. Note especially, reorganising content in a subsection *must not* incur -the burden of re-doing the same reorganisation steps mirrored in some central navigation -configuration or table of contents. (this is a knock out criterion) - -(c) maintaining the website -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The website maintainer is responsible for the overall sanity of the website, without -being familiar with all details of ongoing work in some part or section of the information. -Another concern here is the outward perception of the website, which might incur changes -on the top level navigation or some rewording of overview pages. - -_Note_: this kind of work is rather unrewarding. 
There is the danger of collisions with the -work of the subsystem maintainer - - -Conclusion: Requirements for any navigation solution -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * ability to to pick up a nested page structure - * ability to cope with any additions and changes in the lower levels automatically, without help by the user - * ability to override: - - - not including some subdirectories - - including links-to-external at arbitrary positions - -optional/additional features -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The following features would be handy, but can be considered optional - - * ability to change the displayed title of a page in the navigation - * ability to control the ordering of pages in the navigation - * complete manual override of the visible content of a specific subdirectory - - - -Tasks -~~~~~ -// List what would need to be done to implement this Proposal in a few words: -// * item ... - - - -Discussion -~~~~~~~~~~ - -Pros -^^^^ -// add just a fact list/enumeration which make this suitable: -// * foo -// * bar ... 
- - - -Cons -^^^^ -// fact list of the known/considered bad implications: - - - -Alternatives -^^^^^^^^^^^^ -//alternatives: explain alternatives and tell why they are not viable: - - - -Rationale ---------- -//rationale: Describe why it should be done *this* way: - - - -//Conclusion -//---------- -//conclusion: When approbate (this proposal becomes a Final) -// write some conclusions about its process: - - - - -Comments --------- -//comments: append below - - -//endof_comments: diff --git a/doc/devel/rfc_pending/WebsiteNavigation.txt b/doc/devel/rfc_pending/WebsiteNavigation.txt new file mode 120000 index 000000000..d9f14895f --- /dev/null +++ b/doc/devel/rfc_pending/WebsiteNavigation.txt @@ -0,0 +1 @@ +../rfc/WebsiteNavigation.txt \ No newline at end of file diff --git a/doc/devel/rfc_pending/WebsiteSupportMarkup.txt b/doc/devel/rfc_pending/WebsiteSupportMarkup.txt new file mode 120000 index 000000000..b0601a272 --- /dev/null +++ b/doc/devel/rfc_pending/WebsiteSupportMarkup.txt @@ -0,0 +1 @@ +../rfc/WebsiteSupportMarkup.txt \ No newline at end of file diff --git a/doc/devel/template/new_rfc.sh b/doc/devel/template/new_rfc.sh index eccaecc8d..a78af057b 100644 --- a/doc/devel/template/new_rfc.sh +++ b/doc/devel/template/new_rfc.sh @@ -27,7 +27,8 @@ Tasks ~~~~~ // List what needs to be done to implement this Proposal: // * first step ([green]#✔ done#) -// * second step [,yellow]#WIP# +// * second step [yellow-background]#WIP# +// * third step [red yellow-background]#TBD# Discussion diff --git a/doc/index.txt b/doc/index.txt index 36ab3139a..268479de4 100644 --- a/doc/index.txt +++ b/doc/index.txt @@ -4,12 +4,16 @@ This documentation section contains documentation for both users and developers. 
== User == -The user manual containing the full reference for using Lumiera : +As a start, you should visit the introductory text +link:user/intro/intro.html[Lumiera (as seen) from Outer Space] + +A user manual containing the full reference for using Lumiera is planned: * link:user/manual.html[User Manual] _(planned)_ == Developer == -Informations about the structure of Lumiera, they give an overview on the main components of the application : +Information about the structure and concepts Lumiera is based upon. +These documents give an overview on the main components of the application: * link:design/index.html[Design Documents] diff --git a/doc/technical/infra/rfcsh.txt b/doc/technical/infra/rfcsh.txt index 07790f7c2..a75fb3640 100644 --- a/doc/technical/infra/rfcsh.txt +++ b/doc/technical/infra/rfcsh.txt @@ -1,12 +1,6 @@ Lumiera RFC maintenance script ============================== -// Note: the source of this documentation is maintained -// directly admin/rfc.sh in its usage() function -// edit it only there and then regenerate -// doc/devel/technical/infra/ with: -// ./admin/rfc.sh help >doc/technical/infra/rfcsh.txt - Usage ----- diff --git a/doc/user/index.txt b/doc/user/index.txt index 6b967999c..f52a9f06d 100644 --- a/doc/user/index.txt +++ b/doc/user/index.txt @@ -5,7 +5,7 @@ User Documentation // Menu : attach child 'manual' after 'intro' * link:manual.html[User Manual] _(planned)_ - * The following document might become an introductory overview: + + * The following document contains an introductory overview: + link:intro/intro.html[Lumiera (as seen) from Outer Space] * link:intro/Glossary.html[Glossary of common terms] diff --git a/doc/user/intro/Glossary.txt b/doc/user/intro/Glossary.txt index 9f5f7c2ea..cd2ba0636 100644 --- a/doc/user/intro/Glossary.txt +++ b/doc/user/intro/Glossary.txt @@ -2,15 +2,142 @@ Glossary ======== -'NOTE Draft, please help rephrase/review and sort this terms, shorten +'NOTE Draft, please help rephrase/review and shorten 
explanations, the long explanation is the topic of the document above..' + anchor:AssetsView[] link:#AssetsView[->]Assets View:: + Windows displaying and managing various things that can be edited, + worked on, etc are collectively known as the Assets View. + AssetsView consists of ingested footage, clips that have already been composed, available + sub-projects, effects, transitions and internal artifacts. + + anchor:Builder[] link:#Builder[->]Builder:: + This is a kind of compiler that creates low-level, or processing, graphs + by traversing and evaluating the relevant parts of the high-level-model + and using the Rules System. + + anchor:Busses[] link:#Busses[->]Busses:: + A list of 'global Pipes' representing the possible outputs (master + busses) similar to audio mixing desk. A bus defines the properties of + the rendered output (Framerate, Resolution, Colorformat and so on). + Busses are part of a Timeline. + + anchor:ConfigSystem_Preferences[] link:#ConfigSystem_Preferences[->]Config System/Preferences:: + TODO: agree on one term here + Provides defaults for all kinds of application configurations. These + include machine specific configurations for performance + characteristics, File and Plugins Paths and configuration data and so + on. Note that this only provides defaults for data that has not already + been set. Many settings will then be stored within the project which + override Config/Preferences. + + anchor:ControllerGui[] link:#ControllerGui[->]Controller Gui:: + This can be either a full Software implementation for a Transport + control (Widgets for Start/Stop/Rev/Ffw etc) or some Gui managing an + Input Device. 
They share some feature to attach them to controllable + gui-entities (Viewers, Timeline Views) + + anchor:Cursor[] link:#Cursor[->]Cursor:: + Playback- or edit position + + anchor:Focus[] link:#Focus[->]Focus:: + TBD + + anchor:HighLevelModel[] link:#HighLevelModel[->]High Level Model:: + All the session content to be edited and manipulated by the user + through the GUI. The high-level-model will be translated by the + Builder into the Low Level Model for rendering. + + anchor:InputDevice[] link:#InputDevice[->]Input Device:: + Some hardware controller, like an extra keyboard, midi mixer, Jog, .. + TODO: we still need to decide on whether we can treat the main keyboard + as special global state or whether we can handle the keyboard as a generic + input device. + + anchor:LowLevelModel[] link:#LowLevelModel[->]Low Level Model:: + The generated Processing Graph, to be ``performed'' within the engine + to yield rendered output + + anchor:MediaStream[] link:#MediaStream[->]MediaStream:: + Media data is supposed to appear structured as stream(s) over time. + While there may be an inherent internal structuring, at a given + perspective any stream is a unit and homogeneous. In the context of + digital media data processing, streams are always quantized, which means + they appear as a temporal sequence of data chunks called frames. + + anchor:OutputDesignation[] link:#OutputDesignation[->]OutputDesignation:: + A specification denoting where to connect the output of a pipe. + It might either be given _absolutely_, i.e. as a Pipe-ID, + or by a _relative_ or an _indirect_ specification + + anchor:OutputManager[] link:#OutputManager[->]OutputManager:: + Manages all external outputs of the application and provides output + slots targeting these. + + anchor:OutputMapping[] link:#OutputMapping[->]OutputMapping:: + Diverts one output designation into another designation, e.g. when hooking + up a sequence as a virtual clip within another sequence. 
+ + anchor:OutputSlot[] link:#OutputSlot[->]OutputSlot:: + Opaque descriptor for an output facility, ready to dispose frames + of data to be output. + + anchor:Pipe[] link:#Pipe[->]Pipe:: + Conceptual building block of the high-level model. It can be thought + of as simple linear processing chain. A stream can be 'sent to' a + pipe, in which case it will be mixed in at the input, and you can + 'plug' the output of a pipe to another destination. Furthermore, effects + or processors can be attached to the pipe. In addition to global pipes + (busses) in each Timeline, each clip automatically creates N pipes + (one for each distinct content stream. Typically N=2, for video and + audio) + + anchor:Placement[] link:#Placement[->]Placement:: + A Placement represents a relation: it is always linked to a Subject + (this being a Media Object) and has the meaning to place this Subject + in some manner, either relatively to other Media Objects, by some + Constraint or simply absolute at (time, output). Placements are used + to stitch together the objects in the high-level-model. Placements + thus are organised hierarchically and need to be _resolved_ to obtain + a specific value (time point, output routing, layering, fade,...) + + anchor:PlayController[] link:#PlayController[->]PlayController:: + coordinating playback, cueing and rewinding of a playback position, + visible as 'Playhead' cursor in the GUI. When in play state, a + PlayController requests and directs a render process to deliver the + media data needed for playback. + + anchor:ProcessingGraph[] link:#ProcessingGraph[->]Processing Graph:: + Rendering is expressed as a detailed network of nodes. The edges can be + envisaged as data flow, while the nodes represent data processing. anchor:Project[] link:#Project[->]Project:: - the top-level context in which all edit work is done over an extended - period of time. The Project can be saved and re-opened. 
It is - comprised of the collection of all things the user is working on, it - contains all informations, assets, state and objects to be edited. + The top-level context in which all editing is done over an extended + period of time. A Project can be saved and re-opened. It consists of + various things a user is working on: user-information, assets, state and + objects to be edited. + + anchor:RenderTask[] link:#RenderTask[->]RenderTask:: + This is basically a PlayController, but directly collects output without + moving a PlayheadCursor (could be a progress indicator) and not operating + in a timed fashion, but freewheeling or in background mode + + anchor:RulesSystem[] link:#RulesSystem[->]Rules System:: + Translating the Timeline to the underlying Processing Graphs involves + some logic and knowledge about handling/converting data. This may be + configued with this Rules System. Typically Lumiera will provide sane + defaults for most purposes but may extended/refined for site specific + things. + + anchor:Sequence[] link:#Sequence[->]Sequence:: + A collection of *Media Objects* (clips, effects, transitions, labels, + automation) placed onto a tree of tracks. By means of this placement, + the objects could be anchored relative to each other, relative to + external objects, absolute in time. A sequence can connect to + global pipes when used as a top-level sequence within a timeline, or + alternatively it can act as a virtual-media when used within a + meta-clip (nested sequence). A Sequence by default contains just a + single root track and directly sends to the master bus of the Timeline. anchor:Session[] link:#Session[->]Session:: the current in-memory representation of the Project when opened within @@ -18,6 +145,51 @@ explanations, the long explanation is the topic of the document above..' the GUI and the users POV we should always prefer the term "Project" for the general concept. 
+ anchor:StreamType[] link:#StreamType[->]StreamType:: + Classification of a media stream. StreamType is a descriptor record. + While external media processing libraries usually do provide some kind + of classification already, within lumiera we rely on an uniform yet + abstract classification which is owned by the project and geared to + fit the internal needs, especially for the wiring and connecting. + A Lumiera stream type is comprised of the parts + - media kind (Video, Image, Audio, MIDI, Text,... ) + - prototype (open ended collection of semantical kinds of media, + examples being stereoscopic, periphonic, monaural, binaural, + film quality, TV, youtube). + - implementation type (e.g. 96kHz 24bit PCM, 2 channels muxed) + - intention tag (Source, Raw, Intermediary and Target) + + anchor:TimeAxis[] link:#TimeAxis[->]Time Axis:: + An entity defining the temporal properties of a timeline. A time axis + defines the time base, kind of timecode and absolute anchor point. + Besides, it manages a set of frame quantisation grids, corresponding + to the outputs configured for this timeline (through the global + busses). The GUI representation is a time ruler with configurable time + ticks showed on top of the timeline view + + anchor:Timeline[] link:#Timeline[->]Timeline:: + the top level element(s) within the Project. It is visible within a + 'timeline view' in the GUI and represents the effective (resulting) + arrangement of media objects, resolved to a finite time axis, to be + rendered for output or viewed in a Monitor (viewer window). + Timeline(s) are top-level and may not be further combined. A timeline + is comprised of: + * Time axis, defining the time base + * Play Controller (WIP: discussion if thats belongs to the timeline + and if we want a 1:N relation here). Note by Ichthyo: yes, our + current discussion showed us that a play controller rather gets + allocated to a timeline, but isn't contained therein. + * global pipes, i.e. 
global busses like in a mixing desk + * exactly one top level Sequence + + anchor:TimelineSegment[] link:#TimelineSegment[->]Timeline Segment:: + A range in the timeline which yields in one Processing graph, commonly + the range between cut points (which require a reconfiguration of the + graph). + +// Note by Ichthyo: "Extent" sounds somewhat cool, just it didn't occur to me as a term. +// We may well agree on it, if "extent" communicates the meaning better. Up to now, I called it "segment" + anchor:TimelineView[] link:#TimelineView[->]Timeline View:: A view in the GUI featuring a given Timeline. There might be multiple views of the same timeline, all sharing the same PlayController. A @@ -38,185 +210,16 @@ explanations, the long explanation is the topic of the document above..' patchbay, that is not the main purpose and they can do things beyond that.. - anchor:Timeline[] link:#Timeline[->]Timeline:: - the top level element(s) within the Project. It is visible within a - 'timeline view' in the GUI and represents the effective (resulting) - arrangement of media objects, resolved to a finite time axis, to be - rendered for output or viewed in a Monitor (viewer window). - Timeline(s) are top-level and may not be further combined. A timeline - is comprised of: - * Time axis, defining the time base - * Play Controller (WIP: discussion if thats belongs to the timeline - and if we want a 1:N relation here). Note by Ichthyo: yes, our - current discussion showed us that a play controller rather gets - allocated to a timeline, but isn't contained therein. - * global pipes, i.e. global busses like in a mixing desk - * exactly one top level Sequence - - anchor:TimeAxis[] link:#TimeAxis[->]Time Axis:: - An entity defining the temporal properties of a timeline. A time axis - defines the time base, kind of timecode and absolute anchor point. 
- Besides, it manages a set of frame quantisation grids, corresponding - to the outputs configured for this timeline (through the global - busses). The GUI representation is a time ruler with configurable time - ticks showed on top of the timeline view - - anchor:Busses[] link:#Busses[->]Busses:: - A list of 'global Pipes' representing the possible outputs (master - busses) similar to audio mixing desk. A bus defines the properties of - the rendered output (Framerate, Resolution, Colorformat and so on). - Busses are part of a Timeline. - - anchor:Sequence[] link:#Sequence[->]Sequence:: - A collection of *Media Objects* (clips, effects, transitions, labels, - automation) placed onto a tree of tracks. By means of this placement, - the objects could be anchored relative to each other, relative to - external objects, absolute in time. A sequence can connect to the - global pipes when used as top-level sequence within a timeline, or - alternatively it can act as a virtual-media when used within a - meta-clip (nested sequence). In the default configuration, a Sequence - contains just a single root track and sends directly to the master bus - of the timeline. - - anchor:Placement[] link:#Placement[->]Placement:: - A Placement represents a relation: it is always linked to a Subject - (this being a Media Object) and has the meaning to place this Subject - in some manner, either relatively to other Media Objects, by some - Constraint or simply absolute at (time, output). Placements are used - to stitch together the objects in the high-level-model. Placements - thus are organised hierarchically and need to be _resolved_ to obtain - a specific value (time point, output routing, layering, fade,...) - - anchor:Pipe[] link:#Pipe[->]Pipe:: - Conceptual building block of the high-level model. It can be thought - off as simple linear processing chain. 
A stream can be 'sent to' a - pipe, in which case it will be mixed in at the input, and you can - 'plug' the output of a pipe to another destination. Further, effects - or processors can be attached to the pipe. Besides the global pipes - (busses) in each Timeline, each clip automatically creates N pipes - (one for each distinct content stream. Typically N=2, for video and - audio) - - anchor:MediaStream[] link:#MediaStream[->]MediaStream:: - Media data is supposed to appear structured as stream(s) over time. - While there may be an inherent internal structuring, at a given - perspective any stream is a unit and homogeneous. In the context of - digital media data processing, streams are always quantized, which means - they appear as a temporal sequence of data chunks called frames. - - anchor:StreamType[] link:#StreamType[->]StreamType:: - Classification of a media stream. StreamType is a descriptor record. - While external media processing libraries usually do provide some kind - of classification already, within lumiera we rely on an uniform yet - abstract classification which is owned by the project and geared to - fit the internal needs, especially for the wiring and connecting. - A Lumiera stream type is comprised of the parts - - media kind (Video, Image, Audio, MIDI, Text,... ) - - prototype (open ended collection of semantical kinds of media, - examples being stereoscopic, periphonic, monaural, binaural, - film quality, TV, youtube). - - implementation type (e.g. 96kHz 24bit PCM, 2 channels muxed) - - intention tag (Source, Raw, Intermediary and Target) - - anchor:OutputDesignation[] link:#OutputDesignation[->]OutputDesignation:: - A specification denoting where to connect the output of a pipe. - It might either be given _absoulutely_, i.e as Pipe-ID, - or by an _relative_ or _indirect_ specification - - anchor:OutputMapping[] link:#OutputMapping[->]OutputMapping:: - translates one output designation into another one, e.g. 
when hooking - up a sequence as virtual clip within another sequence - - anchor:OutputSlot[] link:#OutputSlot[->]OutputSlot:: - opaque descriptor for an output facility, ready to dispose frames - of data to be output. - - anchor:OutputManager[] link:#OutputManager[->]OutputManager:: - manages all external outputs of the application and provides output - slots targetting these. - - anchor:PlayController[] link:#PlayController[->]PlayController:: - coordinating playback, cueing and rewinding of a playback position, - visible as 'Playhead' cursor in the GUI. When in play state, a - PlayController requests and directs a render process to deliver the - media data needed for playback. - - anchor:RenderTask[] link:#RenderTask[->]RenderTask:: - basically a PlayController, but collecting output directly, without - moving a PlayheadCursor (maybe a progress indicator) and not operating - in a timed fashion, but freewheeling or in background mode - - anchor:ControllerGui[] link:#ControllerGui[->]Controller Gui:: - This can be either a full Software implementation for a Transport - control (Widgets for Start/Stop/Rev/Ffw etc) or some Gui managing an - Input Device. They share some feature to attach them to controllable - gui-entities (Viewers, Timeline Views) - anchor:Viewer[] link:#Viewer[->]Viewer:: - the display destination showing video frame and possibly some effect + The display destination showing video frame and possibly some effect overlays (masking etc.). When attached to a timeline, a viewer reflects the state of the timeline's associated PlayController, and it attaches to the timeline's global pipes (stream-type match or - explicitly), showing video as monitor image and sending audio to the - system audio port. 
Possible extensions are for a viewer to be able to - attach to probe points within the render network, to show a second - stream as (partial) overlay for comparison, or to be collapsed to a - mere control for sending video to a dedicated monitor (separate X - display or firewire) - - anchor:HighLevelModel[] link:#HighLevelModel[->]High Level Model:: - All the session content to be edited and manipulated by the user - through the GUI. The high-level-model will be translated by the - Builder into the Low Level Model for rendering. - - anchor:LowLevelModel[] link:#LowLevelModel[->]Low Level Model:: - The generated Processing Graph, to be ``performed'' within the engine - to yield rendered output - - anchor:Builder[] link:#Builder[->]Builder:: - A kind of compiler which creates Low Level/Processing Graphs, by - traversing and evaluating the relevant parts of the high-level-model - and using the Rules System. - - anchor:TimelineSegment[] link:#TimelineSegment[->]Timeline Segment:: - A range in the timeline which yields in one Processing graph, commonly - the range between cut points (which require a reconfiguration of the - graph). - -// Note by Ichthyo: "Extent" sounds somewhat cool, just it didn't occur to me as a term. -// We may well agree on it, if "extent" communicates the meaning better. Up to now, I called it "segment" - - anchor:AssetsView[] link:#AssetsView[->]Assets View:: - The windows showing and managing the available things to work with. - This are the ingested footage, already composed Clips, available - Sub-Projects, Effects, Transitions and internal artefacts. - - anchor:RulesSystem[] link:#RulesSystem[->]Rules System:: - Translating the Timeline to the underlying Processing Graphs involves - some logic and knowledge about handling/converting data. This may be - configued with this Rules System. Typically Lumiera will provide sane - defaults for most purposes but may extended/refined for site specific - things. 
- - anchor:ProcessingGraph[] link:#ProcessingGraph[->]Processing Graph:: - Rendering is expressed as detailed network of Nodes, each defining a - processing step. - - anchor:ConfigSystem_Preferences[] link:#ConfigSystem_Preferences[->]Config System/Preferences:: - TODO: agree on one term here - Provides defaults for all kinds of application configurations. These - include machine specific configurations for performance - characteristics, File and Plugins Paths and configuration data and so - on. Note that this only provides defaults for otherwise not yet set - data. Many settings will then be stored within the project and the - Config/Preferences becomes overridden by that. - - anchor:InputDevice[] link:#InputDevice[->]Input Device:: - some hardware controler, like a extra Keyboard, Midi Mixer, Jog, .. - TODO: decide if the main keyboard as special (global) state. - - anchor:Focus[] link:#Focus[->]Focus:: - TBD - - anchor:Cursor[] link:#Cursor[->]Cursor:: - playback- or edit position + explicitly), showing video as a monitor image and sending audio to the + system audio port. A number of supplimentary features are possible: the + viewer could support the ability to attach to view points within the + render network; the capability to display a second stream as an overlay + (partially) which would enable user comparison or it might even be + possible to allow the Viewer to be collapsed to a control, thus, + allowing a video to be sent to a dedicated monitor, e.g., to a separate + X/-display or to a firewire. 
diff --git a/doc/user/intro/intro.txt b/doc/user/intro/intro.txt old mode 100644 new mode 100755 index dcbf377ab..6e77f2523 --- a/doc/user/intro/intro.txt +++ b/doc/user/intro/intro.txt @@ -2,32 +2,33 @@ Lumiera (as seen) from Outer Space ================================== :Author: Lumiera_Core_Developers :Date: Summer 2010 - + [abstract] ****************************************************************************** -The Lumiera Community creates a non linear video editing and compositing FOSS -application for Linux/Unix/Posix Operating Systems, suitable for professional -and quality oriented work, building on common open source video, sound and GUI -toolkits and libraries, providing flexibility and a high degree of -configurability and full control of all parameters, but at the same time a -smooth workflow which scales well to larger and more complicated editing -projects. This Document outlines the Design from some distance, -helping people to understand the Ideas behind Lumiera and understand the tools -they get to work with. It is aimed for workflow designers any anyone who wants -to know how the program works in general. +The Lumiera Community is in the process of making a non-linear video editing +and compositing FOSS application for Linux/Unix/Posix operating systems. The +application is geared towards professional, high-quality work; but +it is equally suitable for low-end users, due to its in-design scalability. +Lumiera builds on common open source video, sound and GUI toolkits and +libraries, being highly flexibile, configurable---user-control over a broad +spectrum of configurable parameters---and with smooth workflows that scale well +to larger more intricate projects and smaller projects. + +This document outlines the design from a more general perspective, +providing potential users with sufficient insight into the tools and technology +behind Lumiera to start working with Lumiera quickly. 
****************************************************************************** // all things starting with '//' are asciidoc comments and drafts/notes while // working on this document .About this Document -This document is meant to be read electronically, it contains a lot -hyper-links between explanations denoted by an arrow ->. Lumiera is still in -development, we describe here planned features without explicitly tagging them; -some things are not worked out in detail yet. Although this document is heavily -cross-linked we try to start with a broad overview and work out more detailed -things towards the end. +// It contains many hyper-links to explanations which are denoted by an arrow ->. +Lumiera is still under active development. Here we describe planned features +without explicitly tagging them; some points have still to be worked out in +detail. Although this document is heavily cross-linked, we try to start with a +broad overview, then develop details towards the end. Vision @@ -35,41 +36,48 @@ Vision // objective and goals of the project -Lumiera claims to be a _professional non-linear video editor_. To start with, we should +Lumiera strives towards being a _professional non-linear video editor_. To start with, we should point out that ``professional'' does not necessarily mean ``commercial'' or ``industrial''. -It's more of an attitude or mindset -- doing work seriously, and to be subject to any -kind of wider goal, demand, or purpose. When it comes to editing film, this might be -artistry, a narration or meaning to convey, a political message or something to show -to your audience. Anyhow, for the tools, the editing software used to this end, -we can identify several properties and requirements, to be labeled ``professional'': +It's more of an attitude or frame of mind -- doing work seriously, and to be subject to any +kind of wider goal, requirement, or purpose. 
+ +The concept of professionalism in film editing can mean something of an artistic +nature, a narrative or having a meaning to convey, a political message, or +portraying something to an audience. + +Anyhow, for the tools, the editing software used to this end, we can identify +several properties and requirements, to be labeled ``professional'': + +With this perspective in mind, we can identify a number of key properties +of professional film production tools: + Reliability:: - Whatever happens, your work must be safe, protected against software - glitches and incompatibilities. Ideally Lumiera should be very stable and - never crash, in practice even crashes or power outages should not - result in lost work. + Your work must be safe and protected at all costs against software + glitches and incompatibilities. Ideally, Lumiera should be reliable, + very stable and not crash. In practice, even crashes or power outages + should not result in data or work loss. Quality:: - If you work with high quality, cinema grade digital video material you - want to be sure that you can deliver crisp quality without compromise, - throughout the whole workflow to your final product. All rendering - must be reproducible to the bit. - + The demands placed on high-quality, cinema grade digital video material + require crisp quality without any compromise throughout the entire work + flow in the final product. All rendering will have to be reproducible + down to the last digit. + Performance and Productivity:: - Professionals want to get things done, in time, but optionally with control - over every aspect. Balancing these goals should be the central concern for - workflow design and usability. + Professionals want to get things done, in time and content, but ideally + with control over all details. The fine balance of these goals is a + central concern of workflow design and usability. 
Scalability and Adaptability:: Projects and budgets differ, hardware advances, Lumiera must scale - in different dimensions and use the available resources as best as it + in different dimensions and use available resources as best it can. From small Laptops to multi core computers and Renderfarms. Durability:: Soft and Hardware advances at a fast pace. We must not lock into the - current state of technology but being flexible to extend the System - without breaking compatibility. Projects you create nowadays with - Lumiera should be usable in foreseeable future, at least there needs + current state of technology but must be flexible enough to extend the + system without breaking compatibility. Projects you create nowadays with + Lumiera should be usable in the foreseeable future, there at least needs to be a guaranteed upgrade path. @@ -79,20 +87,21 @@ Fundamental Forces // the basic ideas which drive the Lumiera design The Lumiera design is guided by a small number of basic principles. Keeping -these in mind will help to understand how actually more interesting things can -be built up on that foundation. +these principles in mind will help you understand how more interesting things can +be built up from these fundamental principles. -Open ended combining of Building Blocks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Open Ended Combination of Building Blocks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Lumiera is not so much defined in terms of _features_ -- rather it allows to -combine basic _building blocks._ These basic modules, entities or objects each -have a distinct _type_ explicitly limiting the connections. Within these -limits, any conceivable combination shall be supported without further hidden -limitations. +Lumiera is not so much defined in terms of _features_. It is more like a +workshop, where individual _basic building-blocks_ can be combined together in +interesting ways to form more complex structures. 
These basic modules, entities +or objects each have a distinct _type_ explicitly limiting the +connections. Within these limits, any conceivable combination is supported +without further hidden limitations. -Lumiera is neither a set of Lego bricks, nor is it the business application +Lumiera is neither a set of Lego bricks, nor is it a business application driven by finite usage stories. @@ -104,13 +113,17 @@ user may, if desired, directly manipulate through the GUI clips, individual effects, masks, and even the placements xref:placement[->] used to stitch the objects together, which is comparatively low-level. On the other hand, these abstractions shield the user from the actual technical details like format -conversions and the accessing of individual channels. +conversions and accessing individual channels. To complement this approach, Lumiera does _not_ rely on hard wired, global conventions -- rather we allow to build up project specific conventions and rules xref:rules[->] to fit the given requirements and preferred working -style. To help getting started, Lumiera will ship with a fairly conventional -project template and default configuration. +style. + +To ease the user in getting started with Lumiera, it will be supplied with a +rather conventional template and a default configuration will be supplied that +is relative conventional. + [[graphs]] @@ -137,24 +150,24 @@ final video. Pulling not Pushing ~~~~~~~~~~~~~~~~~~~ -On a first glance, it looks fairly natural to set up the graphs xref:graphs[->] +At a first glance, it looks fairly natural to set up the graphs xref:graphs[->] as described above and then push data into the system through the input nodes whereas the final result can then be seen soon on the output node. Several -multimedia frameworks use this approach. But it has a lot of shortcomings -which make it inappropriate for non-linear video editing. +multimedia frameworks use this approach. 
However this scheme exhibits a number +of shortcomings which make it inappropriate for non-linear video editing. -Lumiera instead pulls data though the pipe, that is a request starts at the -output node and makes it way up to the inputs. This has certain advantages -xref:pull[->], explained later. +Lumiera instead pulls data through the pipe, i.e., a request starts at the +output node and makes its way up to the inputs. This has certain advantages +xref:pull[->], which will be explained later. Don't waste work ~~~~~~~~~~~~~~~~ -Rendering A/V Data can be quite CPU intensive, to ensure that we do not waste -CPU power by rendering things twice, or the worse, have to throw results away -because they couldn't be rendered in time, we use sophisticated caching -xref:caching[->] and profiling xref:profiling[->]. +Rendering A/V Data can be quite CPU intensive. To ensure that we do not waste +any CPU power by rendering things twice, or worse still, having to throw away +results because they couldn't be rendered in time, in Lumiera we use sophisticated +caching xref:caching[->] and profiling xref:profiling[->]. The visible Universe @@ -166,32 +179,30 @@ Now its time to take a look at the prelimary Lumiera GUI: image:{l}/images/lumiera_gui_small.png[Current Lumiera GUI Screenshot] -The GUI is a plugin by itself and only one way to work Lumiera, it will become -possible to create special-purpose GUIs or control Lumiera in different ways, -like a headless rendernode xref:rendernode[->] or frameserver -xref:frameserver[->]. Completely script driven interfaces for automated -processing are also planned. -The GUI screenshot you see above is faily default as when you start Lumiera up -for the first time (the plan is to add a 2nd Viewer to the default -configuration). While we support a much more sophisticated screen concept +Lumiera consists of three major parts: the GUI, proc and backend. 
The initial +GUI Lumiera will ship with is not the only GUI possible, indeed Lumiera can work +satisfactorily without a GUI for special purposes, for example as a +headless rendernode xref:rendernode[->] or frameserver +xref:frameserver[->]. Later scripts will be written for automated +processing. Special purpose GUIs are also envisaged later. + +The GUI screenshot you see above is more or less the default you see when you +start Lumiera up for the first time. A 2nd viewer is planned to be +added to the default configuration later. We support a much more sophisticated screen concept xref:screenconcept[->] to adapt to different workplaces and workflows. Viewer ~~~~~~ -[red]#to be written# - -// only one viewer type used for everything -// how is audio integrated in the viewer -// effects may add overlays (masking/rotoscoping, information for example) -// these may be manipulateable in the viewer, but not part of the rendered -// video. Maybe effects can add widgets to the viewer too (how, where?) -// one can open as many viewers he needs -// these can be attached everyhere in the processing graph (pre/post effect) -// have bus in front to adapt output format -// detachable window, fullscreen, external screen +The viewer is an area where material can be displayed, i.e., ``play-back'', +which also supports audio playback connections. As there are many sources that +can be displayed, a viewer is attached to a source via the viewer switch board. +Timelines, probepoints, wiretaps and individual clips are examples of sources +that can be attached to a viewer. Moreover, the number of viewers open at any one +time is only limited by the hardware, and each viewer can be collapsed, hooked +up to a beamer or monitor. Transport Controls @@ -222,13 +233,26 @@ no magic here. Timeline View ~~~~~~~~~~~~~ -hierarchical tracks, not just a list +A timeline is a container that provides a time axis and an output. The output +can be derived from various sources and have different configurations. 
An +output can have various configurations, for each output configuration, there +will be one timeline. A timeline does not temporally arrange material, this is +performed by a sequence, which can be snapped to a timeline. -Format Independent Timeline, one can put anything on the timeline. -the busses constrain what kind of data is pulled out and in turn the -builder creates a processing graph which does the necessary conversions and -stuff. +A typical film will define many sequences, but only a few timelines. A sequence +contains a number of tracks which are ordered in a hierarchy. Tracks do not have +any format associated with them and more or less anything can be put into a +track. Consequently, audio and video material can be equally assigned to a +track, there is no discrimination between audio and video in the Lumiera concept +of a track. +A timeline must be assigned to a viewer if playback viewing is desired. + +//Format Independent Timeline, one can put anything on the timeline. +//the busses constrain what kind of data is pulled out and in turn the +//builder creates a processing graph which does the necessary conversions and +//stuff. +// // Q: how to handle interaction, for example when some conversion can only be // done in a lossy way and some conversion node may or may not be inserted // (i mean gui wise)? @@ -246,19 +270,30 @@ stuff. Busses ~~~~~~ + The GUI provides a separate _bus view_, showing the master busses (subgroups) -in a manner similar to an audio mixing desk. Any bus is just a means to collect -and sum up the output of a specific kind of media (video, audio, number of channels), -produced by various processing elements and other busses. Conceptionally, these global -busses are considered a part of the timeline +in a manner similar to an audio mixing desk. 
Any bus is just a means of collecting +and adding (aka overlaying) together the output of various kinds of media +(video, audio, number of channels), produced by various processing elements and +from other busses. These global busses can be conceived as being part of the +timeline. Asset View ~~~~~~~~~~ +We can conceive the Asset View as the timeline's book keeper: it manages the various +constituents in the timeline. Moreover, in addition to managing timeline +constituents, raw material, clips, bins (folders) are managed by the Asset +View, i.e., typical management operations including deleting, adding, +naming, tagging, grouping into bins, etc. all occur here. + +Plugins are also managed in the Asset View. + + Manages all assets available in one project. * source media/footage/soundfiles - * prepared clips, known subprojects + * all available effects and transitions * internal artefacts like sequences and automation data sets @@ -285,18 +320,21 @@ Manages all assets available in one project. Dark Matter ----------- -[red]#to be written# -coarse overview about things the user does not see but have some contact -with, details later... +The material in this section provides a cursory view of features not required by +a typical user, but of more importance to people looking under the hood, i.e., +programmers, etc. - -Now lets take a look under the hood. +Most of the material in this section is to be found in the proc layer and in the +backend. Session storage ~~~~~~~~~~~~~~~ [red]#to be written# + + +//databank with logging, no data loss. // not generateable data // its the timeline mostly @@ -335,9 +373,32 @@ rendering... rules system [red]#to be written# -I/O Subsystem -~~~~~~~~~~~~~ -[red]#to be written# +Input-Output Subsystem +~~~~~~~~~~~~~~~~~~~~~~~ + +Lumiera will process large quantities of data and it is of critical importance +to perform this efficiently. 
The input and output subsystem is all processed +in the backend, in fact, this is one very important function provided by the +back end. + +The typical Lumiera user will have many clips, in various configurations located +at various places. All this data will have to be stored by the backend, +moreover all this data will have to be rapidly retrieved from storage and +provided to the user. The demands on memory are high: huge chunks of data, +that can be quickly stored and equally quickly fetched, even if stored over +longer periods of time. Moreover, due to the scalability requirement of Lumiera, +this process will have to perform adequately on lower-end hardware, and perform +efficiently on higher-end hardware. + +Lumiera will break down processes that need to be processed into smaller units +called _tasks_. Typically, there will be many hundreds of tasks waiting for +processing at any one time. These tasks are queued for processing and the order in +which this is performed is managed by the _scheduler_. This is all done in the +back end. + +Apart from memory, the backend will be responsible for accessing and saving +files. It will be of the utmost importance to do this efficiently. This will be carried out +in the backend using low-level mechanisms. // file handling // vault, work, cache @@ -355,11 +416,60 @@ Configuration Plugins/Interfaces ~~~~~~~~~~~~~~~~~~ -[red]#to be written# -// explain whats it is -// portability -// versioning + +What are Plugins? +^^^^^^^^^^^^^^^^^ + +A Plug-in is a kind of generalisation of a library. + +All applications use, to varying degrees of intensity, libraries. A programmer +will not reinvent the wheel each time he sits down to programme an +application. A programmer will typically borrow and use features and +functionality from other programmers---or even borrow from himself, stuff +written long ago in the past. Such features are collected together in +libraries. 
+ +A library is used in an application by _linking_ the library into the +application. (There are other things to be done, but we'll call these 'details', +which won't concern us here.) There are different ways to _link_ a library +into an application: statically linking and dynamically linking. + +_Static Linking_ is done while the application is being built, or +compiled. It is performed by the linker. The linker can perform some checks +(mostly checks on syntax) and warn the user that some particular feature is +being used incorrectly. The user can then correct the offending code, and +recompile. +There are a number of disadvantages associated with static linking. Features and +libraries are being constantly improved. If the application wants to use new +features, it will have to be recompiled with the new library which provides the +new features. + +_Dynamic Linking_ helps rectify the necessity of having to recompile. If a +new, improved library becomes available, all the user has to do is to install +the new library onto the operating system, restart the application and the new +features can be used by the application. The features provided by a dynamic +library are loaded when the application starts to run. + +However both methods exhibit a number of shortcomings. Wouldn't it be better if +all features could be loaded only when needed? If features could be loaded only +when needed, then they could also be unloaded when not required, thus saving +memory and possibly increasing performance. This scheme of making features +available to an application is known as run-time linking, aka plug-ins. +Plug-ins offer other benefits: the application can continue to use both the old +features and the new features together, side-by-side, by using the version +number associated with the plug-in. This saves the application from considerable +headaches associated with other linking methods, notably library version +incompatibility. 
+ +Most modern applications use plug-ins, some are heavily dependent on plug-ins +and only provide limited functionality without any plug-ins. +Lumiera will not reinvent the wheel. One major goal is to provide considerable +functionality via well-designed, external code supplied to Lumiera by plug-ins. + +How are Plugins Implemented? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + diff --git a/doc/user/tutorials/contributing.txt b/doc/user/tutorials/contributing.txt index df10c1a0d..6658706ae 100644 --- a/doc/user/tutorials/contributing.txt +++ b/doc/user/tutorials/contributing.txt @@ -1,6 +1,22 @@ Contributing to Lumiera ======================= +All files in the Lumiera project are managed by *Git*. Although *Git* was +primarily brought to life to manage source code, it plays a fundamental role in +Lumiera project communication and contributing to Lumiera. +If you'd like to contribute to Lumiera, you will have to acquire some +understanding of *Git* at some stage or other. Please note, this is not the only +way to contribute to Lumiera, you can always send direct contributions to the +mailing list. + + +a powerful tool +that not only + + + which is a distributed +source code management system. + The Lumiera project uses an infrastructure based on *Git*, the distibuted sourcecode management software. 
This deliberately places the barrier for contributing very low: No formal ``commit right'' is necessary; you can @@ -10,6 +26,8 @@ For starters, follow the http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html[basic instructions] for using Git, notably the following parts: +http://gitref.org/ + ------------------------------------------------------------ $ git config --global user.name "Your Name Comes Here" $ git config --global user.email you@yourdomain.example.com diff --git a/src/common/interface.h b/src/common/interface.h index 3a8e2fb5c..82dabddf3 100644 --- a/src/common/interface.h +++ b/src/common/interface.h @@ -28,69 +28,82 @@ /* TODO better doxygen formating */ /** - * @file - * Lumiera interface macros and structures. + * @file interface.h + * @brief Lumiera interface macros and structures. * - * Instead just simple function/library bindings, Lumiera uses a system of - * versioned interfaces. This interfaces are C-binding compatible and thus - * can be used by any language which can bind to C. This interfaces are versioned - * to provide exceptional forward and backward compatibility for both, source and - * binary deployment of modules. This interfaces play a central role on the Lumiera - * architecture, other facilities, like serializing sessions and distributed computing + * Lumiera uses a system of versioned interfaces instead of just employing a + * simple library (containg the functions and data) and header file strategy. + * The interfaces defined here are C compatible and, thus, can be used by any + * language able to bind to C. The interfaces are versioned to provide forward + * and backward compatibility for both source and binary deployment of + * modules. The interfaces play a central role in the Lumiera architecture. + * Other facilities, such as serializing sessions and distributed computing * will use them extensively. * * Overview * - * Interfaces are used for two purposes in Lumiera: - * 1. 
The core uses them internally and exports its functionality though them. - * 2. Plugins (effects,...) extend Lumiera by providing interfaces + * Interfaces are used for 2 reasons in Lumiera: + * -# The Lumiera core system uses them internally and exports its functionality though them. + * -# Plugins (effects, ...) extend Lumiera by providing interfaces * - * We define some macros here which ease the declaration and definition of interfaces. + * To make an interface available to code so that the code can use the + * interface, the interface needs to be declared and then defined. We provide a + * number of macros here which ease this process. * - * Declaration of an interface happens in a header and has the form: - * LUMIERA_INTERFACE_DECLARE(name, version, - * LUMIERA_INTERFACE_SLOT(ret, name, params), - * ... - * ) - * Any code which want to use this interface must then include its declaration. - * - * Basic definition of an interface is done by mapping functions to slots or giving - * inline definitions for slot functions: - * LUMIERA_INTERFACE_INSTANCE(iname, version, name, descriptor, acquire, release, - * LUMIERA_INTERFACE_MAP (slot, luid, function), - * LUMIERA_INTERFACE_INLINE (slot, luid, ret, params, {body}), + * The interface is declared by placing the following macro in a + * header file: + * \code + * LUMIERA_INTERFACE_DECLARE(name, version, + * LUMIERA_INTERFACE_SLOT(ret, name, params), * ... * ) + * + * \endcode + * Any code that wants to use this interface must then include the header file. * - * There are 2 ways to define collections of interfaces: + * The interface is defined by mapping interface functions to slots, or + * providing inline definitions for slot functions. Defining the interface has + * the following form: + * \code + * LUMIERA_INTERFACE_INSTANCE(iname, version, name, descriptor, acquire, release, + * LUMIERA_INTERFACE_MAP (slot, luid, function), + * LUMIERA_INTERFACE_INLINE (slot, luid, ret, params, {body}), + * ... 
+ * ) + * \endcode + * + * A collection of interfaces can be defined in 2 different ways depending on + * whether the interface is exported by the core, or by a plugin: + * \code * LUMIERA_EXPORT(queryfunc, * LUMIERA_INTERFACE_DEFINE(...), * ... - * ) - * to export interfaces from the core. - * - * LUMIERA_PLUGIN(descriptor, acquire, release, luid, - * LUMIERA_INTERFACE_DEFINE(...), - * ... - * ) - * is used to export interfaces from a plugin. + * ) // Exporting from the core + * + * LUMIERA_PLUGIN(descriptor, acquire, release, luid, + * LUMIERA_INTERFACE_DEFINE(...), + * ... + * ) // Exporting from a plugin + * \endcode * * Naming and Versioning + * * Interfaces have unique names and a major and minor version. The name and the major version - * is used to construct a C identifier for the interface, the minor version is implicit defined - * by the number of functions a interface. Interface instances are not versioned by the - * interface system, versioning these shall be defined somewhere else. + * is used to construct a C identifier for the interface, the minor version is implicitly defined + * by the number of functions in the interface. Interface instances are not versioned by the + * interface system, versioning these will be defined somewhere else. * * Slot names are normal C identifiers, how these shall be versioned has to be defined somewhere - * else and is not subject of the interface system. Each function can has its own unique uuid. + * else and is not the subject of the interface system. Each function can have its own unique uuid. 
*/ /* - Interface declaration macros + Macros to Declare an Interface */ /** + * * Construct a type identifier for an interface * @param name name of the interface * @param version major version of this interface @@ -107,7 +120,7 @@ #define LUMIERA_INTERFACE_DNAME(iname, version, dname) PPMPL_CAT (LUMIERA_INTERFACE_INAME (iname, version), _##dname) /** - * Construct a definition string r for an interface + * Construct a definition string for an interface * @param iname name of the interface * @param version major version of the interface * @param dname name for the instance @@ -145,14 +158,14 @@ /** * Declare an interface. * @param name name of the interface - * @param version major version of this interface declaration. 0 denotes a experimental interface, - * otherwise this shall be counting from 1 upwards for each new (incompatible) change of an interface. + * @param version major version of this interface declaration. 0 denotes an experimental interface, + * otherwise this will begin at 1 onwards for each new (incompatible) change to an interface. * The older interface declarations may still be maintained in parallel (backwards compatibility!). * @param ... Slot declarations for the functions provided by this interface @see LUMIERA_INTERFACE_SLOT * The number of Slots in an interface defines its 'minor' version. * New slots must be added at the end. The prototype and order of existing slots must not be changed. * Slots may be renamed, for example a slot 'foo' can be renamed to 'foo_old' when a new 'foo' slot is - * added. Binary modules will then still use the 'foo_old' slot which was the 'foo' slot at their + * added. Binary modules will then still use the 'foo_old' slot which was the 'foo' slot at the * compile time while compiling modules from source will use the new 'foo' slot. This may be * intentionally used to break compilation and force the update of modules to a new api. 
*/ @@ -164,7 +177,7 @@ LUMIERA_INTERFACE_TYPE(name, version) \ } /** - * Declare function slot inside an interface. + * Declare a function slot inside an interface. * @param ret return type of the function * @param name name of this slot * @param params parentized list of parameters for the function @@ -175,11 +188,11 @@ LUMIERA_INTERFACE_TYPE(name, version) \ /* - Interface definition macros + Macros to Define an Interface */ /** - * Define a interface instance. + * Define an interface instance. * @param iname name of the interface to instance * @param version major version of the interface to instance * @param name name of the instance