Merge branch 'master' of git://git.lumiera.org/LUMIERA

This commit is contained in:
Lumiera Project 2012-03-14 23:20:04 +00:00
commit 6331237b35
738 changed files with 28022 additions and 56177 deletions

2
.gitignore vendored
View file

@ -7,6 +7,8 @@
*.gch
,valgrind.log*
*.pyc
/.sconf_temp
/.settings
optcache
Makefile.in
build/*

8
README
View file

@ -66,8 +66,8 @@ or the local Copy of this page in the file INSTALL
Debian Package
--------------
[verse]
Hermann Vosseler (aka Ichthyo) maintains a *Debian* packaging of the source tree,
which can be pulled from +git://git.lumiera.org/lumiera/debian+
It can be built by +git-buildpackage+
Hermann Vosseler (aka Ichthyo) maintains a *Debian* packaging of the source tree
- the package definition can be pulled from +git://git.lumiera.org/lumiera/debian+
- the package can be built by +git-buildpackage+

View file

@ -1,6 +1,6 @@
# -*- python -*-
##
## SConstruct - SCons based build-sytem for Lumiera
## SConstruct - SCons based build-system for Lumiera
##
# Copyright (C) Lumiera.org
@ -23,422 +23,62 @@
# NOTE: scons -h for help.
# This script /defines/ the components and how they fit together.
# SCons will derive dependencies and the necessary build steps.
# Read more about the SCons build system at: http://www.scons.org
# Basically, this script just /defines/ the components and how they
# fit together. SCons will derive the necessary build steps.
#-----------------------------------Configuration
TARGDIR = 'target'
VERSION = '0.pre.01'
TOOLDIR = './admin/scons' # SCons plugins
SCRIPTDIR = './admin'
OPTCACHE = 'optcache'
CUSTOPTFILE = 'custom-options'
# these are accessible via env.path.xxxx
srcIcon = 'icons'
srcConf = 'data/config'
buildExe = '#$TARGDIR'
buildLib = '#$TARGDIR/modules'
buildPlug = '#$TARGDIR/modules'
buildIcon = '#$TARGDIR/gui/icons'
buildUIRes = '#$TARGDIR/'
buildConf = '#$TARGDIR/config'
installExe = '#$DESTDIR/lib/lumiera'
installLib = '#$DESTDIR/lib/lumiera/modules'
installPlug = '#$DESTDIR/lib/lumiera/modules'
installIcon = '#$DESTDIR/share/lumiera/icons'
installUIRes = '#$DESTDIR/share/lumiera/'
installConf = '#$DESTDIR/lib/lumiera/config'
localDefinitions = locals()
#-----------------------------------Configuration
import os
# SCons plugins and extension modules
#------------------------------------------------
import sys
sys.path.append('./admin/scons')
#------------------------------------------------
sys.path.append(TOOLDIR)
sys.path.append(SCRIPTDIR)
import Setup
import Options
import Platform
from Buildhelper import *
from LumieraEnvironment import *
#####################################################################
def setupBasicEnvironment(localDefinitions):
""" define cmdline options, build type decisions
"""
EnsurePythonVersion(2,4)
EnsureSConsVersion(1,0)
Decider('MD5-timestamp') # detect changed files by timestamp, then do a MD5
vars = defineCmdlineVariables()
env = LumieraEnvironment(variables=vars
,toolpath = [TOOLDIR]
,pathConfig = extract_localPathDefs(localDefinitions) # e.g. buildExe -> env.path.buildExe
,TARGDIR = TARGDIR
,DESTDIR = '$INSTALLDIR/$PREFIX'
,VERSION = VERSION
)
handleVerboseMessages(env)
env.Append ( CCCOM=' -std=gnu99')
env.Append ( SHCCCOM=' -std=gnu99') # workaround for a bug: CCCOM currently doesn't honour CFLAGS, only CCFLAGS
env.Replace( CPPPATH =["#src"] # used to find includes, "#" means always absolute to build-root
, CPPDEFINES=['LUMIERA_VERSION='+VERSION ] # note: it's a list to append further defines
, CCFLAGS='-Wall -Wextra '
, CFLAGS='-std=gnu99'
)
handleNoBugSwitches(env)
env.Append(CPPDEFINES = '_GNU_SOURCE')
appendCppDefine(env,'DEBUG','DEBUG', 'NDEBUG')
# appendCppDefine(env,'OPENGL','USE_OPENGL')
appendVal(env,'ARCHFLAGS','CCFLAGS') # for both C and C++
appendVal(env,'OPTIMIZE', 'CCFLAGS', val=' -O3')
appendVal(env,'DEBUG', 'CCFLAGS', val=' -ggdb')
# setup search path for Lumiera plugins
appendCppDefine(env,'PKGLIBDIR','LUMIERA_PLUGIN_PATH=\\"$PKGLIBDIR/:ORIGIN/modules\\"'
,'LUMIERA_PLUGIN_PATH=\\"ORIGIN/modules\\"')
appendCppDefine(env,'PKGDATADIR','LUMIERA_CONFIG_PATH=\\"$PKGLIBDIR/:.\\"'
,'LUMIERA_CONFIG_PATH=\\"$DESTDIR/share/lumiera/:.\\"')
prepareOptionsHelp(vars,env)
vars.Save(OPTCACHE, env)
return env
def appendCppDefine(env,var,cppVar, elseVal=''):
if env[var]:
env.Append(CPPDEFINES = env.subst(cppVar) )
elif elseVal:
env.Append(CPPDEFINES = env.subst(elseVal))
def appendVal(env,var,targetVar,val=None):
if env[var]:
env.Append( **{targetVar: env.subst(val) or env[var]})
def handleNoBugSwitches(env):
""" set the build level for NoBug.
Release builds imply no DEBUG
whereas ALPHA and BETA require DEBUG
"""
level = env['BUILDLEVEL']
if level in ['ALPHA', 'BETA']:
if not env['DEBUG']:
print 'Warning: NoBug ALPHA or BETA builds requires DEBUG=yes, switching DEBUG on!'
env.Replace( DEBUG = 1 )
env.Append(CPPDEFINES = 'EBUG_'+level)
elif level == 'RELEASE':
env.Replace( DEBUG = 0 )
def handleVerboseMessages(env):
""" toggle verbose build output """
if not env['VERBOSE']:
# SetOption('silent', True)
env['CCCOMSTR'] = env['SHCCCOMSTR'] = " Compiling $SOURCE"
env['CXXCOMSTR'] = env['SHCXXCOMSTR'] = " Compiling++ $SOURCE"
env['LINKCOMSTR'] = " Linking --> $TARGET"
env['LDMODULECOMSTR'] = " creating module [ $TARGET ]"
def defineCmdlineVariables():
""" several toggles and configuration variables can be set on the commandline,
current settings will be persisted in a options cache file.
you may define custom variable settings in a separate file.
Commandline will override both.
"""
vars = Variables([OPTCACHE, CUSTOPTFILE])
vars.AddVariables(
('ARCHFLAGS', 'Set architecture-specific compilation flags (passed literally to gcc)','')
,('CC', 'Set the C compiler to use.', 'gcc')
,('CXX', 'Set the C++ compiler to use.', 'g++')
,PathVariable('CCACHE', 'Integrate with CCache', '', PathVariable.PathAccept)
,PathVariable('DISTCC', 'Invoke C/C++ compiler commands through DistCC', '', PathVariable.PathAccept)
,EnumVariable('BUILDLEVEL', 'NoBug build level for debugging', 'ALPHA', allowed_values=('ALPHA', 'BETA', 'RELEASE'))
,BoolVariable('DEBUG', 'Build with debugging information and no optimisations', False)
,BoolVariable('OPTIMIZE', 'Build with strong optimisation (-O3)', False)
,BoolVariable('VALGRIND', 'Run Testsuite under valgrind control', True)
,BoolVariable('VERBOSE', 'Print full build commands', False)
,('TESTSUITES', 'Run only Testsuites matching the given pattern', '')
# ,BoolVariable('OPENGL', 'Include support for OpenGL preview rendering', False)
# ,EnumVariable('DIST_TARGET', 'Build target architecture', 'auto',
# allowed_values=('auto', 'i386', 'i686', 'x86_64' ), ignorecase=2)
,PathVariable('PREFIX', 'Installation dir prefix', 'usr/local', PathVariable.PathAccept)
,PathVariable('INSTALLDIR', 'Root output directory for install. Final installation will happen in INSTALLDIR/PREFIX/... ', '/', PathVariable.PathIsDir)
,PathVariable('PKGLIBDIR', 'Installation dir for plugins, defaults to PREFIX/lib/lumiera/modules', '',PathVariable.PathAccept)
,PathVariable('PKGDATADIR', 'Installation dir for default config, usually PREFIX/share/lumiera', '',PathVariable.PathAccept)
)
return vars
def prepareOptionsHelp(vars,env):
prelude = """
USAGE: scons [-c] [OPTS] [key=val [key=val...]] [TARGETS]
Build and optionally install Lumiera.
Without specifying any target, just the (re)build target will run.
Add -c to the commandline to clean up anything a given target would produce
Special Targets:
build : just compile and link
testcode: additionally compile the Testsuite
check : build and run the Testsuite
doc : generate documentation (Doxygen)
all : build and testcode and doc
install : install created artifacts at PREFIX
Configuration Options:
"""
Help(prelude + vars.GenerateHelpText(env))
def configurePlatform(env):
""" locate required libs.
setup platform specific options.
Abort build in case of failure.
"""
conf = env.Configure()
# run all configuration checks in the given env
# Perform checks for prerequisites --------------------------------------------
problems = []
if not conf.TryAction('pkg-config --version > $TARGET')[0]:
problems.append('We need pkg-config for including library configurations, exiting.')
if not conf.CheckLibWithHeader('m', 'math.h','C'):
problems.append('Did not find math.h / libm.')
if not conf.CheckLibWithHeader('dl', 'dlfcn.h', 'C'):
problems.append('Functions for runtime dynamic loading not available.')
if not conf.CheckLibWithHeader('pthread', 'pthread.h', 'C'):
problems.append('Did not find the pthread lib or pthread.h.')
else:
conf.env.Append(CPPFLAGS = ' -DHAVE_PTHREAD')
conf.env.Append(CCFLAGS = ' -pthread')
if conf.CheckCHeader('execinfo.h'):
conf.env.Append(CPPFLAGS = ' -DHAVE_EXECINFO_H')
if conf.CheckCHeader('valgrind/valgrind.h'):
conf.env.Append(CPPFLAGS = ' -DHAVE_VALGRIND_H')
else:
print 'Valgrind not found. The use of Valgrind is optional; building without.'
if not conf.CheckPkgConfig('nobugmt', 201006.1):
problems.append('Did not find NoBug [http://www.lumiera.org/nobug_manual.html].')
else:
conf.env.mergeConf('nobugmt')
if not conf.CheckCXXHeader('tr1/memory'):
problems.append('We rely on the std::tr1 standard C++ extension for shared_ptr.')
if not conf.CheckCXXHeader('boost/config.hpp'):
problems.append('We need the C++ boost-libraries.')
else:
if not conf.CheckCXXHeader('boost/scoped_ptr.hpp'):
problems.append('We need boost::scoped_ptr (scoped_ptr.hpp).')
if not conf.CheckCXXHeader('boost/format.hpp'):
problems.append('We need boost::format (header).')
if not conf.CheckLibWithHeader('boost_program_options-mt','boost/program_options.hpp','C++'):
problems.append('We need boost::program_options (including binary lib for linking).')
if not conf.CheckLibWithHeader('boost_filesystem-mt','boost/filesystem.hpp','C++'):
problems.append('We need the boost::filesystem (including binary lib for linking).')
if not conf.CheckLibWithHeader('boost_regex-mt','boost/regex.hpp','C++'):
problems.append('We need the boost regular expression lib (incl. binary lib for linking).')
if conf.CheckLib(symbol='clock_gettime'):
print 'Using function clock_gettime() as defined in the C-lib...'
else:
if not conf.CheckLib(symbol='clock_gettime', library='rt'):
problems.append('No library known to provide the clock_gettime() function.')
if not conf.CheckPkgConfig('gavl', 1.0):
problems.append('Did not find Gmerlin Audio Video Lib [http://gmerlin.sourceforge.net/gavl.html].')
else:
conf.env.mergeConf('gavl')
if not conf.CheckPkgConfig('gtkmm-2.4', 2.8):
problems.append('Unable to configure GTK--')
if not conf.CheckPkgConfig('glibmm-2.4', '2.16'):
problems.append('Unable to configure Lib glib--')
if not conf.CheckPkgConfig('gthread-2.0', '2.12.4'):
problems.append('Need gthread support lib for glib-- based thread handling.')
if not conf.CheckPkgConfig('cairomm-1.0', 0.6):
problems.append('Unable to configure Cairo--')
verGDL = '2.27.1'
if not conf.CheckPkgConfig('gdl-1.0', verGDL, alias='gdl'):
print 'No sufficiently recent (>=%s) version of GDL found. Maybe use custom package gdl-lum?' % verGDL
if not conf.CheckPkgConfig('gdl-lum', verGDL, alias='gdl'):
problems.append('GNOME Docking Library not found. We either need a sufficiently recent GDL '
'version (>=%s), or the custom package "gdl-lum" from Lumiera.org.' % verGDL)
if not conf.CheckPkgConfig('librsvg-2.0', '2.18.1'):
problems.append('Need rsvg Library for rendering icons.')
if not conf.CheckCHeader(['X11/Xutil.h', 'X11/Xlib.h'],'<>'):
problems.append('Xlib.h and Xutil.h required. Please install libx11-dev.')
if not conf.CheckPkgConfig('xv') : problems.append('Need libXv...')
if not conf.CheckPkgConfig('xext'): problems.append('Need libXext.')
# report missing dependencies
if problems:
print "*** unable to build due to the following problems:"
for isue in problems:
print " * %s" % isue
print
print "build aborted."
Exit(1)
print "** Gathered Library Info: %s" % conf.env.libInfo.keys()
# create new env containing the finished configuration
return conf.Finish()
def defineSetupTargets(env, artifacts):
""" build operations and targets to be done /before/ compiling.
things like creating a source tarball or preparing a version header.
"""
pass ## currently none
def defineBuildTargets(env, artifacts):
""" define the source file/dirs comprising each artifact to be built.
setup sub-environments with special build options if necessary.
We use a custom function to declare a whole tree of srcfiles.
"""
# use PCH to speed up building // disabled for now due to strange failures
# env['GCH'] = ( env.PrecompiledHeader('src/pre.hpp')
# + env.PrecompiledHeader('src/pre_a.hpp')
# )
lLib = env.SharedLibrary('lumiera', srcSubtree(env,'src/lib'), install=True)
lApp = env.SharedLibrary('lumieracommon', srcSubtree(env,'src/common'), install=True, LIBS=lLib)
lBack = env.SharedLibrary('lumierabackend', srcSubtree(env,'src/backend'),install=True)
lProc = env.SharedLibrary('lumieraproc', srcSubtree(env,'src/proc'), install=True)
core = lLib+lApp+lBack+lProc
artifacts['corelib'] = core
artifacts['support'] = lLib
artifacts['config'] = ( env.ConfigData(env.path.srcConf+'setup.ini', targetDir='$ORIGIN')
+ env.ConfigData(env.path.srcConf+'dummy_lumiera.ini')
)
artifacts['lumiera'] = ( env.Program('lumiera', ['src/lumiera/main.cpp'] + core, install=True)
+ artifacts['config']
)
# building Lumiera Plugins
artifacts['plugins'] = [] # currently none
# render and install Icons
vector_icon_dir = env.path.srcIcon+'svg'
prerendered_icon_dir = env.path.srcIcon+'prerendered'
artifacts['icons'] = ( [env.IconRender(f) for f in scanSubtree(vector_icon_dir, ['*.svg'])]
+ [env.IconResource(f) for f in scanSubtree(prerendered_icon_dir, ['*.png'])]
)
# the Lumiera GTK GUI
envGtk = env.Clone()
envGtk.mergeConf(['gtkmm-2.4','gthread-2.0','cairomm-1.0','gdl','xv','xext','sm'])
envGtk.Append(LIBS=core)
objgui = srcSubtree(envGtk,'src/gui', appendCPP='LUMIERA_PLUGIN')
guimodule = envGtk.LumieraPlugin('gtk_gui', objgui, install=True)
artifacts['gui'] = ( guimodule
+ [env.GuiResource(f) for f in env.Glob('src/gui/*.rc')]
+ artifacts['icons']
)
# call subdir SConscript(s) for independent components
SConscript(dirs=['src/tool'], exports='env artifacts core')
SConscript(dirs=['tests'], exports='env artifacts core')
def definePostBuildTargets(env, artifacts):
""" define further actions after the core build (e.g. Documentaion).
define alias targets to trigger the installing.
"""
build = env.Alias('build', ( artifacts['lumiera']
+ artifacts['plugins']
+ artifacts['tools']
+ artifacts['gui']
))
# additional files to be cleaned when cleaning 'build'
env.Clean ('build', [ 'scache.conf', '.sconf_temp', '.sconsign.dblite', 'config.log' ])
env.Clean ('build', [ 'src/pre.gch' ])
doxydoc = artifacts['doxydoc'] = env.Doxygen('doc/devel/Doxyfile')
env.Alias ('doc', doxydoc)
env.Clean ('doc', doxydoc + ['doc/devel/,doxylog','doc/devel/warnings.txt'])
env.Alias ('all', build+artifacts['testsuite']+doxydoc)
env.Default('build')
# SCons default target
def defineInstallTargets(env, artifacts):
""" define additional artifacts to be installed into target locations.
@note: we use customised SCons builders defining install targets
for all executables automatically. see LumieraEnvironment.py
"""
env.SymLink('$DESTDIR/bin/lumiera',env.path.installExe+'lumiera','../lib/lumiera/lumiera')
# env.Install(dir = '$DESTDIR/share/doc/lumiera$VERSION/devel', source=artifacts['doxydoc'])
env.Alias('install', artifacts['gui'])
env.Alias('install', '$DESTDIR')
#####################################################################
env = Setup.defineBuildEnvironment() # dirs & compiler flags
env = Platform.configure(env) # library dependencies
### === MAIN BUILD === ##############################################
# call subdir SConscript(s) to define the actual build targets...
SConscript(dirs=['data','src','src/tool','research','tests','doc'], exports='env')
### === MAIN === ####################################################
# additional files to be cleaned when cleaning 'build'
env.Clean ('build', [ 'scache.conf', '.sconf_temp', '.sconsign.dblite', 'config.log' ])
env.Clean ('build', [ 'src/pre.gch' ])
env = setupBasicEnvironment(localDefinitions)
if not (isCleanupOperation(env) or isHelpRequest()):
env = configurePlatform(env)
artifacts = {}
# the various things we build.
# Each entry actually is a SCons-Node list.
# Passing these entries to other builders defines dependencies.
# 'lumiera' : the App
# 'gui' : the GTK UI (plugin)
# 'plugins' : plugin shared lib
# 'tools' : small tool applications (e.g mpegtoc)
defineSetupTargets(env, artifacts)
defineBuildTargets(env, artifacts)
definePostBuildTargets(env, artifacts)
defineInstallTargets(env, artifacts)
### === Alias Targets === ###########################################
# pick up the targets defined by the sub SConscripts
Import('lumiera plugins tools gui testsuite doxydoc')
build = env.Alias('build', lumiera + plugins + tools +gui)
env.Default('build')
# SCons default target
env.Alias ('all', build + testsuite + doxydoc)
env.Alias ('doc', doxydoc)
env.Alias('install', gui)
env.Alias('install', '$DESTDIR')
#####################################################################

View file

@ -1,213 +0,0 @@
#!/usr/bin/python
#
# render_icons.py - Icon rendering utility script
#
# Copyright (C) Lumiera.org
# 2008, Joel Holdsworth <joel@airwebreathe.org.uk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
import getopt
from xml.dom import minidom
import os
import shutil
#svgDir = "svg"
#prerenderedDir = "prerendered"
inkscapePath = "/usr/bin/inkscape"
rsvgPath = "./rsvg-convert"
artworkLayerPrefix = "artwork:"
#
# 2/2011 some notes by Ichthyo
# The purpose of this python script is
# - to parse a SVG
# - to invoke Inkscape to render this SVG into a raster image (icon)
#
# For the actual call into Incscape we rely on an executable 'rsvg-convert',
# which is built during the Lumiera build process.
#
# Judging from the code and the actual SVGs, this seems to work as follows:
# The SVG contains a design to be rendered into raster images of various sizes.
# These sizes are determined by special rectangles, which act as bounding box and
# are placed on a special 'plate' layer, which is a child layer of the main
# 'artwork:' layer. The grid of the SVG is setup such as to result in pixel sizes
# suitable for icon generation. The actual size of the generated icons are then
# parsed from the height and width attributes of the mentioned bounding box
# rectangles.
#
# The parser seems to be rather simplistic; the sizes and positions need to be
# integral numbers. In one instance we had a float number in the y coordinate,
# which resulted in an invalid, zero sized output icon
#
#
def createDirectory( name ):
try:
if os.path.isfile(name):
os.remove(name)
if not os.path.exists(name):
os.mkdir(name)
except:
print 'WARNING: createDirectory("%s") failed. Permission problems?' % name
def copyMergeDirectory( src, dst ):
listing = os.listdir(src)
for file_name in listing:
src_file_path = os.path.join(src, file_name)
dst_file_path = os.path.join(dst, file_name)
shutil.copyfile(src_file_path, dst_file_path)
def getDocumentSize( svg_element ):
width = float(svg_element.getAttribute("width"))
height = float(svg_element.getAttribute("height"))
return [width, height]
def findChildLayerElement( parent_element ):
for node in parent_element.childNodes:
if node.nodeType == minidom.Node.ELEMENT_NODE:
if node.tagName == "g":
if node.getAttribute("inkscape:groupmode") == "layer":
return node
return None
def parsePlateLayer( layer ):
rectangles = []
for node in layer.childNodes:
if node.nodeType == minidom.Node.ELEMENT_NODE:
if node.tagName == "rect":
x = float(node.getAttribute("x"))
y = float(node.getAttribute("y"))
width = float(node.getAttribute("width"))
height = float(node.getAttribute("height"))
rectangles.append([x, y, width, height])
return rectangles
def parseSVG( file_path ):
print "Parsing " + file_path
svgdoc = minidom.parse(file_path)
for root_node in svgdoc.childNodes:
if root_node.nodeType == minidom.Node.ELEMENT_NODE:
if root_node.tagName == "svg":
size = getDocumentSize( root_node )
layer = findChildLayerElement( root_node )
if layer != None:
layer_name = layer.getAttribute("inkscape:label")
if layer_name[:len(artworkLayerPrefix)] == artworkLayerPrefix:
artwork_name = layer_name[len(artworkLayerPrefix):]
plate = findChildLayerElement( layer )
if plate != None:
return artwork_name, size, parsePlateLayer( plate )
return None
def renderSvgInkscape(file_path, out_dir, artwork_name, rectangle, doc_size):
# Calculate the rendering rectangle
x1 = rectangle[0]
y1 = doc_size[1] - rectangle[1] - rectangle[3]
x2 = x1 + rectangle[2]
y2 = y1 + rectangle[3]
# Call Inkscape to do the render
os.spawnlp(os.P_WAIT, inkscapePath, inkscapePath,
file_path,
"-z",
"-a %g:%g:%g:%g" % (x1, y1, x2, y2),
"-w %g" % (rectangle[2]), "-h %g" % (rectangle[3]),
"--export-png=" + os.path.join(out_dir, "%gx%g/%s.png" % (rectangle[2], rectangle[3], artwork_name)))
def renderSvgRsvg(file_path, out_dir, artwork_name, rectangle, doc_size):
# Prepare a Cairo context
width = int(rectangle[2])
height = int(rectangle[3])
if not os.path.exists(rsvgPath):
print "Error: executable %s not found." % rsvgPath
os.spawnlp(os.P_WAIT, rsvgPath, rsvgPath,
"--source-rect=%g:%g:%g:%g" % (rectangle[0], rectangle[1], rectangle[2], rectangle[3]),
"--output=" + os.path.join(out_dir, "%gx%g/%s.png" % (rectangle[2], rectangle[3], artwork_name)),
file_path)
def renderSvgIcon(file_path, out_dir):
artwork_name, doc_size, rectangles = parseSVG(file_path)
for rectangle in rectangles:
renderSvgRsvg(file_path, out_dir, artwork_name, rectangle, doc_size)
def getTargetNames(file_path):
"""get a list of target names to be rendered from the given source SVG
usable to setup the build targets for SCons
"""
artwork_name, _ , rectangles = parseSVG(file_path)
return ["%gx%g/%s.png" % (rectangle[2], rectangle[3], artwork_name) for rectangle in rectangles ]
#def renderSvgIcons():
# listing = os.listdir(svgDir)
# for file_path in listing:
# [root, extension] = os.path.splitext(file_path)
# if extension.lower() == ".svg":
# renderSvgIcon(os.path.join(svgDir, file_path))
#def copyPrerenderedIcons():
# listing = os.listdir(prerenderedDir)
# for list_item in listing:
# src_dir = os.path.join(prerenderedDir, list_item)
# copyMergeDirectory(src_dir, list_item)
def printHelp():
print "render-icon.py SRCFILE.svg TARGETDIR"
print "An icon rendering utility script for lumiera"
def parseArguments(argv):
optlist, args = getopt.getopt(argv, "")
if len(args) == 2:
return args[0], args[1]
printHelp()
return None, None
def main(argv):
in_path, out_dir = parseArguments(argv)
if not (in_path and out_dir):
print "Missing arguments in_path and out_dir."
sys.exit(1)
if os.path.isfile(out_dir):
print "Unable to use '%s' as output directory, because it\'s a file." % out_dir
sys.exit(1)
if not os.path.isdir(out_dir):
print "Output directory '%s' not found." % out_dir
sys.exit(1)
# Create the icons folders
createDirectory(os.path.join(out_dir, "48x48"))
createDirectory(os.path.join(out_dir, "32x32"))
createDirectory(os.path.join(out_dir, "24x24"))
createDirectory(os.path.join(out_dir, "22x22"))
createDirectory(os.path.join(out_dir, "16x16"))
renderSvgIcon(in_path, out_dir)
# Copy in prerendered icons
#copyPrerenderedIcons()
if __name__=="__main__":
main(sys.argv[1:])

View file

@ -293,7 +293,7 @@ function add_comment()
{
local name="$1"
local nl=$'\n'
local comment="//edit comment$nl $(date +%c) $(git config --get user.name) <$(git config --get user.email)>$nl"
local comment="//edit comment$nl$nl$(git config --get user.name):: '$(date +%c)' ~<$(git config --get user.email)>~$nl"
ed "$name" 2>/dev/null <<EOF
/endof_comments:/-1i

View file

@ -25,11 +25,8 @@ import os
import sys
import glob
import fnmatch
import re
import tarfile
from SCons import Util
from SCons.Action import Action
@ -49,20 +46,11 @@ def isHelpRequest():
def srcSubtree(env,tree,isShared=True,builder=None,appendCPP=None, **args):
""" convenience wrapper: scans the given subtree, which is
relative to the current SConscript, find all source files and
declare them as Static or SharedObjects for compilation
def srcSubtree(tree, **args):
""" convenience wrapper: scan the given subtree, which is relative
to the current SConscript, and find all source files.
"""
if appendCPP: env.Append(CPPDEFINES=appendCPP)
root = env.subst(tree) # expand Construction Vars
if not builder:
if isShared:
builder = lambda f: env.SharedObject(f, **args)
else:
builder = lambda f: env.Object(f, **args)
return [builder(f) for f in scanSubtree(root)]
return list(scanSubtree(tree, **args))
@ -74,12 +62,12 @@ def scanSubtree(roots, patterns=SRCPATTERNS):
(python generator function)
"""
for root in globRootdirs(roots):
for (dir,_,files) in os.walk(root):
if dir.startswith('./'):
dir = dir[2:]
for (d,_,files) in os.walk(root):
if d.startswith('./'):
d = d[2:]
for p in patterns:
for f in fnmatch.filter(files, p):
yield os.path.join(dir,f)
yield os.path.join(d,f)
@ -87,16 +75,16 @@ def globRootdirs(roots):
""" helper: expand shell wildcards and filter the resulting list,
so that it only contains existing directories
"""
filter = lambda f: os.path.isdir(f) and os.path.exists(f)
isDirectory = lambda f: os.path.isdir(f) and os.path.exists(f)
roots = glob.glob(roots)
return (dir for dir in roots if filter(dir) )
return (d for d in roots if isDirectory(d) )
def findSrcTrees(location, patterns=SRCPATTERNS):
""" find possible source tree roots, starting with the given location.
When delving down from the initial location(s), a source tree is defined
as a directory containing source files and possibly further sub directories.
as a directory containing source files and possibly further sub directories.
After having initially expanded the given location with #globRootdirs, each
directory is examined depth first, until encountering a directory containing
source files, which then yields a result. Especially, this can be used to traverse
@ -104,11 +92,11 @@ def findSrcTrees(location, patterns=SRCPATTERNS):
to be built into packages, plugins, individual tool executables etc.
@return: the relative path names of all source root dirs found (generator function).
"""
for dir in globRootdirs(location):
if isSrcDir(dir,patterns):
yield dir
for directory in globRootdirs(location):
if isSrcDir (directory,patterns):
yield directory
else:
for result in findSrcTrees(str(dir)+'/*'):
for result in findSrcTrees (str(directory)+'/*'):
yield result
@ -118,7 +106,7 @@ def isSrcDir(path, patterns=SRCPATTERNS):
@return: True if it's a directory containing any source file
"""
if not os.path.isdir(path):
return False
return False
else:
for p in patterns:
if glob.glob(path+'/'+p):
@ -133,38 +121,39 @@ def filterNodes(nlist, removeName=None):
if removeName:
predicate = lambda n : not fnmatch.fnmatch(os.path.basename(str(n[0])), removeName)
else:
predicate = lambda n : True;
predicate = lambda n : True
return filter(predicate, nlist)
def getDirname(dir, basePrefix=None):
def getDirname (d, basePrefix=None):
""" extract directory name without leading path,
or without the explicitly given basePrefix
"""
dir = os.path.realpath(dir)
if not os.path.isdir(dir):
dir,_ = os.path.split(dir)
d = os.path.realpath(d)
if not os.path.isdir(d):
d,_ = os.path.split(d)
if basePrefix:
basePrefix = os.path.realpath(basePrefix)
if str(dir).startswith(basePrefix):
name = str(dir)[len(basePrefix):]
name = str(d)
if str(d).startswith(basePrefix):
name = name[len(basePrefix):]
else:
_, name = os.path.split(dir)
_, name = os.path.split(d)
return name
def createPlugins(env, dir, **kw):
def createPlugins(env, directory, **kw):
""" investigate the given source directory to identify all contained source trees.
@return: a list of build nodes defining a plugin for each of these source trees.
"""
return [env.LumieraPlugin( getDirname(tree)
, srcSubtree(env, tree, appendCPP='LUMIERA_PLUGIN')
, srcSubtree(tree)
, **kw
)
for tree in findSrcTrees(dir)
for tree in findSrcTrees(directory)
]

186
admin/scons/IconSvgRenderer.py Executable file
View file

@ -0,0 +1,186 @@
#!/usr/bin/python
#
# IconSvgRenderer.py - Icon rendering utility script
#
# Copyright (C) Lumiera.org
# 2008, Joel Holdsworth <joel@airwebreathe.org.uk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import os
import sys
import getopt
import shutil
from xml.dom import minidom
rsvgPath = "./rsvg-convert"
artworkLayerPrefix = "artwork:"
#
# 2/2011 some notes by Ichthyo
# The purpose of this python script is
# - to parse a SVG
# - to invoke Inkscape to render this SVG into a raster image (icon)
#
# For the actual Cairo based SVG rendering we rely on an executable 'rsvg-convert',
# which is built during the Lumiera build process.
#
# Judging from the code and the actual SVGs, this seems to work as follows:
# The SVG contains a design to be rendered into raster images of various sizes.
# These sizes are determined by special rectangles, which act as bounding box and
# are placed on a special 'plate' layer, which is a child layer of the main
# 'artwork:' layer. The grid of the SVG is setup such as to result in pixel sizes
# suitable for icon generation. The actual size of the generated icons are then
# parsed from the height and width attributes of the mentioned bounding box
# rectangles.
#
# The parser seems to be rather simplistic; the sizes and positions need to be
# integral numbers. In one instance we had a float number in the y coordinate,
# which resulted in an invalid, zero sized output icon
#
#
def createDirectory (name):
    """Ensure that a directory called `name` exists.
    If a plain file occupies that name, it is removed first.
    Failures (e.g. permission problems) are reported but not fatal.
    """
    try:
        if os.path.isfile (name):
            # a stray file blocking the directory name is discarded
            os.remove (name)
        if not os.path.exists (name):
            os.mkdir (name)
    except:
        # NOTE(review): bare except deliberately swallows any error;
        # icon output is treated as best-effort here
        print 'WARNING: createDirectory("%s") failed. Permission problems?' % name
def copyMergeDirectory (src, dst):
    """copy every file found in directory src into directory dst,
    overwriting any same-named files already present there.
    """
    for entry in os.listdir (src):
        source_path = os.path.join (src, entry)
        target_path = os.path.join (dst, entry)
        shutil.copyfile (source_path, target_path)
def getDocumentSize (svg_element):
    """read the overall document extent from the <svg> root element.
    @return: [width, height] as floats
    """
    dimensions = [svg_element.getAttribute (attr) for attr in ("width", "height")]
    return [float(value) for value in dimensions]
def findChildLayerElement (parent_element):
for node in parent_element.childNodes:
if node.nodeType == minidom.Node.ELEMENT_NODE:
if node.tagName == "g":
if node.getAttribute("inkscape:groupmode") == "layer":
return node
return None
def parsePlateLayer (layer):
    """collect the bounding-box definitions from a 'plate' layer.
    @return: a list of [x, y, width, height] floats, one per <rect> child
    """
    boxes = []
    for child in layer.childNodes:
        if child.nodeType == minidom.Node.ELEMENT_NODE and child.tagName == "rect":
            geometry = [float(child.getAttribute (attr))
                        for attr in ("x", "y", "width", "height")]
            boxes.append (geometry)
    return boxes
def parseSVG (file_path):
    """parse the given SVG file and extract the icon rendering setup.
    Returns a triple (artwork_name, document_size, bounding_rectangles)
    when the document holds an artwork layer (label prefixed with
    `artworkLayerPrefix`) containing a nested plate layer with the
    bounding box rectangles; otherwise returns None.
    """
    print("Parsing " + file_path)
    svgdoc = minidom.parse (file_path)
    for root_node in svgdoc.childNodes:
        if not (root_node.nodeType == minidom.Node.ELEMENT_NODE
                and root_node.tagName == "svg"):
            continue
        size  = getDocumentSize (root_node)
        layer = findChildLayerElement (root_node)
        if layer is None:
            continue
        layer_name = layer.getAttribute ("inkscape:label")
        if layer_name.startswith (artworkLayerPrefix):
            artwork_name = layer_name[len(artworkLayerPrefix):]
            plate = findChildLayerElement (layer)
            if plate is not None:
                return artwork_name, size, parsePlateLayer (plate)
    return None
def renderSvgRsvg (file_path, out_dir, artwork_name, rectangle, _doc_size):
    """render one icon size: invoke the rsvg-convert executable (module
    global `rsvgPath`) to rasterise the given source rectangle of the SVG
    into out_dir/<W>x<H>/<artwork_name>.png
    @param rectangle  [x, y, width, height] as parsed from the plate layer
    """
    width  = int(rectangle[2])
    height = int(rectangle[3])
    if not os.path.exists(rsvgPath):
        print("Error: executable %s not found." % rsvgPath)
        return  # bail out instead of spawning a nonexistent executable
    os.spawnlp(os.P_WAIT, rsvgPath, rsvgPath,
               "--source-rect=%g:%g:%g:%g" % (rectangle[0], rectangle[1], width, height),
               "--output=" + os.path.join(out_dir, "%gx%g/%s.png" % (width, height, artwork_name)),
               file_path)
def renderSvgIcon (file_path, out_dir):
    """render all icon sizes defined within the given SVG into out_dir.
    Emits a warning when the SVG contains no recognisable artwork layer
    (parseSVG returned None) instead of crashing on the unpack."""
    parse_result = parseSVG (file_path)
    if not parse_result:
        print("Warning: no renderable artwork found in " + file_path)
        return
    artwork_name, doc_size, rectangles = parse_result
    for rectangle in rectangles:
        renderSvgRsvg(file_path, out_dir, artwork_name, rectangle, doc_size)
def getTargetNames (file_path):
    """get a list of target names to be rendered from the given source SVG
    usable to setup the build targets for SCons.
    Returns an empty list when the SVG holds no renderable artwork
    (instead of raising TypeError on unpacking None)."""
    parse_result = parseSVG (file_path)
    if not parse_result:
        return []
    artwork_name, _ , rectangles = parse_result
    return ["%gx%g/%s.png" % (rectangle[2], rectangle[3], artwork_name)
            for rectangle in rectangles]
def printHelp():
    """print usage instructions to stdout"""
    for line in ("render-icon.py SRCFILE.svg TARGETDIR",
                 "An icon rendering utility script for lumiera"):
        print(line)
def parseArguments(argv):
    """extract (input_file, output_dir) from the commandline arguments.
    Prints the usage text and yields (None, None) when they don't fit."""
    _optlist, positional = getopt.getopt(argv, "")
    if len(positional) != 2:
        printHelp()
        return None, None
    return positional[0], positional[1]
def main (argv):
    """commandline entry point: render all icon sizes from one SVG source
    into the size-specific subfolders of the given output directory"""
    in_path, out_dir = parseArguments(argv)
    if not (in_path and out_dir):
        print("Missing arguments in_path and out_dir.")
        sys.exit(1)
    if os.path.isfile(out_dir):
        print("Unable to use '%s' as output directory, because it\'s a file." % out_dir)
        sys.exit(1)
    if not os.path.isdir(out_dir):
        print("Output directory '%s' not found." % out_dir)
        sys.exit(1)

    # Create the icons folders, one per supported raster size
    for size in ("48x48", "32x32", "24x24", "22x22", "16x16"):
        createDirectory(os.path.join(out_dir, size))

    renderSvgIcon (in_path, out_dir)
# entry point guard: allow use both as a script and as an importable module
# (the SCons build imports this file to query getTargetNames)
if __name__=="__main__":
    main(sys.argv[1:])

View file

@ -22,11 +22,10 @@
#####################################################################
import os
from os import path
import SCons
import SCons.SConf
from SCons.Action import Action
from SCons.Environment import Environment
from Buildhelper import *
@ -38,9 +37,15 @@ class LumieraEnvironment(Environment):
This allows us to carry structured config data without
using global vars. Idea inspired by Ardour.
"""
def __init__(self, pathConfig, **kw):
Environment.__init__ (self,**kw)
self.path = Record (pathConfig)
def __init__(self, buildSetup, buildVars, **kw):
kw.update(VERSION = buildSetup.VERSION
,TARGDIR = buildSetup.TARGDIR
,DESTDIR = '$INSTALLDIR/$PREFIX'
,toolpath = [buildSetup.TOOLDIR ]
,variables = buildVars
)
Environment.__init__ (self, **kw)
self.path = Record (extract_localPathDefs(buildSetup)) # e.g. buildExe -> env.path.buildExe
self.libInfo = {}
self.Tool("BuilderGCH")
self.Tool("BuilderDoxygen")
@ -49,6 +54,7 @@ class LumieraEnvironment(Environment):
register_LumieraResourceBuilder(self)
register_LumieraCustomBuilders(self)
def Configure (self, *args, **kw):
kw['env'] = self
return apply(LumieraConfigContext, args, kw)
@ -123,7 +129,7 @@ def register_LumieraResourceBuilder(env):
used to generate png from the svg source using librsvg.
"""
import render_icon as renderer # load Joel's python script for invoking the rsvg-convert (SVG render)
import IconSvgRenderer as renderer # load Joel's python script for invoking the rsvg-convert (SVG render)
renderer.rsvgPath = env.subst("$TARGDIR/rsvg-convert")
def invokeRenderer(target, source, env):
@ -151,43 +157,44 @@ def register_LumieraResourceBuilder(env):
return (generateTargets, source)
def IconResource(env, source):
"""Copy icon pixmap to corresponding icon dir. """
subdir = getDirname(str(source))
toBuild = env.path.buildIcon+subdir
toInstall = env.path.installIcon+subdir
env.Install (toInstall, source)
return env.Install(toBuild, source)
"""Copy icon pixmap to corresponding icon dir. """
subdir = getDirname(str(source))
toBuild = env.path.buildIcon+subdir
toInstall = env.path.installIcon+subdir
env.Install (toInstall, source)
return env.Install(toBuild, source)
def GuiResource(env, source):
subdir = getDirname(str(source))
toBuild = env.path.buildUIRes+subdir
toInstall = env.path.installUIRes+subdir
env.Install (toInstall, source)
return env.Install(toBuild, source)
subdir = getDirname(str(source))
toBuild = env.path.buildUIRes+subdir
toInstall = env.path.installUIRes+subdir
env.Install (toInstall, source)
return env.Install(toBuild, source)
def ConfigData(env, source, targetDir=None):
""" install (copy) configuration- and metadata.
target dir is either the install location configured (in SConstruct),
or an explicitly given absolute or relative path segment, which might refer
to the location of the executable through the $ORIGIN token
"""
subdir = getDirname(str(source), env.path.srcConf) # removes source location path prefix
if targetDir:
if path.isabs(targetDir):
toBuild = toInstall = path.join(targetDir,subdir)
else:
if targetDir.startswith('$ORIGIN'):
targetDir = targetDir[len('$ORIGIN'):]
toBuild = path.join(env.path.buildExe, targetDir, subdir)
toInstall = path.join(env.path.installExe, targetDir, subdir)
else:
toBuild = path.join(env.path.buildConf, targetDir, subdir)
toInstall = path.join(env.path.installConf, targetDir, subdir)
else:
toBuild = path.join(env.path.buildConf,subdir)
toInstall = path.join(env.path.installConf,subdir)
env.Install (toInstall, source)
return env.Install(toBuild, source)
def ConfigData(env, prefix, source, targetDir=None):
""" install (copy) configuration- and metadata.
target dir is either the install location configured (in SConstruct),
or an explicitly given absolute or relative path segment, which might refer
to the location of the executable through the $ORIGIN token
"""
source = path.join(prefix,str(source))
subdir = getDirname(source, prefix) # removes source location path prefix
if targetDir:
if path.isabs(targetDir):
toBuild = toInstall = path.join(targetDir,subdir)
else:
if targetDir.startswith('$ORIGIN'):
targetDir = targetDir[len('$ORIGIN'):]
toBuild = path.join(env.path.buildExe, targetDir, subdir)
toInstall = path.join(env.path.installExe, targetDir, subdir)
else:
toBuild = path.join(env.path.buildConf, targetDir, subdir)
toInstall = path.join(env.path.installConf, targetDir, subdir)
else:
toBuild = path.join(env.path.buildConf,subdir)
toInstall = path.join(env.path.installConf,subdir)
env.Install (toInstall, source)
return env.Install(toBuild, source)
buildIcon = env.Builder( action = Action(invokeRenderer, "rendering Icon: $SOURCE --> $TARGETS")
@ -205,8 +212,8 @@ def register_LumieraResourceBuilder(env):
class WrappedStandardExeBuilder(SCons.Util.Proxy):
""" Helper to add customisations and default configurations to SCons standard builders.
The original builder object is wrapped and most calls are simply forwarded to this
wrapped object by Python magic. But some calls are intecepted in order to inject
suitalbe default configuration based on the project setup.
wrapped object by Python magic. But some calls are intercepted in order to inject
suitable default configuration based on the project setup.
"""
def __init__(self, originalBuilder):
@ -291,7 +298,7 @@ class LumieraModuleBuilder(WrappedStandardExeBuilder):
explicit spec, falling back on the lib filename
"""
if 'soname' in kw:
soname = self.subst(kw['soname']) # explicitely defined by user
soname = self.subst(kw['soname']) # explicitly defined by user
else: # else: use the library filename as DT_SONAME
if SCons.Util.is_String(target):
pathname = target.strip()
@ -331,7 +338,7 @@ class LumieraPluginBuilder(LumieraModuleBuilder):
def register_LumieraCustomBuilders (lumiEnv):
""" install the customised builder versions tightly integrated with our buildsystem.
""" install the customised builder versions tightly integrated with our build system.
Especially, these builders automatically add the build and installation locations
and set the RPATH and SONAME in a way to allow a relocatable Lumiera directory structure
"""
@ -362,7 +369,7 @@ def register_LumieraCustomBuilders (lumiEnv):
action = Action(makeLink, "Install link: $TARGET -> "+srcSpec)
env.Command (target,source, action)
# adding SymLink direclty as method on the environment object
# adding SymLink directly as method on the environment object
# Probably that should better be a real builder, but I couldn't figure out
# how to get the linktext through literally, which is necessary for relative links.
# Judging from the sourcecode of SCons.Builder.BuilderBase, there seems to be no way

81
admin/scons/Options.py Normal file
View file

@ -0,0 +1,81 @@
# -*- python -*-
##
## Options.py - SCons build: command line options and help
##
# Copyright (C) Lumiera.org
# 2012, Hermann Vosseler <Ichthyostega@web.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#####################################################################
from SCons.Script import PathVariable, EnumVariable, BoolVariable, Help
def defineCmdlineVariables(buildVars):
    """ several toggles and configuration variables can be set on the commandline,
        current settings will be persisted in a options cache file.
        you may define custom variable settings in a separate file.
        Commandline will override both.
    """
    buildVars.AddVariables(
        # -- toolchain selection -------------------------------------------
         ('ARCHFLAGS', 'Set architecture-specific compilation flags (passed literally to gcc)','')
        ,('CC', 'Set the C compiler to use.', 'gcc')
        ,('CXX', 'Set the C++ compiler to use.', 'g++')
        ,PathVariable('CCACHE', 'Integrate with CCache', '', PathVariable.PathAccept)
        ,PathVariable('DISTCC', 'Invoke C/C++ compiler commands through DistCC', '', PathVariable.PathAccept)
        # -- build flavour toggles -----------------------------------------
        ,EnumVariable('BUILDLEVEL', 'NoBug build level for debugging', 'ALPHA', allowed_values=('ALPHA', 'BETA', 'RELEASE'))
        ,BoolVariable('DEBUG', 'Build with debugging information and no optimisations', False)
        ,BoolVariable('OPTIMIZE', 'Build with strong optimisation (-O3)', False)
        ,BoolVariable('VALGRIND', 'Run Testsuite under valgrind control', True)
        ,BoolVariable('VERBOSE', 'Print full build commands', False)
        ,('TESTSUITES', 'Run only Testsuites matching the given pattern', '')
        # -- currently disabled options, kept for reference ----------------
#       ,BoolVariable('OPENGL', 'Include support for OpenGL preview rendering', False)
#       ,EnumVariable('DIST_TARGET', 'Build target architecture', 'auto',
#                     allowed_values=('auto', 'i386', 'i686', 'x86_64' ), ignorecase=2)
        # -- installation layout -------------------------------------------
        ,PathVariable('PREFIX', 'Installation dir prefix', 'usr/local', PathVariable.PathAccept)
        ,PathVariable('INSTALLDIR', 'Root output directory for install. Final installation will happen in INSTALLDIR/PREFIX/... ', '/', PathVariable.PathIsDir)
        ,PathVariable('PKGLIBDIR', 'Installation dir for plugins, defaults to PREFIX/lib/lumiera/modules', '',PathVariable.PathAccept)
        ,PathVariable('PKGDATADIR', 'Installation dir for default config, usually PREFIX/share/lumiera', '',PathVariable.PathAccept)
        )
def prepareOptionsHelp(buildVars,env):
    """assemble the text shown by `scons -h`: a general usage preamble
       plus the auto-generated description of all commandline variables
       registered on `buildVars`."""
    prelude = """
USAGE:   scons [-c] [OPTS] [key=val [key=val...]] [TARGETS]
     Build and optionally install Lumiera.
     Without specifying any target, just the (re)build target will run.
     Add -c to the commandline to clean up anything a given target would produce

Special Targets:
     build   : just compile and link
     research: build experimental code (might fail)
     testcode: additionally compile the Testsuite
     check   : build and run the Testsuite
     doc     : generate documentation (Doxygen)
     all     : build and testcode and doc
     install : install created artifacts at PREFIX

Configuration Options:
"""
    Help(prelude + buildVars.GenerateHelpText(env))

149
admin/scons/Platform.py Normal file
View file

@ -0,0 +1,149 @@
# -*- python -*-
##
## Platform.py - SCons build: platform configuration and library detection
##
# Copyright (C) Lumiera.org
# 2012, Hermann Vosseler <Ichthyostega@web.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#####################################################################
from SCons.Script import Exit
from Buildhelper import isCleanupOperation, isHelpRequest
def configure(env):
    """ locate required libraries.
        setup platform specific options.
        Abort build in case of failure.
    """
    if isCleanupOperation(env) or isHelpRequest():
        return env # skip configure in these cases

    conf = env.Configure()
    # run all configuration checks in the build environment defined thus far

    # Perform checks for prerequisites --------------------------------------------
    # Each failed check appends a human readable message; all problems are
    # reported together at the end, so a single run shows everything missing.
    problems = []
    if not conf.TryAction('pkg-config --version > $TARGET')[0]:
        problems.append('We need pkg-config for including library configurations, exiting.')

    # -- basic system libraries ---------------------------------------------------
    if not conf.CheckLibWithHeader('m', 'math.h','C'):
        problems.append('Did not find math.h / libm.')

    if not conf.CheckLibWithHeader('dl', 'dlfcn.h', 'C'):
        problems.append('Functions for runtime dynamic loading not available.')

    if not conf.CheckLibWithHeader('pthread', 'pthread.h', 'C'):
        problems.append('Did not find the pthread lib or pthread.h.')
    else:
        conf.env.Append(CPPFLAGS = ' -DHAVE_PTHREAD')
        conf.env.Append(CCFLAGS = ' -pthread')

    # optional headers: only toggle feature defines, no hard failure
    if conf.CheckCHeader('execinfo.h'):
        conf.env.Append(CPPFLAGS = ' -DHAVE_EXECINFO_H')

    if conf.CheckCHeader('valgrind/valgrind.h'):
        conf.env.Append(CPPFLAGS = ' -DHAVE_VALGRIND_H')
    else:
        print 'Valgrind not found. The use of Valgrind is optional; building without.'

    if not conf.CheckPkgConfig('nobugmt', 201006.1):
        problems.append('Did not find NoBug [http://www.lumiera.org/nobug_manual.html].')
    else:
        conf.env.mergeConf('nobugmt')

    # -- C++ library prerequisites ------------------------------------------------
    if not conf.CheckCXXHeader('tr1/memory'):
        problems.append('We rely on the std::tr1 standard C++ extension for shared_ptr.')

    if not conf.CheckCXXHeader('boost/config.hpp'):
        problems.append('We need the C++ boost-libraries.')
    else:
        # further boost components are checked only when boost is present at all
        if not conf.CheckCXXHeader('boost/scoped_ptr.hpp'):
            problems.append('We need boost::scoped_ptr (scoped_ptr.hpp).')
        if not conf.CheckCXXHeader('boost/format.hpp'):
            problems.append('We need boost::format (header).')
        if not conf.CheckLibWithHeader('boost_program_options-mt','boost/program_options.hpp','C++'):
            problems.append('We need boost::program_options (including binary lib for linking).')
        if not conf.CheckLibWithHeader('boost_system-mt','boost/system/error_code.hpp','C++'):
            problems.append('We need the boost::system support library (including binary lib).')
        if not conf.CheckLibWithHeader('boost_filesystem-mt','boost/filesystem.hpp','C++'):
            problems.append('We need the boost::filesystem lib (including binary lib for linking).')
        if not conf.CheckLibWithHeader('boost_regex-mt','boost/regex.hpp','C++'):
            problems.append('We need the boost regular expression lib (incl. binary lib for linking).')

    # clock_gettime may live in libc directly or in librt (older glibc)
    if conf.CheckLib(symbol='clock_gettime'):
        print 'Using function clock_gettime() as defined in the C-lib...'
    else:
        if not conf.CheckLib(symbol='clock_gettime', library='rt'):
            problems.append('No library known to provide the clock_gettime() function.')

    # -- media handling and GUI toolkit libraries ---------------------------------
    if not conf.CheckPkgConfig('gavl', 1.0):
        problems.append('Did not find Gmerlin Audio Video Lib [http://gmerlin.sourceforge.net/gavl.html].')
    else:
        conf.env.mergeConf('gavl')

    if not conf.CheckPkgConfig('alsa', '1.0.23'):
        problems.append('Support for ALSA sound output is required')

    if not conf.CheckPkgConfig('gtkmm-2.4', 2.8):
        problems.append('Unable to configure GTK--')

    if not conf.CheckPkgConfig('glibmm-2.4', '2.16'):
        problems.append('Unable to configure Lib glib--')

    if not conf.CheckPkgConfig('gthread-2.0', '2.12.4'):
        problems.append('Need gthread support lib for glib-- based thread handling.')

    if not conf.CheckPkgConfig('cairomm-1.0', 0.6):
        problems.append('Unable to configure Cairo--')

    # GDL: accept either the system package or Lumiera's custom 'gdl-lum' fallback
    verGDL = '2.27.1'
    if not conf.CheckPkgConfig('gdl-1.0', verGDL, alias='gdl'):
        print 'No sufficiently recent (>=%s) version of GDL found. Maybe use custom package gdl-lum?' % verGDL
        if not conf.CheckPkgConfig('gdl-lum', verGDL, alias='gdl'):
            problems.append('GNOME Docking Library not found. We either need a sufficiently recent GDL '
                            'version (>=%s), or the custom package "gdl-lum" from Lumiera.org.' % verGDL)

    if not conf.CheckPkgConfig('librsvg-2.0', '2.18.1'):
        problems.append('Need rsvg Library for rendering icons.')

    if not conf.CheckCHeader(['X11/Xutil.h', 'X11/Xlib.h'],'<>'):
        problems.append('Xlib.h and Xutil.h required. Please install libx11-dev.')

    if not conf.CheckPkgConfig('xv') : problems.append('Need libXv...')
    if not conf.CheckPkgConfig('xext'): problems.append('Need libXext.')

    # report missing dependencies
    if problems:
        print "*** unable to build due to the following problems:"
        for isue in problems:
            print " * %s" % isue
        print
        print "build aborted."
        Exit(1)

    print "** Gathered Library Info: %s" % conf.env.libInfo.keys()

    # create new env containing the finished configuration
    return conf.Finish()

137
admin/scons/Setup.py Normal file
View file

@ -0,0 +1,137 @@
# -*- python -*-
##
## Setup.py - SCons build: setup, definitions and compiler flags
##
# Copyright (C) Lumiera.org
# 2012, Hermann Vosseler <Ichthyostega@web.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#####################################################################
from SCons.Script import EnsurePythonVersion, EnsureSConsVersion, Variables, Decider
from LumieraEnvironment import *
from Buildhelper import *
import Options
#-------------------------------------------------------Configuration
TARGDIR = 'target'            # root of the build output tree
VERSION = '0.pre.01'          # baked into the binaries as LUMIERA_VERSION
TOOLDIR = './admin/scons'     # SCons plugins
OPTCACHE = 'optcache'         # persisted commandline variable settings
CUSTOPTFILE = 'custom-options'   # optional user supplied defaults, overridden by commandline

# these are accessible via env.path.xxxx
# build* paths are below the build output tree ('#' = build root),
# install* paths are below the configured install destination
buildExe = '#$TARGDIR'
buildLib = '#$TARGDIR/modules'
buildPlug = '#$TARGDIR/modules'
buildIcon = '#$TARGDIR/gui/icons'
buildUIRes = '#$TARGDIR/'
buildConf = '#$TARGDIR/config'
installExe = '#$DESTDIR/lib/lumiera'
installLib = '#$DESTDIR/lib/lumiera/modules'
installPlug = '#$DESTDIR/lib/lumiera/modules'
installIcon = '#$DESTDIR/share/lumiera/icons'
installUIRes = '#$DESTDIR/share/lumiera/'
installConf = '#$DESTDIR/lib/lumiera/config'
#-------------------------------------------------------Configuration
buildSetup = Record(locals())   # snapshot of the definitions above, handed to the build environment
                                # NOTE(review): locals() also captures the module's imported names here
def defineBuildEnvironment():
    """ create a custom build environment,
        define the basic compiler and linker flags,
        define locations in source and target tree,
        parse the commandline and pick up options
    """
    EnsureSConsVersion(1,0)
    EnsurePythonVersion(2,4)
    Decider('MD5-timestamp') # detect changed files by timestamp, then do a MD5

    # commandline variables, persisted in OPTCACHE, pre-settable via CUSTOPTFILE
    buildVars = Variables([OPTCACHE, CUSTOPTFILE])
    Options.defineCmdlineVariables(buildVars)
    env = LumieraEnvironment(buildSetup, buildVars)

    env.Replace( CPPPATH =["#src"] # used to find includes, "#" means always absolute to build-root
               , CPPDEFINES=['LUMIERA_VERSION='+VERSION ] # note: it's a list to append further defines
               , CCFLAGS='-Wall -Wextra '
               , CFLAGS='-std=gnu99'
               )
    handleVerboseMessages(env)
    handleNoBugSwitches(env)

    env.Append(CPPDEFINES = '_GNU_SOURCE')
    appendCppDefine(env,'DEBUG','DEBUG', 'NDEBUG')
#   appendCppDefine(env,'OPENGL','USE_OPENGL')
    appendVal(env,'ARCHFLAGS','CCFLAGS')             # for both C and C++
    appendVal(env,'OPTIMIZE', 'CCFLAGS', val=' -O3')
    appendVal(env,'DEBUG', 'CCFLAGS', val=' -ggdb')

    # setup search path for Lumiera plugins
    appendCppDefine(env,'PKGLIBDIR','LUMIERA_PLUGIN_PATH=\\"$PKGLIBDIR/:ORIGIN/modules\\"'
                                   ,'LUMIERA_PLUGIN_PATH=\\"ORIGIN/modules\\"')
    # NOTE(review): the config path below substitutes $PKGLIBDIR --
    # presumably $PKGDATADIR was intended here; confirm before changing
    appendCppDefine(env,'PKGDATADIR','LUMIERA_CONFIG_PATH=\\"$PKGLIBDIR/:.\\"'
                                    ,'LUMIERA_CONFIG_PATH=\\"$DESTDIR/share/lumiera/:.\\"')

    Options.prepareOptionsHelp(buildVars,env)
    buildVars.Save(OPTCACHE, env)   # persist current settings for the next invocation
    return env
def appendCppDefine(env,var,cppVar, elseVal=''):
    """conditionally extend the preprocessor defines:
    when the build variable `var` is set, add `cppVar` (after variable
    substitution); otherwise add `elseVal`, unless it is empty."""
    if env[var]:
        env.Append(CPPDEFINES = env.subst(cppVar))
    else:
        if elseVal:
            env.Append(CPPDEFINES = env.subst(elseVal))
def appendVal(env,var,targetVar,val=None):
    """when the build variable `var` is set, append a value to `targetVar`:
    either the (substituted) explicit `val`, or the variable's own value"""
    if not env[var]:
        return
    payload = env.subst(val) or env[var]
    env.Append( **{targetVar: payload} )
def handleNoBugSwitches(env):
    """ set the build level for NoBug.
        Release builds imply no DEBUG,
        whereas ALPHA and BETA require DEBUG.
    """
    level = env['BUILDLEVEL']
    if level == 'RELEASE':
        env.Replace( DEBUG = 0 )
    elif level in ('ALPHA', 'BETA'):
        if not env['DEBUG']:
            print('Warning: NoBug ALPHA or BETA builds requires DEBUG=yes, switching DEBUG on!')
            env.Replace( DEBUG = 1 )
        # SCons prepends '-D', yielding the NoBug switches -DEBUG_ALPHA / -DEBUG_BETA
        env.Append(CPPDEFINES = 'EBUG_'+level)
def handleVerboseMessages(env):
    """ unless VERBOSE output is requested, replace the full command
        lines in the build log by terse progress messages """
    if env['VERBOSE']:
        return
    # SetOption('silent', True)
    terse = {'CCCOMSTR':       " Compiling $SOURCE"
            ,'SHCCCOMSTR':     " Compiling $SOURCE"
            ,'CXXCOMSTR':      " Compiling++ $SOURCE"
            ,'SHCXXCOMSTR':    " Compiling++ $SOURCE"
            ,'LINKCOMSTR':     " Linking --> $TARGET"
            ,'LDMODULECOMSTR': " creating module [ $TARGET ]"
            }
    for key in terse:
        env[key] = terse[key]

25
data/SConscript Normal file
View file

@ -0,0 +1,25 @@
# -*- python -*-
##
## SConscript - SCons buildscript for Icons and Resources
##
from Buildhelper import scanSubtree
Import('env')

# define Icons to render and install:
# - every SVG below icons/svg is rendered into raster icons via the
#   IconRender builder (sizes are defined inside each SVG, see IconSvgRenderer.py)
# - prerendered PNG icons are installed (copied) as-is
vector_icon_dir = 'icons/svg'
prerendered_icon_dir = 'icons/prerendered'

icons = ( [env.IconRender(f) for f in scanSubtree(vector_icon_dir, ['*.svg'])]
        + [env.IconResource(f) for f in scanSubtree(prerendered_icon_dir, ['*.png'])]
        )

#define Configuration files to install (dir-prefix, name)
# setup.ini is placed next to the executable ($ORIGIN), the rest goes
# to the standard config location
config = ( env.ConfigData('config','setup.ini', targetDir='$ORIGIN')
         + env.ConfigData('config','dummy_lumiera.ini')
         )

Export('icons config')

26
data/icons/README Normal file
View file

@ -0,0 +1,26 @@
#
# Lumiera Icon Artwork
#
# Copyright (C) Lumiera.org
# 2008, Joel Holdsworth <joel@airwebreathe.org.uk>
#
# Icon Artwork and similar materials accompanying the Lumiera Application
# is dual-licensed under the GNU General Public License version 2 or above,
# and
# Creative Commons Attribution-ShareAlike 3.0 Unported License
#
#
This directory holds Icons and similar graphics resources for the Lumiera GUI.
- prerendered: Raster image Icons rendered into a selection of sizes
- svg: Scalable Vector Graphics Icons, to be rendered into suitable sizes
by the build process. Rendering is done with the help of lib Cairo.
The build creates an executable from src/tools/rsvg-convert.c
The invocation of the icon rendering is done with the help of a
Python script IconSvgRenderer.py, which first parses the SVG document
and then invokes rsvg-convert to generate the raster images.

View file

Before

Width:  |  Height:  |  Size: 540 B

After

Width:  |  Height:  |  Size: 540 B

View file

Before

Width:  |  Height:  |  Size: 453 B

After

Width:  |  Height:  |  Size: 453 B

View file

Before

Width:  |  Height:  |  Size: 631 B

After

Width:  |  Height:  |  Size: 631 B

View file

Before

Width:  |  Height:  |  Size: 695 B

After

Width:  |  Height:  |  Size: 695 B

View file

Before

Width:  |  Height:  |  Size: 788 B

After

Width:  |  Height:  |  Size: 788 B

View file

Before

Width:  |  Height:  |  Size: 1 KiB

After

Width:  |  Height:  |  Size: 1 KiB

View file

Before

Width:  |  Height:  |  Size: 1.5 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

View file

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 58 KiB

View file

Before

Width:  |  Height:  |  Size: 19 KiB

After

Width:  |  Height:  |  Size: 19 KiB

View file

Before

Width:  |  Height:  |  Size: 11 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View file

Before

Width:  |  Height:  |  Size: 7.2 KiB

After

Width:  |  Height:  |  Size: 7.2 KiB

View file

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View file

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View file

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 20 KiB

View file

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

17
doc/SConscript Normal file
View file

@ -0,0 +1,17 @@
# -*- python -*-
##
## SConscript - SCons buildscript for Documentation
##
from Buildhelper import scanSubtree     # NOTE(review): currently unused here
Import('env')

# generate the Doxygen documentation for the source tree
doxydoc = env.Doxygen('devel/Doxyfile')

# env.Install(dir = '$DESTDIR/share/doc/lumiera$VERSION/devel', source=documentation)

# remove Doxygen's log and warning files together with the generated docs on clean
env.Clean (doxydoc, doxydoc + ['devel/,doxylog','devel/warnings.txt'])

Export('doxydoc')

View file

@ -70,8 +70,8 @@ proceeding along a timeline).
Reconfiguration
^^^^^^^^^^^^^^^
Some of these operation modes need to be prepared to an unpredictable live reconfiguration,
driven by user interactions:
Some of these operation modes need to be prepared to encounter an unpredictable live
reconfiguration, driven by user interactions:
- any part of background rendering can be invalidated and restarted, while other parts
should be re-integrated, possibly with adjusted position
@ -108,7 +108,7 @@ playback location, and it can be hooked up with a play-control GUI widget
Each play-controller in turn gets associated with several *play/render-processes*,
one for each independent media stream (channel) to be produced. Of course this
isn't an operating system process; rather, ach such process is a compound of entries
isn't an operating system process; rather, each such process is a compound of entries
in a registration table, which serve the purpose of tying several other services together,
which we initiate and use in order to make that render process happen.
Most notably, we'll use the services of the actual engine, which provides us with kind of
@ -126,7 +126,7 @@ Viewer and Output connection
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating a player instance binds together three partners: a _timeline_, a _viewer_
and _the engine_. While the timeline provides the content to play, the _viewer connection_
is crutial for working out the actual output sink(s) and thus the output format to use.
is crucial for working out the actual output sink(s) and thus the output format to use.
Thus, a viewer connection is prerequisite for creating a player instance.
Viewer connections exist as associations in the session/model -- as entities separate
@ -135,13 +135,13 @@ such a connection is (still) missing, building a player instance recurs to the s
to get a suitable viewer _allocated_. The viewer connection can't be broken during the
lifetime of that player instance (or putting it the other way: breaking that viewer
connection, e.g. by forcing a different connection or by shutting down the viewer,
immediately terminates the player. This detaching works synchroneously, i.e. it
blocks untlil all the allocated _output slots_ could be released.
immediately terminates the player. This detaching works synchronously, i.e. it
blocks until all the allocated _output slots_ could be released.
Live switching
^^^^^^^^^^^^^^
While the viewer connection can be treated as fixed during the lifespan of a player
instance, several life switching and reconfiguration operations might happen anytime:
instance, several life switching and reconfiguration operations might happen any time:
The _model port_ (place where data is retrieved from calculation), the output characteristics
(framerate, direction) and the delivery goals (playback position, loop playing, scrubbing)
all may be changed during playback -- we need a way for the player to ``cancel'' and
@ -164,7 +164,7 @@ the quantisation, which leaves us with just a few possible junction points
where to place quantisation: The backend, the GUI, the player, the session.
- putting it into the backend seems to be the most reasonable at first sight:
We can ``do away'' with nasty things soon, especially if they are technicallities,
We can ``do away'' with nasty things soon, especially if they are technicalities,
``get a clean state soon'' -- and hasn't frame quantisation something to do
with media data, which is handled in the backend?
+
@ -174,7 +174,7 @@ amount of degraded information flows throughout the whole system; thus the
general rule to do it as late as possible. Uncrippled information is
enablement. And last but not least: the frame quantisation is connected
to the _output_ format -- and the backend is likely within the whole
application the subsytem most remote and unaware of output requirements.
application the subsystem most remote and unaware of output requirements.
- rounding/quantising in the GUI is extremely common within media applications;
unfortunately there seems to be not a single rational argument supporting that habit.
@ -184,7 +184,7 @@ Which leaves us with the player and the session. Both positions could
arguably be supported. Here, a more careful consideration shows, that
the ``act of frame rounding'' can be decomposed: into the _act of quantisation_
and the _frame grid_. Basically it's the session which has the ability
to form the *frame grid*, but it is lacking crutial information about
to form the *frame grid*, but it is lacking crucial information about
the output. Only when connecting both -- which is the essence of the
player -- frame quantisation can actually be performed. Thus, the
player is the natural location to perform that quantisation operation.

View file

@ -0,0 +1,25 @@
The Scheduler
-------------
:Author: CehTeh
:Date: 6/2007
//MENU: label Scheduler
Scheduling is done with two priority queues, one for high priority jobs and one for low priority jobs.
These priority queues are ordered by absolute time values plus some job-specific identifier.
There are following (non exhaustive) kinds of jobs:
* started job
* job to be canceled
* unscheduled job
* dependency providing jobs
Jobs implement a kind of future. We try hard to avoid any blocking waits.
The Job scheduler runs singlethreaded. Its only task is to schedule and delegate jobs to worker threads,
by itself it will never do any extensive processing.
Each job has a pre-configured behaviour for the case of failure or deadline miss.
Any cancelled and expiring jobs get noted in *Statistics* to adjust performance and timings
for optimal performance and I/O throughput.

View file

@ -1,5 +1,114 @@
Design Documents: Backend
=========================
Eventually, this will have design documentation for the Backend.
What follows is a summary regarding the design of Lumiera's *Data Handling Backend*
This is the foundation layer responsible for any high performance or high volume
data access. Within Lumiera, there are two main ways in which data is handled:
* The Session and the object models manipulated through the GUI is kept in memory.
It is backed by a _storage backend,_ which provides database like storage and
especially logging, replaying and ``Undo'' of all ongoing modifications.
* Media data is handled _frame wise_ -- as described below.
The backend uses *memory mapping* to make data available to the program.
This is somewhat different to the more common open/read/write/close file access,
while giving superior performance and much better memory utilization.
The data backend must be capable to handle more data than will fit into the memory
or even address space on 32 bit architectures. Moreover, a project might access more files
than the OS can handle at any one time; thus, for the _Files used by the Backend,_ it needs a
*FilehandleCache* to manage file handles dynamically.
Which parts of a file are actually mapped to physical RAM is managed by the kernel;
it keeps a *FileMapCache* to manage the *FileMaps* we've set up.
In the End, the application itself only requests *Data Frames* from the Backend.
To minimize latency and optimize CPU utilization we have a *Prefetch thread* which operates
a *Scheduler* to render and cache frames which are _expected to be consumed soon_. The intention
is to manage the rendering _just in time_.
The prefetcher keeps *Statistics* for optimizing performance.
Accessing Files
---------------
+FileDescriptor+ is the superclass of all possible filetypes, it has a weak reference to a
+FileHandle+ which is managed in within the +FilehandleCache+. On creation, only the existence
(when reading) or access for write for new files are checked. The +FileDescriptor+ stores some
generic metadata about the underlying file and intended use. But the actual opening is done on demand.
The _content of files is memory mapped_ into the process address space.
This is managed by +FileMap+ entries and a +FileMapCache+.
File Handles
~~~~~~~~~~~~
A +FilehandleCache+ serves to store a finite maximum number of +FileHandles+ as a MRU list.
FileHandles are managed by the +FilehandleCache+; basically they are just storing the underlying OS file
handles and managed in a lazy/weak way, (re)opened when needed and aging in the cache when not needed,
since the amount of open file handles is limited aged ones will be closed and reused when the system
needs to open another file.
File Mapping
~~~~~~~~~~~~
The +FileMapCache+ keeps a list of +FileMaps+, which are currently not in use and subject of aging.
Each +FileMap+ object contains many +Frames+. The actual layout depends on the type of the File.
Mappings need to be _page aligned_ while Frames can be anywhere within a file and dynamically sized.
All established ++FileMap++s are managed together in a central +FileMapCache+.
Actually, +FileMap+ objects are transparent to the application. The upper layers will just
request Frames by position and size. Thus, the +File+ entities associate a filename with the underlying
low level File Descriptor and access
Frames
~~~~~~
+Frames+ are the smallest datablocks handled by the Backend. The application tells the Backend to make
Files available and from then on just requests Frames. Actually, those Frames are (references to) blocks
of continuous memory. They can be anything depending on the usage of the File (Video frames, encoder frames,
blocks of sound samples). Frames are referenced by a smart-pointer like object which manages the lifetime
and caching behavior.
Each frame reference can be in one of three states:
readonly::
the backing +FileMap+ is checked out from the aging list, frames can be read
readwrite::
the backing +FileMap+ is checked out from the aging list, frames can be read and written
weak::
the +FileMap+ object is checked back into the aging list, the frame can't be accessed but we can
try to transform a weak reference into a readonly or readwrite reference
Frames can be addressed uniquely. Whenever a frame is not available and the backend can't serve
a cached version of the frame, a (probably recursive) rendering request will be issued.
Prefetching
~~~~~~~~~~~
There are 2 important points when we want to access data with low latency:
. Since we handle much more data than will fit into most computers' RAM,
  the data which is backed by files has to be paged in and available when needed.
The +Prefetch+ Thread manages page hinting to the kernel (posix_madvise()..)
. Intermediate Frames must eventually be rendered to the cache.
The Backend will send +Renderjobs+ to the +Scheduler+.
Whenever something queries a +Frame+ from the backend it provides hints about what it is doing.
These hints contain:
* Timing constraints
- When will the +Frame+ be needed
- could we drop the request if it won't be available (rendered) in-time
* Priority of this job (as soon as possible, or just in time?)
* action (Playing forward, playing backward, tweaking, playback speed, recursive rendering of dependent frames)
.Notes
* The Backend will try to render related frames in groups.
* This means that following frames are scheduled with lower priority.
* Whenever the program really requests them the priority will be adjusted.
-> more about link:Scheduler.html[the Scheduling of calculation jobs]

View file

@ -0,0 +1,15 @@
Design: Output Handling
=======================
:Date: June 2011
:Author: Ichthyostega
//Menu: label Output handling
Some ideas....
- abstract away the actual technology used for output
- have generic *output designations* and translate them into an *output slot*
- the OutputSlot interface can be designed to match the requirements of the Engine
- assume a mechanism to handle timeouts, glitches and skips within each concrete OutputSlot implementation

View file

@ -1,5 +1,6 @@
Design Documents: Renderengine
==============================
Eventually, this will have design documentation for the Engine.
This section contains design documents regarding the overall workings of the Render Engine,
and the handling of output generation and output connections.

View file

@ -3,12 +3,81 @@ Plugin Brainstorm
:Author: Christian Thäter
:Date: 2008-09-15
Raw Version
-----------
Lumiera will use a very simple and language neutral plugin system. The focus is on easy and independent distribution of plugins and small specific interfaces. Ultimate flexibility is of second concern.
.Concept
Plugins are just shared libraries which offer well defined Interfaces.
A Plugin may offer more than one interface and may in turn request/use interfaces
from other Plugins or from the main application.
[cpp]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Interface Definition
--------------------
Interfaces are declared in header files. They use some tool macros to give a convenient definition language.
Basically, Interfaces are fixed -- with the exception that new functions may be added.
link:PluginVersioning.html[Plugin Version Management] should stay out of the view most of the time.
Plugin interfaces are simple C structs with some metadata at the beginning and function prototypes
added at the end. With some macros we can map simple functions to versioned interfaces.
Compiled plugins will stay compatible even if the interface is extended, while source code needs maintenance.
An interface needs a name and a version. They define a block where the actual function prototypes can be added.
New prototypes have to be added at the end, existing prototypes must never be changed.
Each function prototype must be given with its different parts:
- return type
- name
- arguments list
- version.
.Example
[source,c]
-----------------------------------------
LUMIERA_INTERFACE(foo, 1,
LUMIERA_IPROTO(void, bar, (void)),
LUMIERA_IPROTO(int, baz, (int i))
);
LUMIERA_INTERFACE(foo, 2,
LUMIERA_IPROTO(void, bar, (void)),
LUMIERA_IPROTO(int, baz, (float i))
);
-----------------------------------------
Note that the version 2 interface _changed the parameter from int to float_ for the 'baz' function.
The interface/plugin framework will expand the above definitions into:
[source,c]
-----------------------------------------
struct lumiera_interface_foo_1
{
struct lumiera_interface interface_header_;
void (*bar) (void);
int (*baz) (int i);
};
struct lumiera_interface_foo_2
{
struct lumiera_interface interface_header_;
void (*bar) (void);
int (*baz) (float i);
};
-----------------------------------------
Implementation of Interfaces
----------------------------
Interfaces can be implemented either in core code or through plugins.
In each case, such an _instantiation_ of an interface means that actual functions are mapped
to the corresponding slots in the interface structure.
.Implementing an interface
[source,c]
-----------------------------------------
LUMIERA_INTERFACE_DECLARE (interface_descriptor, 0,
/* The following slots are some human-readable descriptions of certain properties */
LUMIERA_INTERFACE_SLOT (const char*, name, (LumieraInterface)),
@ -18,4 +87,21 @@ LUMIERA_INTERFACE_DECLARE (interface_descriptor, 0,
LUMIERA_INTERFACE_SLOT (const char*, license, (LumieraInterface))
/* TODO add more things here, dependencies, provisions etc */
);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-----------------------------------------
Lumiera Plugin API
-------------------
The Lumiera Interface/Plugin framework provides some functions to manage Plugins.
Actually a user requests interfaces. The libraries which implement Plugins are managed transparently.
Interfaces are exported as instances and are not necessarily singletons. This means that a single Plugin
can export the same interface type several times under different names. The naming rules for interfaces
need to be defined elsewhere.
loading and opening a Plugin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Plugins are looked up in `$LUMIERA_PLUGIN_PATH`, which is a colon separated list of directories,
and then in a specific ``Lumiera plugin dir'', where standard plugins get installed alongside
the Application.

View file

@ -0,0 +1,44 @@
PluginVersioningCases
======================
:Author: MichaelPloujnikov
:Date Created: 200707121127
:Date Changed: 200707160404
:Count Changes: 46
//MENU: label Plugin Version
Compatibility matrix
--------------------
.Source compatibility
[grid="rows"]
`100`200`200~~~~
*~CALLER~ \ ^CALLEE^**,OLD^**^,NEW^**^
OLD,works,works but a recent interface definition must be available
NEW,works,works
~~~~
.Binary compatibility
[grid="rows"]
`100`200`200~~~~
*~CALLER~ \ ^CALLEE^**,OLD^**^,NEW^**^
OLD,works,works
NEW,caller gets "revision not sufficient" at runtime and should implement fallbacks,works
~~~~
^*^) CALLER is the user of an interface, CALLEE is the interface provider (usually a plugin)
^**^) OLD means an initial revision, NEW means some later revision of an interface
Observations
------------
Compiling a newer Plugin for some older main application release has some quirks (interface definitions are intended to be shipped with the main application). This should be rarely the case.
When compiling, older Plugins should be updated to new interface revisions.
Caller should provide a fallback to older interface revisions for binary compatibility.
Generally, sources should just be properly maintained and updated to use the most recent interfaces revision.
For binary compatibility everything will work well, provided that the caller kept proper fallback functionality for older interface revisions. Plugins which are independently distributed (packaged) in binary form don't need to be updated with every new main application release and just work.

View file

@ -0,0 +1,785 @@
2008-11-12 Lumiera Developers Meeting
=====================================
:Author: Ichthyostega
:Date: 2008-11-12
Nov 12, 2008 on #lumiera 20:00 - 01:15 UTC +
__Participants__
* cehteh
* ichthyo
* joelholdsworth
* raffa
_Summary and transcript supplemented by Ichthyo 11/2011_
Platform and lib dependency status
----------------------------------
Right away, there is agreement between all 3 core devs that the Lumiera project
should try to stay close to a *stable reference linux distribution*. Usually,
developers tend to stick to ``latest greatest'', causing lots of difficult to resolve
problems to packagers and users of their software. The Lumiera team wants to go into
the opposite direction: staying close to a mature and stable distro and rather try to
backport any newer libraries, when they are _really required_ for getting ahead with
the coding. Since _cehteh_ and _ichthyo_ are Debian users and _joelholdsworth_ tends
to use a moderately recent Ubuntu, the intention is to use Debian/stable as reference.
Conclusion
~~~~~~~~~~
* use *Debian/stable* (currently Lenny) as reference distribution
* care for backports of any newer libraries required
* use VMs to verify the build on a moderate selection of more recent distros
Problems with GUI Plugins
-------------------------
The core devs are aware of some challenging problems regarding plug-ins for the
GUI and effect plug-ins requiring a specific GUI control or option pane. All the more so,
since the intention is for Lumiera to adapt _existing plug-in systems_ rather than
rolling yet-another new plug-in standard no one else cares to support. Initially,
specific Lumiera plug-ins will be provided only for some very basic modular functionality,
eg. fader, panner, blur.
_joelholdsworth_ points out that configurability can't replace real GUI and workflow design.
He quotes from a recent Blender GUI discussion
[quote, the Blender UI reviews]
____________________________________________________________________
Lastly, Id like to address another misconception, this time about customizability.
There has been a notion that the solution to most of the UI problems can be solved with added customizability.
The notion goes that if the UI is inefficient, the user can simply customize it herself to suit her needs.
This claim is wrong for several reasons:
- It is impossible to customize something you have not fully comprehended yet,
so it in no way helps the learning process.
- It makes an application less predictable, because the user cannot rely on the application
to always act the same.
- It takes focus away from the users content, and over to managing the application itself,
which is what we wanted to avoid in the first place.
- It undermines the portability of Blender, because you cannot rely on Blender
functioning the same way across different systems.
- Customizability does not equate to flexibility.
- 99% of Blenders user will use the default setup anyway.
This is not to say that customizability is always bad - having the ability to change the hotkeys
from the defaults to match another 3D application such as Maya or Softimage XSI can make it easier
for users of those applications to adapt to Blender.
____________________________________________________________________
Consequently, Joel wants to try just doing the UI _right_, then add a little bit of customization back in here and there.
Additionally, agreement is to have a kind-of ``perspective switcher'' (like in Eclipse), so saving different
panel layouts would be covered easily
.-- Discussion about foreseeable problems with GUI Plugins --
[caption="☉Transcript☉ "]
----------------------------
[2008-11-12 21:53:50] <ichthyo> I thought we could just discuss the problem informally
and share some thoughts what we think is most critical
[2008-11-12 22:06:01] <ichthyo> there are plugins which don't provide a GUI, and some plugin systems allow
for a standard method for creating plugin guis
[2008-11-12 22:05:46] <joelholdsworth> so bundling GUI together with plugins is easier now, right?
[2008-11-12 22:05:59] <joelholdsworth> we implement some kind of parameters system
[2008-11-12 22:06:03] <ichthyo> yes
[2008-11-12 22:06:14] <cehteh> joelholdsworth: thats up to you .. but the plugin system is ready
[2008-11-12 22:06:27] <joelholdsworth> yeah that's fine
[2008-11-12 22:06:38] <joelholdsworth> if you want to extend the GUI you just expose and interface for it
[2008-11-12 22:07:51] * cehteh thinks about some canvas widget which the plugin can use to draw stuff there
[2008-11-12 22:08:26] <joelholdsworth> cehteh: as in overlays on the video canvas?
[2008-11-12 22:09:19] <cehteh> joelholdsworth: up to you, not necessary overlays
[2008-11-12 22:05:07] <ichthyo> I see some problems we should thik about
[2008-11-12 22:06:50] <ichthyo> so, basically loading a processing routine from
a plugin and using it to process video/audio is no problem. Basically you define
some interface (first in C++) then translate it to the lumiera interface macros
but with the GUI there are several conceptual problems
[2008-11-12 22:07:35] <ichthyo> first: There are plugins which don't have a GUI. Thats fine
[2008-11-12 22:08:05] <ichthyo> we need to define a parameter system, as joel pointed out
[2008-11-12 22:08:38] <ichthyo> and then we need to fit the existing plugin systems, each one, to use this parameter system
[2008-11-12 22:08:45] <cehteh> thats type:key:value at simplest
[2008-11-12 22:08:51] <cehteh> maybe with some representation hints
[2008-11-12 22:08:58] <ichthyo> there is more about it
[2008-11-12 22:09:03] <ichthyo> representation hints
[2008-11-12 22:09:07] <ichthyo> type of gui control
[2008-11-12 22:09:15] <ichthyo> logarithmic/linear scale
[2008-11-12 22:09:17] <ichthyo> zero point
[2008-11-12 22:09:19] <ichthyo> range
[2008-11-12 22:09:27] <ichthyo> but it is manageable
[2008-11-12 22:09:38] <ichthyo> LADSPA is a prominent example
[2008-11-12 22:09:55] <ichthyo> then, the problem in the GUI is how to arrange those controls
[2008-11-12 22:10:20] <ichthyo> anyway, I think this alternative is the most simple one and the one which will
create the least problems
[2008-11-12 22:10:34] <joelholdsworth> ichthyo: I figured the UI will just expose parameters
[2008-11-12 22:10:42] <joelholdsworth> so you expose them semantically - maybe in a tree
[2008-11-12 22:10:51] <joelholdsworth> and add rich presentation hints
[2008-11-12 22:11:00] <joelholdsworth> that hit the UI how to lay out out
[2008-11-12 22:10:59] <cehteh> i think layout should be optional by some config/stylesheet
[2008-11-12 22:11:05] <ichthyo> cehteh: possible. But those things are already defined by the
plugin systems we want to wrap up.
[2008-11-12 22:13:01] <cehteh> you have a list (or maybe tree) of parameters .. presentation aside
... tree just because some parameters depend on the presence of some else
and you have a presentation ..
[2008-11-12 22:11:53] <ichthyo> next problem is:
[2008-11-12 22:12:16] <ichthyo> the plugins need to allocate some space in the GUI and should not clutter
the workspace this is a tough really tough interface design problem
because some plugins aren't just some "optional stuff", but wired
right into the core operation
[2008-11-12 22:12:54] <joelholdsworth> well ichthyo: I'm not planning to allow plugins to "take over"
handling workflow. they get to extend very specific parts of the UI
[2008-11-12 22:13:09] <joelholdsworth> e.g. params
[2008-11-12 22:13:11] <joelholdsworth> or the canvas
[2008-11-12 22:13:15] <joelholdsworth> or timeline
[2008-11-12 22:13:22] <joelholdsworth> and only in very specific ways
[2008-11-12 22:14:15] <joelholdsworth> ichthyo: and most of the time plugins wont extend anything --
their whole UI will be exposed semantically (with UI presentation hints)
(in addition to the plain semantics)
[2008-11-12 22:13:27] <ichthyo> thats fine
[2008-11-12 22:14:58] <ichthyo> but do you see the interface design problem?
basically you want to design a smooth handling. but now you need to rely
on some module which is plugged in and you don't know much about this module
how is it possible to tweak effects easily while you just don't know much
about those effects?
[2008-11-12 22:15:23] <joelholdsworth> yes I agree
[2008-11-12 22:15:48] <ichthyo> then there is another problem:
[2008-11-12 22:15:56] <ichthyo> key bindings and MIDI bindings
[2008-11-12 22:16:10] <ichthyo> you want to bind key shortcuts and external controllers to GUI elements
[2008-11-12 22:16:20] <ichthyo> and of course plugins need to be included into this system
[2008-11-12 22:16:35] <joelholdsworth> ok that's quite easy - you just right click on the control
(or click a tiny button) that opens up a menu that lets you set this stuff
[2008-11-12 22:16:59] <joelholdsworth> UI design? well that's what presentation hints help with
[2008-11-12 22:17:26] <joelholdsworth> they set size, layout, the appropriate type of slider
for example to expose this value
[2008-11-12 22:17:29] <ichthyo> and what happens when the respective plugin options window isn't opened at all?
[2008-11-12 22:17:47] <ichthyo> or when you have multiple instances of the same plugin?
[2008-11-12 22:17:48] <joelholdsworth> what do you mean?
[2008-11-12 22:18:11] <ichthyo> for example: you use a sound panner plugin
and you want to adjust the sound pan in 3 dimensions by key shortcuts,
or to bind them to some knobs on an external control surface
and now you have multiple instances of the same panner plugin
so we need a naming scheme
[2008-11-12 22:19:03] <cehteh> how i thought about this:
1) plugin provides the list of its parameters (maybe as tree)
2) there is a config file (config system) which gets installed
with the plugins which defines the default layout
3) user might override that in his own configs ...
but still only defaults for new instances of the things
4) the actual configured state is saved with the session
while all this config just is a definition which gets applied to a kindof stylesheet
[2008-11-12 22:20:05] <ichthyo> as tree for sure, because many plugins have nested windows for each instance
[2008-11-12 22:21:49] <ichthyo> which means we need a naming/addressing scheme
[2008-11-12 22:22:20] <ichthyo> ...which creates sort of a cascading scheme
[2008-11-12 22:22:42] <ichthyo> I mean, a naming/addressing scheme
[2008-11-12 22:23:04] <ichthyo> for distinguishing identical parameters in different instances of yet-unknown plugins
[2008-11-12 22:23:19] <ichthyo> and now consider: where should these key bindings attach to?
[2008-11-12 22:23:39] <ichthyo> to a specific instance, or to the suitable instance which is just in focus?
[2008-11-12 22:23:46] <ichthyo> or both (probably)
[2008-11-12 22:24:03] <ichthyo> you may want to bind a different knob to each different panner plugin
[2008-11-12 22:24:25] <ichthyo> or alternatively, you may want to bind "the pan knob" to the "current pannner"
[2008-11-12 22:24:42] <ichthyo> this would require that we introduce a focus concept
[2008-11-12 22:25:00] <ichthyo> I think thorwil proposed something similar in the last discussion
[2008-11-12 22:25:16] <ichthyo> so we have a notion of the "current track", "current clip"
[2008-11-12 22:25:49] <ichthyo> and then there is another problem with plugin GUIs
[2008-11-12 22:26:10] <ichthyo> many plugin systems, especially VST have custom GUIs
[2008-11-12 22:26:27] <ichthyo> and may not even work properly without using those
[2008-11-12 22:26:48] <ichthyo> which means, some GUI code in the plugin takes over. That is something I utterly dislike
[2008-11-12 22:33:29] <cehteh> no thats not needed
[2008-11-12 22:33:29] <cehteh> in the session each object has a uuid thats its address
[2008-11-12 22:33:29] <cehteh> each parameter has a uuid too(interface system)
[2008-11-12 22:33:31] <cehteh> so you can say parameter 0x2344545645654534 of object 0x4543765732525t643 ....
[2008-11-12 22:33:34] <cehteh> "yet unknown" doesnt exist .. it is either unknown or known :)
[2008-11-12 22:55:26] <ichthyo> well ..the problem is just: you can't avoid things like a user binding
some external control surface to a certain gui control. All I wanted is
to bring the problem to everyones attention, also, that we need a parameter system
and that this parameter system is probably tree-like, not just flat
[2008-11-12 22:57:06] <cehteh> i want anythnig be addressable with uuid(manipulating_function)+uuid(object)
[2008-11-12 22:58:41] <ichthyo> for me, this is just a opaque reference which is implemented in some way. So, assumed
I have the object representing the connectionn to the plugin, I can provide the GUI with
some means for re-addressing a certain parameter, e.g. for a key binding, and, of course
for re-establishing the same binding when re-creating the session from storage
[2008-11-12 23:00:50] <ichthyo> Further, with plugin guis instances there is yet another problem in that you probably
have more option windows of some plugins as you can show in any reasonable GUI
These issues were one of the reasons I made this "FeatureBundle" proposal some time ago,
Meaning there is a mechanism allowing you to bundle
- an existing external plugin
- an adaptation we do for connecting it to the gui
- maybe a script or gui
- customisation info we need for the gui layout
...and provide all together as a single bundle the user can install
[2008-11-12 23:11:15] <ichthyo> joelholdsworth: maybe you have already some ideas how to deal with all those plugin
option windows? they are a real problem for getting a smooth handling and workflow.
because: you'll spend a considerable amount of the whole project time with tewaking
[2008-11-12 23:12:28] <ichthyo> (yes, that's what I am about to point out....)
[2008-11-12 23:13:05] <ichthyo> ...if you work on tweaking, you typically have a small amount of plugin windows
which need to stay open with maybe subtabs for all active plugins
say some color correction on 2 tracks, and then you open and close variouos plugins
for several different clips ...plus, maybe the possibility to "pin" some of the plugin
windows, while others just allocate the next free slot
[2008-11-12 23:13:14] <joelholdsworth> I guess we need some modeless way of putting plugin controls in a panel
[2008-11-12 23:15:32] <joelholdsworth> so maybe having panels open up - or do it like inkscape with a scrollable panel
container that gets longer as more panels are opened
[2008-11-12 23:20:10] <cehteh> Cinelerra's plugin guis who constantly get lost behind the main window are really a mess
[2008-11-12 23:19:28] <ichthyo> joelholdsworth: have you already any ideas about focus handling?
[2008-11-12 23:20:15] <ichthyo> joelholdsworth: do we get a "current object", a "current track" or the like
which receives keybord input
[2008-11-12 23:20:37] <joelholdsworth> keybaord focus, yes - that was the plan
[2008-11-12 23:20:54] <ichthyo> thorwil recently pointed at the concept of having an "active element" within
a larger selected group of elements e.g. you have a "current clip" and within it
a currently active effect attachment and if you press the shortcut, you switch over
to the automation curve of this plugin if there is one, etc. etc...
[2008-11-12 23:21:44] <cehteh> think about future ... multiple elements have focus from different devices
[2008-11-12 23:22:06] <cehteh> multitouch ... and other controlers
[2008-11-12 23:22:06] <joelholdsworth> I think we have to treat keyboard focus as special
[2008-11-12 23:22:23] <cehteh> next X version will utilitze multiple mouse pointers
[2008-11-12 23:22:35] <ichthyo> e.g. what's with the mouse wheel?
[2008-11-12 23:22:42] <ichthyo> what's with a jog dial on a control surface
[2008-11-12 23:23:03] <ichthyo> or with the play/stop buttons on a control surface?
[2008-11-12 23:23:03] <joelholdsworth> my plan is to treat the jog dial as special
[2008-11-12 23:23:23] <joelholdsworth> yes again - special button assignment
[2008-11-12 23:23:23] <ichthyo> sort of like the mouse wheel?
[2008-11-12 23:23:34] <cehteh> imagine user wants a 2nd normal pc keyboard as video controler
[2008-11-12 23:23:42] <ichthyo> because all those rather follow sort of a focus
[2008-11-12 23:23:56] <ichthyo> maybe together with modifier keys
[2008-11-12 23:24:04] <joelholdsworth> yes again that would work via an auxialiry controller setup config
the primary keybaord and mouse work in the normal way
we can't have two keyboard focusses - it's just won't work
we can have a second keyboard - a board of hotkeys
[2008-11-12 23:25:21] <joelholdsworth> there's mouse, keyboard and auxiliary controllers
[2008-11-12 23:25:34] <joelholdsworth> and those controllers would be mapped to the UI using a cunning dialog
[2008-11-12 23:25:58] <ichthyo> and what I said some time ago today with the binding for the "current" pan
is a similar idea. Because also such a binding should rather follow the focus
it doesn't help if you bind it to a specific instance of the panner plugin
the moment you have more then 8 panners, it won't scale
rather, it would be helpful, if you had a "current" fade and pan and sound level,
which would follow the focus, so you could e.g. navigate the focus with the
right hand at the cursor keys, and leave the left hand on your control surface's knobs
please understand me right: I don't want to push features, and I will stay out of GUI
issues. All I want is that everyone in the project is aware of this problems
[2008-11-12 23:27:44] <joelholdsworth> yes ok
[2008-11-12 23:28:45] <joelholdsworth> I see the problem
[2008-11-12 23:28:58] <ichthyo> ok thanks
[2008-11-12 23:31:54] <ichthyo> but it's fine we are all aware of the problem and seemingly share the same approach
config system + some sort of automatic persistent references within proc + some
indirection + maybe a focus system
[2008-11-12 23:33:10] <ichthyo> and another point I want to note is: all of us are rather reluctant to allow plugins
to create GUIs on their own
[2008-11-12 23:33:27] <joelholdsworth> depends what you mean by that
[2008-11-12 23:33:29] <cehteh> i guess we cant avoid that
[2008-11-12 23:33:38] <ichthyo> but note: there is a great urge from some some users for such things
[2008-11-12 23:33:39] <cehteh> for really external plugins
[2008-11-12 23:33:47] <joelholdsworth> yes
[2008-11-12 23:34:01] <cehteh> i am more cared about them fireing threads up :P
[2008-11-12 23:34:13] <ichthyo> thats part of the problem
[2008-11-12 23:34:37] <ichthyo> some start registering their own key shortcuts and all sorts of horrible things
[2008-11-12 23:34:42] <joelholdsworth> if you let each plugin create every bit of it's ui, you get a lot of code
copy/paste and a lot of diverging GUI, so you need to "help" the plugins
by putting them in a strict framework. Then, on top of that we allow
extensions which allow a little more flexibility
[2008-11-12 23:35:20] <ichthyo> Steinberg failed to address this problem properly and VST bears on this heritage until today
[2008-11-12 23:35:35] <cehteh> make it in a way where the 'correct' way is also the most convenient for any programmer is
all and the best we can do
[2008-11-12 23:35:49] * ichthyo nods
[2008-11-12 23:36:04] <joelholdsworth> yes
[2008-11-12 23:36:07] <cehteh> note the 'any' ...
[2008-11-12 23:36:20] <ichthyo> :-P
[2008-11-12 23:36:26] <cehteh> means mad programmers, beginner programmers, experienced programmers, html programmers ...
[2008-11-12 23:36:52] <joelholdsworth> but I'm thinking we do need to remember that the majority of our plugin developers
will be part of this project team
[2008-11-12 23:38:09] <ichthyo> joelholdsworth: no, it won't be this way. Rather, there are some mainstream plugin systems.
People will foremost want to use plugins they know already, letting asside a small set of
core plugins, yes, those are the ones we provide as native lumiera plugins
[2008-11-12 23:38:42] <joelholdsworth> hmm ok
[2008-11-12 23:38:44] <ichthyo> a fader, a mask etc.
[2008-11-12 23:38:50] <ichthyo> but anything besides that
[2008-11-12 23:39:06] <ichthyo> every sound engenieer has his favorite eq and compressor
[2008-11-12 23:39:26] <ichthyo> and he will rather swich to another application than being forced
to use a compressor he isn't fine with. Same for advanced video stuff
----------------------------
Source code organisation
------------------------
In the weeks before that meeting some discussions regarding the start-up of the main application
took place, including the question how to load and pull up the Lumiera GTK GUI as a plug-in.
Moreover, _cehteh_ just ``landed'' the branch with the first draft of his new plugin/interface
system. Which prompted us to sort out the locations and arrangement of those parts of the source
dealing with the application framework. The decision was to place that code into a small library,
which we call the *Lumiera common application services* (+liblumieracommon.so+), to be kept separate
from the *support library* (+liblumiera.so+).
.-- Include dir and namespaces --
[caption="☉Transcript☉ "]
----------------------------
[2008-11-12 23:49:43] <ichthyo> There is the namespace issue we left open on the last meeting
[2008-11-12 23:50:34] <cehteh> its really oblivious that you dont need to reflect the C++ namespaces
1:1 in the directory structure. I still think having all our stuff into lumiera::
would be good .. but :)
[2008-11-12 23:51:33] <ichthyo> in any case, it is closely interconnected to the "interface namespace":
which means the following: there are some interfaces (opaque datatypes
plus forward decls for C, abstract base classes for C++)
plus the things defined via the interface/plugin system
this stuff is our "public interface"
[2008-11-12 23:53:01] <ichthyo> and I really try hard to keep it separate from the implementation
[2008-11-12 23:53:14] <cehteh> the interface/plugin system has now its own rules which are somewhat special
and good by that way. I am thinking about a interface_doc.h which can be used
to generate documentation from the interface language with some special gcc/cpp flags
[2008-11-12 23:53:18] <ichthyo> those interface parts will be contained in a "Lumiera sdk"
and probably all those interfaces (for C++) will go into namespace lumiera::
because it's just the best and most natural pick
and, as said, I really want us to try hard to get this interface part
a real interface part and cleanly separated from the implementation
meaning: I don't want *.c and *.cpp files in this package/directory/namespace,
unless they are really needed there
[2008-11-12 23:55:55] <cehteh> well interfaces themself need to be .c they need to be implemented
and the implementation might adapt the code for example the config system
sometimes returns internal state as success/failure indicator
but its interface just returns integers
[2008-11-12 23:56:13] <ichthyo> so the question regarding namespaces is connected to the question:
how is the implementation organized in relation to the interface part?
[2008-11-12 23:59:41] <cehteh> config_interface.h needs to be installed someday (with a better name maybe)
[2008-11-13 00:01:52] <cehteh> do we want a src/includes/ ?
[2008-11-13 00:02:04] <cehteh> for includes which get installed
[2008-11-13 00:00:07] <ichthyo> so, someone *using* the interface system probably will need to include config_interface.h
[2008-11-13 00:00:19] <ichthyo> but he doesn't need config.c
[2008-11-13 00:00:29] <cehteh> config_interface.c is the mapping/actual implementation ...
which gets linked in but the user doesn't need it
[2008-11-13 00:00:52] <ichthyo> yes, thats my point. So we should try to separate those cleanly.
[2008-11-12 23:59:38] <ichthyo> so lets assume src/lumiera contains our interface
(I just take it as an example to make my point more clear)
[2008-11-13 00:01:01] <ichthyo> Let me explain it on another more elaborate example
[2008-11-13 00:01:47] <ichthyo> I have a class "Session" which is located in a file session.hpp
[2008-11-13 00:01:58] <ichthyo> but it is just a ABC
[2008-11-13 00:02:18] <ichthyo> and I have a Class SessionImpl which is contained in sessionimpl.hpp and sessionimpl.cpp
[2008-11-13 00:02:44] <ichthyo> and in the "interface package" there should be *only* session.hpp
[2008-11-13 00:03:11] <ichthyo> probably this class Session should be moved into namespace lumiera::
[2008-11-13 00:03:16] <ichthyo> then the situation would be clean
[2008-11-13 00:03:32] <ichthyo> note: up to now i wrote only implementation code.
[2008-11-13 00:03:50] <cehteh> yes, me too
[2008-11-13 00:04:07] <ichthyo> but I already made the class hierarchy this way, i.e. everyone *uses* just the class Session
[2008-11-13 00:04:28] <ichthyo> and only the class SessManagerImpl knows of the implementation class SessionImpl
[2008-11-13 00:04:32] <ichthyo> and so on
[2008-11-13 00:04:50] <cehteh> well thats not important yet...
I currently have some 'mess' of includes because its just easier for now to mix them
all together and relative easy to clean up when done, and I don't have a differentiation
between interface and implementation headers .. because
a) its all implementation code yet
[2008-11-13 00:06:06] <cehteh> b) i was under the impression that i only wanted to export interfaces
over 'Interfaces'
[2008-11-13 00:06:25] <ichthyo> ...which is fine
[2008-11-13 00:08:24] <cehteh> so .. public interfaces ... place in:
[ ] src/include/*.h
[ ] src/*/*_interface.h
[2008-11-13 00:08:45] <ichthyo> I am for the former: put them into a separate directory
[2008-11-13 00:08:55] <ichthyo> put them in a clearly separated namespace
[2008-11-13 00:08:29] <ichthyo> but... on the long run I am sure we don't just get one "includes" package,
rather I guess we get a (very small) tree of interfaces.
That's the rationale behind my keeping the implementation namespaces completely apart
[2008-11-13 00:08:53] <cehteh> ok
[2008-11-13 00:09:43] <cehteh> the _interface.c is the glue code which can stay in the implementation dir imo
[2008-11-13 00:10:16] <cehteh> actually i wanted a include dir too .. i just didnt start because i wanted to ask you too
[2008-11-13 00:10:41] <cehteh> ah another thing i asked joel before .. i dump the tests dir out of the Doxyfile
[2008-11-13 00:10:58] <ichthyo> really?
[2008-11-13 00:11:09] <cehteh> main documentation should have less noise,
the include graphs are messed up with all test dependencies.
But we can make special doxyfiles for special purposes
Doxyfile.all Doxyfile.small Doxyfile.browser
[2008-11-13 00:13:34] <ichthyo> yes, thats fine.
So we make a "small" doc set with just the core facilities, and a "complete" doc set,
which includes: test cases and documentation on private members, so someone really
needing to find out how something works in detail can use the "complete" set.
At the end, I bet we will have almost 60% test code and 40% actual code
[2008-11-13 00:16:29] <ichthyo> ok, back to the include dir question:
My proposal would be to use src/lumiera/... and namespace lumiera::...
(and its children) for the public interfaces
[2008-11-13 00:17:04] <cehteh> mhm thats the usual reversal operation
[2008-11-13 00:17:31] <cehteh> src/lumiera/include/ ... gets installed in /usr/include/lumiera :P
[2008-11-13 00:17:40] <cehteh> ok you dont have the include
[2008-11-13 00:17:52] <cehteh> but i'd rather use the name include
[2008-11-13 00:17:59] <ichthyo> so src/lumiera/** -> /usr/include/lumiera/**
[2008-11-13 00:18:22] <ichthyo> this would make the namespaces match 100% to the directories
[2008-11-13 00:18:13] <cehteh> i dont like that much
[2008-11-13 00:18:18] <cehteh> better src/include
[2008-11-13 00:18:22] <cehteh> rationale:
[2008-11-13 00:18:43] <cehteh> when i look at some project i dont know and see a ./include
i know that there are the interfaces. If it is ./projectname then
i expect there a huge tree of code which makes the project
[2008-11-13 00:20:04] <cehteh> which is actually true for most project ...
at least i dont know anyone which names its includes projectname
[2008-11-13 00:20:14] <ichthyo> thats a good argument
[2008-11-13 00:20:23] <cehteh> the projectname/include is somewhat common .. but bit bloated
[2008-11-13 00:20:48] <cehteh> so better include
[2008-11-13 00:20:49] <ichthyo> yeah, thats a good point
[2008-11-13 00:21:04] <ichthyo> so namespace lumiera:: -> /src/include
[2008-11-13 00:21:35] <ichthyo> namespace lumiera::edit -> /src/include/edit (just as an example)
[2008-11-13 00:22:00] <ichthyo> while namespace proc::builder -> src/proc/builder
[2008-11-13 00:22:31] <cehteh> but i think keeping it flat would be ok
[2008-11-13 00:22:58] <ichthyo> I didn't say I want it. But it may happen to be necessary or just better,
when the root of the interfaces gets too big
[2008-11-13 00:23:00] <cehteh> at least i dont plan to make a tree behind it
[2008-11-13 00:23:08] <ichthyo> neither do I
[2008-11-13 00:23:11] <ichthyo> but... well
[2008-11-13 00:23:14] <cehteh> i doubt that it will get very big
[2008-11-13 00:23:29] <ichthyo> I've seen enough projects to prove the contrary
[2008-11-13 00:23:45] <ichthyo> and when an interface dir as 60 entries it starts to get messy
[2008-11-13 00:23:37] <cehteh> even with 100 files there it will still be manageable
[2008-11-13 00:24:02] <cehteh> and if not .. we may reconsider that before we release
[2008-11-13 00:24:38] <cehteh> a deeper hierarchy which is very sparse is much more annoying
than a dir with up to 100 files
[2008-11-13 00:24:49] <ichthyo> but anyway, the important point is I really want to try hard to get public interfaces
and implementation code cleanly separated
[2008-11-13 00:25:31] <cehteh> for C++ absolutely .. for C i less that a *little* bit
[2008-11-13 00:25:54] <cehteh> for performance reasons and because most of the backend wont end in public interfaces anyway
[2008-11-13 00:26:27] <ichthyo> just out of curiosity: what are the performance reasons?
[2008-11-13 00:26:51] <cehteh> i have some static inline accessor functions ..
which need to be in the .h and need full public structure definitions
which expose private details. I think these will stay private
but if not i wont really care.
[2008-11-13 00:27:06] <ichthyo> ah, I see
[2008-11-13 00:27:59] <ichthyo> probably those just count as "reasonable exceptions from the rule"
[2008-11-13 00:28:33] <cehteh> well C has no access protection at all .. you have to do whats allowed/documented
[2008-11-13 00:28:47] <cehteh> any improvisation counts as undefined behaviour .. point
[2008-11-13 00:28:26] <ichthyo> and besides: also within the implementation of one layer,
there are further interfaces, e.g the builder has an interface
[2008-11-13 00:32:03] <ichthyo> actually, I wanted to write something down, but couldn't find the time
[2008-11-13 00:32:17] <ichthyo> But I may well tell it just informally if that's ok ?
[2008-11-13 00:33:02] <ichthyo> I always assume we have separate interface namespace(s)
and implementation is in a different namespace then the exported interface
basically I see two scenarios:
so, the first scenario I'd call "libarry type"
it means that the implementation namespace is nested
eg namespace mylib <-- the stuff the library users include and use in their code
[2008-11-13 00:34:23] <ichthyo> namespace mylib::impl and mylib::impl::special <- the actual code making the library work
[2008-11-13 00:34:50] <ichthyo> second scenario is rather what I was aiming for
namespace lumiera <-- the interfaces
[2008-11-13 00:35:45] <ichthyo> namespace proc::builder:.... <-- the implementation has a completely separate hierarchy
for each subsystem
[2008-11-13 00:36:08] <ichthyo> my rationale for prefering the second aproach is:
[2008-11-13 00:36:32] <ichthyo> in our case, we are "implementation heavy".
We will certainly have much more implementation code than interfaces
we often will build more nested namespaces for the implementation,
but can live with just one shallow public interface
besides, you can just grab one directory and install it as "include",
the way we discussed
for a real library, the situation is reversed: Often you have much code close
to the interface, maybe even inline
e.g. think at boost:
half of the code is immediately inline within the interface classes you use
and the more technical parts are in shallow sub-namespaces below
the good side of course is, that the implementation code doesn't need
to pull in the interface, because he already sees it because of the nesting.
but for our situation, for me, the need to pull in explicitly any interface
you want to use or implement counts rather as a good thing
because it clearly documents what non-local parts you use
that was my rationale for keeping the implementation part
in a completely separate hierarchy.
[2008-11-13 00:43:17] <ichthyo> joelholdsworth: so you moved your gui code within namespace gui, is that correct?
[2008-11-13 00:43:50] <joelholdsworth> yes that's right!
[2008-11-13 00:43:59] <ichthyo> because, then the issue seems to be settled.
Originally I had yet more shallow trees.
But your argument with the matching directory names and the consistency
is an important point. So then, I'll go ahead and move my implementationn code
into a new root namespace proc and then start cleaning up the library part
[2008-11-13 00:44:47] <joelholdsworth> great :)
[2008-11-13 00:45:20] <ichthyo> meaning, you can expect quite some stuff moving between /src/common and /src/lib
[2008-11-13 00:45:40] <ichthyo> on the long run, probably the intention should be for "common" to disappear
[2008-11-13 00:46:08] <ichthyo> either stuff goes into namespace lumiera::, meaning it's an interface
and the header should go into "src/include" or stuff is heavy support lib
implementation stuff and thus goes below src/lib
----------------------------
.-- build system, plugin tree --
[caption="☉Transcript☉ "]
----------------------------
[2008-11-13 00:47:06] <ichthyo> which brings us to another question
[2008-11-13 00:47:18] <ichthyo> how do we organize building plugins?
because: one plugin may contain several source files and headers,
but needs to be linked into one module
[2008-11-13 00:47:55] <cehteh> plugins dir
[2008-11-13 00:48:11] <cehteh> and a tree there
[2008-11-13 00:48:22] <cehteh> plugins/video/effects
[2008-11-13 00:49:56] <cehteh> plugins/video/effects/foo/ ... with foobar.c foobaz.c links to foo.lum
[2008-11-13 00:52:38] <cehteh> plugins/video/effects/foo/foo.c builds foo.lum and
installs it in $(pkglibdir)/plugins/video/effects/foo.lum
[2008-11-13 00:48:24] <ichthyo> background is: I am aiming to get the build process as much rules-directed as possible
[2008-11-13 00:48:27] <ichthyo> thus:
[2008-11-13 00:48:54] <ichthyo> can we come up with a rule about how plugins will be built?
[2008-11-13 00:50:33] <ichthyo> again the notorious namespace question
[2008-11-13 00:51:08] <ichthyo> if it's a rather large plugin, e.g. a plugin providing an adapter for an
external media type libarary or such stuff then it probably can have nested namespaces
[2008-11-13 00:51:56] <cehteh> we really dont need a 1:1 relation between dirs and namespaces
[2008-11-13 00:52:08] <ichthyo> so, then, how can the build system tell at what level of the tree below plugins
we'd need to start with building one shared libarary?
[2008-11-13 00:52:46] <ichthyo> would the following rule be ok for you?
[2008-11-13 00:52:58] <ichthyo> start with dir src/plugins and descend
[2008-11-13 00:53:12] <ichthyo> depth-first tree search
[2008-11-13 00:53:28] <ichthyo> when you enter a directory which contains a real source file
[2008-11-13 00:53:46] <ichthyo> then build everything below it into one shared module
[2008-11-13 00:53:31] <cehteh> well i just suggesting here .. for the plugin loader ist relative simple
[2008-11-13 00:53:56] <cehteh> it just searches in the paths you give him (and doesnt descend itself)
[2008-11-13 00:54:19] <ichthyo> no, I really want to configure as few pathes as possible
[2008-11-13 00:54:23] <cehteh> if you think thats ok, then do it
[2008-11-13 00:54:57] <ichthyo> note, the limitation is: you can't just put some isolated code
file *.c *cpp in some of the root directories...
the moment you add a *c or *cpp file in some new sub tree,
you'll find a <subtree-rootdirname>.lum in the corresponding bin dir
[2008-11-13 00:55:17] <cehteh> thats prolly a good choice anyways
[2008-11-13 00:55:28] <cehteh> plugins shall be only at the leaves of the tree
[2008-11-13 00:55:42] <cehteh> be careful when linking
[2008-11-13 00:56:16] <cehteh> the autoconf build included a lot libs from configure which where not needed for plugins
[2008-11-13 00:56:57] <ichthyo> besides, I will set up a different "build environment" for plugins
[2008-11-13 00:57:16] <ichthyo> in SCons, each build environment has a fixed set of libraries attached
[2008-11-13 00:57:28] <cehteh> you likely need specialized ones for some plugins
[2008-11-13 00:57:43] <cehteh> but you can inherit them
[2008-11-13 00:58:02] <ichthyo> yes, I can inherit them
[2008-11-13 00:58:12] <ichthyo> and I match them from the directory name
[2008-11-13 00:58:20] <ichthyo> the rest is fully automatic
[2008-11-13 00:58:14] * cehteh thinks
[2008-11-13 00:58:38] <cehteh> a bare plugin doesnt need any lib
[2008-11-13 00:59:04] <ichthyo> not even the lumiera support lib?
[2008-11-13 00:59:11] <ichthyo> liblumiera.a
[2008-11-13 00:59:12] <ichthyo> ?
[2008-11-13 00:59:29] <cehteh> nope
[2008-11-13 00:59:43] <cehteh> only if it needs somthing from that
[2008-11-13 01:00:01] <cehteh> -DLUMIERA_PLUGIN for compiling plugins
[2008-11-13 01:00:11] <ichthyo> plus libdl
[2008-11-13 01:00:24] <cehteh> stop. no
[2008-11-13 01:00:34] <cehteh> libdl actually breaks the isolation :P
[2008-11-13 01:00:41] <ichthyo> ah, only for the part which loads the lib
[2008-11-13 01:00:51] <cehteh> the host needs libdl not the plugin
[2008-11-13 01:00:54] <ichthyo> but what is when a plugin wants to open another interface
[2008-11-13 01:01:11] <cehteh> it asks the host to do it
[2008-11-13 01:01:17] <ichthyo> doesn't it need to link against your plugin system impl?
[2008-11-13 01:01:32] <cehteh> nope .. just the interface.h
[2008-11-13 01:02:49] <cehteh> interface.h defines some functions .. but these are not available for the plugin either
but the nature of plugins is to extend the system ..
thus follows that they will need some more specific libs in many cases
[2008-11-13 01:02:29] <ichthyo> please understand my goal: I want to make the build as automatic and rules based as possible
[2008-11-13 01:02:58] <cehteh> yes ...
[2008-11-13 01:03:09] <ichthyo> for SCons, this is already mostly the case currently,
but the autotools build is a maintainance nightmare right now
because the paths and dependencies are so much hard wired there
[2008-11-13 01:03:24] <ichthyo> i spent already a considerable time to fix it again and again
[2008-11-13 01:03:52] <cehteh> huh i feel comfortable with autotools
[2008-11-13 01:04:04] <ichthyo> fine for you, but I fix it all the time
[2008-11-13 01:06:44] <ichthyo> for now I didn't use any special switches
[2008-11-13 01:06:59] <ichthyo> I just defined exampleplugin to be an dynamic module
[2008-11-13 01:07:10] <cehteh> sounds reasonable
[2008-11-13 01:07:41] <ichthyo> as said, in future I'll tell scons
"build everything below this root dir into a dynamic module"
and scons will derive the necessary compile and link commands
[2008-11-13 01:10:32] <ichthyo> ah, another little issue
[2008-11-13 01:10:42] <ichthyo> we have two tools directories currently
[2008-11-13 01:10:53] <ichthyo> one as "src/tool"
[2008-11-13 01:10:57] <ichthyo> and another as
[2008-11-13 01:11:12] <ichthyo> just "admin"
[2008-11-13 01:11:29] <ichthyo> within admin, there is the icon rendering and the vgsuppression
[2008-11-13 01:12:01] <ichthyo> the idea was, that all this special support tools would be in src/tool
[2008-11-13 01:12:52] <cehteh> admin are administrative scripts .. tools shall be moved into src/tool
[2008-11-13 01:13:00] <ichthyo> ok
[2008-11-13 01:13:07] <ichthyo> so joelholdsworth:
[2008-11-13 01:13:29] <ichthyo> would it be ok for you to move the icon building executable into src/tool ?
[2008-11-13 01:13:41] <ichthyo> and probably vgsuppression too?
[2008-11-13 01:14:00] <ichthyo> so the build process doesn't need to build anything in admin
[2008-11-13 01:15:29] <joelholdsworth> yes that's ok!
[2008-11-13 01:15:39] <joelholdsworth> that really is just a tool
[2008-11-13 01:15:43] <ichthyo> that is built lilke a tool, so I'd like to move it into src/tool
[2008-11-13 01:16:15] <joelholdsworth> it's the standard rsvg utility with some options added
[2008-11-13 01:16:58] <ichthyo> a propos: joelholdsworth:
I've built some additional directory checks into the accompanying python script
you have probably noticed that I call directly from the SConstruct into your
python script to invoke the icon rendering, and I built some additional checks
and move non-empty dirs away as "*.bak"
[2008-11-13 01:17:50] <joelholdsworth> I hadn't actually looked
[2008-11-13 01:17:59] <joelholdsworth> but yes that makes perfect snese
[2008-11-13 01:18:31] <joelholdsworth> actually it is supposed to overwrite any prior renderings
[2008-11-13 01:18:39] <ichthyo> ah?
[2008-11-13 01:19:00] <joelholdsworth> so if you change the SVG the PNGs get created fresh
[2008-11-13 01:19:01] <ichthyo> so I could change it and just do a recursive remove if the target dir exists?
[2008-11-13 01:19:10] <joelholdsworth> yes
[2008-11-13 01:19:32] <joelholdsworth> like obj files - it's supposed to just overwrite any previous bits
[2008-11-13 01:19:50] <joelholdsworth> well the targets are just build temporaries
[2008-11-13 01:20:01] <ichthyo> ok
[2008-11-13 01:22:25] <ichthyo> ok. then regariding the build system, the only bit of cleanup is the tests
[2008-11-13 01:22:46] <ichthyo> cehteh: we talked about it when I visited you
[2008-11-13 01:23:02] <ichthyo> it's not urgent right now
[2008-11-13 01:23:14] <ichthyo> but.. as we agreed at that time
[2008-11-13 01:23:39] <ichthyo> we should make a separate tree for the test-support code, maybe in a subdirectory of /tests
[2008-11-13 01:24:01] <ichthyo> and this tree of course reflects the main tree. That was the idea if I recall correct
[2008-11-13 01:25:11] <cehteh> ichthyo: yes .. i know
[2008-11-13 01:25:13] <cehteh> pending
[2008-11-13 01:25:18] <ichthyo> yes, not urgent
[2008-11-13 01:25:26] <ichthyo> works as-is for now
[2008-11-13 01:25:45] <ichthyo> I have some similar cleanup for my part
[2008-11-13 01:27:24] <ichthyo> btw: do we need "test plugins"?
[2008-11-13 01:27:52] <ichthyo> i.e. something similar to the src/plugin subdirs we discussed just a moment ago
[2008-11-13 01:28:53] <cehteh> you mean some which get installed?
[2008-11-13 01:29:09] <ichthyo> no, just plugins which are needed only from within tests
[2008-11-13 01:29:18] <cehteh> yes we need them
[2008-11-13 01:29:19] <ichthyo> i.e. /tests/plugin
[2008-11-13 01:29:39] <ichthyo> and everything below will be built exactly similar as everything below /src/plugin
[2008-11-13 01:29:44] <cehteh> i currently build them in place example_plugin.c
[2008-11-13 01:29:54] <ichthyo> just it will be only used for test runs
[2008-11-13 01:29:56] <ichthyo> yes, ok
[2008-11-13 01:32:18] <cehteh> another thing: shall plugins override existing interfaces or barf out?
[2008-11-13 01:32:33] <cehteh> both has pros and cons
[2008-11-13 01:32:46] <cehteh> both can easily circumvented with some effort
[2008-11-13 01:33:27] <ichthyo> ehm, I don't fully understand the situation.
Could you explain it? Lets say: we have interface A
now a plugin wants to provide an implementation of interface A
i.e. the client of this plugin would use interface A to access it
[2008-11-13 01:34:03] <cehteh> in a mail to you i explained that i would like mockup plugins which
override core functionality
[2008-11-13 01:35:07] <ichthyo> so, wouldn't the mockup just be another implementation of interface A ?
[2008-11-13 01:35:09] <cehteh> well you query an interface+implementation tuple
[2008-11-13 01:35:35] <cehteh> you cant just open a interface .. you open a implementation of an interface
[2008-11-13 01:35:49] <ichthyo> yes, but you use this (interface+implemntation) just for opening the interface, or?
[2008-11-13 01:36:05] <cehteh> so mocplugin can provide blurfoo
[2008-11-13 01:36:15] <cehteh> even if blurfoo.lum provides it too
[2008-11-13 01:40:32] <cehteh> imagine 2 *.lum modules implement and export exactly the same thing
(or at least announce it as exactly the same thing)
[2008-11-13 01:40:57] <ichthyo> I'd just try to relate it to the similar situation with classes
[2008-11-13 01:41:16] <ichthyo> you have an interface and now you have two subclasses
[2008-11-13 01:41:12] <cehteh> nah classes are different
[2008-11-13 01:41:27] <ichthyo> no they aren't. really different
[2008-11-13 01:41:43] <cehteh> subtle different
[2008-11-13 01:41:47] <ichthyo> so both subclasses claim to implement the contract which is defined by the interface
[2008-11-13 01:42:04] <ichthyo> now I have a factory which instantiates one of those subclasses
[2008-11-13 01:42:14] <ichthyo> or the other or both
[2008-11-13 01:42:18] <ichthyo> same situation as for the plugins
[2008-11-13 01:42:44] <cehteh> err no
[2008-11-13 01:42:48] <cehteh> you have only one class
[2008-11-13 01:42:49] <ichthyo> the client just wants an "instance" of the contract defined by the interface
[2008-11-13 01:43:01] <ichthyo> and doesn't care which one it is
[2008-11-13 01:43:14] <cehteh> you didnt understand my problem because it doesnt exist in C++
[2008-11-13 01:43:33] <cehteh> class foo {...};
[2008-11-13 01:43:43] <cehteh> foo this_is_it;
[2008-11-13 01:43:44] <cehteh> foo this_is_it;
[2008-11-13 01:43:57] <cehteh> ... redefinition of the same instance is my problem
[2008-11-13 01:44:08] <cehteh> not even exactly
[2008-11-13 01:44:37] <ichthyo> ah, in C++ the two would either be rejected by the compiler,
or they would be allowed if in different scopes
[2008-11-13 01:44:43] <cehteh> even worse the 2nd can say the it is a identical instance but it isnt
[2008-11-13 01:45:07] <cehteh> thats a good way to inject moc objects
[2008-11-13 01:45:15] <cehteh> so now my solution:
[2008-11-13 01:46:01] <cehteh> *thinking*
[2008-11-13 01:46:29] <cehteh> minor versions must differ, biggier minor will win
[2008-11-13 01:46:41] <cehteh> (newer one for certain)
[2008-11-13 01:46:53] <cehteh> thats intended
[2008-11-13 01:47:11] <cehteh> now a mock object might claim it is some *exact* same ..
[2008-11-13 01:47:57] <cehteh> that will be rejected .. so a mock object must do some heavy work
to throw out the old one from the nest
[2008-11-13 01:48:10] <cehteh> thats ok for testing purposes
[2008-11-13 01:48:12] <ichthyo> well... at some point you will be able to tell the loader:
load this module for use as interface A
[2008-11-13 01:48:20] <cehteh> nah
[2008-11-13 01:48:37] <cehteh> but you forget that modules != interfaces
[2008-11-13 01:49:07] <ichthyo> I understand the modules as being like a "subclass" of the interface
[2008-11-13 01:49:23] <ichthyo> meaning, each of them can "stand-in" for the interface
[2008-11-13 01:49:18] <cehteh> nope wrong
[2008-11-13 01:49:41] <cehteh> modules are a collection of interfaces
[2008-11-13 01:49:47] <cehteh> completely unrelated
[2008-11-13 01:49:54] <ichthyo> huh?
[2008-11-13 01:50:19] <cehteh> a module could provide a video effect and a gui plugin and a fancy format exporter at once
[2008-11-13 01:50:36] <cehteh> no one shall do that ... but modules are just collections of interfaces
[2008-11-13 01:50:37] <ichthyo> which would be 3 interfaces
[2008-11-13 01:50:41] <cehteh> yes
[2008-11-13 01:51:00] <ichthyo> but what then is the term for the implementation of an interface?
[2008-11-13 01:51:08] <ichthyo> because, at an extension point
[2008-11-13 01:51:21] <ichthyo> you just want to tell: this is the interface to be used at this extension point
[2008-11-13 01:51:28] <cehteh> but the module is just a bunch of interfaces .. and module itself doesnt give them a purpose
[2008-11-13 01:52:09] <ichthyo> thats clear. Something like the feature bundle could give it a speficic purpose,
or something similar, which itself is loaded as plugin
[2008-11-13 01:52:46] <cehteh> modules are just the bucket .. it is pretty unspecified what you put in
[2008-11-13 01:53:01] <ichthyo> yes thats fine, to start with
[2008-11-13 01:53:23] <ichthyo> anything more specific can be built on top of such a system
[2008-11-13 01:53:35] <ichthyo> for example take the video effect
at some point, you define an interface "VideoEffect"
probably this interface isn't loaded from a module, rather it's defined from the core,
but this doesn't matter for the point in question here
now, everything you can ever use as an implementation of this "VideoEffect"
must use this interface, because the client of the interface
(which in this case is the core which uses this interface for rendering)
doesn't care about the implementation and doesn't know any differences
----------------------------

View file

@ -0,0 +1,35 @@
2012-01-11 Lumiera Developers Meeting
=====================================
:Author: cehteh
:Date: 2012-01-11
Jan 11, 2012 on #lumiera 20:00
__Participants__
* cehteh
* ichthyo
* benn
* raffa
Conclusions
-----------
. ichthyo removed most of the tiddly wikis, and worked the content into the website
. cehteh reports that Lumiera got another donation (75Eur), arrangements with
the ffis to get access (view) about the donations account are under way. We'll
ask donors then if they want to be published or stay anonymous and will set up
a wiki page listing donations and expenses.
. ichthyo rewrote the SCons build, as discussed last time
. cehteh writes a very short RfC, to document that we're using SCons for now.
. possibly no one going to LAC, too far away
. we discussed a link checker / link resolver for the website.
The idea is to have a semi automatic tool, which is used locally when
authoring website content to find cross references.
. benn and ichthyo follow up on the libregraphics magazine and try to get into
discussion with them and see what can be done within our limited time.
ichthyo will respond to the mail and put benn and ct on CC.
. when it comes to have a working example for media file output, we stick to the
mainstream solutions ffmpeg and or gstreamer, but care not to lock ourselves
into a single solution. Concluded that we do this over plugin interfaces and
it mostly boils down to supporting ffmpeg .. and investigate something simpler too.

View file

@ -12,6 +12,15 @@ Anyone interested in Lumiera development is also encouraged to read mailing list
archives and other documentation.
During Summer 2011 we kept up our monthly IRC meetings -- rather casual exchange
for the time being.
- 9 Nov 2011
- 12 Oct 2011
- 14 Sep 2011
- 10 Aug 2011
- 13 Jul 2011
- 8 Jun 2011
11 May 2011
@ -143,7 +152,7 @@ Topics
Summary
^^^^^^^
_(missing)_
* link:2008-11-12.html[Transcript (supplemented by Ichthyo in 2011)]

View file

@ -1,7 +1,7 @@
Statistics and Reports
======================
//Menu: label Statisticts
//Menu: label Statistics
++++++++++++++++++++++++++++++++++++++

View file

@ -65,16 +65,11 @@ For each `struct namespace_foo_struct` we have following typedefs:
[source,C]
----
typedef struct namespace_foo_struct namespace_foo; // canonical
typename
typedef const namespace_foo * const_NamespaceFoo; // pointer to const
object
typedef namespace_foo* link:NamespaceFoo[]; // canonical
pointer/handle
typedef namespace_foo ** link:NamespaceFoo[]_ref; // when intend to
mutate the handle itself
typedef const namespace_foo ** const_NamespaceFoo_ref; // for const object
handle
typedef struct namespace_foo_struct namespace_foo; // basic struct name
typedef namespace_foo* NamespaceFoo; // canonical object pointer/handle
typedef const namespace_foo * const_NamespaceFoo; // pointer to const object
typedef namespace_foo ** NamespaceFoo_ref; // when intend to mutate the handle itself
typedef const namespace_foo ** const_NamespaceFoo_ref; // same for const object handle
----
Examples:

View file

@ -149,6 +149,12 @@ for the most common STL containers, plus Map, key and value extractors.
Ichthyostega:: 'Sa 16 Apr 2011 00:20:13 CEST'
minor change: removed support for post-increment. It doesn't fit with the concept
and caused serious problems in practice. A correct implementation of post-increment
would require a ``deep copy'' of any underlying data structures.
Ichthyostega:: 'Sa 07 Jan 2012 21:49:09 CET' ~<prg@ichthyostega.de>~
//endof_comments:

View file

@ -0,0 +1,83 @@
Make Scons the official build System
====================================
// please don't remove the //word: comments
[grid="all"]
`------------`-----------------------
*State* _Final_
*Date* _Mi 11 Jan 2012 21:45:58 CET_
*Proposed by* Christian Thaeter <ct@pipapo.org>
-------------------------------------
********************************************************************************
.Abstract
_Bless Scons the default build system for Lumiera._
********************************************************************************
Description
-----------
//description: add a detailed description:
So far we using autotools and scons in parallel. Over time the need arose to have one
reliable supported build system. This shall be scons.
Tasks
~~~~~
// List what needs to be done to implement this Proposal:
// * first step ([green]#✔ done#)
// * second step [,yellow]#WIP#
Nothing to do, except that for releases scons *must* be working and all non-functional
build systems will be stripped from releases (branches?).
Discussion
~~~~~~~~~~
Pros
^^^^
// add a fact list/enumeration which make this suitable:
// * foo
// * bar ...
Cons
^^^^
// fact list of the known/considered bad implications:
Alternatives
^^^^^^^^^^^^
//alternatives: explain alternatives and tell why they are not viable:
Rationale
---------
//rationale: Give a concise summary why it should be done *this* way:
Conclusion
----------
//conclusion: When approbate (this proposal becomes a Final)
// write some conclusions about its process:
Comments
--------
//comments: append below
.State -> Final
//add reason
Decided on the December 2011 Developer meeting.
Mi 11 Jan 2012 22:28:36 CET Christian Thaeter <ct@pipapo.org>
//endof_comments:
''''
Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview]

View file

@ -163,7 +163,8 @@ buffers.
Dispatching Step
~~~~~~~~~~~~~~~~
The dispatcher translates a render process into sequences of node invocations,
The dispatcher translates a render process (actually a _calculation stream_ as
part of a render process) into sequences of node invocations,
which then can be analysed further (including planning the invocation of
prerequisites) and scheduled. This mapping is assisted by the engine model API
(to find the right exit node in the right segment), the render process (for
@ -212,7 +213,7 @@ Tasks
([green]#-> yes, implementation deferred#)
* find out about the exact handling of multichannel data streams ([green]#✔ done#)
* design and prototypical implementation of frame quantisation ([green]#✔ done#)
* design a buffer descriptor [red]#TODO#
* design a buffer descriptor ([green]#✔ done#)
* design a buffer designation scheme [red]#TODO#
* expand on the node identification scheme [red]#TODO#
* clarify how control data frames can be addressed [red]#TODO#
@ -223,7 +224,11 @@ Discussion
Pros/Cons/Alternatives
^^^^^^^^^^^^^^^^^^^^^^
Possible variants to consider....
Currently we're focussing on how to implement _this_ concept, not on
evaluating alternatives. Especially the idea of scheduling individual frame jobs
is a core concept of Lumiera. This RfC tries to bridge from the session model to
an engine based on these concepts. It's the attempt to link two concepts already
defined and decided on....
Rationale

View file

@ -1,30 +1,37 @@
Stream Type System
==================
// please don't remove the //word: comments
[grid="all"]
`------------`-----------------------
*State* _Idea_
*State* _Draft_
*Date* _2008-10-05_
*Proposed by* link:Ichthyostega[]
-------------------------------------
Stream Type System
------------------
Especially in the Proc-Layer, we need a framework to deal with different
"kinds" of media streams. +
********************************************************************************
.Abstract
Especially in the Proc-Layer, within the Builder and at the interface to the
Engine we need sort of a framework to deal with different »kinds« of
media streams. +
This is the foundation to be able to define what can be connected and to
separate out generic parts and isolate specific parts.
********************************************************************************
Description
~~~~~~~~~~~
-----------
//description: add a detailed description:
The general idea is that we need meta information, and -- more precisely --
that _we_ need to control the structure of this metadata. Because it has
immediate consequences on the way the code can test and select the appropriate
path to deal with some data or a given case. This brings us in a difficult
situation:
* almost everything regarding media data and media handling is notriously
* almost everything regarding media data and media handling is notoriously
convoluted
* because we can't hope ever to find a general umbrella, we need an extensible
solution
@ -41,30 +48,36 @@ role for some of the more advanced things possible within the session.
Terminology
^^^^^^^^^^^
~~~~~~~~~~~
* *Media* is comprised of a set of streams or channels
* *Stream* denotes a homogeneous flow of media data of a single kind
* *Channel* denotes a elementary stream, which can't be further separated _in
the given context_
* *Channel* denotes an elementary stream, which -- _in the given context_ --
can't be decomposed any further
* all of these are delivered and processed in a smallest unit called *Frame*.
Each frame corresponds to a time interval.
* a *Buffer* is a data structure capable of holding a Frame of media data.
* a *Buffer* is a data structure capable of holding one or multiple Frames of media data.
* the *Stream Type* describes the kind of media data contained in the stream
Concept of a Stream Type
~~~~~~~~~~~~~~~~~~~~~~~~
The Goal of our Stream Type system is to provide a framework for precisely
describing the ``kind'' of a media stream at hand. The central idea is to
structure the description/classification of streams into several levels.
A complete stream type (implemented by a stream type descriptor) contains
a tag or selection regarding each of these levels.
Levels of classification
^^^^^^^^^^^^^^^^^^^^^^^^
The description/classification of streams is structured into several levels. A
complete stream type (implemented by a stream type descriptor) contains a tag
or selection regarding each of these levels.
* Each media belongs to a fundamental *kind of media*, examples being _Video,
Image, Audio, MIDI, Text,..._ This is a simple Enum.
* Below the level of distinct kinds of media streams, within every kind we
have an open ended collection of *Prototypes*, which, whithin the high-level
have an open ended collection of *Prototypes*, which, within the high-level
model and for the purpose of wiring, act like the "overall type" of the
media stream. Everything belonging to a given Prototype is considered to be
roughly equivalent and can be linked together by automatic, lossles
roughly equivalent and can be linked together by automatic, lossless
conversions. Examples for Prototypes are: stereoscopic (3D) video versus the
common flat video lacking depth information, spatial audio systems
(Ambisonics, Wave Field Synthesis), panorama simulating sound systems (5.1,
@ -85,15 +98,15 @@ _library_ routines, which also yield a _type classification system_ suitable
for their intended use. Most notably, for raw sound and video data we use the
http://gmerlin.sourceforge.net/[GAVL] library, which defines a fairly complete
classification system for buffers and streams. For the relevant operations in
the Proc-Layer, we access each such library by means of a Facade; it may sound
the Proc-Layer, we access each such library by means of a Façade; it may sound
surprising, but actually we just need to access a very limited set of
operations, like allocating a buffer. _Within_ the Proc-Layer, the actual
implementation type is mostly opaque; all we need to know is if we can connect
two streams and get an conversion plugin.
Thus, to integrate an external library into Lumiera, we need explicitly to
implement such a Lib Facade for this specific case, but the intention is to be
able to add this Lib Facade implementation as a plugin (more precisely as a
implement such a Lib Façade for this specific case, but the intention is to be
able to add this Lib Façade implementation as a plugin (more precisely as a
"Feature Bundle", because it probably includes several plugins and some
additional rules)
@ -105,8 +118,8 @@ with, determining a suitable prototype for a given implementation type is sort
of a tagging operation. But it can be supported by heuristic rules and an
flexible configuration of defaults. For example, if confronted with a media
with 6 sound channels, we simply can't tell if it's a 5.1 sound source, or if
it's a pre mixed orchesrta music arrangement to be routed to the final balance
mixing or if it's a prepared set of spot pickups and overdubbed dialogue. But a
it's a pre mixed orchestra music arrangement to be routed to the final balance
mixing or if it's a prepared set of spot pick-ups and overdubbed dialogue. But a
heuristic rule defaulting to 5.1 would be a good starting point, while
individual projects should be able to set up very specific additional rules
(probably based on some internal tags, conventions on the source folder or the
@ -132,42 +145,46 @@ connections and conversions
into each other.
* Conversions and judging the possibility of making connections at the level
of implementation types is coupled tightly to the used library; indeed, most
of the work to provide a Lib Facade consists of coming up with a generic
of the work to provide a Lib Façade consists of coming up with a generic
scheme to decide this question for media streams implemented by this
library.
Tasks
^^^^^
~~~~~
// List what needs to be done to implement this Proposal:
* draft the interfaces ([green]#✔ done#)
* define a fallback and some basic behaviour for the relation between
* define a fall-back and some basic behaviour for the relation between
implementation type and prototypes [,yellow]#WIP#
* find out if it is necessary to refer to types in a symbolic manner, or if it
is sufficient to have a ref to a descriptor record or Facade object.
* provide a Lib Facade for GAVL [,yellow]#WIP#
is sufficient to have a ref to a descriptor record or Façade object.
* provide a Lib Façade for GAVL [,yellow]#WIP#
* evaluate if it's a good idea to handle (still) images as a separate distinct
kind of media
Discussion
~~~~~~~~~~
Alternatives
^^^^^^^^^^^^
Instead of representing types my metadata, leave the distinction implicit and
//alternatives: explain alternatives and tell why they are not viable:
Instead of representing types by metadata, leave the distinction implicit and
instead implement the different behaviour directly in code. Have video tracks
and audio tracks. Make video clip objects and audio clip objects, each
utilizing some specific flags, like sound is mono or stereo. Then either
switch, swich-on-type or scatter out the code into a bunch of virtual
utilising some specific flags, like sound is mono or stereo. Then either
switch, switch-on-type or scatter out the code into a bunch of virtual
functions. See the Cinelerra source code for details.
In short, following this route, Lumiera would be plagued by the same notorious
problems as most existing video/sound editing software. Which is, implicitly
assuming "everyone" just does "normal" things. Of course, users always were and
always will be clever enough to work around this assumption, but the problem
is, all those efforts will mostly stay isolated and can't crystalize into a
assuming ``everyone'' just does ``normal'' things. Of course, users always were
and always will be clever enough to work around this assumption, but the problem
is, all those efforts will mostly stay isolated and can't crystallise into a
reusable extension. Users will do manual tricks, use some scripting or rely on
project organisation and conventions, which in turn creates more and more
coercion for the "normal" user to just do "normal" things.
coercion for the ``average'' user to just do ``normal'' things.
To make it clear: both approaches discussed here do work in practice, and it's
more a cultural issue, not a question guided by technical necessities to select
@ -175,6 +192,8 @@ the one or the other.
Rationale
---------
//rationale: Give a concise summary why it should be done *this* way:
* use type metadata to factor out generic behaviour and make variations in
behaviour configurable.
@ -189,13 +208,17 @@ Rationale
* provide the possibility to incorporate very project specific knowledge as
rules.
//Conclusion
//----------
//conclusion: When approbate (this proposal becomes a Final)
// write some conclusions about its process:
Comments
--------
//comments: append below
As usual, see the
http://www.lumiera.org/wiki/renderengine.html#StreamType[Proc-Layer impl doku]
for more information and implementation details.
@ -214,8 +237,8 @@ number of inputs and outputs) need in some way to be connected.
The fact that we don't have a rule based system for deciding queries currently
is not much of a problem. A table with some pre configured default answers for
a small number of common query cases is enough to get the first clip rendered.
(Such a solution is already in place and working.)
-- link:Ichthyostega[] 2008-10-05
(Such a solution is already in place and working.) +
-- link:Ichthyostega[] 2008-10-05
Woops fast note, I didn't read this proposal completely yet. Stream types could
or maybe should be cooperatively handled together with the backend. Basically
@ -226,9 +249,9 @@ number, plus adding the capability of per frame metadata. These indices get
abstracted by "indexing engines" it will be possible to have different kinds of
indices over one file (for example, one enumerating single frames, one
enumerating keyframes or gops). Such an indexing engine would also be the place
to attach per media metadata. From the proc layer it can then look like +struct
frameinfo* get_frame(unsigned num)+ where +struct frameinfo+ (not yet defined)
is something like +{ void* data; size_t size; struct metadata* meta; ...}+
to attach per media metadata. From the proc layer it can then look like `struct
frameinfo* get_frame(unsigned num)` where `struct frameinfo` (not yet defined)
is something like `{ void* data; size_t size; struct metadata* meta; ...}` +
-- link:ct[] 2008-10-06
Needs Work
@ -239,5 +262,7 @@ agreed that we want this concept as proposed here.
Do 14 Apr 2011 03:06:42 CEST Christian Thaeter
//endof_comments:
''''
Back to link:/documentation/devel/rfc.html[Lumiera Design Process overview]

View file

@ -12,9 +12,9 @@ ${TITLE//?/=}
*Proposed by* $(git config --get user.name) <$(git config --get user.email)>
-------------------------------------
[abstract]
********************************************************************************
.Abstract
_give a short summary of this proposal_
********************************************************************************
Description

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

After

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

After

Width:  |  Height:  |  Size: 19 KiB

BIN
doc/devel/uml/fig143877.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

BIN
doc/devel/uml/fig144005.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

BIN
doc/devel/uml/fig145157.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

BIN
doc/devel/uml/fig151685.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

View file

@ -16,6 +16,16 @@ Informations about the structure of Lumiera, they give an overview on the main c
Programming-oriented details about the functional layers of Lumiera as well as a Doxygen documentation :
* link:technical/index.html[Technical Documents]
* link:technical/code/codingGuidelines.html[Style and Coding Guidelines]
=== the TiddlyWiki ===
Currently, Lumiera is still in the design- and evolution phase.
There is an embedded JavaScript wiki (TiddlyWiki) within the source tree, mostly
used as design notebook, featuring day-to-day design sketches, notes but also
quite some more persistent planning. Finished documentation text is constantly
moved over to the documentation section(s) of the Lumiera website.
-> access the Proc-Layer link:http://lumiera.org/wiki/renderengine.html[TiddlyWiki online here]
== Media and Presentations ==

View file

@ -1,8 +1,9 @@
Technical Documentation: Backend
================================
Eventually, this will have technical documentation for the Backend.
Here we collect bits of technical documentation for the Backend.
For now, we have:
* link:ConfigLoader.html[Config Loader brainstorming from 2008]
* link:scheduler.html[Scheduler and Jobs]

View file

@ -0,0 +1,40 @@
Scheduler and Job handling
==========================
The purpose of the _Scheduler_ is to run small self contained _Jobs_
ordered by priority and observing specific timing constraints.
Scheduler implementation ideas
------------------------------
Use multiple priority queues
- background work
- foreground high-priority
- soft-realtime actions
About Jobs
----------
A job is a closure to run a small and limited action or operation, which
in itself _should not block_. Job may depend on each other and on resources
to be provided. A job may be conained in multiple queues and may be marked
as _canceled_ -- in which case the job function will never run and the job
will be discarded on occasion.
Job States
~~~~~~~~~~
[source,C]
--------------
enum job_state
{
done, // already done, nothing to do
running, // job is running
waiting, // waits for some resource (another job)
rejected, // sorry, can't do that, Dave — time will run out
expired, // time expired
aborted // got aborted
}
--------------

View file

@ -0,0 +1,90 @@
Dependencies
------------
:Author: CehTeh
:Date: 3/2008
Lumiera is written for GNU/Linux. We try to make the best out of modern system programming techniques
to reach the best possible performance. Lumiera shall scale with the provided Hardware,
the more RAM and the more/faster CPU's you have the better.
Nevertheless lower end 32bit machines are supported too.
Secondary targets will be other free operating systems which offer a decent Posix API. +
Porting to other more or less similar platforms will be possible, if -- by coincidence --
Someone(TM) helps with porting.
Having said that -- for the time being, the core team won't spend much effort on porting.
Platform
--------
We work and test on PC hardware, 32 and 64 bit. It is intended that Lumiera supports
other platforms running GNU/Linux.
Lumiera expects a 'standard' desktop installation running a Xserver.
Graphics::
There are no special requirements for the graphic system.
Hardware acceleration will likely be added later through extensions,
but will remain strictly optional. (For now we'll watch the evolution
in that area and might revisit that topic when there are more compelling
and widely supported solutions available)
Disks::
Video editing requires decent disk speed, so it is suggested to use a
fast/big array of disks configured as raid.
Special Hardware::
Support for special hardware would be possible, but depends on certain conditions
+
* we need access / donations for the hardware
* specs and APIs must be open.
* someone to do the actual interfacing and support needs to join the team
Languages and Tools
-------------------
* C / C++
- a C99 / C++98 compatible compiler
- GCC 4.4 or better is fine. Basically we try to use just the stock language.
On rare occasions, we _did_ use some GCC extensions, but there would be workarounds,
should this become a problem.
- std::tr1 extensions for C++ (smart-ptrs, hashtables, function objects)
* BOOST (listed below are the DEBIAN package names)
- libboost-dev (at least 1.40)
- libboost-program-options-dev
- libboost-filesystem-dev
- libboost-regex-dev
* Script languages
- Python (2.5) for build scripts
- bash (some test scripts use bash specific extensions)
- Lua is planned to become a general glue and binding language
Build Tools
~~~~~~~~~~~
* Git
* SCons
* pkg-config
* Doxygen
We maintain a Debian package (debhelper, CDBS, git-buildpackage)
Libraries
~~~~~~~~~
* BOOST
* NoBug
* http://gmerlin.sourceforge.net/gavl.html[GAVL] (for raw media support)
* for the GUI: gtkmm-2.4 gdl-1.0 libglibmm-2.4 cairomm-1.0 xv
- libgtkmm-2.4-dev
- libcairomm-1.0-dev
- libgdl-1-dev
- libglibmm-2.4-dev, requiring glib2.0 and gthread-2.0
- libxv-dev
- librsvg-2.0 and librsvg2-dev for rendering Icons

View file

@ -0,0 +1,89 @@
The Lumiera Debian Package
==========================
:Author: Hermann Voßeler deb@ichthyostega.de
:Date: 11/2011
//Menu: label Debian Package
Since several of the Lumiera core developers run a Debian flavour as their primary development platform,
it seems natural to care for the debian packaging of Lumiera ourselves. Moreover, we've declared
Debian/Stable to be our *reference platform* -- we'll provide any additionally required, more recent
packages through our own link:http://Lumiera.org/debian[Debian Apt-Repository] (Debian depot).
TIP: readers new to debian packages may want to have a look into our
link:{ldoc}/user/tutorials/DebianBuilding.html[Debian build tutorial],
describing the commands for building and the structure of a debian
(source) package in general
Package build process
---------------------
As our whole infrastructure relies heavily on the Git version management tool, it's only natural
also to organise the (debian) packaging with the help of Git. Fortunately, there is a nifty tool
called link:https://honk.sigxcpu.org/piki/projects/git-buildpackage/[git-buildpackage], written
by Guido Günther exactly for this purpose: It treats the _debianisation_ as a branch in the
Git repository, forking off the mainline at the release point.
For Lumiera, this _debianisation branch_ is called 'deb' and can be found in the
link:http://git.lumiera.org/gitweb?p=lumiera/debian;a=summary[git://git.lumiera.org/lumiera/debian] repository.
Installing Lumiera
~~~~~~~~~~~~~~~~~~
The link:SCons.html[SCons build] generates a relocatable distribution directory structure,
holding the main executable, additional tools and utilities, plus the core libraries and resources.
Here _relocatable_ means that this subtree can be moved and placed anywhere on the system.
As long as the relative directory layout remains intact, the executables will be able to find
and load the accompanying libraries and resources. By invoking the +scons install+ target,
this directory structure is placed into the installation target directory.
Actually, this lookup process at application startup is performed in two phases
- in the first phase the main application locates the directly linked shared libraries.
Especially those libraries belonging first class to the Lumiera application are built
with a relative search path ('rpath' with $ORIGIN token). These libraries are placed
into the +modules/+ subfolder
- after successfully launching the executable, the second phase performs a lookup programmatically,
starting from the path location of the executable, trying to find a +setup.ini+ which defines
additional plug-ins and resources to load. Notably, the GUI to launch is loaded as a
plug-in through this mechanism. Moreover, this bootstrap configuration defines the
additional platform and user configuration to load for further outfitting of the system.
LSB Installation Layout
~~~~~~~~~~~~~~~~~~~~~~~
This organisation is used as foundation for packaging and installing. The primary application
distribution structure will be located into a subfolder below +/usr/lib/+. Only the main application
executable will be symlinked into +/usr/bin/+. The LSB explicitly allows for such a layout, which is
typically used by large application bundles (OpenOffice, Gimp, Eclipse). Since the application
startup encompasses two phases, loading the extended configuration programmatically after
launching the application, such extended resources can easily be relocated into a separate
folder below +/usr/share/+, as required by LSB.
Releases, Branches and Conventions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Official releases are marked by a tag on the master branch. Usually, we'll also fork a
*release branch* at that point, featuring bugfixes only. From here we'll merge to the
*debian branch*
Package build commands
~~~~~~~~~~~~~~~~~~~~~~
To (re)build the debian package
. +git clone git://git.lumiera.org/lumiera/debian+
. +mkdir pack.deb+
. +cd debian+
. +git-buildpackage --git-upstream-branch=+ _RELEASE-TAG_
-- here _RELEASE-TAG_ denotes the point in the Git history, which should become
the reference source and be packaged into the *.orig.tar.gz. Usually, it's just
sufficient to use 'master' for that purpose.
Debian-Depot for installation via Apt
-------------------------------------
In addition to the packaging, we maintain a dedicated Apt-Repository for automated
installation and upgrades. We try to build the package for several Debian derived
distributions (like Ubuntu). -> link:../infra/debianDepot.html[more on the repository organisation]

View file

@ -0,0 +1,191 @@
SCons Build-System
==================
:author: Ichthyo
:date: 2012
//MENU: label SCons Build
Lumiera uses a build system based on link:http://scons.org[SCons]
SCons is an open source software construction tool based on Python build definition scripts.
Within these build scripts, we define a data structure to describe the parts and dependencies
of our software. When executed, SCons evaluates those definitions and the actual files in
the source tree to derive a build strategy, which is then performed.
SCons core concepts
-------------------
^_this section is based on the introductory pages on the link:http://www.scons.org/wiki/BasicConcepts[SCons Wiki]_^
.SCons Environment
When SCons starts building the project, it creates its own environment with dependency trees,
helper functions, builders and other stuff. The SCons environment is created in memory and some parts of it
are saved to disk to speed up things on the next start. The definition of the build happens within this
artificial build environment. This often confuses people who used Makefiles, where the environment actually is
the System Environment.
.System Environment
the familiar operating system container with environment variables such as PATH, HOME etc.
It is usually accessible via os.environ mapping in Python and therefore in SCons too.
SCons doesn't import any settings from System Environment automatically (like flags for compilers,
or paths for tools), because it's designed to be a cross platform tool with _predictable behaviour._
That's why, if you rely on any system PATHs or environment variables -- you need to extract
those settings explicitly in your build definition.
.SConstruct
when SCons executes, it performs a build definition python script written by the user.
By convention, this main script is called 'SConstruct' and is located in the root of the source tree.
It is a full featured Python module executed within a specifically prepared environment.
.SConscript
these files are also SCons scripts, but they are placed in subdirectories of the project.
Typically they are used to organize hierarchical builds and are included from the main SConstruct file
from the project root. Often, all of the actual build definitions reside in SConscript files in
the sub-trees of the project.
.Builder
The SCons buildsystem revolves around the metaphor of a _Builder_. This is a SCons object that you explicitly
invoke from the scripts to _define_ that there is something to build, transforming a _source_ into a _target_.
So the target depends on the sources, and typically those _source nodes_ were created by previous builder invocations.
The power of SCons is in the fact that dependencies are tracked automatically. When source files change, the system
is able to derive which targets to rebuild.
.Scanner
when defining a builder, SCons relies on modular scanner components to ``understand'' the source of the build step.
They may scan source files to discover additional dependencies referenced inside. Thus, SCons comes with built-in
knowledge about the source files and artefacts to be created by a typical build, and further types can be added
through plug-ins.
.Tool
any further, external component that adds Builders, Scanners and other helpers to SCons environments
for use within scripts. There are special tools for _configuring the platform_ to detect libraries and
further requirements. Relying on those tools, the build environment will be outfitted to reflect the
needs of the specific build. Sub-environments with special configuration may be created.
.Target
any _node_ or ``build step'' encountered through the definition of the build is a _target_. The actual build
will be triggered by requesting a target, which typically might be just an executable known to reside at some
location in the tree. Special _alias targets_ may be defined, based on other targets, to trigger off standard
build situations. Especially, a _default_ target may be defined.
''''
Organisation of the Lumiera SCons build
---------------------------------------
Within our build system, we exploit the power of the Python programming language to create abstractions
tailored to the needs of our project. Located in the `admin/scons` subdirectory, you'll find a collection
of Python modules defining these building blocks.
- the *LumieraEnvironment* is created as a subclass of the standard SCons build environment; it is
outfitted with pre-configured custom builders for executables, libraries, extension module, Lumiera plug-in
and icon resources.
- all these *custom builders* implement a set of conventions and directory locations within the tree.
These are defined (and can be adjusted) in the *Setup.py* module. This way, each builder automatically
places the generated artefacts at standard build and installation locations.
- for defining individual targets and builder invocations, we rely on *build helpers* to process whole
*source sub trees* rather than individual files. Mostly, just placing a source file into the appropriate
sub tree is sufficient to get it compiled, linked and installed in a standard way.
Sub-trees
~~~~~~~~~
.the source tree
All sourcecode of the core application resides below `src/`. Building these components is controlled by
the SConscript residing in this application source root. By convention, this is also the root for header
includes -- _all headers should be included relative_ to `src/`.
.the three layers
Within this application core tree, there are sub-trees for the main layers comprising the application.
Each of these sub-trees will be built into a shared library and then linked against the application framework
and common services residing in `src/common`. Besides, there is a sub-tree for core plug-ins and support tools.
.the GTK Gui
one of these sub-trees, residing in `src/gui` forms the upper layer or user-interaction layer. Contrary to
the lower layers, the GUI is _optional_ and the application is fully operational _without Gui._ Thus, the
GTK Gui is built and loaded as a Lumiera plug-in.
.unit tests
Since we're developing test-driven, about half of the overall code can be found in unit- and integration
tests, residing below `test/`. There is a separate SConscript file, defining the various kinds of test
artefacts to be created.
- plain-C tests are defined in _test-collections_, grouped thematically into several subdirectories.
Here, each translation unit provides a separate +main()+ function and is linked into a stand-alone
executable (yet still linked against the appropriate shared libraries of the main application layers)
- the tests covering C++ components are organised into test-suites, residing in separate sub-trees.
Currently (as of 1/2012), there is the *library suite* and the *proc components suite*. Here
individual translation units define individual test case classes, which are linked together with
a testrunner +main()+ function.
.research
There is a separate subtree for research and experiments. The rationale being to avoid re-building most
of the core application when it comes to experimenting and trying out new technologies.
.icons and resources
the +data/+ subtree holds resources, configuration files and icons for the Gui. Most of our icons
are defined as SVG graphics. The build process creates a helper executable (+rsvg_convert+) to render
these vector graphics with the help of lib Cairo into icon collections of various sizes.
.documentation
Most of the documentation is written in Asciidoc and provided online at link:{ldoc}[the documentation section]
of our website. The plain-text sources of this documentation tree are shipped alongside the code.
Besides, we build *Doxygen* API documentation there, and we create design and technical specs and drawings
in SVG and in UML.
.the target directory
This is where the results of the build process are created. Lumiera is organised into a
_self contained folder structure_. As long as the relative locations, as found within +target/+,
are kept intact, the Application will be able to start up and find all its resources. Consequently,
there is no need to ``install'' Lumiera (and the ``install'' target just copies this folder structure
into the standard installation locations of a typical Unix system)
Unfortunately SCons is a bit weird regarding the object files created during the build process.
So currently, we're just building in-tree. Apologies for that.
Invoking the Build
~~~~~~~~~~~~~~~~~~
All of the build process is launched through the `scons` python script, usually installed into
`/usr/bin` when installing the SCons package onto the system. Just invoking
scons -h
prints a summary of all custom options, targets and toggles defined for our build.
Targets
^^^^^^^
- *build* is the default target: it creates the shared libs, the application, core plug-ins and the Gui.
- *testcode* additionally builds the research and unit test code
- *check* builds testcode and runs our testsuites
- *research* builds just the research tree
- *doc* builds documentation (currently just Doxygen)
- *all* builds the Application, testsuites and documentation
- *install* builds and installs the Lumiera Application
By convention, invoking +scons -c+ <TARGET> will _clean up_ everything the given target _would_ build.
Thus, invoking ++scons -c /++ is the most global clean operation: it will clean up all build artefacts and
will un-install Lumiera (recall: every defined node, or directory is also a target).
Configure checks
^^^^^^^^^^^^^^^^
SCons doesn't know a separate ``configure'' step. The necessary dependency detection is performed
before each build. Currently, we expect _all dependencies to be installed first-class_ into the
system. Please use your package manager.
Caching and MD5 sums
^^^^^^^^^^^^^^^^^^^^
SCons stores MD5 sums of all source files, all configure checks and all the command lines used
to invoke compilers and external tools. The decision, what needs to be rebuilt is based entirely
on these checksums. For one, this means that configure checks are re-run only when necessary.
It also means that changes to some compiler switches will automatically cause all affected parts
of the application to be re-built. And of course it means, that you only ever compile what is
necessary.
With SCons, there is no need for the usual ``cleaning paranoia''. Similarly, there is no need
for CCache (but using DistCC rocks !). Unfortunately, calculating those MD5 sums requires some
time on each build, even if the net result is that nothing will happen at all.
Configuration options
^^^^^^^^^^^^^^^^^^^^^
We provide several custom configuration options (run +scons -h + to get a summary). All of these
options are *sticky*: once set, the build system will recall them in a file '.optcache' and apply
them the same way in subsequent builds. It is fine to edit '.optcache' with a text editor.

View file

@ -3,13 +3,17 @@ Lumiera build system
As work progresses, we will add more information on the Lumiera build system.
//Menu: label Build System
//Menu: prepend child 'Dependencies'
//Menu: prepend child 'SCons'
build -- continuous integration -- packaging
* SCons
* Autotools
* Dependencies
* link:SCons.html[Buildsystem]
* link:Dependencies.html[Dependencies]
* link:BuildDroneDraft.html[»Builddrone« concept from 2008]
* Packaging: Debian RPM
* Lumiera debian depot
* Packaging: link:LumieraDebianPackage.html[Debian] RPM
* Lumiera link:../infra/debianDepot.html/[debian depot]

View file

@ -0,0 +1,117 @@
Coding Guidelines
=================
:Date: Autumn 2011
_this page summarises some style and coding guidelines for the Lumiera code base_
Style Guide
-----------
The Lumiera project uses GNU indentation style with slight adaptations.
- *no tabs* please. The typical ``semi indent'' of GNU style thus becomes 2 spaces.
- maximum line length is rather around *110 characters*.
- originally, GNU style focussed on plain-C code. +
We thus apply some relaxations and clarifications ...
* the braces for a class scope are indented by 2 spaces
* the access modifiers start at this brace level, while all declarations and definitions
within the class are again indented by 2 spaces
* the line breaking rules are relaxed. Definitions and statements may be written as single line,
provided that the length remains below 110 chars and the result _remains legible_. Otherwise,
we'll fall back to the default and wrap the lines. More specifically
** function declarations may be written in one line
** same for definitions with just a single statement in the body
** same for simple if-statements without else-branch.
* the space between function name and opening parenthesis of the argument list is not
enforced when this doesn't make sense, esp. for argument-less functions, chained calls
or constructor syntax. But in all other cases, we really value this additional space,
it improves legibility.
* template argument declarations are _always_ written on a separate line, above the
return type declaration. This rule holds even if the rest of a definition can be
written within a single line.
* the opening brace of namespaces is placed on the same line. Optionally, the namespace
body may be indented, as long as this helps underpinning the nesting structure. But
there is no need to use 3 indents on a 3 level nested namespace. One level is enough
to highlight the presence of a nesting.
Naming conventions
~~~~~~~~~~~~~~~~~~
Naming conventions are used to characterise the kind of element at hand and give a visual
clue to the reader. We use our own conventions -- there is no point in arguing that this
and that library or language uses other conventions.
- type names start with an uppercase letter
- variable and function names start with lowercase.
- fields within a class, especially the private ones are decorated with a trailing underscore
- a leading underscore may be used to emphasize the strictly internal or technical nature of a type,
variable or function
- namespaces are all-lowercase
- macros and constants are preferably all-caps (at least where this makes sense)
There is a preference for *CamelCase* -- yet underscores are acceptable, especially when the
name is more like a sentence than just a compound term.
plain-C names
^^^^^^^^^^^^^
Since C has no namespaces, we strictly require a +lumiera_+ prefix on all non-local names and definitions.
Generally, names should be formed according to the following pattern:
namespace[_object][_verb[_subjects]][_version]
In case a definition actually denotes an object, there should be
- a basic struct name: `typedef struct namespace_foo_struct namespace_foo;`
- plus an object pointer/handle: `typedef namespace_foo* NamespaceFoo;`
The object pointer/handle should be passed as 1^st^ argument with the name +self+
General Code Arrangement and Layout
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Headers and translation units are named `*.hpp` and `*.cpp` rsp. `*.h` and `*.c` +
Multilingual headers are called `*.h`
- Each header should be named according to the primary facility it exposes. For the filesystem name,
the +CamelCaseWords+ of this type are translated into +camel-case-words.hpp+
- Each file should start with the GNU licence preamble. The headline should give a one-line summary.
The primary author(s) and the year of the initial copyright claim should be mentioned.
- Each header should be focussed on a specific purpose. Preferably it starts with a file-level
doxygen comment explaining the intention and anything not obvious from reading the code.
- when arranging headers and compilation units, please take care of the compilation times and the
code size. Avoid unnecessary includes. Use forward declarations where applicable.
Yet still, _all immediately required includes should be mentioned_ (even if already included by
another dependency)
- The include block starts with our own dependencies, followed by a second block with the library
dependencies. After that, optionally some symbols may be brought into scope (through +using+ clauses).
Avoid cluttering top-level namespaces. Never import full namespaces (no +using namespace boost;+ please!)
- the includes for our own dependencies shall be given relative to source-root (or test root). Don't use
relative includes for headers located in the same directory, or -- worse still -- in the parent directory.
- sometimes, the actual translation units will combine several facilities for technical reasons, e.g.
when sharing an implementation-level class or even singleton instance.
Anonymous namespaces should be used liberally to avoid unnecessary exports.
- template code mostly needs to reside in headers. (same for metaprogramming code).
We employ the simple inclusion model (``Borland model'') for template instantiation.
- But in some specific situations it is preferable to drive explicit instantiations from within
a +*.cpp+ file. Most notably this is the case when defining some core class hierarchies.
Such explicit instantiations should be limited to just a few obvious places. They should be
written into a block at the end of some central implementation file. See +assetmanager.cpp+
for an example.
- deliberately there is _no single top-level namespace._ The +namespace lumiera+ is the root of
our _exported interfaces_ -- intended for use by external scripts and libraries. Everything
implementation related is arranged in per-subsystem trees of namespaces. The APIs of the
subsystems are exported explicitly into the +lumiera+ namespace.
Design Guidelines and Values
----------------------------
Code is written for *being read by humans*; code shall convey its meaning even to the casual reader.
On the long run, this language nature of code is more important than any performance tweaks. Recall,
every idiot can figure out how to make a computer perform something. Yet the real challenge is to
write legible code. Code that operates exactly the way you'd guess just from reading it.
Black magic and all kinds of surprise trickery and cleverness are nothing to be proud off.
-> please have a look at the link:/project/background/CleanCodeDevelopment.html[Clean Code] page
for a coherent system of design principles

View file

@ -0,0 +1,54 @@
Dark Corners
============
_this page accounts for some problematic areas, sketchy solutions,
nonportable hacks, terrorism and other misdemeanour_
Library
-------
Equality of Functors
~~~~~~~~~~~~~~~~~~~~
One of the more important recent additions to the C++ language are function objects.
In addition to the features actually provided by the boost implementation, the tr1 report
also requires function instances to implement an equality operator. Unfortunately the
implementation approach chosen by boost makes a 100% correct implementation of
comparison very difficult, if not impossible. Thus, the boost developers refused
to implement this feature.
The bad news is that really using the power of opaque function objects quickly drove
us (Lumiera) into a situation where such an equality test and a hash calculation on
function objects would be necessary. The whole point of using function objects is
the ability to ``erase'' specific details, which has the downside that the resulting
generic objects are opaque and often difficult to manage, when it comes to storing
and retrieving objects building on such functors.
Thus I built a hack, based on the implementation details of boost::function.
In +functor-util.hpp+ we define a +class HijackedFunction+, which has the same
data layout as the original boost::function. After forcibly casting such a function
(reference or pointer) into a +HijackedFunction+, we're able to inspect and evaluate
the implementation pointers for equality comparison and hash value calculation.
This approach works and actually detects copied functions to be _equal_, but is
unable to pinpoint _equivalence_, e.g. functors bound to the same function with
the same arguments through separate but otherwise identical invocations of +bind+.
Besides, should boost or the standard library implementors eventually change the
implementation, this workaround will break.
Size of standard library facilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes we need to know the size of an STL or Boost class, but can't afford
to include the header and just write a `sizeof()`. Because including some of those
headers incurs quite some price in terms of compilation time and even size of the
debug executable.
Obviously, a simple solution would be to measure those sizes and hardcode them.
But what about portability? To get out of that dilemma, I created a traits class
which mimics the implementation memory layout of those facilities in question,
simplified as much as possible. As long as the GNU libstdc++ or Boost don't
change their implementation layout, this gives us precise and fast size bounds.
When relying on that hack, we should make sure always to place some kind of
STATIC_ASSERT into the corresponding implementation files to ensure the real
facilities actually _do fit_ into the guessed storage dimensions.

View file

@ -0,0 +1,49 @@
Code Base Organisation
======================
//Menu: label Code Base
This section of Lumiera's technical documentation deals with the code base as such,
not so much with the internals of the Lumiera application. It is targeted towards
people participating in Lumiera development.
The Lumiera source base is huge and expected to grow for some time to come. Just
judging by the _lines of code_, Lumiera can be considered as a *medium sized system*,
which gives us some hints towards the adequate practices, conventions and the
degree of formalism to apply.
Generally speaking, a *small system* doesn't exceed the mental capacity of a single
knowledgeable human. A single person can know everything and understand everything
in such a code base. Thus, any formalism and organisational overhead is detrimental;
we're best off doing away with ``Methods'' and just rely on taste and craftsmanship.
But there is a hard limit for this approach, kind of a _sonic wall_ -- usually somewhere
around a size of 60000 LoC. At that point, the adequate value system flips around as a
whole. Since beyond that point, no single person can understand and know everything about
such a system, _communication and understanding become the primary concerns_ of the
development process. Suddenly, we _do need_ some organisation, some rituals and
a methodical approach. Yet still we don't need a high degree of formalism, since
at least we can easily _know and recall everyone involved_.
To give a delineation to the other side, a constantly growing project might eventually
reach the point where the actual coding of new functionality makes up less than 25%
of the overall work required. More people might be involved than anyone can recall or
overlook reasonably. A code size of 1 Million LoC seems to exceed any kind of sufficiently
precise imagination. From that point on, a rigorously formalised organisation is vital
to keep such a *large scale project* from floundering.
////
Conventions
-----------
_to be written_
Guidelines
----------
////
''''
* also see the link:{ldoc}/devel/rfc.html[Design Process] for ongoing discussions
* see the link:/devs-vault/[Developers Vault] for frequently used developer's resources

View file

@ -5,16 +5,24 @@ Since some time, GDB supports Python written extension modules, especially
for pretty printing of data structures. A selection of pre-defined pretty printers
for STL containers is part of the standard distribution. The graphical debugger frontend
provided by the Eclipse CDT automatically uses this debugger provided presentation
to show the value of variables in the detail pane of the variables view, while the
individual variable entries always show the expandable structure view.
to show the value of variables in the detail pane of the variables view. The most recent
version of CDT (Version 8 for Eclipse 3.7 »Indigo«) is even able to populate the structure view
based on the python pretty printer's output, which is a big step ahead towards easy debugging
of more elaborate data structures based on STL containers.
Installation notes
------------------
This feature requires a python-enabled GDB (>6.8.50). Debian/Lenny isn't enough,
but compiling the GDB package from Debian/Squeeze (GDB-7.1) is straightforward.
Moreover, you need to check out and install the python pretty-printers and
you need to activate them through your +~/.gdbinit+
This feature requires a python-enabled GDB (>6.8.50). Actually, even the most recent stable
GDB (Version 7.2) is recommended, because it contains some relevant bugfixes. Debian users
might want to backport the GDB package from Debian/Wheezy (GDB-7.2).
Moreover, you need to check out and install a recent version of the python pretty-printers
from the GNU Subversion repository:
* 'svn checkout'
http://gcc.gnu.org/viewcvs/trunk/libstdc%2B%2B-v3/python/[svn://gcc.gnu.org/svn/gcc/trunk/libstdc++-v3/python stlPrettyPrinter]
* you need to activate them explicitly through your +~/.gdbinit+
[source,python]
----
@ -61,4 +69,8 @@ When selecting the string or the vector in the Eclipse variables view
(or when issuing "print str" at the GDB prompt), the GDB python pretty-printer
should be activated.
NOTE: to get the full support in _Eclipse Indigo + CDT-8_, you need to activate
the setting ``enable pretty printers in variable/expression tree'', which
can be accessed as Window/Preferences > C++ / debug / GDB

View file

@ -0,0 +1,111 @@
Hash functions (C++)
====================
_This page is for collecting know-how related to hash functions and hash tables._
The original STL was lacking proper support for hashtables, hash based associative arrays
and hash calculation in general. To quite some developers, hash tables feel like some kind
of _impure_ data structure -- unfortunately the properties of modern CPUs turned the balance
significantly in favour of hash tables due to memory locality. Pointer based datastructures
can't be considered especially _performant_ as they were in the good old times.
The tr1 extension and the new C++11 standard amended the problem by defining a framework
for hash functions and hash tables. When sticking to some rules, custom written hash functions
can be picked up automatically by the standard library and -containers.
Standard Hash Definitions
-------------------------
Hash values::
hash values are unsigned integral numbers of type 'size_t'
+
Basically this means that the range of hash values roughly matches the memory address space.
But it also means that this range is _platform dependent_ (32 or 64bit) and -- given the usual
hash calculation based on modulus (wrap around) -- that generated hash values are nonportable.
Hash function::
a hash function calculates a hash value for objects of its argument type. Thus, for every
supported type, there is a dedicated hash function. Quite some hash functions are generated
from function templates though.
Hash functor::
a function object able to calculate hash values when invoked. The standard library and the
corresponding boost libraries accept functors of type 'hash<TY>' to calculate hash values
for objects or values of type 'TY'
Hash based containers::
While the standard Set and Map types (including the Multiset and Multimap) are based on
balanced binary trees, the new C\+\+11 standard includes hash based variants (with name
prefix +unordered_+). These hashtable based containers require a +hash<KEY>+ functor
to be able to derive the hash value of any encountered key value. Hash functors may
be provided as additional type parameter to the container; if omitted, the compiler
tries to find a (maybe custom defined) hash functor by *ADL* (see below)
C++11 versus Boost
~~~~~~~~~~~~~~~~~~
The Boost library *functional-hash* provided the foundation for the definition now accepted
into the new C++ standard. Yet the boost library provides some additional facilities not
part of the standard. Thus we're bound to choose
* either including +<tr1/functional>+ and +using std::tr1::hash+
* or including +<boost/functional-hash>+ and +using boost::hash+
The boost version additionally provides pre defined hash functors for STL containers holding
custom types -- and it provides an easy to use extension mechanism for writing hash functions
for custom types. Effectively this means that, assuming usage of the boost-include, the actual
implementation and the way it is picked up is _different but compatible_ to the standard way.
Boost: hashing custom types
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The extension mechanism used by the boost version is best explained by looking
at the code
.boost/functional/hash/extensions.hpp
[source,C]
----
template <class T> struct hash
: std::unary_function<T, std::size_t>
{
std::size_t operator()(T const& val) const
{
return hash_value(val);
}
}
----
So this templated standard implementation just _invokes an unqualified function_
with the name +hash_value(val)+ -- when instantiating this template for your custom
class or type, the compiler will search this function not only in the current scope,
but also in the namespace defining your custom type +T+ (this mechanism is known as
``**A**rgument **D**ependent **L**ookup''). Meaning that all we'd need to do is to define a
free function or friend function named +hash_value+ alongside with our custom data types (classes).
To further facilitate providing custom hash functions, boost defines a function
+boost::hash_combine(size_t seed, size_t hashValue)+, allowing to _chain up_ the
calculated hash values of the parts forming a composite data structure.
- see Lumiera's link:http://git.lumiera.org/gitweb?p=LUMIERA;a=blob;f=src/proc/asset/category.hpp;h=b7c8df2f2ce69b0ccf89439954de8346fe8d9276;hb=master#l104[asset::Category]
for a simple usage example
- our link:http://git.lumiera.org/gitweb?p=LUMIERA;a=blob;f=src/lib/symbol-impl.cpp;h=9e09b4254ac57baefeb0a0c06ccd423318e923c1;hb=master#l67[lib::Symbol datatype]
uses the standard implementation of a string hash function combining the individual
character's hashes.
LUID values
-----------
Lumiera's uniform identifier values shouldn't be confused with regular hash values.
The purpose of LUID values is to use just plain random numbers as ID values. But, because
of using such an incredibly large number space (128bit), we can just assume any collision
between such random LUID to be so unlikely as to reasonably ignore this possibility
altogether. Let's say, the collision of random LUID values won't ever happen, same as
the meltdown of an atomic power plant, which, as we all know, won't ever happen either.
Relation to hash values
~~~~~~~~~~~~~~~~~~~~~~~
When objects incorporate such a unique LUID, this provides for a prime candidate to
derive hash values as a side-effect of that design: Since incorporating an LUID typically
means that this object has a _distinguishable identity_, all objects with the same LUID
should be considered _equivalent_ and thus hash to the same value. Consequently we can just
use a +size_t+ prefix of the LUID bitstring as hash value, without any further calculations.

View file

@ -0,0 +1,167 @@
Using Boost Libraries
=====================
//Menu: label using boost
_some arbitrary hints and notes regarding the use of link::http://www.boost.org[Boost Libraries] in Lumiera_
Notable Features
----------------
Some of the Boost Libraries are especially worth mentioning. You should familiarise yourself with
those features, as we're using them heavily throughout our code base. As it stands, the C/C\++ language(s)
are quite lacking and outdated, compared with today's programming standards. To fill some of these gaps
and to compensate for compiler deficiencies, some members of the C\++ committee and generally very
knowledgeable programmers created a set of C\++ libraries generally known as *Boost*. Some of these
especially worthy additions are also proposed for inclusion into the C++ standard library.
.memory
The `<boost/memory.hpp>` rsp. `<tr1/memory>` libraries define a family of smart-pointers to serve
several needs of basic memory management. In almost all cases, they're superior to using `std::auto_ptr`. +
When carefully combining these nifty templates with the RAII pattern, most concerns for memory
management, clean-up and error handling simply go away. (but please understand how to avoid
circular references and care for the implications of parallelism though)
.functional
The `function` template adds generic functor objects to C++. In combination with the `bind` function
(which binds or ties an existing function invocation into a functor object), this allows to ``erase'' (hide)
the difference between functions, function pointers and member functions at your interfaces and thus enables
building all sorts of closures, signals (generic callbacks) and notification services. Picking up on these
concepts might be mind bending at start, but it's certainly worth the effort (in terms of programmer
productivity)
.hashtables and hash functions
The `unordered_*` collection types amend a painful omission in the STL. The `functional_hash` library
supplements hash function for the primitive types and a lot of standard constructs using the STL; moreover
there is an extension point for using custom types in those hashtables
(-> read more link:HashFunctions.html[here...])
.noncopyable
Inheriting from `boost::noncopyable` inhibits any copy, assignment and copy construction. It's a highly
recommended practice _by default to use that for every new class you create_ -- unless you know for sure
your class is going to have _value semantics_. The C++ language has kind of a ``fixation'' on value
semantics, passing objects by value, and the language adds a lot of magic on that behalf. Which might lead
to surprising results if you aren't aware of the fine details.
.type traits
Boost provides a very elaborate collection of type trait templates, allowing to ``detect'' several
properties of a given type at compile time. Since C++ has no reflection and only a very weak introspection
feature (RTTI, run time type information), using these type traits is often indispensable.
.enable_if
a simple but ingenious metaprogramming trick, allowing to control precisely in which cases the compiler
shall pick a specific class or function template specialisation. Basically this allows to control the
code generation, based on some type traits or other (metaprogramming) predicates you provide. Again,
since C++ is lacking introspection features, we're frequently forced to resort to metaprogramming
techniques, i.e to influence the way the compiler translates our code from within that very code.
.STATIC_ASSERT
a helper to check and enforce some conditions regarding types _at compile time_.
Because there is no support for this feature by the compiler, in case of assertion failure
a compilation error is provoked, trying to give at least a clue to the real problem by
creative use of variable names printed in the compiler's error message.
.metaprogramming library
A very elaborate, and sometimes mind-bending library and framework. While heavily used within
Boost to build the more advanced features, it seems too complicated and esoteric for general purpose
and everyday use. Code written using the MPL tends to be very abstract and almost unreadable for
people without math background. In Lumiera, we _try to avoid using MPL directly._ Instead, we
supplement some metaprogramming helpers (type lists and tuples), written in a simple LISP style,
which -- hopefully -- should be decipherable without having to learn an abstract academic
terminology and framework.
.lambda
In a similar vein, the `boost::lambda` library might be worth exploring a bit, yet doesn't add
much value in practical use. It is stunning to see how this library adds the capability to define
real _lambda expressions_ on-the-fly, but since C++ was never designed for language extensions of
that kind, using lambda in practice is surprisingly tricky, sometimes even obscure and rarely
worth the hassle. (A notable exception might be when it comes to defining parser combinators)
.operators
The `boost::operators` library allows to build families of types/objects with consistent
algebraic properties. Especially, it eases building _equality comparable_, _totally ordered_,
_additive_, _multiplicative_ entities: You're just required to provide some basic operators
and the library will define all sorts of additional operations to care for the logical
consequences, removing the need for writing lots of boilerplate code.
.lexical_cast
Converting numerics to string and back without much ado, as you'd expect it from any decent language.
.boost::format
String formatting with `printf` style directives. Interpolating values into a template string for
formatted output -- but typesafe, using defined conversion operators and without the dangers of
the plain-C `printf` family of functions. But beware: `boost::format` is implemented on top of
the C++ output stream operations (`<<` and manipulators), which in turn are implemented based
on `printf` -- you can expect it to be 5 to 10 times slower than the latter, and it has
quite some compilation overhead and size impact (-> see our own
link:http://git.lumiera.org/gitweb?p=lumiera/ichthyo;a=blob;f=src/lib/format-string.hpp;h=716aa0e3d23f09269973b7659910d74b3ee334ea;hb=37384f1b681f5bbfa7dc4d50b8588ed801fbddb3[formatting front-end]
to reduce this overhead)
.variant and any
These libraries provide a nice option for building data structures able to hold a mixture of
multiple types, especially types not directly related to each other. `boost::variant` is a
typesafe union record, while `boost::any` is able to hold just any other type you provide
_at runtime_, still with some degree of type safety when retrieving the stored values.
Both libraries are compellingly simple to use, yet add some overhead in terms of size,
runtime, and compile time.
.regular expressions
Boost provides a full blown regular expression library, supporting roughly the feature set of
perl regular expressions. The usage and handling is somewhat brittle though, when compared
with perl, python, java, etc.
.program-options and filesystem
Same as the aforementioned, these two libraries just supply a familiar programming model for these tasks
(parsing the command line and navigating the filesystem) which can be considered quasi standard today,
and is available pretty much in the same style in Java, Python, Ruby, Perl and others.
Negative Impact
---------------
Most Boost libraries are _header only_ and all of them make heavy use of template related features of C++.
Thus, _every inclusion of a Boost library_ might lead to _increased compilation times._ We pay that penalty
per compilation unit (not per header). Yet still, using a boost library within a header frequently included
throughout the code base might dangerously leverage that effect.
debug mode
~~~~~~~~~~
Usually, when developing, we translate our code without optimisation and with full debugging information
switched on. Unfortunately, C++ templates were never designed to serve as a functional metaprogramming language
to start with -- but that's exactly what we're (ab)using them for. The Boost libraries drive that to quite
some extreme. This leads to lots and lots of debugging information to be added to the object files,
mentioning each and every intermediary type created in the course of expanding the metaprogramming
facilities. Even seemingly simple things may result in object files becoming several megabytes large.
Fortunately, all of this overhead gets removed when _stripping_ your executable and libraries (or when
compiling without debug information). So this is solely an issue relevant for the developers, as it increases
compilation, linking and startup times.
runtime overhead and template bloat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The core Boost libraries (the not-so experimental ones) have a reputation for being of very high
quality. They're written by experts with a deep level of understanding of the language, the usual
implementation and the performance implications. Mostly, those quite elaborate metaprogramming
techniques were chosen exactly to minimise runtime overhead or code size.
Since each instantiation of a template constitutes a completely new class, carelessly written
template code can lead to heavily bloated executables. Every instantiated _function_ and every
class with _virtual methods_ (i.e. with a VTable) adds to the weight. But this negative effect can
be balanced by the ability of reducing inline code. According to my own experience, I'd be much
more concerned _about my own code adding template bloat,_ than being concerned about the Boost
libraries (those people know very well what they're doing...)
some practical guidelines
~~~~~~~~~~~~~~~~~~~~~~~~~
- `boost::format`, `boost::variant`, `boost::any`, `boost::lambda` and the more elaborate metaprogramming
stuff adds considerable weight. A good advice is to confine those features to the implementation level:
use them within individual translation units (`*.cpp`) where this makes sense, but don't cast
general interfaces in terms of those library constructs.
- the _functional_ tools (`function`, `bind` and friends), the _hash functions_, _lexical_cast_ and the
_regular expressions_ create a moderate overhead. Probably fine in general purpose code, but you should
be aware that there is a price tag. About the same as with many STL features.
- the `shared_ptr` `weak_ptr`, `intrusive_ptr` and `scoped_ptr` are really indispensable and can be used
liberally everywhere. Same for `enable_if` and the `boost::type_traits`. The impact of those features
on compilation times and code size is negligible and the runtime overhead is zero, compared to _performing
the same stuff manually_ (obviously a ref-counting pointer like `shared_ptr` has the size of two raw pointers
and incurs an overhead on each creation, copy and disposal of a variable -- but that's beside the point I'm
trying to make here, and generally unavoidable)

View file

@ -12,3 +12,5 @@ similar usefull pieces of information targeted at Lumiera developers. See also
== Notepad
- link:DebugGdbPretty.html[Python pretty printers for GDB]
- link:HashFunctions.html[Notes regarding standard hash functions]

View file

@ -1,6 +1,7 @@
Technical Documentation
=======================
// Menu : prepend child library
// Menu : prepend child backend
// Menu : prepend child proc
// Menu : prepend child gui
@ -29,7 +30,9 @@ components.
== Tools
.Development
* link:http://www.lumiera.org/doxy/[*Doxygen generated documentation*] : Complete reference for the code of Lumiera.
* link:http://www.lumiera.org/doxy/[*Doxygen generated documentation*] : API documentation of the Lumiera code.
* organisation of the link:code/index.html[Code Base] in general
* link:code/codingGuidelines.html[Coding Style] and further guidelines
.Building
* link:build/index.html[*Buildsystem*] : Installation & compilation tools, dependencies and packaging.

View file

@ -25,7 +25,7 @@ To (re)build the debian package
. +git clone git:/git.lumiera.org/lumiera/debian+
. +mkdir pack.deb+
. +cd debian+
. +git-buildpackage --git-upstream-branch=+_RELEASE-TAG_
. +git-buildpackage --git-upstream-branch=+ _RELEASE-TAG_
-- here _RELEASE-TAG_ denotes the point in the Git history, which should become
the reference source and be packaged into the *.orig.tar.gz. Usually, it's just
@ -76,15 +76,15 @@ everyday usage
~~~~~~~~~~~~~~
import a package::
+reprepro -V -C experimental include lenny lumiera_0.pre.01-1+maverick_i386.changes+
+reprepro -V -C experimental include squeeze lumiera_0.pre.01-1+squeeze_i386.changes+
+
this adds the given binary lumiera package, together with all sources and the original
tarball to the 'lenny' repository, into the 'experimental' section
tarball to the 'squeeze' repository, into the 'experimental' section
dump out an entire repository::
+reprepro -V export lenny+
+reprepro -V export squeeze+
+
this will __re__generate all of the indices, signatures and metadata of the 'lenny' repository
this will __re__generate all of the indices, signatures and metadata of the 'squeeze' repository
Configuration
@ -98,12 +98,12 @@ I choose this layout because of the marginal relevance of this depot management;
want to create _yet another not so useful Git repository..._)
* primary link:http://git.lumiera.org/gitweb?p=lumiera/debian;a=blob;f=conf/distributions;hb=refs/heads/depot[configuration]
* Logfile of imports: link:http://git.lumiera.org/gitweb?p=lumiera/debian;a=blob;f=log/lenny.log;hb=refs/heads/depot[for Lenny]
* Logfile of imports: link:http://git.lumiera.org/gitweb?p=lumiera/debian;a=blob;f=log/squeeze.log;hb=refs/heads/depot[for Debian/Squeeze]
[NOTE]
.some special details to note in our setup
=======================================================================================================================
- each block in the 'distributions' file defines a repository for a ``distribution'' (e.g. Lenny, Lucid, Maverick).
- each block in the 'distributions' file defines a repository for a ``distribution'' (e.g. Squeeze, Lucid, Maverick).
Within such a repo, there are sections named 'Components'.
- The _override_ files mentioned in the configuration allow to overwrite / replace arbitrary fields in the metadata of
all packages added to that distribution.

View file

@ -0,0 +1,41 @@
Library documentation
=====================
//Menu: label Support Lib
Alongside with the core Lumiera Application, we're also creating several *Libraries*. +
This section holds pages with in-depth technical documentation regarding those. Please don't
expect a coherent or complete coverage for the time being -- library functionality is typically
``discovered'' while in the middle of the implementation, then factored out, reused and remoulded
over time.
Interface Library
-----------------
The Lumiera Application is designed deliberately in a way to be controllable by scripts. All of
the Session, the models and the backend are fully operational without the GUI. Any significant
functionality can be addressed through the primary Façade Interfaces.
But, in order to make such an open and extensible application a reality, we need to build up
an 'interface layer' -- provided as library for client code and script adapters to link against.
This library allows to access the primary external interfaces and exposes everything needed to
work with the specifics of the application.
[,yellow]#TODO as of 1/2012:# _this is just a nebulous plan for now._ Nothing of the work required
really to address facilities from the outside has been done. All we've managed to achieve is actually
to use clean interfaces and decouple the parts of the application appropriately.
- we still need to identify those core concepts which need to be exposed
- we need to refactor the corresponding data types to resolve cross dependencies to the implementation
- then we could think about moving those data types into an _official Lumiera interface_
Support Library
---------------
There is a huge collection of generic building blocks, re-used more or less frequently.
They're written in a generic way, in accordance with the rest of the source base, relying only
on the general error handling and logging framework used within Lumiera. (Some facilities may depend
on other parts of the Application; this can be qualified as a bug -- please complain when you've found
something of that sort).

View file

@ -0,0 +1,141 @@
Iterators and Pipelines
=======================
The link:http://c2.com/cgi/wiki?IteratorPattern[Iterator Pattern] allows to
expose the contents or elements of any kind of collection, set or container
for use by client code, without exposing the implementation of the underlying
data structure. Thus, iterators are one of the primary API building blocks.
Lumiera Forward Iterator
------------------------
While most modern languages provide some kind of _Iterator,_ the actual semantics
and the fine points of the implementation vary greatly from language to language.
Unfortunately the C++ standard library uses a very elaborate and rather low-level
notion of iterators, which doesn't mix well with the task of building clean interfaces.
Thus, within the Lumiera core application, we're using our own Iterator concept,
initially defined as link:{ldoc}/devel/rfc/LumieraForwardIterator.html[RfC],
which places the primary focus on interfaces and decoupling, trading off
readability and simplicity for (lesser) performance.
.Definition
An Iterator is a self-contained token value,
representing the promise to pull a sequence of data
- rather than deriving from a specific interface, anything behaving
appropriately _is a Lumiera Forward Iterator._ (``duck typing'')
- the client finds a typedef at a suitable, nearby location. Objects of this
type can be created, copied and compared.
- any Lumiera forward iterator can be in _exhausted_ (invalid) state, which
can be checked by +bool+ conversion.
- especially, default constructed iterators are fixed to that state.
Non-exhausted iterators may only be obtained by API call.
- the exhausted state is final and can't be reset, meaning that any iterator
is a disposable one-way-off object.
- when an iterator is _not_ in the exhausted state, it may be _dereferenced_
(`*i`), yielding the ``current'' value
- moreover, iterators may be incremented (`++i`) until exhaustion.
Motivation
~~~~~~~~~~
The Lumiera Forward Iterator concept is a blend of the STL iterators and
iterator concepts found in Java, C#, Python and Ruby. The chosen syntax should
look familiar to C++ programmers and indeed is compatible to STL containers and
ranges. To the contrary, while a STL iterator can be thought of as being just
a disguised pointer, the semantics of Lumiera Forward Iterators is deliberately
reduced to a single, one-way-off forward iteration, they can't be reset,
manipulated by any arithmetic, and the result of assigning to a dereferenced
iterator is unspecified, as is the meaning of post-increment and stored copies
in general. You _should not think of an iterator as denoting a position_ --
just a one-way off promise to yield data.
Another notable difference to the STL iterators is the default ctor and the
+bool+ conversion. The latter allows using iterators painlessly within +for+
and +while+ loops; a default constructed iterator is equivalent to the STL
container's +end()+ value -- indeed any _container-like_ object exposing
Lumiera Forward Iteration is encouraged to provide such an `end()`-function,
additionally enabling iteration by `std::for_each` (or Lumiera's even more
convenient `util::for_each()`).
Implementation
~~~~~~~~~~~~~~
As pointed out above, within Lumiera the notion of ``Iterator'' is a concept
(generic programming) and doesn't mean a supertype (object orientation). Any
object providing a suitable set of operations can be used for iteration.
- must be default constructible to _exhausted state_
- must be a copyable value object
- must provide a `bool` conversion to detect _exhausted state_
- must provide a pre-increment operator (`++i`)
- must allow dereferencing (`*i`) to yield the current object
- must throw on any usage in _exhausted state_.
But, typically you wouldn't write all those operations again and again.
Rather, there are two basic styles of iterator implementations, each of which
is supported by some pre defined templates and a framework of helper functions.
Iterator Adapters
^^^^^^^^^^^^^^^^^
Iterators built based on these adaptor templates are lightweight and simple to use
for the implementor. But they don't decouple from the actual implementation, and
the resulting type of the iterator usually is rather convoluted. So the typical
usage scenario is, when defining some kind of custom container, we want to add
a `begin()` and `end()` function, allowing to make it behave similar to a STL
container. There should be an embedded typedef `iterator` (and maybe `const_iterator`).
This style is best used within generic code at the implementation level, but is not
well suited for interfaces.
-> see 'lib/iter-adapter.hpp'
Iteration Sources
^^^^^^^^^^^^^^^^^
Here we define a classical abstract base class to be used at interfaces. The template
`lib::IterSource<TY>` is an abstract promise to yield elements of type TY. It defines
an embedded type `iterator` (which is an iterator adapter, just only depending on
this abstract interface). Typically, interfaces declare to return an
`IterSource<TY>::iterator` as the result of some API call. These iterators will
hold an embedded back-reference to ``their'' source, while the exact nature of this
source remains opaque. Obviously, the price to pay for this abstraction barrier is
calling through virtual functions into the actual implementation of the ``source''.
Helpers to define iterators
^^^^^^^^^^^^^^^^^^^^^^^^^^^
For both kinds of iterator implementation, there is a complete set of adaptors based
on STL containers. Thus, it's possible to expose the contents of such a container,
or the keys, the values or the unique values just with a single line of code. Either
as iterator adapter (-> see 'lib/iter-adapter-stl.hpp'), or as iteration source
(-> see 'lib/iter-source.hpp')
Pipelines
---------
The extended use of iterators as an API building block naturally leads to building
_filter pipelines_: This technique from functional programming completely abstracts
away the actual iteration, focussing solely on the selecting and processing of
individual items. For this to work, we need special manipulation functions, which
take an iterator and yield a new iterator incorporating the manipulation. (Thus,
in the terminology of functional programming, these would be considered to be
``higher order functions'', i.e. functions processing other functions, not values).
The most notable building blocks for such pipelines are
filtering::
each element yielded by the _source iterator_ is evaluated by a _predicate function,_
i.e. a function taking the element as argument and returning a `bool`, thus answering
a ``yes or no'' question. Only elements passing the test by the predicate can pass
on and will appear from the result iterator, which thus is a _filtered iterator_
transforming::
each element yielded by the _source iterator_ is passed through a _transforming function,_
i.e. a function taking a source element and returning a ``transformed'' element, which
thus may be of a completely different type than the source.
Since these elements can be chained up, such a pipeline may pass several abstraction barriers
and APIs, without either the source or the destination being aware of this fact. The actual
processing only happens _on demand_, when pulling elements from the end of the pipeline.
Often, this end is either a _collecting step_ (pulling all elements and filling a new container)
or again an IterSource to expose the promise to yield elements of the target type.
Pipelines work best on _value objects_ -- special care is necessary when objects with _reference
semantics_ are involved.

View file

@ -48,20 +48,28 @@ The extra components are:
Coding Style
~~~~~~~~~~~~
The Lumiera team agreed on using GNU coding style with some exceptions (no
tabs, line wrap must not always be enforced). Otherwise we are a bit pedantic
to be consistent to make the codebase uniform. Function naming conventions
and other details are described in several RFCs.
The Lumiera team agreed on using GNU coding style with slight adaptations.
Generally speaking, we strive to keep the code base consistent and stick to
widely accepted guidelines and best practices. See our separate
link:{ldoc}/technical/code/codingGuidelines.html[Coding Guidelines] page.
Function naming conventions and other details are also described in several RFCs.
Documentation
~~~~~~~~~~~~~
The central location for all design and technical documentation is the Lumiera
website you're reading right now. Besides that, a summary and introduction
for various components can be found in the file-level doxygen comments, while
details are usually explained in the class and function level comments.
Every public function should be documented with doxygen comments. The overall
design is outlined in this text and documented in the various detail pages and
accompanying RFCs. Bootstrapping the Lumiera design used several other places,
in several TiddlyWikis, a UML model and cehteh's private wiki, most of this
information is asciidoced meanwhile and in progress to be integrated in the
central documentation hierarchy.
==== the TiddlyWiki
Currently, Lumiera is still in the design- and evolution phase. Documentation
is written as an ongoing effort.
There is an embedded JavaScript wiki (TiddlyWiki) within the source tree, mostly
used as design notebook, featuring day-to-day design sketches and notes, but also
quite some more persistent planning. Finished documentation text is constantly
moved over to the documentation section(s) of the Lumiera website. +
-> access the Proc-Layer link:http://lumiera.org/wiki/renderengine.html[TiddlyWiki online here]
Test Driven Development
@ -73,7 +81,7 @@ implemented until they pass their tests. While increasing the initial
development effort significantly, this approach is known to lead to
clearly defined components and overall increases code quality.
In practice, this approach might not be suitable at times,
nevertheless we try to sick to it as far as possible
nevertheless we try to stick to it as far as possible
and maintain fairly complete test coverage.
@ -92,14 +100,20 @@ several important parts of the applications are loaded as plug-ins, starting
with the GUI.
User Interfaces
---------------
User Interface(s)
-----------------
The purpose of the user interface(s) is to act on the _high-level data model_
contained within the Session, which belongs to the _processing layer_ below.
User interfaces are implemented as plug-ins and are pulled up on demand,
they won't contain any relevant persistent state beyond presentation.
_As of 2011, the one and only interface under active development is
the Lumiera GTK GUI,_ based on GTK-2 / gtkmm. The sources are in tree
(directory 'src/gui') and it is integrated into the standard build and
installation process. By default, running the 'lumiera' executable will
load and start this GUI as a Lumiera module from 'modules/gtk_gui.lum'
Processing Layer
@ -157,9 +171,65 @@ Low Level Model
~~~~~~~~~~~~~~~
_tbw_
Play/Render processes
~~~~~~~~~~~~~~~~~~~~~
_tbw_
Play/Rendering subsystem
~~~~~~~~~~~~~~~~~~~~~~~~
Within Lumiera, »Player« is the name for a subsystem responsible for organising and tracking
_ongoing playback and render processes._ The player subsystem does not perform or even manage
any render operations, nor does it handle the outputs directly. +
Yet it addresses some central concerns:
uniformity::
All playback and render processes are on equal footing, handled in a similar way.
integration::
The player cares for the necessary integration with the other subsystems
+
it consults the _Output Management,_ retrieves the necessary information from the _Session_
and coordinates the forwarding of Backend calls.
time quantisation::
The player translates continuous time values into discrete frame counts.
+
To perform this _quantisation,_ the help of the session for building a TimeGrid
for each output channel is required.
The player service
^^^^^^^^^^^^^^^^^^
Client code accesses the player (subsystem) through the play-facade (`lumiera::Play`).
The exposed service allows to _set up an output connection for playback or rendering,_
resulting in a play-controller object.
.Play::Controller
This controller frontend represents the presence of such an active output connection
and incorporates a state machine supporting the usual things you'd expect to do with
a player (Play, pause, FFwd, Rew, scrubbing, jumping, looping). This controller object
is a copyable smart-handle -- all instances act as if wired in parallel.
.time control
The play-controller frontend makes heavy use of `time::Control`. This is a mediator
to accept and forward _mutations_ on time values and time ranges, possibly involving
frame quantisation. After attaching it to a target time value, it accepts changes,
offsets and nudging, translates these into the appropriate target modifications
and notifies any attached _change listeners_.
.play process
Ongoing effort to calculate a stream of frames for playback or rendering. +
The play process is a conceptual entity linking together several activities in the backend
and the render engine. It maintains a registration entry for the process to keep track of
associated entities, resources allocated and calls dispatched as a consequence. Besides
each play process is wired to at least one play-controller acting as frontend interface
and information hub for the client code.
NOTE: the player is in no way engaged in any of the actual calculation and management tasks
necessary to make the stream of calculations actually happen. The play process code contained
within the player subsystem is largely comprised of organisational concerns and not especially
performance critical.
- the backend is responsible for dispatching the calculation stream and scheduling calculation jobs
- the render engine has the ability to carry out individual frame calculations
- the OutputSlot exposed by the output manager is responsible for accepting timed frame delivery
@ -182,9 +252,9 @@ reopened on demand. Hardlinked files are recognized and opened only once.
All file access is done by memory mapping to reduce data copies between
userland and kernel. Moreover the kernel becomes responsible to schedule
paging (which will be augmented by lumiera) to make the best use of available
resources. Memory is mapped in biggier possibly overlapping windows of
resonable sized chunks. Requests asking for a contingous set of data from the
file in memory.
resources. Memory is mapped in larger windows of reasonable sized chunks, possibly
overlapping each other. Clients may request a specific continuous set of data from
the file to be accessible as memory block.
.Indexing
@ -199,13 +269,41 @@ Manages several classes of threads in pools. The threadpool is reasonably
dumb. Higher level management will be done by the Schedulers and Jobs.
Engine Interface
~~~~~~~~~~~~~~~~
While on itself just a thin interface and adaptation layer forwarding calls to
the primary backend facilities, the Engine Interface is the primary point of service
accessed by Proc-Layer to use the backend services for rendering content.
.Calculation Streams
The Engine Interface is cast in terms of a _calculation stream_ entity. This is
a stream of expected and ongoing frame calculations for multiple channels, to be
managed as a compound. The calculated frames will be delivered into an output slot
right away. No assumptions are made regarding the ordering of these individual
calculations -- they may be performed in parallel, constrained by input and
resource prerequisites solely.
.Frame Dispatcher
For the actual processing, calculation streams need to be translated into individual
calculation jobs to be scheduled. For each uniform _segment of the effective timeline,_
the typical recursive descent call characteristic for _pull processing_ results in a
Job Ticket.
.Job Ticket
This structural descriptor of the actual calculations to be performed is the base
for creating individual jobs: Already specialised for a distinct segment of the
effective timeline and tailored for the needs of a given calculation stream,
the job ticket acts as blueprint for the actual jobs to be enqueued
with the _Scheduler._
Schedulers
~~~~~~~~~~
Scheduling Queues for different purposes:
.Deadline
Higher priority jobs ordered by a deadline time plus some (negative) hystersis. Jobs are
Higher priority jobs ordered by a deadline time plus some (negative) hysteresis. Jobs are
started when they approach their deadline. Jobs who miss their deadline are
never scheduled here.
@ -291,6 +389,27 @@ Plugin loader
~~~~~~~~~~~~~
_tbw_
Advice framework
~~~~~~~~~~~~~~~~
This is a ``whiteboard'' system, allowing implementation-level services to _publish_
some piece of information as _advice_, while other parts of the system may pick up
this advice just by a name token, without requiring a direct knowledge of the
original _advisor._ The _Advice System_ is a singleton service maintaining a
lookup and registration data structure. Individual _piece of advice_ elements
are stored _as value copy_. Publishing new advice requires locking, but accessing
advice is lock-free (actually there needs to be a memory barrier ``somewhere'',
otherwise the advice requesting client might not see new advice)
.Advice topics
Advice is organised into categories, based on the type of the advice item and
some additional symbolic identifiers. Actually these are syntactically represented
similar to the _atoms_ of a rules based system (``Prolog syntax''). Currently (2010)
only ground terms (completely static symbols) are supported. But the intention is to
extend the system to allow for variables in these terms. This will turn the matching
of advice provisions and requests into a unification, allowing the advice item to
be parametrised.
Rules system
~~~~~~~~~~~~
_tbw_
@ -308,17 +427,40 @@ Lua Scripting
_tbw_
Library
-------
The Lumiera support library contains lots of helper functionality
factored out from the internals and re-used. It is extended as we go.
practical shortcuts
~~~~~~~~~~~~~~~~~~~
The header 'lib/util.hpp' defines some shortcuts heavily used throughout
the code base. The idea is to highlight a common semantic meaning, while
hiding differentiation on the technical details.
isnil:: indicate a _missing value_, irrespective if this is a NULL pointer,
an empty string or an empty container. Several of our custom wrappers also
support this notion.
contains:: indicate that a value is _somehow contained_ within a collection,
irrespective if this is a set, a map, a vector or string (substring test)
sanitise:: make any string usable as identifier.
In a similar vein, the header 'lib/util-foreach.hpp' provides a generic
``for-each-element'' mechanism, which works for all STL containers, but
also for all _Lumiera Forward Iterators_. The loop body is provided
as a functor. In case this functor is a predicate (boolean result),
the +and_all+ and +has_any+ functions allow to test for conjunction
and disjunction.
Locking
~~~~~~~
Based on object monitors. Performance critical code
uses mutexes, condition vars and rwlocks direcly.
General purpose Locking is based on object monitors. Performance critical code
in the backend uses mutexes, condition vars and rwlocks directly.
Intentionally no semaphores.
- C++ locks are managed by scoped automatic variables
@ -328,15 +470,80 @@ Intentionally no semaphores.
Time
~~~~
Time values are represented by an opaque date type `lumiera::Time`
Time values are represented by a family of opaque data types
with overloaded operators. The implementation is based on `gavl_time_t`,
an integral (µsec) time tick value. Any Time handling and conversions
is centralised in library routines.
an integral (µsec) time tick value. Thus, the arithmetic on time values
and time spans is limited and any Time handling and conversion is
centralised in library routines.
We distinguish between time values and a _quantisation_ into a frame
or sample grid. In any case, quantisation has to be done once, explicitly
and as late as possible. See the link:{rfc}/TimeHandling.html[Time handling RfC].
.time values
The Lumiera library defines several flavours of time values. All of
these internal time values have in common that they are _opaque_ and not
directly related to any human readable or external (wall clock) time.
Moreover, most of these time values are immutable, yet there are two
mechanisms to allow for changing time values (TimeVar and Mutation).
.quantised time
Special flavours of these time values additionally carry the reference
to a (frame) alignment grid, while being time value entities in all other
respects. But _only those quantised time values_ expose functions to
convert the internal opaque time into a human readable or externally
relevant time format -- including SMPTE or frame counts.
.time (alignment) grid
Thus, any usage of time values is forced to refer to such a time alignment
grid explicitly, at least when leaving the realm of the internal opaque
time values. This is the point where the *time quantisation* is actually
performed, imposing some information loss (as any rounding operation does). +
A time alignment grid is exactly that: a set of functions to perform
this lossy conversion. Implicitly this involves the definition of a
_time origin_ (reference point where the external time is zero), and
typically this also includes the definition of a _frame rate_ (but
in the most general case, this frame rate might be variable and
change at various places of the time axis). Consequently, all time
grids are Assets and defined as part of the concrete session.
Time Code
^^^^^^^^^
Historically, Time Code was seen as the foundation of any film editing.
Similarly, the first generation of editing systems used Time Code as a
foundation. Today, we consider such a design as questionable.
Lumiera takes a different approach here: Time code is reduced to a mere
mode of presentation, i.e. a formatting of existing time values. It is
void of any substantial meaning. To the contrary, the operation of
_frame quantisation_ (see above) is considered to be fundamental,
causing an irreversible loss of information. The design of time handling
chosen within Lumiera forces you to decide on using a specific _time grid_,
prior to being able to format an internal (opaque) time value in any kind
of time code. And only as late as when _actually retrieving_ this time code
formatted value, the actual quantisation (grid alignment) happens.
In practice, establishing a time grid requires knowledge about the output
format. Thus, an (sufficiently resolved) *output designation* is required
to perform any grid alignment and time code formatting. Typically this
happens when a timeline or similar part of the High-Level-Model is connected
to a concrete output or an global bus defining a frame rate already. The model
contents as such are _frame rate independent_.
The following time code formats are supported, both for programmatic access
and display within the GUI
- frame count
- SMPTE
- SMPTE drop-frame _[,yellow]#TODO# as of 2011_
- hours:mins:secs _[,yellow]#TODO# as of 2011_
- fractional seconds _[,yellow]#TODO# as of 2011_
- musical bars:beats _[,yellow]#TODO# as of 2011_
As a corollary, as any rendering is based on frame numbers, it requires an
output connection or something similar to establish a frame grid.
Errors
~~~~~~
@ -425,7 +632,33 @@ mayfail()
Singletons
~~~~~~~~~~
_tbw_
Deliberately, Lumiera stays below the level of utilising a dependency injection framework.
Consequently, we access most services _by type name_, pulling up a single implementation
instance on demand. Rather than placing this singleton lifecycle logic into the individual
implementation classes, we use a _singleton factory_, managing the lifecycle through static
variables and placing the singleton object into a static memory block. Singleton initialisation
is protected by a monitor per singleton type, while shutdown is triggered by the clean-up of
a static variable. This results in the general policy that within Lumiera, performing any
``business'' code in the application shutdown phase (after exiting +main()+) is _strictly
prohibited._ Generally speaking, destructors _must not perform any significant work_ and
are expected to be failsafe.
.accessing the singleton instance
By convention, when clients are expected actively to access the singleton instance,
the interface class holds the singleton factory as a public static member with the
name +instance+. This allows clients to write `SomeService::instance()` to get a
reference to the implementation.
.subclasses and mock testing support
There is a mechanism to ``push aside'' the existing singleton instance and shadow
it temporarily with a mock implementation. Besides, there is a variation of the
general singleton factory, allowing to fabricate a specific subclass of the
exposed interface. Both of these facilities are rather technically involved
and require some specific set-up -- fortunately it turned out that they are
used only occasionally and rarely required. (Probably this is a result of
Lumiera being developed _test-driven_ -- things are being written mostly
in a unit test friendly fashion).
Extensible Factory
~~~~~~~~~~~~~~~~~~
@ -437,27 +670,162 @@ _tbw_
Iterators
~~~~~~~~~
Iterators serve to decouple a collection of elements from the actual data type
implementation used to manage those elements. The use of iterators is a
design pattern.
-> see link:{ldoc}/technical/library/iterator.html[detailed library documentation]
Lumiera Forward Iterator
^^^^^^^^^^^^^^^^^^^^^^^^
_tbw_
Within Lumiera, we don't treat _Iterator_ as a base class -- we treat it as a _concept_
for generic programming, similar to the usage in the STL. But we use our own definition
of the iterator concept, placing the primary focus on interfaces and decoupling.
Our ``Lumiera Forward Iterator'' concept deliberately removes most of the features
known from the STL. Rather, such an iterator is just the promise for pulling values
_once_. The iterator can be disposed when _exhausted_ -- there is no way of resetting,
moving backwards or doing any kind of arithmetic with such an object. The _exhausted
state_ can be detected by a +bool+ conversion (contrast this with STL iterators, where
you need to compare to an +end+ iterator). Beyond that, the usage is quite similar,
even compatible to +std::for_each+.
Iterator Adapters
^^^^^^^^^^^^^^^^^
_tbw_
We provide a collection of pre defined adapter templates to ease building
Lumiera Forward Iterators.
- a generic solution using a _iteration control_ callback API
- the `lib::RangeIter` just wraps up a pair of iterators for ``current position''
and ``end'' -- compatible with the STL
- there is a variant for automatically dereferencing pointers
- plus a set of adapters for STL containers, allowing to expose each value, each
key, distinct values and so on.
Iterator Adapters are designed for ease of use, they don't conceal the underlying
implementation (and the actual type is often quite convoluted).
Iteration Sources
^^^^^^^^^^^^^^^^^
To the contrary, the `lib::IterSource<TY>` template is an abstract base class.
This allows to expose the promise to deliver values through any kind of API, without
disclosing the actual implementation. Obviously, this requires the use of virtual
functions for the actual iteration.
Again, there are pre-defined adaptors for STL containers, but the actual container
is concealed in this case.
Itertools
^^^^^^^^^
_tbw_
Iterators can be used to build pipelines. This technique from functional programming
allows to abstract away the actual iteration completely, focussing only on the way
individual elements are processed. To support this programming style, several support
templates are provided to build _filtering iterators, transforming iterators,_ to pick
only _unique values,_ to _take a snapshot on-the-fly_ etc. There are convenience
builder functions for those operations, figuring out the actual source and destination
types by template metaprogramming.
Front-end for boost::format
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatting values with `printf` is a notorious source for intricate errors.
Additionally, using (s|n)`printf` can be clunky in practice, and it doesn't
support custom defined string conversions, which are an important diagnostic
aid when working with objects. We might
link:{ldoc}/technical/howto/UsingBoost.html[use Boost], which provides the
`boost::format` library to address those problems. Unfortunately including this
header-only library solution incurs a significant overhead, both in terms of
compilation time and code size. While maybe still acceptable at the implementation
level, using boost::format is thus certainly a ``no go'' for any code residing
in headers frequently included.
To work around these problems, we provide a front-end wrapper, defined in
'lib/format-string.hpp'. This allows to keep the actual boost::format based
implementation confined to a single translation unit, while still being able
to use all primitive types as usual with boost::format or printf. Additionally,
our frontend automatically invokes a custom or built-in string conversion, if
applicable, it dereferences pointers and catches all errors encountered while
in formatting. So it's well suited for usage in error handling code.
[source,C]
------------------------------------------------------------
#include "lib/format-string.hpp"
using util::_Fmt;
double total = 22.9499;
const char * currency = "€";
cout << _Fmt("price %+5.2f %s") % total % currency << endl;
------------------------------------------------------------
WARNING: `boost::format` is known to be about 10 times slower than `printf` --
best to avoid it for performance critical code.
Wrappers and Opaque Holders
~~~~~~~~~~~~~~~~~~~~~~~~~~~
- smart handle
- unified value/ptr/reference holder
- ownership managing collection
- opaque holder to ``piggyback'' an object inline,
without the need for heap allocated storage
- vector of references
.smart handle
This pervasively used handle type is based on the reference counting
smart-pointer type of C\++ (`boost::shared_ptr` and C++11). Typically
this also includes a pointer to some kind of implementation service. +
Yet still, handles based on `lib::Handle<TY>` should not be confused with
smart pointers. Rather, we use the ref-counting mechanism to invoke a custom
clean-up callback when the last handle goes out of scope. Typically, the
implementation service is kept entirely opaque, while the copyable handle
objects also implement a front-end interface for client access.
.unified holder for value/ptr/reference
_tbw_
.ownership managing collection
When implementing services, frequently we encountered the situation that
a manager object creates and owns some component elements or sub-services.
The library provides two special collections, which also take ownership
of their contents and care for automatic clean-up. Especially, contrary
to the STL containers, those custom containers support use of
_non-copyable object_ (as a rule, all objects with reference semantics
are defined non-copyable in Lumiera).
- the `ScopedPtrVect` is a vector taking ownership of heap allocated objects
- the `ScopedCollection` is a fixed-size collection, holding all the child
objects within a single (heap allocated) storage block
.opaque holder
There is a family of holder objects, all based on placement-new of the
contained object into an embedded buffer. The purpose is to ``piggyback''
an object inline, without the need for heap allocated storage. Frequently
the motivation for this usage pattern is *type erasure*: the detailed knowledge
context used to build some kind of object is discarded prior to further use,
relying on generic information and the hidden parametrisation henceforth.
.polymorphic values
The C++ language has direct support for _value semantics_ and allows to build
value objects to be treated as first class citizens. Unfortunately this doesn't
fit well with the chosen approach to object orientation, where polymorphism relies
on reference semantics. Thus, most of the fundamental design patterns drive us into
having an object manager somewhere hidden within the implementation level, to
manage the memory for maintaining the subclass instances to be concealed
at the usage site.
To avoid this dilemma, we utilise the technique of the opaque holder to provide
objects with value semantics, while actually placing the instance of a subclass
into the inline buffer. Clients access this embedded object by automatic type
conversion to the interface type, which gives us polymorphism. While the
definition of such a beast is quite involved, the runtime overhead is
surprisingly low. When compared with standard polymorphism, creating
objects and invoking operations has zero overhead, while copying
and assignment incur the cost of an additional virtual call,
assuming the target objects cooperate by mixing in a
copy management interface.
.vector of references
a minimal interface for an array-like entity, but exposing only references
to the contained elements. Obviously this means to use a virtual call for
the subscript operation. This interface allows interfaces to expose something
_array-like_ without committing to a specific implementation type for the
exposed elements within the ``array''. The Lumiera library provides a set
of standard implementations for this +lib::RefArray+ interface, including
a vector based and a directly array based variant.
Unique Identifiers
~~~~~~~~~~~~~~~~~~
@ -477,16 +845,42 @@ at runtime (type erasure)
Typed Lookup
^^^^^^^^^^^^
_planned_ a system of per-type lookup tables, based on `EntryID`, together
_[,yellow]#planned#_ a system of per-type lookup tables, based on `EntryID`, together
with a type-specific access functor. Together, this allows to translate
transparently and typesafe from symbolic ID to object instance, which
is a prerequisite for integrating a rules based system. Besides, these
tables allow unique IDs per type
tables allow unique IDs per type +
-> more details about this concept link:{l}/wiki/renderengine.html#EntryID%20TypedID%20TypedLookup[in the TiddlyWiki]
Allocators
~~~~~~~~~~
_tbw_
Lumiera utilises several custom allocators, each tailored to a specific purpose.
All these allocator-_frontends_ share a common pooling allocation backend
WARNING: currently (as of 2011) the low-level pooled allocator within the
backend isn't implemented; instead we do just heap allocations.
See Ticket #231
.Allocation Cluster
This allocation scheme is used within the context of the Builder and the
Low-Level-Model. The predominant usage trend in this realm is to create
and wire a family of small objects right after each other, within a build
process. These objects are intended to work together and will be discarded
all at once, after hot-swapping a new version of that model segment.
.Typed Allocation Manager
This allocation framework is used at various places when a large number of
similar objects is expected to be coming and going. New objects are
placement-constructed into the allocated space and immediately wrapped
with a ref-counting smart-ptr to manage ownership.
.Simple Allocator
Based on the allocator interface of the STL, allowing just for plain
allocations and de-allocations without any further instance and lifecycle
management. Currently (as of 2011) this allocator isn't used much -- it is
conceivable that later we'll detect some specific STL based containers to be
performance critical with respect to allocation.
Memory Pools
@ -525,6 +919,11 @@ Functor Utils
^^^^^^^^^^^^^
_tbw_
Duck Typing
^^^^^^^^^^^
_tbw_
Preprocessor Metaprogramming
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -546,7 +945,7 @@ fail. NULL strings are propagated to "" empty strings.
Polymorphic Programming in C
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Just a macro for simplyfying vtable function calls
Just a macro for simplifying vtable function calls
VCALL(Handle, function, arguments...)
translates to
Handle->vtable->function (Handle, arguments...)

View file

@ -1,6 +1,6 @@
Lumiera (as seen) from Outer Space
==================================
:Author: Christian Thäter ct@pipapo.org
:Author: Lumiera_Core_Developers
:Date: Summer 2010
they get to work with. It is aimed at workflow designers and anyone who wants
to know how the program works in general.
******************************************************************************
About this Document
-------------------
// all things starting with '//' are asciidoc comments and drafts/notes while
// working on this document
.About this Document
This document is meant to be read electronically, it contains a lot of
hyper-links between explanations denoted by an arrow ->. Lumiera is still in
development, we describe here planned features without explicitly tagging them;
@ -38,8 +35,13 @@ Vision
// objective and goals of the project
Lumiera claims to be `professional', this is quite a vague term and needs
some explanation what it means to us:
Lumiera claims to be a _professional non-linear video editor_. To start with, we should
point out that ``professional'' does not necessarily mean ``commercial'' or ``industrial''.
It's more of an attitude or mindset -- doing work seriously, and to be subject to any
kind of wider goal, demand, or purpose. When it comes to editing film, this might be
artistry, a narration or meaning to convey, a political message or something to show
to your audience. Anyhow, for the tools, the editing software used to this end,
we can identify several properties and requirements, to be labeled ``professional'':
Reliability::
Whatever happens, your work must be safe, protected against software
@ -47,34 +49,34 @@ some explanation what it means to us:
never crash, in practice even crashes or power outages should not
result in lost work.
Productivity::
Quality::
If you work with high quality, cinema grade digital video material you
want to be sure that you can deliver crisp quality without compromise,
throughout the whole workflow to your final product. All rendering
must be reproducible to the bit.
Performance and Productivity::
Professionals want to get things done, in time, but optionally with control
over every aspect. Balancing these goals should be the central concern for
workflow design and usability.
Quality::
If you work with high quality, cinema grade digital video material you
want to be sure that you can deliver this crisp quality without
compromise throughout you workflow to your final product. All rendering
must be reproducible to the bit.
Scalability::
Scalability and Adaptability::
Projects and budgets differ, hardware advances, Lumiera must scale
in different dimensions and use the available resources as best as it
can. From small Laptops to multicore Computers and Renderfarms.
can. From small Laptops to multi core computers and Renderfarms.
Future Proofness::
Durability::
Soft and Hardware advances at a fast pace. We must not lock into the
current state of technology but being flexible to extend the System
without breaking compatibility. Projects you create nowadays with
Lumiera should be usable in foreseeable future, at least there needs
to be a guaranteed upgrade path.
<
Fundamental Forces
------------------
// the basic ideas which drive the lumiera design
// the basic ideas which drive the Lumiera design
The Lumiera design is guided by a small number of basic principles. Keeping
these in mind will help to understand how actually more interesting things can

View file

@ -35,7 +35,7 @@ should _not_ be done as root.
. add a suitable source line to your *Apt configuration* ('/etc/apt/sources.lst')
+
----
deb-src http://lumiera.org/debian/ lenny experimental
deb-src http://lumiera.org/debian/ squeeze experimental
----
. get all the *build dependencies*
+

View file

@ -1,97 +0,0 @@
# Copyright (C) Lumiera.org
# 2008, Joel Holdsworth <joel@airwebreathe.org.uk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# Directory locations: SVG icon sources, pre-rendered bitmap icons,
# and the build directory receiving the rendered PNG output.
svgdir = $(top_srcdir)/icons/svg
prerendereddir = $(top_srcdir)/icons/prerendered
icondir = $(top_builddir)
# helper script rendering one SVG source into the standard icon sizes
iconcommand = python $(top_srcdir)/admin/render_icon.py
# output directories, one per icon size
16x16 = $(icondir)/16x16
22x22 = $(icondir)/22x22
24x24 = $(icondir)/24x24
32x32 = $(icondir)/32x32
48x48 = $(icondir)/48x48
# source directories holding the pre-rendered (hand-tuned) bitmaps
16x16pre = $(prerendereddir)/16x16
22x22pre = $(prerendereddir)/22x22
24x24pre = $(prerendereddir)/24x24
32x32pre = $(prerendereddir)/32x32
48x48pre = $(prerendereddir)/48x48
# complete list of icon files to build and distribute
icons = \
$(16x16)/app-icon.png $(22x22)/app-icon.png $(24x24)/app-icon.png $(32x32)/app-icon.png $(48x48)/app-icon.png \
$(16x16)/tool-arrow.png $(22x22)/tool-arrow.png $(24x24)/tool-arrow.png $(32x32)/tool-arrow.png $(48x48)/tool-arrow.png \
$(16x16)/tool-i-beam.png $(22x22)/tool-i-beam.png $(24x24)/tool-i-beam.png $(32x32)/tool-i-beam.png $(48x48)/tool-i-beam.png \
$(16x16)/track-disabled.png \
$(16x16)/track-enabled.png \
$(16x16)/track-locked.png \
$(16x16)/track-unlocked.png \
$(16x16)/panel-assets.png $(22x22)/panel-assets.png $(32x32)/panel-assets.png \
$(16x16)/panel-timeline.png \
$(16x16)/panel-viewer.png $(22x22)/panel-viewer.png $(32x32)/panel-viewer.png
dist_pkgdata_DATA += $(icons)
all: $(icons)
# remove all generated size directories on `make clean`
clean-local:
rm -rf $(16x16) $(22x22) $(24x24) $(32x32) $(48x48)
# ========== SVG Icons ==========
# Each rule renders one SVG source into all listed sizes at once,
# via the render_icon.py helper (requires the rsvg-convert tool).
# App Icon
$(16x16)/app-icon.png $(22x22)/app-icon.png $(24x24)/app-icon.png $(32x32)/app-icon.png $(48x48)/app-icon.png : $(svgdir)/app-icon.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
# Timeline Tools
$(16x16)/tool-arrow.png $(22x22)/tool-arrow.png $(24x24)/tool-arrow.png $(32x32)/tool-arrow.png $(48x48)/tool-arrow.png : $(svgdir)/tool-arrow.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
$(16x16)/tool-i-beam.png $(22x22)/tool-i-beam.png $(24x24)/tool-i-beam.png $(32x32)/tool-i-beam.png $(48x48)/tool-i-beam.png : $(svgdir)/tool-i-beam.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
# Timeline Tracks
$(16x16)/track-disabled.png : $(svgdir)/track-disabled.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
$(16x16)/track-enabled.png : $(svgdir)/track-enabled.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
$(16x16)/track-locked.png : $(svgdir)/track-locked.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
$(16x16)/track-unlocked.png : $(svgdir)/track-unlocked.svg $(top_builddir)/rsvg-convert
$(iconcommand) $< $(icondir)
# ========== Prerendered Icons ==========
# These are not rendered from SVG; the hand-tuned bitmaps are
# simply copied from the prerendered directory into the build tree.
# Panels
$(16x16)/panel-assets.png: $(16x16pre)/panel-assets.png
cp $(16x16pre)/panel-assets.png $(16x16)
$(22x22)/panel-assets.png: $(22x22pre)/panel-assets.png
cp $(22x22pre)/panel-assets.png $(22x22)
$(32x32)/panel-assets.png: $(32x32pre)/panel-assets.png
cp $(32x32pre)/panel-assets.png $(32x32)
$(16x16)/panel-timeline.png: $(16x16pre)/panel-timeline.png
cp $(16x16pre)/panel-timeline.png $(16x16)
$(16x16)/panel-viewer.png: $(16x16pre)/panel-viewer.png
cp $(16x16pre)/panel-viewer.png $(16x16)
$(22x22)/panel-viewer.png: $(22x22pre)/panel-viewer.png
cp $(22x22pre)/panel-viewer.png $(22x22)
$(32x32)/panel-viewer.png: $(32x32pre)/panel-viewer.png
cp $(32x32pre)/panel-viewer.png $(32x32)

1
research/DIR_INFO Normal file
View file

@ -0,0 +1 @@
Experiments and Investigations. Not installed

22
research/SConscript Normal file
View file

@ -0,0 +1,22 @@
# -*- python -*-
##
## SConscript - SCons buildscript for experiments and investigations.
## Things defined here usually won't be installed
##
# pull in the build environment and libraries exported by the parent build
Import('env core support_lib')
# clone the environment so experimental flags don't leak into the main build
envR = env.Clone()
# envR.Append(CCFLAGS=' -O3 ')
# build additional test and administrative tools....
experiments = [ envR.Program('try', ['try.cpp'] + support_lib) #### to try out some feature...
]
#
# define Phony targets
# - 'scons research' triggers building of experimental code
#
env.Alias('research', experiments )

165
research/try.cpp Normal file
View file

@ -0,0 +1,165 @@
/* try.cpp - for trying out some language features....
* scons will create the binary bin/try
*
*/
// 8/07 - how to control NOBUG??
// execute with NOBUG_LOG='ttt:TRACE' bin/try
// 1/08 - working out a static initialisation problem for Visitor (Tag creation)
// 1/08 - check 64bit longs
// 4/08 - comparison operators on shared_ptr<Asset>
// 4/08 - conversions on the value_type used for boost::any
// 5/08 - how to guard a downcasting access, so it is compiled in only if the involved types are convertible
// 7/08 - combining partial specialisation and subclasses
// 10/8 - abusing the STL containers to hold noncopyable values
// 6/09 - investigating how to build a mixin template providing an operator bool()
// 12/9 - tracking down a strange "warning: type qualifiers ignored on function return type"
// 1/10 - can we determine at compile time the presence of a certain function (for duck-typing)?
// 4/10 - pretty printing STL containers with python enabled GDB?
// 1/11 - exploring numeric limits
// 1/11 - integer floor and wrap operation(s)
// 1/11 - how to fetch the path of the own executable -- at least under Linux?
// 10/11 - simple demo using a pointer and a struct
// 11/11 - using the boost random number generator(s)
// 12/11 - how to detect if string conversion is possible?
// 1/12 - is partial application of member functions possible?
/** @file try.cpp
** Research: perform a partial application of a member function.
** The result of this partial application should be a functor expecting the remaining arguments.
** The idea was to use this at various library functions expecting a functor or callback, so to
** improve readability of the client code: clients could then just pass a member pointer, without
** the need to use any tr1::bind expression.
**
** \par Costs in code size
** While this turned out to be possible, even without much work, just based on the existing
** templates for partial functor application (function-closure.hpp), the resulting code size
** is rather sobering. Especially in debug mode, quite some overhead is created, which makes
** usage of this convenience feature in general purpose library code rather questionable.
** When compiling with -O3 though, most of the overhead will be removed
**
** The following numbers could be observed:
** \code
** debug / stripped // debug-O3 / stripped
** just using a member pointer: 39013 / 7048 42061 / 7056
** using tr1::bind and function: 90375 / 15416 65415 / 9376
** partial apply, passing functor: 158727 / 23576 97479 / 11296
** partial apply with mem pointer: 119495 / 17816 78031 / 9440
** \endcode
*/
#include "lib/meta/tuple.hpp"
#include "lib/meta/function-closure.hpp"
//#include <tr1/functional>
#include <iostream>
using lib::meta::Types;
using lib::meta::Tuple;
//using std::tr1::placeholders::_1;
//using std::tr1::placeholders::_2;
using std::tr1::function;
using std::tr1::bind;
using std::string;
using std::cout;
using std::endl;
namespace lib {
namespace meta{
namespace func{
/** Metafunction: derive the type resulting from partial application.
 *  Given a function signature SIG and the count `num` of leading
 *  arguments to be closed (bound) immediately, compute the reduced
 *  signature of the remaining function and the matching functor type.
 *  @note relies on the lib::meta helpers (_Fun, Splice, Types, _Sig
 *        from function-closure.hpp) to dissect the argument typelist.
 */
template<typename SIG, uint num>
struct _PupS
{
typedef typename _Fun<SIG>::Ret Ret;                              // result type of SIG
typedef typename _Fun<SIG>::Args::List Args;                      // complete argument typelist
typedef typename Splice<Args,NullType,num>::Front ArgsFront;      // the first `num` argument types
typedef typename Splice<Args,NullType,num>::Back ArgsBack;        // argument types after position `num`
typedef typename Types<ArgsFront>::Seq ArgsToClose;               // sequence of args to bind now
typedef typename Types<ArgsBack>::Seq ArgsRemaining;              // sequence of args still open
typedef typename _Sig<Ret,ArgsRemaining>::Type ReducedSignature;  // signature after partial application
typedef function<ReducedSignature> Function;                      // functor type exposing that signature
};
/** Close the first argument of the given function over the fixed value a1.
 *  @return functor expecting the remaining arguments of SIG */
template<typename SIG, typename A1>
inline
typename _PupS<SIG,1>::Function
papply (SIG f, A1 a1)
{
typedef typename _PupS<SIG,1>::ArgsToClose BoundArgs;
Tuple<BoundArgs> boundValues (a1);
return PApply<SIG,BoundArgs>::bindFront (f, boundValues);
}
/** Close the first two arguments of the given function over fixed values.
 *  @param f  function (or member function pointer) with signature SIG
 *  @param a1 value bound to the first argument
 *  @param a2 value bound to the second argument
 *  @return functor expecting the remaining arguments of SIG
 *  @note bugfix: the trait _PupS defines the member `ArgsToClose`;
 *        the previous code referred to a non-existent `ArgPrefix`,
 *        which broke compilation whenever this overload was instantiated.
 */
template<typename SIG, typename A1, typename A2>
inline
typename _PupS<SIG,2>::Function
papply (SIG f, A1 a1, A2 a2)
{
typedef typename _PupS<SIG,2>::ArgsToClose ArgsToClose;
typedef Tuple<ArgsToClose> ArgTuple;
ArgTuple val(a1,a2);
return PApply<SIG,ArgsToClose>::bindFront (f, val);
}
}}} // namespace lib::meta::func
/**
 * Demo class with private state, used to investigate whether a
 * private member function can be partially applied over `this`,
 * yielding a plain functor usable by outside clients.
 */
class Something
{
int offset_;

/** print the given character, shifted by the stored offset */
void
printShifted (char c)
{
char shifted (c + offset_);
cout << "Char-->" << shifted <<endl;
}

public:
Something (int off =0)
: offset_(off)
{ }

typedef function<void(char)> FunP;

/** bind the private member function to this instance,
 *  so clients get a functor taking just the char argument */
FunP
getBinding()
{
return lib::meta::func::papply (&Something::printShifted, this);
}
};
int
main (int, char**)
{
Something some(23);
Something::FunP fup = some.getBinding();
fup ('a');
cout << "\n.gulp.\n";
return 0;
}

48
src/SConscript Normal file
View file

@ -0,0 +1,48 @@
# -*- python -*-
##
## SConscript - SCons buildscript for the Lumiera Application.
## Definitions how to build the main tree
##
from Buildhelper import srcSubtree
from Buildhelper import scanSubtree
# build environment, icon and config artifacts provided by the parent SConstruct
Import('env icons config')
# define the source file/dirs comprising each artifact to be built.
lLib = env.SharedLibrary('lumiera', srcSubtree('lib'), install=True)
lApp = env.SharedLibrary('lumieracommon', srcSubtree('common'), install=True, LIBS=lLib)
lBack = env.SharedLibrary('lumierabackend', srcSubtree('backend'),install=True)
lProc = env.SharedLibrary('lumieraproc', srcSubtree('proc'), install=True)
# 'core' bundles all shared libraries linked into the main executable
core = lLib+lApp+lBack+lProc
core_lib = core
support_lib = lLib
# main executable, plus the config files it needs at runtime
lumiera = ( env.Program('lumiera', ['lumiera/main.cpp'] + core, install=True)
+ config
)
# Install the lumiera application:
# symlink the executable into the bin dir
env.SymLink('#$DESTDIR/bin/lumiera',env.path.installExe+'lumiera','../lib/lumiera/lumiera')
# building Lumiera Plugins
plugins = [] # currently none
# the Lumiera GTK GUI
# built with a separate environment carrying the GTK/GUI library flags
envGtk = env.Clone()
envGtk.mergeConf(['gtkmm-2.4','gthread-2.0','cairomm-1.0','gdl','xv','xext','sm'])
envGtk.Append(LIBS=core)
guimodule = envGtk.LumieraPlugin('gtk_gui', srcSubtree('gui'), install=True)
# GUI bundle: plugin module plus icons and GTK resource (.rc) files
gui = ( guimodule
+ icons
+ [env.GuiResource(f) for f in env.Glob('gui/*.rc')]
)
# make the build artifacts available to sibling SConscripts
Export('lumiera core core_lib support_lib plugins gui')

View file

@ -31,7 +31,7 @@ liblumierabackend_la_SOURCES = \
$(liblumierabackend_la_srcdir)/filehandle.c \
$(liblumierabackend_la_srcdir)/filehandlecache.c \
$(liblumierabackend_la_srcdir)/fileheader.c \
$(liblumierabackend_la_srcdir)/mediaaccessfacade.cpp \
$(liblumierabackend_la_srcdir)/media-access-facade.cpp \
$(liblumierabackend_la_srcdir)/mmap.c \
$(liblumierabackend_la_srcdir)/mmapcache.c \
$(liblumierabackend_la_srcdir)/mmapings.c \

View file

@ -102,7 +102,8 @@ lumiera_backend_init (void)
lumiera_backend_pagesize = sysconf(_SC_PAGESIZE);
TODO ("add config options to override following defaults");
/////////////////////////////////////////////////////////////////////TICKET #838 add config options to override following defaults"
const char* filehandles = lumiera_tmpbuf_snprintf (SIZE_MAX,
"backend.file.max_handles = %d",
@ -168,7 +169,7 @@ lumiera_backend_mpool_purge (enum lumiera_resource_try itr, void* data, void* co
(void) context;
(void) data;
(void) itr;
TODO("mpool_purge ((MPool) data)");
///////////////////////////////////////////////////////////TICKET #837 actually implement mpool purging
return LUMIERA_RESOURCE_NONE;
}

View file

@ -1,8 +1,8 @@
/*
BuffHandle - Buffer handling support for the render engine
SchedulerFrontend - access point to the scheduler within the renderengine
Copyright (C) Lumiera.org
2008, Hermann Vosseler <Ichthyostega@web.de>
2011, Hermann Vosseler <Ichthyostega@web.de>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
@ -21,12 +21,14 @@
* *****************************************************/
#include "proc/engine/buffhandle.hpp"
#include "backend/engine/scheduler-frontend.hpp"
namespace backend{
namespace engine {
/** */
} // namespace engine
/** */
}} // namespace backend::engine

View file

@ -0,0 +1,55 @@
/*
SCHEDULER-FRONTEND.hpp - access point to the scheduler within the renderengine
Copyright (C) Lumiera.org
2011, Hermann Vosseler <Ichthyostega@web.de>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef BACKEND_ENGINE_SCHEDULER_FRONTEND_H
#define BACKEND_ENGINE_SCHEDULER_FRONTEND_H
//using std::list;
namespace backend{
namespace engine {
/**
 * Access point to the scheduler service within the render engine.
 * @todo this is planned to become the frontend
 *       to the render node network, which can be considered
 *       at the lower end of the middle layer; the actual
 *       render operations are mostly implemented by the backend
 *       ////////TODO WIP as of 12/2010
 */
class SchedulerFrontend
{
public:
///// TODO: find out about the public operations

// note: the play controller lives in the proc-layer,
// but is a subsystem separate of the session.

private:
};
}} // namespace backend::engine
#endif

View file

@ -83,8 +83,8 @@ namespace backend {
/**
* Descriptor holding the global informations,
* needed for further handling this media within Lumiera.
* Descriptor holding the global information record
* required for further handling this kind of media within Lumiera.
*/
struct MediaDesc
{

View file

@ -188,7 +188,7 @@ namespace backend {
/** @note by design there is no possibility to find out
* just based on the thread handle, if the thread is alive.
* just based on the thread handle if some thread is alive.
* We define our own accounting here based on the internals
* of the thread wrapper. This will break down, if you mix
* uses of the C++ wrapper with the raw C functions. */
@ -200,7 +200,7 @@ namespace backend {
/** Synchronisation barrier. In the function executing in this thread
* needs to be a corresponding Thread::sync() call. Blocking until
* needs to be a corresponding Thread::syncPoint() call. Blocking until
* both the caller and the thread have reached the barrier.
*/
void

View file

@ -76,8 +76,9 @@ lumiera_threadpool_destroy(void)
{
LUMIERA_CONDITION_SECTION (cond_sync, &threadpool.pool[i].sync)
{
TODO ("check threads deadlines, kill them when they are stalled");
TODO ("for threads without deadline use a timeout from config system, 500ms or so by default");
//////////////////////////////////////////TICKET #843 check threads deadlines, kill them when they are stalled"
//////////////////////////////////////////TICKET #843 for threads without deadline use a timeout from config system, 500ms or so by default
LUMIERA_CONDITION_WAIT(llist_is_empty (&threadpool.pool[i].working_list));
}
}
@ -136,7 +137,9 @@ lumiera_threadpool_acquire_thread (enum lumiera_thread_class kind,
llist_insert_head (&threadpool.pool[kind].working_list, &ret->node);
ENSURE (ret, "did not create a valid thread");
TODO ("no error handing, let the resourcecollector do it, no need when returning the thread");
//////////////////////////////////////////////////////////////////////TICKET #844 no error must be pending here
//////////////////////////////////////////////////////////////////////TICKET #844 let the resourcecollector do it, no need when returning the thread
LUMIERA_CONDITION_WAIT (!llist_is_empty (&threadpool.pool[kind].idle_list));
}
// use an existing thread, pick the first one

View file

@ -37,7 +37,7 @@
#include <errno.h>
/**
* @file
* @file threads.c
*
*/
@ -93,17 +93,17 @@ thread_loop (void* thread)
do {
lumiera_threadpool_release_thread(t);
LUMIERA_CONDITION_WAIT (t->state != LUMIERA_THREADSTATE_IDLE);
INFO (threads, "Thread awaken with state %s", lumiera_threadstate_names[t->state]);
TRACE (threads, "Thread awaken with state %s", lumiera_threadstate_names[t->state]);
// NULL function means: no work to do
INFO (threads, "function %p", t->function);
TRACE (threads, "function %p", t->function);
if (t->function)
t->function (t->arguments);
TRACE (threads, "function done");
if (t->kind & LUMIERA_THREAD_JOINABLE)
{
INFO (threads, "Thread zombified");
TRACE (threads, "Thread zombified");
/* move error state to data; the other thread will pick it up from there */
t->arguments = (void*)lumiera_error ();
t->state = LUMIERA_THREADSTATE_ZOMBIE;
@ -111,16 +111,16 @@ thread_loop (void* thread)
LUMIERA_CONDITION_SIGNAL;
LUMIERA_CONDITION_WAIT (t->state == LUMIERA_THREADSTATE_JOINED);
INFO (threads, "Thread joined");
TRACE (threads, "Thread joined");
}
} while (t->state != LUMIERA_THREADSTATE_SHUTDOWN);
// SHUTDOWN state
INFO (threads, "Thread Shutdown");
TRACE (threads, "Thread done.");
}
TODO ("no error must be pending here, else do app shutdown");
//////////////////////////////////////////////////////////////////////TICKET #844 no error must be pending here, else do app shutdown
return 0;
}
@ -322,7 +322,7 @@ lumiera_thread_sync (void)
self->state = LUMIERA_THREADSTATE_SYNCING;
lumiera_condition_signal (&self->signal, &NOBUG_FLAG(threads), NOBUG_CONTEXT);
TODO("error handing, maybe timed mutex (using the threads heartbeat timeout, shortly before timeout)");
//////////////////////////////////////////TICKET #843 error handing, maybe timed mutex (using the threads heartbeat timeout, shortly before timeout)
while (self->state == LUMIERA_THREADSTATE_SYNCING) {
lumiera_condition_wait (&self->signal, &NOBUG_FLAG(threads), self->rh, NOBUG_CONTEXT);

View file

@ -37,7 +37,7 @@
/**
* @file
* @file threads.h
*
*/

View file

@ -23,7 +23,7 @@
#include "lib/safeclib.h"
#include "lib/tmpbuf.h"
#include "common/config_lookup.h"
#include "common/config-lookup.h"
#include "common/config.h"
/* we only use one fatal error for now, when allocation in the config system fail, something else is pretty wrong */
@ -76,7 +76,8 @@ lumiera_config_lookup_insert (LumieraConfigLookup self, LumieraConfigitem item)
REQUIRE (item->key);
REQUIRE (item->key_size);
FIXME ("implement section prefix/suffix for the key");
////////////////////////////////////////TICKET #839 implement section prefix/suffix for the key"
const char* key = lumiera_tmpbuf_strcat3 (NULL, 0, item->key, item->key_size, NULL, 0);
LumieraConfigLookupentry entry = (LumieraConfigLookupentry)psplay_find (&self->tree, key, 100);
@ -101,7 +102,9 @@ lumiera_config_lookup_insert_default (LumieraConfigLookup self, LumieraConfigite
LumieraConfigLookupentry entry = (LumieraConfigLookupentry)psplay_find (&self->tree, key, 100);
if (!entry)
entry = (LumieraConfigLookupentry)psplay_insert (&self->tree, &lumiera_config_lookupentry_new (key)->node, 100);
TODO ("else check that no 'default' item already exists, that is, the tail element's parent points to the 'defaults' in config");
////////////////////////////////////////TICKET #839 check that no 'default' item already exists when inserting a default
////////////////////////////////////////TICKET #839 ...that is, the tail element's parent points to the 'defaults' in config
llist_insert_tail (&entry->configitems, &item->lookup);
return entry;

View file

@ -1,5 +1,5 @@
/*
config_lookup.h - Lookup functions for the config subsystem
CONFIG-LOOKUP.h - Lookup functions for the config subsystem
Copyright (C) Lumiera.org
2008, Christian Thaeter <ct@pipapo.org>

View file

@ -51,7 +51,7 @@ LUMIERA_ERROR_DECLARE (CONFIG_NO_ENTRY);
//TODO: Lumiera header includes//
#include "common/config_lookup.h"
#include "common/config-lookup.h"
#include "common/configitem.h"
//TODO: System includes//

View file

@ -43,7 +43,7 @@ lumiera_configentry_new (LumieraConfigitem tmp)
LumieraConfigentry self = lumiera_malloc (sizeof (*self));
lumiera_configitem_move ((LumieraConfigitem)self, tmp);
TODO ("initialize other stuff here (lookup, parent, ...)");
//////////////////////////////////////////////////////////////////TICKET #839 initialise other stuff here (lookup, parent, ...)
return (LumieraConfigitem)self;
}
@ -52,7 +52,7 @@ lumiera_configentry_new (LumieraConfigitem tmp)
LumieraConfigitem
lumiera_configentry_destroy (LumieraConfigitem self)
{
TODO ("cleanup other stuff here (lookup, parent, ...)");
//////////////////////////////////////////////////////////////////TICKET #839 cleanup other stuff here (lookup, parent, ...)
return self;
}

View file

@ -20,6 +20,13 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/** @file configitem.c
** create a configitem out of a single line.
**
*/
#include "include/logging.h"
#include "lib/llist.h"
#include "lib/safeclib.h"
@ -43,12 +50,6 @@ static LumieraConfigitem parse_configentry (LumieraConfigitem self, char* itr);
#include <stdint.h>
/**
* @file
* create a configitem out of a single line.
*
*/
LumieraConfigitem
lumiera_configitem_init (LumieraConfigitem self)
@ -176,11 +177,11 @@ lumiera_configitem_parse (LumieraConfigitem self, const char* line)
lumiera_free (self->line);
self->line = lumiera_strndup (line, SIZE_MAX);
FIXME ("MOCKUP START");
/////////////////////////TODO do a real startup here
char* itr = self->line;
/* skip leading whitespaces */
/* skip leading whitespace */
while (*itr && isspace (*itr))
itr++;
@ -215,7 +216,7 @@ parse_directive (LumieraConfigitem self, char* itr)
/* itr points now to @ */
self->key = itr;
/* check whether there are illegal whitespaces after @ */
/* check whether there are illegal whitespace after @ */
itr++;
if (*itr && !isspace(*itr))
{
@ -224,7 +225,7 @@ parse_directive (LumieraConfigitem self, char* itr)
itr += self->key_size;
/* we need a key with a length greather than zero and */
/* we need a key with a length greater than zero and */
/* either end of line or whitespace after key */
if ( self->key_size && ( !*itr || (*itr && isspace(*itr)) ))

View file

@ -33,7 +33,7 @@ typedef lumiera_configitem* LumieraConfigitem;
struct lumiera_configitem_vtable;
//TODO: Lumiera header includes//
#include "common/config_lookup.h"
#include "common/config-lookup.h"
//TODO: System includes//

View file

@ -68,7 +68,7 @@
namespace lumiera { ///////TODO: shouldn't that be namespace lib? or proc?
using std::string;
using lumiera::P;
using lib::P;
@ -185,7 +185,7 @@ namespace lumiera { ///////TODO: shouldn't that be namespace lib? or proc?
*/
template<typename TYPES>
class ConfigRules
: public typelist::InstantiateForEach<TYPES, QueryHandler>
: public lib::meta::InstantiateForEach<TYPES, QueryHandler>
{
protected:
ConfigRules () {}
@ -212,13 +212,13 @@ namespace lumiera { ///////TODO: shouldn't that be namespace lib? or proc?
* the list of all concrete types participating in the
* rule based config query system
*/
typedef lumiera::typelist::Types < mobject::session::Track
, asset::Pipe
, const asset::ProcPatt
, asset::Timeline
, asset::Sequence
> ::List
InterfaceTypes;
typedef lib::meta::Types < proc::mobject::session::Track
, proc::asset::Pipe
, const proc::asset::ProcPatt
, proc::asset::Timeline
, proc::asset::Sequence
> ::List
InterfaceTypes;
/**
* user-visible Interface to the ConfigRules subsystem.

View file

@ -156,7 +156,7 @@ namespace gui {
if (facade)
{
WARN (guifacade, "GUI subsystem terminates, but GuiFacade isn't properly closed. "
"Closing it forcedly; this indicates broken startup logic and should be fixed.");
"Closing it forcedly; this indicates broken startup logic and should be fixed.");
try { facade.reset (0); }
catch(...) { WARN_IF (lumiera_error_peek(), guifacade, "Ignoring error: %s", lumiera_error()); }
lumiera_error(); // clear any remaining error state...

View file

@ -184,7 +184,7 @@ namespace lumiera {
public:
/** Set up an InstanceHandle representing a plugin.
* Should be placed at the client side.
* Should be placed at the client side.
* @param iName unmangled name of the interface
* @param version major version
* @param minminor minimum acceptable minor version number
@ -203,7 +203,7 @@ namespace lumiera {
* registration and deregistration of interface(s).
* Should be placed at the service providing side.
* @param a (single) interface descriptor, which can be created with
* LUMIERA_INTERFACE_INSTANCE and referred to by LUMIERA_INTERFACE_REF
* LUMIERA_INTERFACE_INSTANCE and referred to by LUMIERA_INTERFACE_REF
*/
InstanceHandle (LumieraInterface descriptor)
: desc_(descriptor)
@ -222,9 +222,9 @@ namespace lumiera {
/** act as smart pointer providing access through the facade.
/** act as smart pointer providing access through the facade.
* @note we don't provide operator* */
FA * operator-> () const { return &(facadeLink_(*this)); }
FA * operator-> () const { return &(facadeLink_(*this)); }
/** directly access the instance via the CL interface */
I& get () const { ENSURE(instance_); return *instance_; }

View file

@ -0,0 +1,123 @@
/*
INTERFACE-FACADE-LINK - a switchable link from interface to service implementation
Copyright (C) Lumiera.org
2011, Hermann Vosseler <Ichthyostega@web.de>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/** @file interface-facade-link.hpp
** Opening, accessing and closing the service access through a facade interface.
** Client code is assumed to access an application level service through an facade
** interface, while the actual implementation object remains an opaque internal detail.
** Moreover, services may come up and shut down, so the implementation might change
** during the Lifecycle. The facility defined here in this header provides a basic
** implementation for this access mechanism, but without any adaptation, binding
** or plugin access layer. It works only under the assumption that both the
** interface and the actual service implementation coexist in the same
** executable and are written in C++, so any invocation of an
** interface method boils down to a language-level call.
**
** Usually, client code doesn't need to include this header. Clients are assumed
** to use the facade interface of the service in question. This facade interface
** contains a static member of type \c lumiera::facade::Accessor<I> (where I is
** the type of the facade interface). The Accessor baseclass is defined in
** interfaceproxy.hpp and typically included through the facade header.
**
** @note there is a way more elaborate implementation of the same mechanism
** for use with the Lumiera Interface/Plugin system.
**
** @see interfaceproxy.hpp description of the more general use case
** @see PlayService example for the simple use case
*/
#ifndef LUMIERA_FACADE_INTERFACE_FACADE_LINK_H
#define LUMIERA_FACADE_INTERFACE_FACADE_LINK_H
#include "lib/error.hpp"
#include "lib/test/test-helper.hpp"
#include "include/interfaceproxy.hpp"
#include "lib/symbol.hpp"
#include <boost/noncopyable.hpp>
namespace lumiera {
namespace facade {
using lib::Literal;
/************************************************************************
* simple access-frontend to the implementation of a service (C++ only).
* Usually, an instance of Accessor is placed as static member right into
* the facade interface used to access the service. This implementation
* of the access mechanism handles the simple case that both the facade
* and the service implementation are written in C++ and calls happen
* within the main executable as direct language calls, without an
* binding layer and without involving the Interface/Plugin system.
*
* Typically, the InterfaceFacadeLink becomes a member of the service
* implementation class and is directly tied into the constructor of
* the latter. Being a subclass of lumiera::facade::Accessor, it is
* allowed to "open" the facade access just by setting the static
* protected pointer Accessor::implProxy_
*/
template<class FA>
class InterfaceFacadeLink
: protected Accessor<FA>
, boost::noncopyable
{
Literal displayName_;
void
__checkLifecycle ()
{
if (Accessor<FA>::implProxy_)
throw error::State("Attempt to open an already opened Facade interface."
, error::LUMIERA_ERROR_LIFECYCLE);
}
public:
InterfaceFacadeLink(FA& serviceImpl, Literal interfaceName_for_Log=0)
: displayName_(lib::test::showType<FA>(interfaceName_for_Log))
{
__checkLifecycle();
Accessor<FA>::implProxy_ = &serviceImpl;
INFO (interface, "interface %s opened", displayName_.c());
}
~InterfaceFacadeLink()
{
INFO (interface, "closing interface %s...", displayName_.c());
Accessor<FA>::implProxy_ = 0;
}
};
/** storage for the static access pointer */
template<class FA>
FA* Accessor<FA>::implProxy_;
}} // namespace lumiera::facade
#endif

View file

@ -28,73 +28,87 @@
using util::cStr;
namespace lumiera {
namespace facade {
LUMIERA_ERROR_DEFINE (FACADE_LIFECYCLE, "facade interface currently not accessible");
template<class IHA>
class Holder;
template<class FA, class I>
class Holder<InstanceHandle<I,FA> >
: Accessor<FA>,
protected FA
{
protected:
typedef InstanceHandle<I,FA> IHandle;
typedef Holder<IHandle> THolder;
typedef Proxy<IHandle> TProxy;
typedef Accessor<FA> Access;
I& _i_;
Holder (IHandle const& iha)
: _i_(iha.get())
{ }
public:
static TProxy& open(IHandle const& iha)
{
static char buff[sizeof(TProxy)];
TProxy* p = new(buff) TProxy(iha);
Access::implProxy_ = p;
return *p;
}
static void close()
{
if (!Access::implProxy_) return;
TProxy* p = static_cast<TProxy*> (Access::implProxy_);
Access::implProxy_ = 0;
p->~TProxy();
}
};
template<class FA>
FA* Accessor<FA>::implProxy_;
template<class IHA>
void
openProxy (IHA const& iha)
{
Proxy<IHA>::open(iha);
}
template<class IHA>
void
closeProxy ()
{
Proxy<IHA>::close();
}
} // namespace facade
namespace lumiera{
namespace facade {
} // namespace lumiera
LUMIERA_ERROR_DEFINE (FACADE_LIFECYCLE, "facade is closed; service currently not accessible");
/**
* Implementation Base
* for building Facade Proxy implementations.
* Typically the purpose of such a proxy is to route
* any calls through the C-Bindings of the Lumiera Interface system.
* The actual storage for the concrete proxy object is embedded,
* inline within the #open() function. For access by the clients,
* a frontend-object of type \c Accessor<FA> may be placed into
* the facade interface; this accessor-frontend is basically
* a concealed static pointer to the proxy, and will be set,
* when the interface is opened. This opening and closing
* of the interface itself is controlled by the
* InstanceHandle, which in turn is typically
* created and managed within the context
* of the service implementation.
*/
template<class IHA>
class Holder;
template<class FA, class I>
class Holder<InstanceHandle<I,FA> >
: Accessor<FA>
, protected FA
{
protected:
typedef InstanceHandle<I,FA> IHandle;
typedef Holder<IHandle> THolder;
typedef Proxy<IHandle> TProxy;
typedef Accessor<FA> Access;
I& _i_;
Holder (IHandle const& iha)
: _i_(iha.get())
{ }
public:
static TProxy& open(IHandle const& iha)
{
static char buff[sizeof(TProxy)];
TProxy* p = new(buff) TProxy(iha);
Access::implProxy_ = p;
return *p;
}
static void close()
{
if (!Access::implProxy_) return;
TProxy* p = static_cast<TProxy*> (Access::implProxy_);
Access::implProxy_ = 0;
p->~TProxy();
}
};
template<class FA>
FA* Accessor<FA>::implProxy_;
template<class IHA>
void
openProxy (IHA const& iha)
{
Proxy<IHA>::open(iha);
}
template<class IHA>
void
closeProxy ()
{
Proxy<IHA>::close();
}
}} // namespace lumiera::facade

Some files were not shown because too many files have changed in this diff Show more