// 2024-07-23 03:52:44 +02:00
/*
  WEAVING-PATTERN-BUILDER.hpp  -  build an invocation pattern for media calculations
  Copyright (C)         Lumiera.org
    2024,               Hermann Vosseler <Ichthyostega@web.de>
This program is free software ; you can redistribute it and / or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation ; either version 2 of
the License , or ( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with this program ; if not , write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/** @file weaving-pattern-builder.hpp
Invocation: work out solution for a precisely fitting allocation
Conduct in-depth analysis to handle a secondary, implementation-related
(and frankly quite challenging) concern regarding the placement of node
and port connectivity data in memory. The intention is for the low-level
model to use a custom data structure based on `lib::Several`, allowing for
flexible and compact arrangement of the connectivity descriptors within
tiled memory blocks, which can then later be discarded in bulk, whenever
a segment of the render graph is superseded. Yet since the generated
descriptors are heterogeneous and, due to virtual functions, can not be
trivially copied, the corresponding placement invocations on the
data builder API must not be mixed, but rather given in ordered strikes
and preceded by a dimensioning call to pre-reserve a bulk of storage
However, doing so directly would jeopardise the open and flexible nature
of the node builder API, thereby creating a dangerous coupling between
the implementation levels of the node graph and of prospective library
wrapper plug-ins in charge of controlling details of the graph layout.
The solution devised here entails a functional helper data structure
created temporarily within the builder API stack frames; the detailed
and local type information provided from within the library plug-in
can thereby be embedded into opaque builder functors, allowing to
delay the actual data generation up until the final builder step,
at which point the complete number and size requirements of
connectivity data is known and can be used for dimensioning.
2024-10-20 21:51:34 +02:00
* * Construction kit to establish an invocation scheme for media calculations .
2024-07-23 03:52:44 +02:00
* *
* * @ see turnout . hpp
* * @ see node - builder . hpp
* * @ see NodeLinkage_test
* *
* * @ todo WIP - WIP - WIP as of 7 / 2024 prototyping how to build and invoke render nodes /////////////////////////TICKET #1367
* *
*/
#ifndef STEAM_ENGINE_WEAVING_PATTERN_BUILDER_H
#define STEAM_ENGINE_WEAVING_PATTERN_BUILDER_H


//#include "steam/common.hpp"
//#include "steam/engine/channel-descriptor.hpp"
//#include "vault/gear/job.h"
#include "lib/several-builder.hpp"
#include "steam/engine/turnout.hpp"
#include "steam/engine/engine-ctx.hpp"
#include "steam/engine/buffer-provider.hpp"
#include "steam/engine/buffhandle-attach.hpp"  /////////////////OOO why do we need to include this? we need the accessAs<TY>() template function
#include "lib/test/test-helper.hpp"
//#include "lib/util-foreach.hpp"
//#include "lib/iter-adapter.hpp"
//#include "lib/meta/function.hpp"
//#include "lib/itertools.hpp"
#include "lib/util.hpp"

//#include <utility>
#include <functional>
//#include <array>
#include <vector>
namespace steam {
namespace engine {
using std : : forward ;
using lib : : Several ;
2024-10-12 04:17:39 +02:00
using lib : : Depend ;
Invocation: work out solution for a precisely fitting allocation
Conduct in-depth analysis to handle a secondary, implementation-related
(and frankly quite challenging) concern regarding the placement of node
and port connectivity data in memory. The intention is for the low-level
model to use a custom data structure based on `lib::Several`, allowing for
flexible and compact arrangement of the connectivity descriptors within
tiled memory blocks, which can then later be discarded in bulk, whenever
a segment of the render graph is superseded. Yet since the generated
descriptors are heterogeneous and, due to virtual functions, can not be
trivially copied, the corresponding placement invocations on the
data builder API must not be mixed, but rather given in ordered strikes
and preceded by a dimensioning call to pre-reserve a bulk of storage
However, doing so directly would jeopardise the open and flexible nature
of the node builder API, thereby creating a dangerous coupling between
the implementation levels of the node graph and of prospective library
wrapper plug-ins in charge of controlling details of the graph layout.
The solution devised here entails a functional helper data structure
created temporarily within the builder API stack frames; the detailed
and local type information provided from within the library plug-in
can thereby be embedded into opaque builder functors, allowing to
delay the actual data generation up until the final builder step,
at which point the complete number and size requirements of
connectivity data is known and can be used for dimensioning.
2024-10-20 21:51:34 +02:00
using util : : max ;
2024-07-23 03:52:44 +02:00
2024-10-11 03:33:05 +02:00
namespace { // Introspection helpers....
using lib : : meta : : _Fun ;
using lib : : meta : : is_BinaryFun ;
using std : : remove_reference_t ;
/** Helper to pick up the parameter dimensions from the processing function
* @ remark this is the rather simple yet common case that media processing
* is done by a function , which takes an array of input and output
* buffer pointers with a common type ; this simple case is used
* 7 / 2024 for prototyping and validate the design .
* @ tparam FUN a _function - like_ object , expected to accept two arguments ,
* which both are arrays of buffer pointers ( input , output ) .
*/
template < class FUN >
struct _ProcFun
{
static_assert ( _Fun < FUN > ( ) , " something funktion-like required " ) ;
static_assert ( is_BinaryFun < FUN > ( ) , " function with two arguments expected " ) ;
using ArgI = remove_reference_t < typename _Fun < FUN > : : Args : : List : : Head > ;
using ArgO = remove_reference_t < typename _Fun < FUN > : : Args : : List : : Tail : : Head > ;
template < class ARG >
struct MatchBuffArray
{
static_assert ( not sizeof ( ARG ) , " processing function expected to take array-of-buffer-pointers " ) ;
} ;
template < class BUF , size_t N >
struct MatchBuffArray < std : : array < BUF * , N > >
{
using Buff = BUF ;
enum { SIZ = N } ;
} ;
using BuffI = typename MatchBuffArray < ArgI > : : Buff ;
using BuffO = typename MatchBuffArray < ArgO > : : Buff ;
enum { FAN_I = MatchBuffArray < ArgI > : : SIZ
, FAN_O = MatchBuffArray < ArgO > : : SIZ
} ;
} ;
/**
* Pick a suitable size for the FeedManifold to accommodate the given function .
* @ remark only returning one of a small selection of sizes , to avoid
* excessive generation of template instances .
* @ todo 10 / 24 this is a premature safety guard ;
* need to assess if there is actually a problem
* ( chances are that the optimiser absorbs most of the combinatoric complexity ,
* or that , to the contrary , other proliferation mechanisms cause more harm )
*/
template < class FUN >
inline constexpr uint
manifoldSiz ( )
{
using _F = _ProcFun < FUN > ;
2024-10-13 03:49:01 +02:00
auto constexpr bound = std : : max ( _F : : FAN_I , _F : : FAN_O ) ;
2024-10-11 03:33:05 +02:00
static_assert ( bound < = 10 ,
" Limitation of template instances exceeded " ) ;
return bound < 3 ? bound
: bound < 6 ? 5
: 10 ;
}
} //(End)Introspection helpers.
/**
* Adapter to handle a simple yet common setup for media processing
* - somehow we can invoke processing as a simple function
* - this function takes two arrays : the input - and output buffers
* @ remark this setup is useful for testing , and as documentation example ;
* actually the FeedManifold is mixed in as baseclass , and the
* buffer pointers are retrieved from the BuffHandles .
* @ tparam MAN a FeedManifold , providing arrays of BuffHandles
* @ tparam FUN the processing function
*/
template < class MAN , class FUN >
struct SimpleFunctionInvocationAdapter
: MAN
{
using BuffI = typename _ProcFun < FUN > : : BuffI ;
using BuffO = typename _ProcFun < FUN > : : BuffO ;
2024-10-14 04:07:47 +02:00
enum { N = MAN : : STORAGE_SIZ
2024-10-11 03:33:05 +02:00
, FAN_I = _ProcFun < FUN > : : FAN_I
, FAN_O = _ProcFun < FUN > : : FAN_O
} ;
static_assert ( FAN_I < = N and FAN_O < = N ) ;
using ArrayI = std : : array < BuffI * , FAN_I > ;
using ArrayO = std : : array < BuffO * , FAN_O > ;
FUN process ;
ArrayI inParam ;
ArrayO outParam ;
template < typename . . . INIT >
SimpleFunctionInvocationAdapter ( INIT & & . . . funSetup )
2024-10-14 04:07:47 +02:00
: process { forward < INIT > ( funSetup ) . . . }
2024-10-11 03:33:05 +02:00
{ }
void
connect ( uint fanIn , uint fanOut )
{
REQUIRE ( fanIn > = FAN_I and fanOut > = FAN_O ) ;
for ( uint i = 0 ; i < FAN_I ; + + i )
inParam [ i ] = & MAN : : inBuff [ i ] . template accessAs < BuffI > ( ) ;
for ( uint i = 0 ; i < FAN_O ; + + i )
outParam [ i ] = & MAN : : outBuff [ i ] . template accessAs < BuffO > ( ) ;
}
void
invoke ( )
{
process ( inParam , outParam ) ;
}
} ;
/**
* Example base configuration for a Weaving - Pattern chain :
* - use a simple processing function
* - pass an input / output buffer array to this function
* - map all » slots « directly without any re - ordering
* - use a sufficiently sized FeedManifold as storage scheme
*/
template < uint N , class FUN >
struct Conf_DirectFunctionInvocation
: util : : MoveOnly
{
using Manifold = FeedManifold < N > ;
using Feed = SimpleFunctionInvocationAdapter < Manifold , FUN > ;
enum { MAX_SIZ = N } ;
std : : function < Feed ( ) > buildFeed ;
2024-10-14 04:07:47 +02:00
// template<typename INIT>
Conf_DirectFunctionInvocation ( FUN fun )
: buildFeed { [ = ] //procFun = forward<INIT> (fun)]
{
// using URGS = decltype(procFun);
// lib::test::TypeDebugger<URGS> murks;
return Feed { fun } ;
} }
{ }
2024-10-11 03:33:05 +02:00
} ;
2024-07-23 03:52:44 +02:00
template < class POL , class I , class E = I >
using DataBuilder = lib : : SeveralBuilder < I , E , POL : : template Policy > ;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////TICKET #1367 : Prototyping: how to assemble a Turnout
Invocation: work out solution for a precisely fitting allocation
Conduct in-depth analysis to handle a secondary, implementation-related
(and frankly quite challenging) concern regarding the placement of node
and port connectivity data in memory. The intention is for the low-level
model to use a custom data structure based on `lib::Several`, allowing for
flexible and compact arrangement of the connectivity descriptors within
tiled memory blocks, which can then later be discarded in bulk, whenever
a segment of the render graph is superseded. Yet since the generated
descriptors are heterogeneous and, due to virtual functions, can not be
trivially copied, the corresponding placement invocations on the
data builder API must not be mixed, but rather given in ordered strikes
and preceded by a dimensioning call to pre-reserve a bulk of storage
However, doing so directly would jeopardise the open and flexible nature
of the node builder API, thereby creating a dangerous coupling between
the implementation levels of the node graph and of prospective library
wrapper plug-ins in charge of controlling details of the graph layout.
The solution devised here entails a functional helper data structure
created temporarily within the builder API stack frames; the detailed
and local type information provided from within the library plug-in
can thereby be embedded into opaque builder functors, allowing to
delay the actual data generation up until the final builder step,
at which point the complete number and size requirements of
connectivity data is known and can be used for dimensioning.
2024-10-20 21:51:34 +02:00
/**
* Recursive functional data structure to collect weaving pattern data
* and finally to emplace a Turnout instance into the data storage
* for each port , as specified by preceding builder - API invocations .
* @ tparam PAR recursive layering for preceding entries
* @ tparam BUILD a builder functor to emplace one Turnout instance ,
* opaquely embedding all specific data typing .
* @ tparam siz storage in bytes to hold data produced by \ a BUILD
*/
template < class PAR , class BUILD , uint siz >
struct PatternData
: PAR
{
BUILD buildEntry ;
template < class DAB >
void
collectEntries ( DAB & dataBuilder , uint cntElm = 0 , uint maxSiz = 0 )
{
PAR : : collectEntries ( dataBuilder , cntElm + 1 , max ( siz , maxSiz ) ) ;
buildEntry ( dataBuilder ) ;
}
2024-10-21 03:40:19 +02:00
PatternData ( PAR & & predecessor , BUILD & & entryBuilder )
: PAR { move ( predecessor ) }
, buildEntry { move ( entryBuilder ) }
{ }
Invocation: work out solution for a precisely fitting allocation
Conduct in-depth analysis to handle a secondary, implementation-related
(and frankly quite challenging) concern regarding the placement of node
and port connectivity data in memory. The intention is for the low-level
model to use a custom data structure based on `lib::Several`, allowing for
flexible and compact arrangement of the connectivity descriptors within
tiled memory blocks, which can then later be discarded in bulk, whenever
a segment of the render graph is superseded. Yet since the generated
descriptors are heterogeneous and, due to virtual functions, can not be
trivially copied, the corresponding placement invocations on the
data builder API must not be mixed, but rather given in ordered strikes
and preceded by a dimensioning call to pre-reserve a bulk of storage
However, doing so directly would jeopardise the open and flexible nature
of the node builder API, thereby creating a dangerous coupling between
the implementation levels of the node graph and of prospective library
wrapper plug-ins in charge of controlling details of the graph layout.
The solution devised here entails a functional helper data structure
created temporarily within the builder API stack frames; the detailed
and local type information provided from within the library plug-in
can thereby be embedded into opaque builder functors, allowing to
delay the actual data generation up until the final builder step,
at which point the complete number and size requirements of
connectivity data is known and can be used for dimensioning.
2024-10-20 21:51:34 +02:00
} ;
/**
* Data recursion end : prime the port data storage
* by reserving appropriate storage to hold all known Turnout elements .
*/
2024-10-21 03:40:19 +02:00
struct PatternDataAnchor
Invocation: work out solution for a precisely fitting allocation
Conduct in-depth analysis to handle a secondary, implementation-related
(and frankly quite challenging) concern regarding the placement of node
and port connectivity data in memory. The intention is for the low-level
model to use a custom data structure based on `lib::Several`, allowing for
flexible and compact arrangement of the connectivity descriptors within
tiled memory blocks, which can then later be discarded in bulk, whenever
a segment of the render graph is superseded. Yet since the generated
descriptors are heterogeneous and, due to virtual functions, can not be
trivially copied, the corresponding placement invocations on the
data builder API must not be mixed, but rather given in ordered strikes
and preceded by a dimensioning call to pre-reserve a bulk of storage
However, doing so directly would jeopardise the open and flexible nature
of the node builder API, thereby creating a dangerous coupling between
the implementation levels of the node graph and of prospective library
wrapper plug-ins in charge of controlling details of the graph layout.
The solution devised here entails a functional helper data structure
created temporarily within the builder API stack frames; the detailed
and local type information provided from within the library plug-in
can thereby be embedded into opaque builder functors, allowing to
delay the actual data generation up until the final builder step,
at which point the complete number and size requirements of
connectivity data is known and can be used for dimensioning.
2024-10-20 21:51:34 +02:00
{
template < class DAB >
void
collectEntries ( DAB & dataBuilder , uint cntElm , uint maxSiz )
{
dataBuilder . reserve ( cntElm , maxSiz ) ;
}
} ;
2024-10-21 03:40:19 +02:00
2024-07-23 03:52:44 +02:00
template < uint N , class FUN >
using SimpleDirectInvoke = SimpleWeavingPattern < Conf_DirectFunctionInvocation < N , FUN > > ;
template < class POL , uint N , class FUN >
2024-10-11 02:45:51 +02:00
struct WeavingBuilder
2024-07-23 03:52:44 +02:00
: util : : MoveOnly
{
DataBuilder < POL , PortRef > leadPort ;
DataBuilder < POL , BuffDescr > outTypes ;
2024-07-24 20:29:37 +02:00
using TypeMarker = std : : function < BuffDescr ( BufferProvider & ) > ;
using ProviderRef = std : : reference_wrapper < BufferProvider > ;
std : : vector < TypeMarker > buffTypes ;
std : : vector < ProviderRef > providers ;
2024-07-30 23:44:55 +02:00
uint resultSlot { 0 } ;
2024-10-12 04:17:39 +02:00
Depend < EngineCtx > ctx ;
2024-07-24 20:29:37 +02:00
2024-10-11 03:33:05 +02:00
FUN fun_ ;
WeavingBuilder ( FUN & & init )
: fun_ { move ( init ) }
{ }
2024-10-11 02:45:51 +02:00
WeavingBuilder
2024-07-23 03:52:44 +02:00
attachToLeadPort ( ProcNode & lead , uint portNr )
{
PortRef portRef ; /////////////////////////////////////OOO TODO need Accessor on ProcNode!!!!!
2024-10-14 04:07:47 +02:00
leadPort . append ( portRef ) ;
2024-07-23 03:52:44 +02:00
ENSURE ( leadPort . size ( ) < N ) ;
return move ( * this ) ;
}
2024-07-24 20:29:37 +02:00
template < class BU >
2024-10-11 02:45:51 +02:00
WeavingBuilder
2024-07-30 23:44:55 +02:00
appendBufferTypes ( uint cnt )
2024-07-23 03:52:44 +02:00
{
2024-07-24 20:29:37 +02:00
while ( cnt - - )
buffTypes . emplace_back ( [ ] ( BufferProvider & provider )
{ return provider . getDescriptor < BU > ( ) ; } ) ;
ENSURE ( buffTypes . size ( ) < N ) ;
2024-07-23 03:52:44 +02:00
return move ( * this ) ;
}
2024-10-14 04:07:47 +02:00
WeavingBuilder
fillRemainingBufferTypes ( )
{
using FunSpec = _ProcFun < FUN > ;
auto constexpr FAN_O = FunSpec : : FAN_O ;
using BuffO = typename FunSpec : : BuffO ;
uint cnt = FAN_O - buffTypes . size ( ) ;
return appendBufferTypes < BuffO > ( cnt ) ;
}
2024-10-11 02:45:51 +02:00
WeavingBuilder
2024-07-30 23:44:55 +02:00
selectResultSlot ( uint idx )
2024-07-24 20:29:37 +02:00
{
2024-07-30 23:44:55 +02:00
this - > resultSlot = idx ;
return move ( * this ) ;
}
2024-07-24 20:29:37 +02:00
2024-07-23 03:52:44 +02:00
auto
build ( )
{
2024-07-24 20:29:37 +02:00
maybeFillDefaultProviders ( buffTypes . size ( ) ) ;
2024-10-14 04:07:47 +02:00
REQUIRE ( providers . size ( ) = = buffTypes . size ( ) ) ;
2024-07-24 20:29:37 +02:00
uint i = 0 ;
2024-07-30 23:44:55 +02:00
for ( auto & typeConstructor : buffTypes )
2024-10-14 04:07:47 +02:00
outTypes . append (
typeConstructor ( providers [ i + + ] ) ) ;
2024-07-24 20:29:37 +02:00
ENSURE ( leadPort . size ( ) < N ) ;
ENSURE ( outTypes . size ( ) < N ) ;
2024-07-23 03:52:44 +02:00
using Product = Turnout < SimpleDirectInvoke < N , FUN > > ;
///////////////////////////////OOO need a way to prepare SeveralBuilder-instances for leadPort and outDescr --> see NodeBuilder
2024-10-11 03:33:05 +02:00
return Product { leadPort . build ( ) , outTypes . build ( ) , move ( fun_ ) } ;
2024-07-23 03:52:44 +02:00
}
2024-07-24 20:29:37 +02:00
private :
void
maybeFillDefaultProviders ( size_t maxSlots )
{
for ( uint i = providers . size ( ) ; i < maxSlots ; + + i )
2024-10-12 04:17:39 +02:00
providers . emplace_back ( ctx ( ) . mem ) ;
2024-07-24 20:29:37 +02:00
}
2024-07-23 03:52:44 +02:00
} ;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////TICKET #1367 : (End)Prototyping: how to assemble a Turnout
} } // namespace steam::engine
# endif /*STEAM_ENGINE_WEAVING_PATTERN_BUILDER_H*/