Refactoring IV: move the (still problematic) ClassLock out of the Sync compound

(no semantic change, but better notation)
This commit is contained in:
Fischlurch 2008-12-26 03:47:12 +01:00
parent 54e88e6914
commit eaedab90ea
7 changed files with 47 additions and 40 deletions

View file

@@ -81,7 +81,7 @@ namespace lib {
void
AllocationCluster::MemoryManager::reset (TypeInfo info)
{
Sync<>::ClassLock<MemoryManager> guard();
ClassLock<MemoryManager> guard();
if (0 < mem_.size()) purge();
type_ = info;
@@ -96,7 +96,7 @@ namespace lib {
void
AllocationCluster::MemoryManager::purge()
{
Sync<>::ClassLock<MemoryManager> guard();
ClassLock<MemoryManager> guard();
REQUIRE (type_.killIt, "we need a deleter function");
REQUIRE (0 < type_.allocSize, "allocation size unknown");
@@ -120,7 +120,7 @@ namespace lib {
inline void*
AllocationCluster::MemoryManager::allocate()
{
Sync<>::ClassLock<MemoryManager> guard();
ClassLock<MemoryManager> guard();
REQUIRE (0 < type_.allocSize);
REQUIRE (top_ <= mem_.size());
@@ -140,7 +140,7 @@ namespace lib {
inline void
AllocationCluster::MemoryManager::commit (void* pendingAlloc)
{
Sync<>::ClassLock<MemoryManager> guard();
ClassLock<MemoryManager> guard();
REQUIRE (pendingAlloc);
ASSERT (top_ < mem_.size());
@@ -175,7 +175,7 @@ namespace lib {
{
try
{
Sync<>::ClassLock<AllocationCluster> guard();
ClassLock<AllocationCluster> guard();
TRACE (memory, "shutting down AllocationCluster");
for (size_t i = typeHandlers_.size(); 0 < i; --i)
@@ -214,7 +214,7 @@ namespace lib {
ASSERT (0 < slot);
{
Sync<>::ClassLock<AllocationCluster> guard(); /////TODO: decide tradeoff: lock just the instance, or lock the AllocationCluster class?
ClassLock<AllocationCluster> guard(); /////TODO: decide tradeoff: lock just the instance, or lock the AllocationCluster class?
if (slot > typeHandlers_.size())
typeHandlers_.resize(slot);

View file

@@ -222,7 +222,7 @@ namespace lib {
static TypeInfo
setup()
{
Sync<>::ClassLock<AllocationCluster> guard();
ClassLock<AllocationCluster> guard();
if (!id_)
id_= ++maxTypeIDs;

View file

@@ -221,7 +221,7 @@ namespace lib {
return created_? &_ThisType::created_ : 0;
}
bool operator! () const { return !created_; }
bool operator! () const { return !created_; }
friend void

View file

@@ -40,10 +40,10 @@ This code is heavily inspired by
#include <vector>
namespace lumiera
{
namespace singleton
{
namespace lumiera {
namespace singleton {
/* === several Policies usable in conjunction with lumiera::Singleton === */
/**
@@ -139,7 +139,7 @@ namespace lumiera
struct Multithreaded
{
typedef volatile S VolatileType;
typedef lib::Sync<>::ClassLock<S> Lock;
typedef lib::ClassLock<S> Lock;
};

View file

@@ -284,7 +284,7 @@ namespace lib {
* @todo actually implement this facility using the Lumiera backend.
*/
template<class CONF = NonrecursiveLock_NoWait>
struct Sync
class Sync
{
typedef sync::Monitor<CONF> Monitor;
Monitor objectMonitor_;
@@ -296,22 +296,8 @@ namespace lib {
return forThis->objectMonitor_;
}
template<class X>
static Monitor&
getMonitor()
{
//TODO: a rather obscure race condition is hidden here:
//TODO: depending on the build order, the dtor of this static variable may be called while another thread is still holding a ClassLock.
//TODO: A possible solution would be to use a shared_ptr to the Monitor in case of a ClassLock and to protect access with another Mutex.
//TODO: But I am really questioning whether we can't ignore this case and state: "don't hold a ClassLock when your code may still be running in the shutdown phase!"
//TODO: probably the best idea is to detect this situation in DEBUG or ALPHA mode
static scoped_ptr<Monitor> classMonitor_ (0);
if (!classMonitor_) classMonitor_.reset (new Monitor ());
return *classMonitor_;
}
public:
class Lock
{
Monitor& mon_;
@@ -332,14 +318,35 @@ namespace lib {
};
template<class X>
struct ClassLock : Lock
{
ClassLock() : Lock (getMonitor<X>()) {}
};
};
/**
 * Scoped guard taking a class-level (per-type) lock, in contrast to the
 * instance-level Sync<CONF>::Lock: every ClassLock<X,CONF> shares one
 * static Monitor for the type X, so all such guards serialise against
 * each other.  Presumably the base Lock acquires the monitor in its ctor
 * and releases it in its dtor (RAII) — acquisition code not visible here.
 */
template<class X, class CONF = NonrecursiveLock_NoWait>
class ClassLock
  : public Sync<CONF>::Lock
  {
    typedef typename Sync<CONF>::Lock Lock;
    typedef typename Sync<CONF>::Monitor Monitor;

    /** lazily create and hand out the single Monitor shared by all
     *  ClassLock<X,CONF> guards for the type X.
     *  NOTE(review): the check-then-reset below is itself unsynchronised;
     *  confirm the first ClassLock<X> is always taken before any
     *  concurrent use (pre-C++11, no thread-safe static init). */
    static Monitor&
    getMonitor()
      {
        //TODO: a rather obscure race condition is hidden here:
        //TODO: depending on the build order, the dtor of this static variable may be called while another thread is still holding a ClassLock.
        //TODO: A possible solution would be to use a shared_ptr to the Monitor in case of a ClassLock and to protect access with another Mutex.
        //TODO: But I am really questioning whether we can't ignore this case and state: "don't hold a ClassLock when your code may still be running in the shutdown phase!"
        //TODO: probably the best idea is to detect this situation in DEBUG or ALPHA mode
        static scoped_ptr<Monitor> classMonitor_ (0);
        if (!classMonitor_) classMonitor_.reset (new Monitor ());
        return *classMonitor_;
      }

  public:
    ClassLock() : Lock (getMonitor()) {}
  };
} // namespace lumiera

View file

@@ -36,7 +36,7 @@
namespace lumiera {
namespace visitor {
using lib::Sync;
using lib::ClassLock;
template<class TOOL> class Tag;
@@ -62,7 +62,7 @@ namespace lumiera {
static void
generateID (size_t& id)
{
Sync<>::ClassLock<Tag> guard();
ClassLock<Tag> guard();
if (!id)
id = ++lastRegisteredID;
}
@@ -138,7 +138,7 @@ namespace lumiera {
void
accomodate (size_t index)
{
Sync<>::ClassLock<Dispatcher> guard();
ClassLock<Dispatcher> guard();
if (index > table_.size())
table_.resize (index); // performance bottleneck?? TODO: measure the real impact!
}

View file

@@ -58,7 +58,7 @@ namespace mobject {
using lumiera::P;
using lumiera::Query;
using lib::Sync;
using lib::ClassLock;
using std::tr1::weak_ptr;
using std::string;
@@ -162,7 +162,7 @@ namespace mobject {
static void
createSlot (Table& table)
{
Sync<>::ClassLock<TableEntry> guard();
ClassLock<TableEntry> guard();
if (!index)
index = ++maxSlots;
if (index > table.size())