// lumiera/src/lib/allocation-cluster.cpp
/*
  AllocationCluster  -  allocating and owning a pile of objects

  Copyright (C)         Lumiera.org
    2008,               Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

* *****************************************************/
/** @file allocation-cluster.cpp
 ** Implementation of memory management for small objects, which are
 ** bulk-allocated per family and destroyed together on cluster shutdown.
 */

#include "lib/allocation-cluster.hpp"
#include "lib/error.hpp"
#include "lib/util.hpp"
#include "lib/sync.hpp"

using util::isnil;

namespace lib {
/**
* "Low-level" Memory manager for allocating small objects of a fixed size.
* The usage pattern is definite: Objects will be allocated in the course of
* a build process and then live until all Objects will be purged in one sway.
* Allocations will be requested one by one and immediately committed after
* successful ctor call of the object being allocated. Allocations and commits
* can be assumed to come in pairs, thus if an allocation immediately follows
* another one (without commit), the previous allocation can be considered
* a failure and can be dropped silently. After an allocation succeeds
* (i.e. was committed), the MemoryManager is in charge for the lifecycle
* of the object within the allocated space and has to guarantee calling
* it's dtor, either on shutdown or on explicit #purge() -- the type info
* structure handed in on initialisation provides a means for invoking
* the dtor without actually knowing the object's type.
*
* @todo this is a preliminary or pseudo-implementation based on
2008-10-30 04:34:05 +01:00
* a vector of raw pointers, i.e. actually the objects are heap
* allocated. What actually should happen is for the MemoryManager to
* allocate raw memory chunk wise, sub partition it and place the objects
* into this private memory buffer. Further, possibly we could maintain
* a pool of raw memory chunks used by all MemoryManager instances. I am
* skipping those details for now (10/2008) because they should be based
* on real-world measurements, not guessing.
*/
2008-10-20 03:13:02 +02:00
class AllocationCluster::MemoryManager
: public Sync<RecursiveLock_NoWait>
2008-10-20 03:13:02 +02:00
{
typedef std::vector<char*> MemTable;
TypeInfo type_;
MemTable mem_;
2008-10-30 04:03:14 +01:00
size_t top_; ///< index of the next slot available for allocation
public:
MemoryManager(TypeInfo info) : top_(0) { reset(info); }
~MemoryManager() { purge(); }
size_t size() const;
void purge();
void reset(TypeInfo info);
2008-10-20 03:13:02 +02:00
void* allocate();
void commit (void* pendingAlloc);
2008-10-30 04:03:14 +01:00
private:
void clearStorage();
2008-10-20 03:13:02 +02:00
};
2008-10-30 04:34:05 +01:00
/** the top_ index always points at the next slot
* not yet holding a finished, committed allocation.
* Index is zero based, thus top_ == count of living objects
*/
size_t
AllocationCluster::MemoryManager::size() const
{
return top_;
}
void
AllocationCluster::MemoryManager::reset (TypeInfo info)
{
Lock sync(this);
if (0 < mem_.size()) purge();
type_ = info;
ENSURE (0==top_);
ENSURE (isnil (mem_));
ENSURE (0 < type_.allocSize);
ENSURE (type_.killIt);
}
void
AllocationCluster::MemoryManager::purge()
{
Lock sync(this);
2008-10-30 04:34:05 +01:00
REQUIRE (type_.killIt, "we need a deleter function");
REQUIRE (0 < type_.allocSize, "allocation size unknown");
REQUIRE (top_ == mem_.size() || (top_+1) == mem_.size());
while (top_)
2008-10-30 04:03:14 +01:00
type_.killIt (mem_[--top_]);
clearStorage();
}// note: unnecessary to kill pending allocations
2008-10-30 04:03:14 +01:00
inline void
AllocationCluster::MemoryManager::clearStorage()
{
for (size_t i=mem_.size(); 0 < i; )
delete[] mem_[--i];
mem_.clear();
}
2008-10-30 04:34:05 +01:00
2008-10-30 04:03:14 +01:00
inline void*
AllocationCluster::MemoryManager::allocate()
{
Lock sync(this);
REQUIRE (0 < type_.allocSize);
REQUIRE (top_ <= mem_.size());
2008-10-30 04:34:05 +01:00
if (top_==mem_.size())
mem_.resize(top_+1);
2008-10-30 04:34:05 +01:00
if (!mem_[top_]) // re-use existing allocation, if any
2008-10-30 04:03:14 +01:00
mem_[top_] = new char[type_.allocSize];
ENSURE (top_ < mem_.size());
2008-10-30 04:03:14 +01:00
ENSURE (mem_[top_]);
return mem_[top_];
}
inline void
AllocationCluster::MemoryManager::commit (void* pendingAlloc)
{
Lock sync(this);
2008-10-30 04:34:05 +01:00
REQUIRE (pendingAlloc);
ASSERT (top_ < mem_.size());
ASSERT (pendingAlloc == mem_[top_], "allocation protocol violated");
++top_;
ENSURE (top_ == mem_.size());
}
/** storage for static bookkeeping of type allocation slots */
size_t AllocationCluster::maxTypeIDs;
2008-10-30 04:34:05 +01:00
/** creating a new AllocationCluster prepares a table capable
 * of holding the individual object families to come. Each of those
 * is managed by a separate instance of the low-level memory manager.
 * @note the managers themselves are created lazily, on the first
 *       allocation for the respective type (see #initiateAlloc)
 */
AllocationCluster::AllocationCluster()
{
TRACE (memory, "new AllocationCluster");
}
/** On shutdown of the AllocationCluster we need to assure a certain
* destruction order is maintained by explicitly invoking a cleanup
* operation on each of the low-level memory manager objects.
*/
AllocationCluster::~AllocationCluster() throw()
{
try
{ // avoiding a per-instance lock for now.
ClassLock<AllocationCluster> guard; // (see note in the class description)
TRACE (memory, "shutting down AllocationCluster");
for (size_t i = typeHandlers_.size(); 0 < i; --i)
2008-10-30 04:03:14 +01:00
if (handler(i))
handler(i)->purge();
typeHandlers_.clear();
}
catch (lumiera::Error & ex)
{
2009-01-24 03:15:02 +01:00
WARN (progress, "Exception while closing AllocationCluster: %s", ex.what());
}
catch (...)
{
2009-01-24 03:15:02 +01:00
ALERT (progress, "Unexpected fatal Exception while closing AllocationCluster.");
lumiera::error::lumiera_unexpectedException(); // terminate
}
}
void*
AllocationCluster::initiateAlloc (size_t& slot)
{
if (!slot || slot > typeHandlers_.size() || !handler(slot) )
return 0; // Memory manager not yet initialised
else
return handler(slot)->allocate();
}
void*
AllocationCluster::initiateAlloc (TypeInfo type, size_t& slot)
{
ASSERT (0 < slot);
{ // avoiding a per-instance lock for now.
ClassLock<AllocationCluster> guard; // (see note in the class description)
if (slot > typeHandlers_.size())
typeHandlers_.resize(slot);
if (!handler(slot))
handler(slot).reset (new MemoryManager (type));
}
ASSERT (handler(slot));
2008-10-30 04:34:05 +01:00
return initiateAlloc(slot);
}
void
AllocationCluster::finishAlloc (size_t& slot, void* allocatedObj)
{
ASSERT (handler(slot));
ASSERT (allocatedObj);
handler(slot)->commit(allocatedObj);
}
/* === diagnostics helpers === */
/** @return total number of objects
* currently managed by this allocator */
size_t
AllocationCluster::size() const
{
size_t size(0);
typedef ManagerTable::const_iterator Iter;
for (Iter ii= typeHandlers_.begin(); ii != typeHandlers_.end(); ++ii )
if (*ii)
size += (*ii)->size();
return size;
}
size_t
AllocationCluster::countActiveInstances (size_t& slot) const
{
if (handler (slot))
return handler(slot)->size();
else
return 0;
}
} // namespace lib