2008-10-18 02:32:34 +02:00
|
|
|
/*
|
2010-12-18 00:58:19 +01:00
|
|
|
ALLOCATION-CLUSTER.hpp - allocating and owning a pile of objects
|
2010-12-17 23:28:49 +01:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
Copyright (C) Lumiera.org
|
|
|
|
|
2008, Hermann Vosseler <Ichthyostega@web.de>
|
2010-12-17 23:28:49 +01:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
This program is free software; you can redistribute it and/or
|
|
|
|
|
modify it under the terms of the GNU General Public License as
|
2010-12-17 23:28:49 +01:00
|
|
|
published by the Free Software Foundation; either version 2 of
|
|
|
|
|
the License, or (at your option) any later version.
|
|
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
GNU General Public License for more details.
|
2010-12-17 23:28:49 +01:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
|
along with this program; if not, write to the Free Software
|
|
|
|
|
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
2010-12-17 23:28:49 +01:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
*/
|
|
|
|
|
|
2010-12-18 00:58:19 +01:00
|
|
|
/** @file allocation-cluster.hpp
|
2008-10-18 02:32:34 +02:00
|
|
|
** Memory management for the low-level model (render nodes network).
|
|
|
|
|
** The model is organised into temporal segments, which are considered
|
|
|
|
|
** to be structurally constant and uniform. The objects within each
|
2024-05-27 21:21:03 +02:00
|
|
|
** segment are strongly interconnected, and thus each segment is created
|
|
|
|
|
** within a single build process and is replaced or released as a whole.
|
|
|
|
|
** AllocationCluster implements memory management to support this usage
|
|
|
|
|
** pattern. Optionally it is even possible to skip invocation of object
|
|
|
|
|
** destructors, making de-allocation highly efficient (typically the
|
|
|
|
|
** memory pages are already cache-cold when about to be discarded).
|
2024-06-13 23:46:17 +02:00
|
|
|
** \par using as STL allocator
|
|
|
|
|
** AllocationCluster::Allocator is an adapter to expose the interface
|
|
|
|
|
** expected by std::allocator_traits (and thus usable by all standard compliant
|
|
|
|
|
** containers). With this usage, the container _manages_ the contained objects,
|
|
|
|
|
** including the invocation of their destructors, while relying on the allocator
|
|
|
|
|
** to allot and discard bare memory. However, to avoid invoking any destructors,
|
|
|
|
|
** the container itself can be created with AllocationCluster::createDisposable.
|
2024-05-27 21:21:03 +02:00
|
|
|
** @warning deliberately *not threadsafe*.
|
|
|
|
|
** @remark confine usage to a single thread or use thread-local clusters.
|
2010-12-18 00:58:19 +01:00
|
|
|
** @see allocation-cluster-test.cpp
|
2008-10-18 02:32:34 +02:00
|
|
|
** @see builder::ToolFactory
|
2024-05-25 19:27:17 +02:00
|
|
|
** @see linked-elements.hpp
|
2008-10-18 02:32:34 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
2010-12-18 00:58:19 +01:00
|
|
|
#ifndef LIB_ALLOCATION_CLUSTER_H
|
|
|
|
|
#define LIB_ALLOCATION_CLUSTER_H
|
2008-10-18 02:32:34 +02:00
|
|
|
|
2008-12-27 00:53:35 +01:00
|
|
|
#include "lib/error.hpp"
|
2018-03-24 05:35:13 +01:00
|
|
|
#include "lib/nocopy.hpp"
|
2008-10-20 03:13:02 +02:00
|
|
|
|
2024-05-25 05:14:36 +02:00
|
|
|
#include <type_traits>
|
|
|
|
|
#include <utility>
|
2024-05-15 23:46:37 +02:00
|
|
|
#include <memory>
|
2014-08-17 08:03:21 +02:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace lib {
|
2024-05-19 17:53:51 +02:00
|
|
|
namespace test { class AllocationCluster_test; } // declared friend for low-level-checks
|
2024-06-13 23:46:17 +02:00
|
|
|
namespace err = lumiera::error;
|
2008-12-27 01:04:20 +01:00
|
|
|
|
|
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
/**
|
2008-10-18 02:32:34 +02:00
|
|
|
* A pile of objects sharing common allocation and lifecycle.
|
2024-05-25 19:27:17 +02:00
|
|
|
* AllocationCluster owns a heterogeneous collection of objects of various types.
|
|
|
|
|
* Typically, allocation happens during a short time span when building a new segment,
|
|
|
|
|
* and objects are used together until the segment is discarded. The primary leverage
|
|
|
|
|
* is to bulk-allocate memory, and to avoid invoking destructors (and thus to access
|
|
|
|
|
* a lot of _cache-cold memory pages_ on clean-up). A Stdlib compliant #Allocator
|
|
|
|
|
* is provided for use with STL containers. The actual allocation uses heap memory
|
2024-05-27 21:21:03 +02:00
|
|
|
* in _extents_ of hard-wired size, maintained by the accompanying StorageManager.
|
2024-05-25 19:27:17 +02:00
|
|
|
* @warning use #createDisposable whenever possible, but be sure to understand
|
|
|
|
|
* the ramifications of _not invoking_ an object's destructor.
|
2008-10-18 02:32:34 +02:00
|
|
|
*/
|
|
|
|
|
class AllocationCluster
|
2024-05-14 23:37:49 +02:00
|
|
|
: util::MoveOnly
|
2008-10-18 02:32:34 +02:00
|
|
|
{
|
2024-05-17 23:34:48 +02:00
|
|
|
class StorageManager;
|
|
|
|
|
|
|
|
|
|
/** maintaining the Allocation */
|
|
|
|
|
struct Storage
|
|
|
|
|
{
|
|
|
|
|
void* pos{nullptr};
|
|
|
|
|
size_t rest{0};
|
2024-05-19 17:53:51 +02:00
|
|
|
|
2024-06-13 23:46:17 +02:00
|
|
|
auto bytePos() { return static_cast<std::byte*> (pos); }
|
|
|
|
|
|
2024-05-19 17:53:51 +02:00
|
|
|
void*
|
|
|
|
|
allot (size_t bytes, size_t alignment)
|
|
|
|
|
{
|
|
|
|
|
void* loc = std::align (alignment, bytes, pos, rest);
|
|
|
|
|
if (loc)
|
|
|
|
|
{ // requested allocation indeed fits in space
|
2024-06-13 23:46:17 +02:00
|
|
|
pos = bytePos() + bytes;
|
2024-05-19 17:53:51 +02:00
|
|
|
rest -= bytes;
|
|
|
|
|
}
|
|
|
|
|
return loc;
|
|
|
|
|
}
|
2024-06-13 23:46:17 +02:00
|
|
|
|
|
|
|
|
void adjustPos (int offset);
|
|
|
|
|
bool hasReserve (int offset) const;
|
|
|
|
|
bool matches_last_allocation (void const* loc, size_t siz) const;
|
2024-05-17 23:34:48 +02:00
|
|
|
};
|
|
|
|
|
Storage storage_;
|
2024-05-15 23:46:37 +02:00
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
public:
|
|
|
|
|
AllocationCluster ();
|
2023-04-20 18:53:17 +02:00
|
|
|
~AllocationCluster () noexcept;
|
2008-10-18 02:32:34 +02:00
|
|
|
|
2024-05-27 19:02:31 +02:00
|
|
|
/* === diagnostics === */
|
|
|
|
|
size_t numExtents() const;
|
|
|
|
|
size_t numBytes() const;
|
2008-10-30 04:34:05 +01:00
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
template<class TY, typename...ARGS>
|
|
|
|
|
TY& create (ARGS&& ...);
|
|
|
|
|
|
|
|
|
|
template<class TY, typename...ARGS>
|
|
|
|
|
TY& createDisposable (ARGS&& ...);
|
|
|
|
|
|
|
|
|
|
|
2024-05-27 19:02:31 +02:00
|
|
|
template<typename X>
|
|
|
|
|
struct Allocator
|
|
|
|
|
{
|
|
|
|
|
using value_type = X;
|
|
|
|
|
|
|
|
|
|
[[nodiscard]] X* allocate (size_t n) { return mother_->allot<X>(n); }
|
|
|
|
|
void deallocate (X*, size_t) noexcept { /* rejoice */ }
|
|
|
|
|
|
|
|
|
|
Allocator(AllocationCluster* m) : mother_{m} { }
|
|
|
|
|
// standard copy acceptable
|
|
|
|
|
template<typename T>
|
|
|
|
|
Allocator(Allocator<T> const& o) : mother_{o.mother_} { }
|
|
|
|
|
|
2024-06-06 23:15:49 +02:00
|
|
|
template<typename T>
|
|
|
|
|
bool operator== (Allocator<T> const& o) const { return mother_ == o.mother_; }
|
|
|
|
|
|
2024-05-27 19:02:31 +02:00
|
|
|
AllocationCluster* mother_;
|
|
|
|
|
};
|
2012-04-30 04:28:16 +02:00
|
|
|
|
2024-05-27 19:02:31 +02:00
|
|
|
template<typename X>
|
|
|
|
|
Allocator<X> getAllocator() { return this; }
|
2024-05-15 19:59:05 +02:00
|
|
|
|
2012-04-30 04:28:16 +02:00
|
|
|
|
2024-06-13 23:46:17 +02:00
|
|
|
bool canAdjust (void* loc, size_t oldSiz, size_t newSiz) const;
|
|
|
|
|
void doAdjust (void* loc, size_t oldSiz, size_t newSiz);
|
|
|
|
|
|
2008-10-23 23:08:27 +02:00
|
|
|
private:
|
2024-05-14 23:37:49 +02:00
|
|
|
/**
|
|
|
|
|
* portion out the requested amount of memory,
|
|
|
|
|
* possibly claiming a new pool block.
|
|
|
|
|
*/
|
|
|
|
|
void*
|
2024-05-15 23:46:37 +02:00
|
|
|
allotMemory (size_t bytes, size_t alignment)
|
2024-05-14 23:37:49 +02:00
|
|
|
{
|
2024-05-17 23:34:48 +02:00
|
|
|
ENSURE (_is_within_limits (bytes, alignment));
|
2024-05-19 17:53:51 +02:00
|
|
|
void* loc = storage_.allot(bytes, alignment);
|
|
|
|
|
if (loc) return loc;
|
2024-05-17 23:34:48 +02:00
|
|
|
expandStorage (bytes);
|
|
|
|
|
return allotMemory (bytes, alignment);
|
2024-05-14 23:37:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
template<typename X>
|
|
|
|
|
X*
|
|
|
|
|
allot (size_t cnt =1)
|
|
|
|
|
{
|
2024-05-15 23:46:37 +02:00
|
|
|
return static_cast<X*> (allotMemory (cnt * sizeof(X), alignof(X)));
|
2024-05-14 23:37:49 +02:00
|
|
|
}
|
2024-05-17 23:34:48 +02:00
|
|
|
|
2024-05-25 05:14:36 +02:00
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
class Destructor
|
|
|
|
|
: util::NonCopyable
|
|
|
|
|
{
|
|
|
|
|
public:
|
|
|
|
|
virtual ~Destructor(); ///< this is an interface
|
|
|
|
|
Destructor* next{nullptr};// intrusive linked list...
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/** @internal storage frame with the actual payload object,
|
|
|
|
|
* which can be attached to a list of destructors to invoke
|
|
|
|
|
*/
|
2024-05-25 05:14:36 +02:00
|
|
|
template<typename X>
|
2024-05-25 19:27:17 +02:00
|
|
|
struct AllocationWithDestructor
|
|
|
|
|
: Destructor
|
|
|
|
|
{
|
|
|
|
|
X payload;
|
|
|
|
|
|
|
|
|
|
template<typename...ARGS>
|
|
|
|
|
AllocationWithDestructor (ARGS&& ...args)
|
|
|
|
|
: payload(std::forward<ARGS> (args)...)
|
|
|
|
|
{ }
|
|
|
|
|
};
|
2024-05-25 05:14:36 +02:00
|
|
|
|
2024-05-17 23:34:48 +02:00
|
|
|
void expandStorage (size_t);
|
2024-05-25 19:27:17 +02:00
|
|
|
void registerDestructor (Destructor&);
|
2024-05-17 23:34:48 +02:00
|
|
|
bool _is_within_limits (size_t,size_t);
|
2024-05-19 17:53:51 +02:00
|
|
|
|
|
|
|
|
friend class test::AllocationCluster_test;
|
2008-10-18 02:32:34 +02:00
|
|
|
};
|
2008-10-20 03:13:02 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//-----implementation-details------------------------
|
|
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
/**
|
|
|
|
|
* Factory function: place a new instance into this AllocationCluster,
|
|
|
|
|
* but *without invoking its destructor* on clean-up (for performance reasons).
|
|
|
|
|
*/
|
2024-05-25 05:14:36 +02:00
|
|
|
template<class TY, typename...ARGS>
|
2024-06-13 23:46:17 +02:00
|
|
|
inline TY&
|
2024-05-25 05:14:36 +02:00
|
|
|
AllocationCluster::createDisposable (ARGS&& ...args)
|
|
|
|
|
{
|
|
|
|
|
return * new(allot<TY>()) TY (std::forward<ARGS> (args)...);
|
|
|
|
|
}
|
2024-05-25 19:27:17 +02:00
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Factory function: place a new instance into this AllocationCluster;
|
|
|
|
|
* the object will be properly destroyed when the cluster goes out of scope.
|
|
|
|
|
* @note whenever possible, the #createDisposable variant should be preferred
|
|
|
|
|
*/
|
2024-05-25 05:14:36 +02:00
|
|
|
template<class TY, typename...ARGS>
|
2024-06-13 23:46:17 +02:00
|
|
|
inline TY&
|
2024-05-25 05:14:36 +02:00
|
|
|
AllocationCluster::create (ARGS&& ...args)
|
|
|
|
|
{
|
|
|
|
|
if constexpr (std::is_trivial_v<TY>)
|
|
|
|
|
return createDisposable<TY> (std::forward<ARGS> (args)...);
|
|
|
|
|
|
2024-05-25 19:27:17 +02:00
|
|
|
using Frame = AllocationWithDestructor<TY>;
|
|
|
|
|
auto& frame = createDisposable<Frame> (std::forward<ARGS> (args)...);
|
|
|
|
|
registerDestructor (frame);
|
|
|
|
|
return frame.payload;
|
2024-05-25 05:14:36 +02:00
|
|
|
}
|
|
|
|
|
|
2012-04-30 04:28:16 +02:00
|
|
|
|
2024-06-13 23:46:17 +02:00
|
|
|
/**
|
|
|
|
|
* Adjust the size of the latest raw memory allocation dynamically.
|
|
|
|
|
* @param loc an allocation provided by this AllocationCluster
|
|
|
|
|
* @param oldSiz the size requested for the allocation \a loc
|
|
|
|
|
* @param newSiz desired new size for this allocation
|
|
|
|
|
* @remarks since AllocationCluster must be used in a single threaded environment,
|
|
|
|
|
* the invoking code can sometimes arrange to adapt the latest allocation
|
|
|
|
|
* to a dynamically changing situation, like e.g. populating a container
|
|
|
|
|
* with a previously unknown number of elements. Obviously, the overall
|
|
|
|
|
* allocation must stay within the confines of the current extent; it
|
|
|
|
|
* is thus mandatory to [check](\ref canAdjust) the ability beforehand.
|
|
|
|
|
*/
|
|
|
|
|
inline void
|
|
|
|
|
AllocationCluster::doAdjust(void* loc, size_t oldSiz, size_t newSiz)
|
|
|
|
|
{
|
|
|
|
|
if (not canAdjust (loc,oldSiz,newSiz))
|
|
|
|
|
throw err::Invalid {"AllocationCluster: unable to perform this allocation adjustment."};
|
|
|
|
|
storage_.adjustPos (int(newSiz) - int(oldSiz));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline bool
|
|
|
|
|
AllocationCluster::canAdjust(void* loc, size_t oldSiz, size_t newSiz) const
|
|
|
|
|
{
|
|
|
|
|
int offset{int(newSiz) - int(oldSiz)}; // is properly limited iff oldSiz is correct
|
|
|
|
|
return storage_.matches_last_allocation (loc, oldSiz)
|
|
|
|
|
and storage_.hasReserve (offset);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline void
|
|
|
|
|
AllocationCluster::Storage::adjustPos (int offset) ///< @warning be sure a negative offset is properly limited
|
|
|
|
|
{
|
|
|
|
|
REQUIRE (pos);
|
|
|
|
|
REQUIRE (hasReserve (rest));
|
|
|
|
|
pos = bytePos() + offset;
|
|
|
|
|
rest -= offset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline bool
|
|
|
|
|
AllocationCluster::Storage::hasReserve (int offset) const
|
|
|
|
|
{
|
|
|
|
|
return offset <= int(rest);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline bool
|
|
|
|
|
AllocationCluster::Storage::matches_last_allocation (void const* loc, size_t siz) const
|
|
|
|
|
{
|
|
|
|
|
return loc == static_cast<std::byte const*> (pos) - siz;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2008-10-18 02:32:34 +02:00
|
|
|
} // namespace lib
|
2023-04-20 18:53:17 +02:00
|
|
|
#endif /*LIB_ALLOCATION_CLUSTER_H*/
|