From 055df59dde0d9fac6584453b6b66780a145194b1 Mon Sep 17 00:00:00 2001 From: Ichthyostega Date: Sun, 16 Jun 2024 23:55:22 +0200 Subject: [PATCH] Library: tracking diagnostic allocator now complete and tested --- src/lib/test/tracking-allocator.cpp | 23 +-- src/lib/test/tracking-allocator.hpp | 13 +- tests/library/test/test-tracking-test.cpp | 129 ++++++++++++++- wiki/thinkPad.ichthyo.mm | 192 ++++++++++++++++++---- 4 files changed, 304 insertions(+), 53 deletions(-) diff --git a/src/lib/test/tracking-allocator.cpp b/src/lib/test/tracking-allocator.cpp index c3e4b23fe..e9874a562 100644 --- a/src/lib/test/tracking-allocator.cpp +++ b/src/lib/test/tracking-allocator.cpp @@ -34,11 +34,8 @@ ** - each MemoryPool contains a hashtable, where each active allocation is ** stored, using the storage-location as hashtable key. Each such entry ** gets a further consecutive internal ID, which is visible in the EventLog - ** - ////////////////////OOO Mutex locking - ** ** @see tracking-allocator.hpp ** @see TestTracking_test#demonstrate_checkAllocator() - ** */ @@ -82,7 +79,7 @@ namespace test{ : util::MoveOnly { UninitialisedDynBlock buff{}; - size_t entryID; + size_t entryID{0}; }; using AllocTab = std::unordered_map; @@ -135,18 +132,22 @@ namespace test{ } + /** keep track of any distinct memory pools used */ class PoolRegistry : util::NonCopyable - { - std::unordered_map> pools_{}; - public: + { // note: obsolete entries never discarded + using PoolTab = std::unordered_map>; + PoolTab pools_{}; + + public: static PoolHandle locate (Literal poolID); private: PoolHandle fetch_or_create (Literal poolID); }; + /** singleton for default pool */ Depend globalPool; Depend poolReg; @@ -187,7 +188,7 @@ namespace test{ /** - * Allot a memory block with size \a bytes. + * Allot a memory block of given size \a bytes. * This allocation is recorded in the associated MemoryPool * and proper deallocation can thus be verified. 
* @return a `void*` to the start of the bare memory location @@ -245,6 +246,7 @@ namespace test{ logAlarm ("FreeUnknown", bytes, showAddr(loc)); } + MemoryPool::Allocation const* MemoryPool::findAlloc (Location loc) const { @@ -280,7 +282,7 @@ namespace test{ EventLog TrackingAllocator::log{"test::TrackingAllocator"}; - /** get Checksum for mem-pool */ + /** get Checksum for specific mem-pool */ HashVal TrackingAllocator::checksum (Literal poolID) { @@ -296,7 +298,7 @@ namespace test{ return pool.use_count() - 1; } - /** get allocation count for mem-pool */ + /** get active allocation count for mem-pool */ size_t TrackingAllocator::numAlloc (Literal poolID) { @@ -350,5 +352,4 @@ namespace test{ - }} // namespace lib::test diff --git a/src/lib/test/tracking-allocator.hpp b/src/lib/test/tracking-allocator.hpp index 2135a2638..d80c407e4 100644 --- a/src/lib/test/tracking-allocator.hpp +++ b/src/lib/test/tracking-allocator.hpp @@ -22,7 +22,7 @@ /** @file tracking-allocator.hpp - ** unittest helper code: a custom allocator to track memory usage. + ** Unittest helper code: a custom allocator to track memory usage. ** By registering each allocation and deallocation, correct memory handling ** can be verified and memory usage can be investigated in practice. ** \par TrackingAllocator @@ -43,6 +43,11 @@ ** @remark these classes also work in concert with the building blocks ** from allocator-handle.hpp; notably it is possible to create ** a OwnUniqueAdapter front-end for fabricating `unique_ptr` + ** @warning deliberately *not threadsafe* + ** - generally speaking, allocation should be kept outside of + ** any multithreaded environment, or at least requires + ** dedicated care beyond any standard scheme + ** - this is a test feature... ** @see TestTracking_test */ @@ -157,6 +162,12 @@ namespace test { using value_type = TY; + // define that (non-equivalent) allocators migrate alongside on assignments.... 
+ using propagate_on_container_copy_assignment = std::true_type; ///< for sake of consistency + using propagate_on_container_move_assignment = std::true_type; ///< otherwise all elements must be copied + using propagate_on_container_swap = std::true_type; ///< otherwise they would have to deallocate cross-wise + + [[nodiscard]] TY* allocate (size_t cnt); void deallocate (TY*, size_t) noexcept; }; diff --git a/tests/library/test/test-tracking-test.cpp b/tests/library/test/test-tracking-test.cpp index 11789fccc..cdff7c330 100644 --- a/tests/library/test/test-tracking-test.cpp +++ b/tests/library/test/test-tracking-test.cpp @@ -32,7 +32,6 @@ #include "lib/allocator-handle.hpp" #include "lib/format-cout.hpp" #include "lib/format-util.hpp" -#include "lib/test/diagnostic-output.hpp"///////////////////////TODO #include @@ -88,8 +87,11 @@ namespace test{ Tracker delta(23); // (7) create δ with ID 23 delta = move(gamma); // (8) move-assign δ ⟵ γ log.event ("ID",delta.val); // (9) thus δ now bears the ID 55 (moved α ⟶ γ ⟶ δ) + CHECK (delta.val == 55); } log.event("ID",alpha.val); // (X) and thus α is now a zombie object + CHECK (alpha.val == Tracker::DEFUNCT); + cout << "____Tracker-Log_______________\n" << util::join(Tracker::log, "\n") @@ -112,6 +114,7 @@ namespace test{ } + /** @test dummy object with a tracking checksum. 
*/ void @@ -160,11 +163,14 @@ namespace test{ } + /** @test custom allocator to track memory handling * - use the base allocator to perform raw memory allocation * - demonstrate checksum and diagnostic functions * - use a standard adapter to create objects with `unique_ptr` * - use as _custom allocator_ within STL containers + * - can use several distinct pools + * - swapping containers will move allocators alongside */ void demonstrate_checkAllocator() @@ -190,7 +196,7 @@ namespace test{ CHECK (TrackingAllocator::numBytes() == 55); CHECK (allo.manages (mem)); - CHECK (allo.getSize (mem) == 55); + CHECK (allo.getSize (mem) == 55); // individual registration recalls the allocation's size HashVal memID = allo.getID (mem); CHECK (0 < memID); CHECK (TrackingAllocator::checksum() == memID*55); @@ -259,18 +265,22 @@ namespace test{ CHECK (TrackingAllocator::checksum() == 0); - Tracker *t1, *t2, *t3, *t4; + // define a vector type to use the TrackingAllocator internally + using TrackVec = std::vector>; + + // the following pointers are used later to identify log entries... 
+ Tracker *t1, *t2, *t3, *t4, *t5, *t6; + { // Test-3 : use as STL allocator log.event("Test-3"); - using SpyVec = std::vector>; log.event("fill with 3 default instances"); - SpyVec vec1(3); + TrackVec vec1(3); int v3 = vec1.back().val; - SpyVec vec2; + TrackVec vec2; log.event("move last instance over into other vector"); vec2.emplace_back (move (vec1[2])); CHECK (vec2.back().val == v3); @@ -300,12 +310,115 @@ namespace test{ .beforeCall("dtor").on(t3) .beforeCall("deallocate").on(GLOBAL) ); + CHECK (TrackingAllocator::checksum() == 0); + + + { // Test-4 : intermingled use of several pools + log.event("Test-4"); + + TrackAlloc allo1{"POOL-1"}; + TrackAlloc allo2{"POOL-2"}; + CHECK (allo1 != allo2); + + CHECK (TrackingAllocator::use_count(GLOBAL) == 0); + CHECK (TrackingAllocator::use_count("POOL-1") == 1); // referred by allo1 + CHECK (TrackingAllocator::use_count("POOL-2") == 1); // referred by allo2 + CHECK (TrackingAllocator::checksum ("POOL-1") == 0); + CHECK (TrackingAllocator::checksum ("POOL-2") == 0); + + TrackVec vec1{allo1}; + TrackVec vec2{allo2}; + CHECK (TrackingAllocator::use_count("POOL-1") == 2); // now also referred by the copy in the vectors + CHECK (TrackingAllocator::use_count("POOL-2") == 2); + + log.event("reserve space in vectors"); + vec1.reserve(20); + vec2.reserve(2); + CHECK (TrackingAllocator::numBytes("POOL-1") == 20*sizeof(Tracker)); + CHECK (TrackingAllocator::numBytes("POOL-2") == 2*sizeof(Tracker)); + + CHECK (TrackingAllocator::numBytes(GLOBAL) == 0); + CHECK (TrackingAllocator::numBytes() == 0); + + log.event("create elements in vec1"); + vec1.resize(5); + vec1.back().val = 11; + log.event("add element to vec2"); + vec2.push_back (Tracker{22}); + + // capture object locations for log verification later + t1 = & vec1[0]; + t2 = & vec1[1]; + t3 = & vec1[2]; + t4 = & vec1[3]; + t5 = & vec1[4]; + t6 = & vec2.front(); + + log.event ("swap vectors"); + std::swap (vec1, vec2); + + CHECK (vec1.back().val == 22); + CHECK (vec2.back().val 
== 11); + CHECK (vec1.size() == 1); + CHECK (vec2.size() == 5); + // the allocators were migrated alongside with the swap + CHECK (TrackingAllocator::numBytes("POOL-1") == 20*sizeof(Tracker)); + CHECK (TrackingAllocator::numBytes("POOL-2") == 2*sizeof(Tracker)); + // this can be demonstrated.... + log.event ("clear the elements migrated to vec2"); + vec2.clear(); + vec2.shrink_to_fit(); + CHECK (vec2.capacity() == 0); + CHECK (TrackingAllocator::numBytes("POOL-1") == 0 ); + CHECK (TrackingAllocator::numBytes("POOL-2") == 2*sizeof(Tracker)); + CHECK (vec1.size() == 1); + CHECK (vec1.capacity() == 2); // unaffected + + log.event ("leave scope"); + } + + CHECK (log.verifyEvent("Test-4") + .beforeEvent("reserve space in vectors") + .beforeCall("allocate").on("POOL-1").argPos(0, 20*sizeof(Tracker)) + .beforeCall("allocate").on("POOL-2").argPos(0, 2*sizeof(Tracker)) + + .beforeEvent("create elements in vec1") + .beforeCall("ctor").on(t1) + .beforeCall("ctor").on(t2) + .beforeCall("ctor").on(t3) + .beforeCall("ctor").on(t4) + .beforeCall("ctor").on(t5) + + .beforeEvent("add element to vec2") + .beforeCall("ctor").arg(22) + .beforeCall("ctor-move").on(t6).arg("Track{22}") + .beforeCall("dtor").arg(Tracker::DEFUNCT) + + .beforeEvent("swap vectors") + .beforeEvent("clear the elements migrated to vec2") + .beforeCall("dtor").on(t1) + .beforeCall("dtor").on(t2) + .beforeCall("dtor").on(t3) + .beforeCall("dtor").on(t4) + .beforeCall("dtor").on(t5).arg(11) + .beforeCall("deallocate").on("POOL-1").argPos(0, 20*sizeof(Tracker)) + + .beforeEvent("leave scope") + .beforeCall("dtor").on(t6).arg(22) + .beforeCall("deallocate").on("POOL-2").argPos(0, 2*sizeof(Tracker)) + ); + // everything clean and all pools empty again... 
+ CHECK (TrackingAllocator::use_count(GLOBAL) == 0); + CHECK (TrackingAllocator::use_count("POOL-1") == 0); + CHECK (TrackingAllocator::use_count("POOL-2") == 0); + CHECK (TrackingAllocator::checksum("POOL-1") == 0); + CHECK (TrackingAllocator::checksum("POOL-2") == 0); + CHECK (TrackingAllocator::checksum(GLOBAL) == 0); + cout << "____Tracking-Allo-Log_________\n" << util::join(Tracker::log, "\n") << "\n───╼━━━━━━━━━━━━━━━━━╾────────"< - - + + @@ -83525,24 +83525,29 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + + - + + - - - + + + + - - + + @@ -83572,17 +83577,15 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + - - - +

Ich habe nun beschlossen, sie zu bilden als Produkt @@ -83596,8 +83599,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + @@ -83607,8 +83609,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -83631,11 +83633,15 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
+ + +
- + + @@ -83652,8 +83658,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -83694,8 +83700,46 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + + + + + + + + +

+ ...das ist die Art von bequemen Beschäftigungen, die sich nach viel Arbeit anfühlen, tatsächlich aber nur darin bestehen, ein gewohntes Schema durchzuziehen ... man erbt von lib::Sync, man bastelt die Guards in jede Methode, man zimmert einen geilen Test mit dem Threadwrapper. Dadurch wird der neue Code keinen Deut besser. +

+ +
+ +
+
+ + + + +

+ Der Zweck der TrackingFactory ist es, das saubere Verhalten eines Custom-Allocators zu belegen. In Grenzfällen könnte das zwar auch Concurrency involvieren — jedoch ist es aus heutigem Verständnis generell nicht mehr üblich, Allokationen in der »heißen Zone« zu machen. Typischerweise verwendet man genau dafür einen Builder oder einen Pool und teilt die Ressourcen schon im Vorhinein den Threads zu. +

+ + +
+ +
+ + + + +

+ im Zweifelsfall wäre ein Adapter über der Factory einfacher +

+ +
+ +
@@ -83720,8 +83764,8 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- - + + @@ -83731,15 +83775,37 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + + + + + + + + + + + + + +

+ kann man gefälligst selber machen +

+ +
+ +
+
+
+ + + + +
+
+ - - - -
-
-
@@ -83748,7 +83814,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -83757,6 +83823,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200

+
@@ -84009,7 +84076,7 @@ Date:   Thu Apr 20 18:53:17 2023 +0200
- + @@ -129632,6 +129699,65 @@ std::cout << tmpl.render({"what", "World"}) << s + + + + + + +

+ wenn ein Allocator nur seine eigenen Allokationen handhaben soll ⟹ Gefahr +

+ + +
+ + + +

+ ...und das ist leider der Fall, sobald die Allokationen in irgendeiner Form von spezieller Datenstruktur verwaltet werden, die wir auch für die de-Allokation wieder brauchen... +

+ +
+ +
+ + + + +

+ Allokatoren äquivalent ⟺ eine Instanz kann von der anderen Instanz erzeugte Objekte deallozieren +

+ +
+
+ + + + + + + + + + + +

+ using propagate_on_container_copy_assignment = std::true_type; +

+

+ using propagate_on_container_move_assignment = std::true_type; +

+

+ using propagate_on_container_swap = std::true_type; +

+ +
+
+ + +
+