/**
	Utility functions for memory management

	Note that this module is currently a sandbox for testing allocation-related code.
	Nothing here, including the interfaces, is final; it is largely experimental.

	Copyright: © 2012-2013 RejectedSoftware e.K.
	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
	Authors: Sönke Ludwig
*/
module libasync.internals.memory;

import core.exception : OutOfMemoryError;
import core.stdc.stdlib;
import core.memory;
import std.conv;
import std.exception : enforceEx;
import std.traits;
import std.algorithm;


Allocator defaultAllocator()
{
	version(VibeManualMemoryManagement){
		return manualAllocator();
	} else {
		static __gshared Allocator alloc;
		if( !alloc ){
			alloc = new GCAllocator;
			alloc = new AutoFreeListAllocator(alloc);
			alloc = new DebugAllocator(alloc);
			alloc = new LockAllocator(alloc);
		}
		return alloc;
	}
}

Allocator manualAllocator()
{
	static __gshared Allocator alloc;
	if( !alloc ){
		alloc = new MallocAllocator;
		alloc = new AutoFreeListAllocator(alloc);
		alloc = new DebugAllocator(alloc);
		alloc = new LockAllocator(alloc);
	}
	return alloc;
}

auto allocObject(T, bool MANAGED = true, ARGS...)(Allocator allocator, ARGS args)
{
	auto mem = allocator.alloc(AllocSize!T);
	static if( MANAGED ){
		static if( hasIndirections!T )
			GC.addRange(mem.ptr, mem.length);
		return emplace!T(mem, args);
	}
	else static if( is(T == class) ) return cast(T)mem.ptr;
	else return cast(T*)mem.ptr;
}
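
// Usage sketch (hypothetical `Point` type, assuming a plain struct with no GC
// references): the object is constructed in place; since this module defines no
// freeObject() counterpart, the block is handed straight back to the allocator.
unittest {
	static struct Point { int x, y; }
	auto alloc = manualAllocator();
	auto p = allocObject!Point(alloc, 1, 2);
	assert(p.x == 1 && p.y == 2);
	alloc.free((cast(void*)p)[0 .. AllocSize!Point]);
}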

T[] allocArray(T, bool MANAGED = true)(Allocator allocator, size_t n)
{
	auto mem = allocator.alloc(T.sizeof * n);
	auto ret = cast(T[])mem;
	static if( MANAGED ){
		static if( hasIndirections!T )
			GC.addRange(mem.ptr, mem.length);
		// TODO: use memset for class, pointers and scalars
		foreach (ref el; ret) {
			emplace!T(cast(void[])((&el)[0 .. 1]));
		}
	}
	return ret;
}

void freeArray(T, bool MANAGED = true)(Allocator allocator, ref T[] array)
{
	static if (MANAGED && hasIndirections!T)
		GC.removeRange(array.ptr);
	allocator.free(cast(void[])array);
	array = null;
}
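
// Allocation/deallocation round trip (hypothetical `Vec` type, assuming an
// element type without GC references so no GC ranges are registered).
unittest {
	static struct Vec { double x, y, z; }
	auto alloc = manualAllocator();
	auto arr = allocArray!Vec(alloc, 16);
	assert(arr.length == 16);
	arr[3].x = 1.5;
	freeArray(alloc, arr);
	assert(arr is null);
}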


interface Allocator {
	enum size_t alignment = 0x10;
	enum size_t alignmentMask = alignment-1;

	void[] alloc(size_t sz)
	out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "alloc() returned misaligned data."); }

	void[] realloc(void[] mem, size_t new_sz)
	in {
		assert(mem.ptr !is null, "realloc() called with null array.");
		assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
	}
	out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "realloc() returned misaligned data."); }

	void free(void[] mem)
	in {
		assert(mem.ptr !is null, "free() called with null array.");
		assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
	}
}


/**
	Simple proxy allocator protecting its base allocator with a mutex.
*/
class LockAllocator : Allocator {
	private {
		Allocator m_base;
	}
	this(Allocator base) { m_base = base; }
	void[] alloc(size_t sz) { synchronized(this) return m_base.alloc(sz); }
	void[] realloc(void[] mem, size_t new_sz)
	in {
		assert(mem.ptr !is null, "realloc() called with null array.");
		assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
	}
	body { synchronized(this) return m_base.realloc(mem, new_sz); }
	void free(void[] mem)
	in {
		assert(mem.ptr !is null, "free() called with null array.");
		assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
	}
	body { synchronized(this) m_base.free(mem); }
}

final class DebugAllocator : Allocator {
	import libasync.internals.hashmap : HashMap;
	private {
		Allocator m_baseAlloc;
		HashMap!(void*, size_t) m_blocks;
		size_t m_bytes;
		size_t m_maxBytes;
	}

	this(Allocator base_allocator)
	{
		m_baseAlloc = base_allocator;
		m_blocks = HashMap!(void*, size_t)(manualAllocator());
	}

	@property size_t allocatedBlockCount() const { return m_blocks.length; }
	@property size_t bytesAllocated() const { return m_bytes; }
	@property size_t maxBytesAllocated() const { return m_maxBytes; }

	void[] alloc(size_t sz)
	{
		auto ret = m_baseAlloc.alloc(sz);
		assert(ret.length == sz, "base.alloc() returned block with wrong size.");
		assert(m_blocks.get(ret.ptr, size_t.max) == size_t.max, "base.alloc() returned block that is already allocated.");
		m_blocks[ret.ptr] = sz;
		m_bytes += sz;
		if( m_bytes > m_maxBytes ){
			m_maxBytes = m_bytes;
			//logDebug("New allocation maximum: %d (%d blocks)", m_maxBytes, m_blocks.length);
		}
		return ret;
	}

	void[] realloc(void[] mem, size_t new_size)
	{
		auto sz = m_blocks.get(mem.ptr, size_t.max);
		assert(sz != size_t.max, "realloc() called with non-allocated pointer.");
		assert(sz == mem.length, "realloc() called with block of wrong size.");
		auto ret = m_baseAlloc.realloc(mem, new_size);
		assert(ret.length == new_size, "base.realloc() returned block with wrong size.");
		assert(ret.ptr is mem.ptr || m_blocks.get(ret.ptr, size_t.max) == size_t.max, "base.realloc() returned block that is already allocated.");
		m_bytes -= sz;
		m_blocks.remove(mem.ptr);
		m_blocks[ret.ptr] = new_size;
		m_bytes += new_size;
		return ret;
	}
	void free(void[] mem)
	{
		auto sz = m_blocks.get(mem.ptr, size_t.max);
		assert(sz != size_t.max, "free() called with non-allocated object.");
		assert(sz == mem.length, "free() called with block of wrong size.");
		m_baseAlloc.free(mem);
		m_bytes -= sz;
		m_blocks.remove(mem.ptr);
	}
}
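
// Sanity check of the book-keeping (sketch; assumes a fresh DebugAllocator on
// top of GCAllocator, which never hands out a block that is still tracked).
unittest {
	auto da = new DebugAllocator(new GCAllocator);
	auto block = da.alloc(64);
	assert(da.allocatedBlockCount == 1);
	assert(da.bytesAllocated == 64);
	da.free(block);
	assert(da.allocatedBlockCount == 0 && da.bytesAllocated == 0);
	assert(da.maxBytesAllocated == 64);
}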

final class MallocAllocator : Allocator {
	void[] alloc(size_t sz)
	{
		static err = new immutable OutOfMemoryError;
		auto ptr = .malloc(sz + Allocator.alignment);
		if (ptr is null) throw err;
		return adjustPointerAlignment(ptr)[0 .. sz];
	}

	void[] realloc(void[] mem, size_t new_size)
	{
		size_t csz = min(mem.length, new_size);
		auto p = extractUnalignedPointer(mem.ptr);
		size_t oldmisalign = mem.ptr - p;

		auto pn = cast(ubyte*).realloc(p, new_size+Allocator.alignment);
		if (p == pn) return pn[oldmisalign .. new_size+oldmisalign];

		auto pna = cast(ubyte*)adjustPointerAlignment(pn);
		auto newmisalign = pna - pn;

		// account for changed alignment after realloc (move memory back to aligned position)
		if (oldmisalign != newmisalign) {
			if (newmisalign > oldmisalign) {
				foreach_reverse (i; 0 .. csz)
					pn[i + newmisalign] = pn[i + oldmisalign];
			} else {
				foreach (i; 0 .. csz)
					pn[i + newmisalign] = pn[i + oldmisalign];
			}
		}

		return pna[0 .. new_size];
	}

	void free(void[] mem)
	{
		//import std.stdio : writeln;
		//writeln("free: ", mem.ptr, " sz ", mem.length);
		.free(extractUnalignedPointer(mem.ptr));
	}
}

final class GCAllocator : Allocator {
	void[] alloc(size_t sz)
	{
		auto mem = GC.malloc(sz+Allocator.alignment);
		auto alignedmem = adjustPointerAlignment(mem);
		assert(alignedmem - mem <= Allocator.alignment);
		auto ret = alignedmem[0 .. sz];
		ensureValidMemory(ret);
		return ret;
	}
	void[] realloc(void[] mem, size_t new_size)
	{
		size_t csz = min(mem.length, new_size);

		auto p = extractUnalignedPointer(mem.ptr);
		size_t misalign = mem.ptr - p;
		assert(misalign <= Allocator.alignment);

		void[] ret;
		auto extended = GC.extend(p, new_size - mem.length, new_size - mem.length);
		if (extended) {
			assert(extended >= new_size+Allocator.alignment);
			ret = p[misalign .. new_size+misalign];
		} else {
			ret = alloc(new_size);
			ret[0 .. csz] = mem[0 .. csz];
		}
		ensureValidMemory(ret);
		return ret;
	}
	void free(void[] mem)
	{
		// For safety reasons, the GCAllocator should never explicitly free memory.
		//GC.free(extractUnalignedPointer(mem.ptr));
	}
}

final class AutoFreeListAllocator : Allocator {
	import std.typetuple;

	private {
		enum minExponent = 5;
		enum freeListCount = 14;
		FreeListAlloc[freeListCount] m_freeLists;
		Allocator m_baseAlloc;
	}

	this(Allocator base_allocator)
	{
		m_baseAlloc = base_allocator;
		foreach (i; iotaTuple!freeListCount)
			m_freeLists[i] = new FreeListAlloc(nthFreeListSize!(i), m_baseAlloc);
	}

	void[] alloc(size_t sz)
	{
		if (sz > nthFreeListSize!(freeListCount-1)) return m_baseAlloc.alloc(sz);
		foreach (i; iotaTuple!freeListCount)
			if (sz <= nthFreeListSize!(i))
				return m_freeLists[i].alloc().ptr[0 .. sz];
		//logTrace("AFL alloc %08X(%d)", ret.ptr, sz);
		assert(false);
	}

	void[] realloc(void[] data, size_t sz)
	{
		foreach (fl; m_freeLists) {
			if (data.length <= fl.elementSize) {
				// just grow the slice if it still fits into the free list slot
				if (sz <= fl.elementSize)
					return data.ptr[0 .. sz];

				// otherwise re-allocate
				auto newd = alloc(sz);
				assert(newd.ptr+sz <= data.ptr || newd.ptr >= data.ptr+data.length, "New block overlaps old one!?");
				auto len = min(data.length, sz);
				newd[0 .. len] = data[0 .. len];
				free(data);
				return newd;
			}
		}
		// forward large blocks to the base allocator
		return m_baseAlloc.realloc(data, sz);
	}

	void free(void[] data)
	{
		//logTrace("AFL free %08X(%s)", data.ptr, data.length);
		if (data.length > nthFreeListSize!(freeListCount-1)) {
			m_baseAlloc.free(data);
			return;
		}
		foreach(i; iotaTuple!freeListCount) {
			if (data.length <= nthFreeListSize!i) {
				m_freeLists[i].free(data.ptr[0 .. nthFreeListSize!i]);
				return;
			}
		}
		assert(false);
	}

	private static pure size_t nthFreeListSize(size_t i)() { return 1 << (i + minExponent); }
	private template iotaTuple(size_t i) {
		static if (i > 1) alias iotaTuple = TypeTuple!(iotaTuple!(i-1), i-1);
		else alias iotaTuple = TypeTuple!(0);
	}
}
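
// Size-class behaviour sketch (assumes the defaults above: the smallest bucket
// holds 32 bytes and bucket sizes double up to 2^18 bytes). Reallocating within
// the same bucket simply re-slices the existing slot.
unittest {
	auto afl = new AutoFreeListAllocator(new MallocAllocator);
	auto a = afl.alloc(20);      // served from the 32-byte free list
	auto b = afl.realloc(a, 30); // still fits the same slot
	assert(b.ptr is a.ptr && b.length == 30);
	afl.free(b);
}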

final class PoolAllocator : Allocator {
	static struct Pool { Pool* next; void[] data; void[] remaining; }
	static struct Destructor { Destructor* next; void function(void*) destructor; void* object; }
	private {
		Allocator m_baseAllocator;
		Pool* m_freePools;
		Pool* m_fullPools;
		Destructor* m_destructors;
		size_t m_poolSize;
	}

	this(size_t pool_size, Allocator base)
	{
		m_poolSize = pool_size;
		m_baseAllocator = base;
	}

	@property size_t totalSize()
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length;
		return amt;
	}

	@property size_t allocatedSize()
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length - p.remaining.length;
		return amt;
	}

	void[] alloc(size_t sz)
	{
		auto aligned_sz = alignedSize(sz);

		Pool* pprev = null;
		Pool* p = cast(Pool*)m_freePools;
		while( p && p.remaining.length < aligned_sz ){
			pprev = p;
			p = p.next;
		}

		if( !p ){
			auto pmem = m_baseAllocator.alloc(AllocSize!Pool);

			p = emplace!Pool(pmem);
			p.data = m_baseAllocator.alloc(max(aligned_sz, m_poolSize));
			p.remaining = p.data;
			p.next = cast(Pool*)m_freePools;
			m_freePools = p;
			pprev = null;
		}

		auto ret = p.remaining[0 .. aligned_sz];
		p.remaining = p.remaining[aligned_sz .. $];
		if( !p.remaining.length ){
			if( pprev ){
				pprev.next = p.next;
			} else {
				m_freePools = p.next;
			}
			p.next = cast(Pool*)m_fullPools;
			m_fullPools = p;
		}

		return ret[0 .. sz];
	}

	void[] realloc(void[] arr, size_t newsize)
	{
		auto aligned_sz = alignedSize(arr.length);
		auto aligned_newsz = alignedSize(newsize);

		if( aligned_newsz <= aligned_sz ) return arr[0 .. newsize]; // TODO: back up remaining

		auto pool = m_freePools;
		bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr;
		if( last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz ){
			pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $];
			arr = arr.ptr[0 .. aligned_newsz];
			assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?");
			return arr[0 .. newsize];
		} else {
			auto ret = alloc(newsize);
			assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?");
			ret[0 .. min(arr.length, newsize)] = arr[0 .. min(arr.length, newsize)];
			return ret;
		}
	}

	void free(void[] mem)
	{
	}

	void freeAll()
	{
		version(VibeManualMemoryManagement){
			// destroy all initialized objects
			for (auto d = m_destructors; d; d = d.next)
				d.destructor(cast(void*)d.object);
			m_destructors = null;

			// put all full Pools into the free pools list
			for (Pool* p = cast(Pool*)m_fullPools, pnext; p; p = pnext) {
				pnext = p.next;
				p.next = cast(Pool*)m_freePools;
				m_freePools = cast(Pool*)p;
			}

			// free up all pools
			for (Pool* p = cast(Pool*)m_freePools; p; p = p.next)
				p.remaining = p.data;
		}
	}

	void reset()
	{
		version(VibeManualMemoryManagement){
			freeAll();
			Pool* pnext;
			for (auto p = cast(Pool*)m_freePools; p; p = pnext) {
				pnext = p.next;
				m_baseAllocator.free(p.data);
				m_baseAllocator.free((cast(void*)p)[0 .. AllocSize!Pool]);
			}
			m_freePools = null;
		}
	}

	private static destroy(T)(void* ptr)
	{
		static if( is(T == class) ) .destroy(cast(T)ptr);
		else .destroy(*cast(T*)ptr);
	}
}
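
// Region-style usage sketch (assumes small requests that fit into one pool;
// individual free() calls are no-ops, memory is reclaimed pool-wise).
unittest {
	auto pool = new PoolAllocator(4096, new MallocAllocator);
	auto a = pool.alloc(100);
	auto b = pool.alloc(200);
	assert(a.length == 100 && b.length == 200);
	assert(pool.allocatedSize >= 300);
	assert(pool.totalSize >= 4096);
}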

final class FreeListAlloc : Allocator
{
	private static struct FreeListSlot { FreeListSlot* next; }
	private {
		immutable size_t m_elemSize;
		Allocator m_baseAlloc;
		FreeListSlot* m_firstFree = null;
		size_t m_nalloc = 0;
		size_t m_nfree = 0;
	}

	this(size_t elem_size, Allocator base_allocator)
	{
		assert(elem_size >= size_t.sizeof);
		m_elemSize = elem_size;
		m_baseAlloc = base_allocator;
		//logDebug("Create FreeListAlloc %d", m_elemSize);
	}

	@property size_t elementSize() const { return m_elemSize; }

	void[] alloc(size_t sz)
	{
		assert(sz == m_elemSize, "Invalid allocation size.");
		return alloc();
	}

	void[] alloc()
	{
		void[] mem;
		if( m_firstFree ){
			auto slot = m_firstFree;
			m_firstFree = slot.next;
			slot.next = null;
			mem = (cast(void*)slot)[0 .. m_elemSize];
			m_nfree--;
		} else {
			mem = m_baseAlloc.alloc(m_elemSize);
			//logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree);
		}
		m_nalloc++;
		//logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree);
		return mem;
	}

	void[] realloc(void[] mem, size_t sz)
	{
		assert(mem.length == m_elemSize);
		assert(sz == m_elemSize);
		return mem;
	}

	void free(void[] mem)
	{
		assert(mem.length == m_elemSize, "Memory block passed to free has wrong size.");
		auto s = cast(FreeListSlot*)mem.ptr;
		s.next = m_firstFree;
		m_firstFree = s;
		m_nalloc--;
		m_nfree++;
	}
}
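
// Recycling sketch (assumes a fixed 64-byte element size): a freed slot is
// handed out again by the next allocation.
unittest {
	auto fl = new FreeListAlloc(64, new MallocAllocator);
	auto a = fl.alloc();
	fl.free(a);
	auto b = fl.alloc();
	assert(b.ptr is a.ptr); // the slot came back from the free list
	fl.free(b);
}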

template FreeListObjectAlloc(T, bool USE_GC = true, bool INIT = true)
{
	enum ElemSize = AllocSize!T;

	static if( is(T == class) ){
		alias T TR;
	} else {
		alias T* TR;
	}

	TR alloc(ARGS...)(ARGS args)
	{
		//logInfo("alloc %s/%d", T.stringof, ElemSize);
		auto mem = manualAllocator().alloc(ElemSize);
		static if( hasIndirections!T ) GC.addRange(mem.ptr, ElemSize);
		static if( INIT ) return emplace!T(mem, args);
		else return cast(TR)mem.ptr;
	}

	void free(TR obj)
	{
		static if( INIT ){
			auto objc = obj;
			.destroy(objc);//typeid(T).destroy(cast(void*)obj);
		}
		static if( hasIndirections!T ) GC.removeRange(cast(void*)obj);
		manualAllocator().free((cast(void*)obj)[0 .. ElemSize]);
	}
}
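
// Hypothetical per-type usage (assumes a small plain struct; note that in the
// code above the allocation always goes through manualAllocator()).
unittest {
	static struct Node { int value; Node* next; }
	alias NodeAlloc = FreeListObjectAlloc!Node;
	auto n = NodeAlloc.alloc(42);
	assert(n.value == 42 && n.next is null);
	NodeAlloc.free(n);
}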


template AllocSize(T)
{
	static if (is(T == class)) {
		// workaround for a strange bug where AllocSize!SSLStream == 0: TODO: dustmite!
		enum dummy = T.stringof ~ __traits(classInstanceSize, T).stringof;
		enum AllocSize = __traits(classInstanceSize, T);
	} else {
		enum AllocSize = T.sizeof;
	}
}

struct FreeListRef(T, bool INIT = true)
{
	enum ElemSize = AllocSize!T;

	static if( is(T == class) ){
		alias T TR;
	} else {
		alias T* TR;
	}

	private TR m_object;
	private size_t m_magic = 0x1EE75817; // workaround for compiler bug

	static FreeListRef opCall(ARGS...)(ARGS args)
	{
		//logInfo("refalloc %s/%d", T.stringof, ElemSize);
		FreeListRef ret;
		auto mem = manualAllocator().alloc(ElemSize + int.sizeof);
		static if( hasIndirections!T ) GC.addRange(mem.ptr, ElemSize);
		static if( INIT ) ret.m_object = cast(TR)emplace!(Unqual!T)(mem, args);
		else ret.m_object = cast(TR)mem.ptr;
		ret.refCount = 1;
		return ret;
	}

	~this()
	{
		//if( m_object ) logInfo("~this!%s(): %d", T.stringof, this.refCount);
		//if( m_object ) logInfo("ref %s destructor %d", T.stringof, refCount);
		//else logInfo("ref %s destructor %d", T.stringof, 0);
		clear();
		m_magic = 0;
		m_object = null;
	}

	this(this)
	{
		checkInvariants();
		if( m_object ){
			//if( m_object ) logInfo("this!%s(this): %d", T.stringof, this.refCount);
			this.refCount++;
		}
	}

	void opAssign(FreeListRef other)
	{
		clear();
		m_object = other.m_object;
		if( m_object ){
			//logInfo("opAssign!%s(): %d", T.stringof, this.refCount);
			refCount++;
		}
	}

	void clear()
	{
		checkInvariants();
		if( m_object ){
			if( --this.refCount == 0 ){
				static if( INIT ){
					//logInfo("ref %s destroy", T.stringof);
					//typeid(T).destroy(cast(void*)m_object);
					auto objc = m_object;
					.destroy(objc);
					//logInfo("ref %s destroyed", T.stringof);
				}
				static if( hasIndirections!T ) GC.removeRange(cast(void*)m_object);
				manualAllocator().free((cast(void*)m_object)[0 .. ElemSize+int.sizeof]);
			}
		}

		m_object = null;
		m_magic = 0x1EE75817;
	}

	@property const(TR) get() const { checkInvariants(); return m_object; }
	@property TR get() { checkInvariants(); return m_object; }
	alias get this;

	private @property ref int refCount()
	const {
		auto ptr = cast(ubyte*)cast(void*)m_object;
		ptr += ElemSize;
		return *cast(int*)ptr;
	}

	private void checkInvariants()
	const {
		assert(m_magic == 0x1EE75817);
		assert(!m_object || refCount > 0);
	}
}
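
// Reference-counting sketch (hypothetical `Counter` payload): copies share the
// same allocation and bump the count stored directly behind the object.
unittest {
	static struct Counter { int value; }
	auto a = FreeListRef!Counter(5);
	assert(a.value == 5);
	auto b = a;           // postblit increments the embedded reference count
	b.value = 7;
	assert(a.value == 7); // both references see the same object
}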

// Recovers the original (unaligned) pointer; adjustPointerAlignment() stores the
// applied offset in the byte directly preceding the aligned address.
private void* extractUnalignedPointer(void* base)
{
	ubyte misalign = *(cast(ubyte*)base-1);
	assert(misalign <= Allocator.alignment);
	return base - misalign;
}

// Moves the pointer forward to the next alignment boundary and records the offset
// in the byte just before the returned address, so it can be undone later.
private void* adjustPointerAlignment(void* base)
{
	ubyte misalign = Allocator.alignment - (cast(size_t)base & Allocator.alignmentMask);
	base += misalign;
	*(cast(ubyte*)base-1) = misalign;
	return base;
}

unittest {
	void test_align(void* p, size_t adjustment) {
		void* pa = adjustPointerAlignment(p);
		assert((cast(size_t)pa & Allocator.alignmentMask) == 0, "Non-aligned pointer.");
		assert(*(cast(ubyte*)pa-1) == adjustment, "Invalid adjustment "~to!string(p)~": "~to!string(*(cast(ubyte*)pa-1)));
		void* pr = extractUnalignedPointer(pa);
		assert(pr == p, "Recovered base != original");
	}
	void* ptr = .malloc(0x40);
	ptr += Allocator.alignment - (cast(size_t)ptr & Allocator.alignmentMask);
	test_align(ptr++, 0x10);
	test_align(ptr++, 0x0F);
	test_align(ptr++, 0x0E);
	test_align(ptr++, 0x0D);
	test_align(ptr++, 0x0C);
	test_align(ptr++, 0x0B);
	test_align(ptr++, 0x0A);
	test_align(ptr++, 0x09);
	test_align(ptr++, 0x08);
	test_align(ptr++, 0x07);
	test_align(ptr++, 0x06);
	test_align(ptr++, 0x05);
	test_align(ptr++, 0x04);
	test_align(ptr++, 0x03);
	test_align(ptr++, 0x02);
	test_align(ptr++, 0x01);
	test_align(ptr++, 0x10);
}

private size_t alignedSize(size_t sz)
{
	return ((sz + Allocator.alignment - 1) / Allocator.alignment) * Allocator.alignment;
}

unittest {
	foreach( i; 0 .. 20 ){
		auto ia = alignedSize(i);
		assert(ia >= i);
		assert((ia & Allocator.alignmentMask) == 0);
		assert(ia < i+Allocator.alignment);
	}
}

// Touches the first and last byte of the given block (the double swap leaves the
// contents unchanged) so that an invalid block faults immediately instead of
// causing corruption later.
private void ensureValidMemory(void[] mem)
{
	auto bytes = cast(ubyte[])mem;
	swap(bytes[0], bytes[$-1]);
	swap(bytes[0], bytes[$-1]);
}