1 module nxt.region_allocator;
2 
3 // static if (__VERSION__ >= 2087)
4 // {
5 //     version(LDC) static assert(0, "TODO: Use std.experimental.allocator.building_blocks.region instead of this module");
6 // }
7 
8 import std.experimental.allocator.building_blocks.null_allocator;
9 import std.experimental.allocator.common;
10 import std.typecons : Flag, Yes, No;
11 
12 /**
13    Returns `true` if `ptr` is aligned at `alignment`.
14 */
15 @nogc nothrow pure
16 bool alignedAt(T)(T* ptr, uint alignment)
17 {
18     return cast(size_t) ptr % alignment == 0;
19 }
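
// Illustrative sketch (added): exercises `alignedAt` on hand-constructed addresses.
@nogc nothrow @system unittest
{
    assert((cast(void*) 64).alignedAt(16));
    assert(!(cast(void*) 66).alignedAt(4));
}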
20 
21 /**
   Returns `s` rounded up to a multiple of `base`.
23 */
24 @safe @nogc nothrow pure
25 size_t roundUpToMultipleOf(size_t s, uint base)
26 {
27     assert(base);
28     auto rem = s % base;
29     return rem ? s + base - rem : s;
30 }
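
// Illustrative spot checks (added) of `roundUpToMultipleOf`.
@safe @nogc nothrow pure unittest
{
    assert(roundUpToMultipleOf(0, 8) == 0);
    assert(roundUpToMultipleOf(9, 8) == 16);
    assert(roundUpToMultipleOf(16, 8) == 16);
}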
31 
32 @safe @nogc nothrow pure
33 bool isGoodStaticAlignment(uint x)
34 {
35     import std.math : isPowerOf2;
36     return x.isPowerOf2;
37 }
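
// Illustrative spot checks (added) of the power-of-two predicate used by the
// static asserts below.
@safe @nogc nothrow pure unittest
{
    assert(isGoodStaticAlignment(1));
    assert(isGoodStaticAlignment(16));
    assert(!isGoodStaticAlignment(0));
    assert(!isGoodStaticAlignment(24));
}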
38 
39 /**
40    Returns `n` rounded up to a multiple of alignment, which must be a power of 2.
41 */
42 @safe @nogc nothrow pure
43 size_t roundUpToAlignment(size_t n, uint alignment)
44 {
45     import std.math : isPowerOf2;
46     assert(alignment.isPowerOf2);
47     immutable uint slack = cast(uint) n & (alignment - 1);
    const result = slack
        ? n + alignment - slack
        : n;
51     assert(result >= n);
52     return result;
53 }
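
// Illustrative spot checks (added) of `roundUpToAlignment`.
@safe @nogc nothrow pure unittest
{
    assert(roundUpToAlignment(10, 8) == 16);
    assert(roundUpToAlignment(16, 8) == 16);
    assert(roundUpToAlignment(0, 16) == 0);
}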
54 
55 /**
56    Returns `n` rounded down to a multiple of alignment, which must be a power of 2.
57 */
58 @safe @nogc nothrow pure
59 size_t roundDownToAlignment(size_t n, uint alignment)
60 {
61     import std.math : isPowerOf2;
62     assert(alignment.isPowerOf2);
63     return n & ~size_t(alignment - 1);
64 }
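
// Illustrative spot checks (added) of `roundDownToAlignment`.
@safe @nogc nothrow pure unittest
{
    assert(roundDownToAlignment(10, 8) == 8);
    assert(roundDownToAlignment(16, 8) == 16);
}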
65 
66 /**
67    Aligns a pointer up to a specified alignment. The resulting pointer is greater
68    than or equal to the given pointer.
69 */
70 @nogc nothrow pure
71 void* alignUpTo(void* ptr, uint alignment)
72 {
73     import std.math : isPowerOf2;
74     assert(alignment.isPowerOf2);
75     immutable uint slack = cast(size_t) ptr & (alignment - 1U);
76     return slack ? ptr + alignment - slack : ptr;
77 }
78 
79 /**
80    Aligns a pointer down to a specified alignment. The resulting pointer is less
81    than or equal to the given pointer.
82 */
83 @nogc nothrow pure
84 void* alignDownTo(void* ptr, uint alignment)
85 {
86     import std.math : isPowerOf2;
87     assert(alignment.isPowerOf2);
88     return cast(void*) (cast(size_t) ptr & ~(alignment - 1UL));
89 }
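
// Illustrative sketch (added): rounds an address both ways with
// `alignUpTo`/`alignDownTo` on hand-constructed pointers.
@nogc nothrow @system unittest
{
    auto p = cast(void*) 100;
    assert(cast(size_t) p.alignUpTo(16) == 112);
    assert(cast(size_t) p.alignDownTo(16) == 96);
}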
90 
91 /**
92    A `Region` allocator allocates memory straight from one contiguous chunk.
93    There is no deallocation, and once the region is full, allocation requests
94    return `null`. Therefore, `Region`s are often used (a) in conjunction with
95    more sophisticated allocators; or (b) for batch-style very fast allocations
96    that deallocate everything at once.
97 
98    The region only stores three pointers, corresponding to the current position in
99    the store and the limits. One allocation entails rounding up the allocation
100    size for alignment purposes, bumping the current pointer, and comparing it
101    against the limit.
102 
103    If `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), `Region`
104    deallocates the chunk of memory during destruction.
105 
106    The `minAlign` parameter establishes alignment. If $(D minAlign > 1), the
107    sizes of all allocation requests are rounded up to a multiple of `minAlign`.
108    Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
109    control alignment externally.
110 
111 */
112 struct Region(ParentAllocator = NullAllocator,
113               uint minAlign = platformAlignment,
114               Flag!"growDownwards" growDownwards = No.growDownwards)
115 {
116     static assert(minAlign.isGoodStaticAlignment);
117     static assert(ParentAllocator.alignment >= minAlign);
118 
119     import std.traits : hasMember;
120     import std.typecons : Ternary;
121 
122     // state
123     /**
124        The _parent allocator. Depending on whether `ParentAllocator` holds state
125        or not, this is a member variable or an alias for
126        `ParentAllocator.instance`.
127     */
128     static if (stateSize!ParentAllocator)
129     {
130         ParentAllocator parent;
131     }
132     else
133     {
134         alias parent = ParentAllocator.instance;
135     }
136 
137     private void* _current, _begin, _end;
138 
139     private void* roundedBegin() const pure nothrow @trusted @nogc
140     {
141         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
142     }
143 
144     private void* roundedEnd() const pure nothrow @trusted @nogc
145     {
146         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
    }

    /**
149        Constructs a region backed by a user-provided store.
150        Assumes the memory was allocated with `ParentAllocator`
151        (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
152 
153        Params:
154        store = User-provided store backing up the region. If $(D
155        ParentAllocator) is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
156        have been allocated with `ParentAllocator`.
157        n = Bytes to allocate using `ParentAllocator`. This constructor is only
       defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
159        `parent.allocate(n)` returns `null`, the region will be initialized
160        as empty (correctly initialized but unable to allocate).
161     */
162     this(ubyte[] store) pure nothrow @nogc
163     {
164         _begin = store.ptr;
165         _end = store.ptr + store.length;
166         static if (growDownwards)
167             _current = roundedEnd();
168         else
169             _current = roundedBegin();
170     }
171 
172     /// Ditto
173     static if (!is(ParentAllocator == NullAllocator))
174         this(size_t n)
175         {
176             this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
177         }
178 
179     /*
      TODO: The postblit of `Region` should be disabled because such objects
181       should not be copied around naively.
182     */
183 
184     /**
185        If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
186        the region defines a destructor that uses `ParentAllocator.deallocate` to free the
187        memory chunk.
188     */
189     static if (!is(ParentAllocator == NullAllocator)
190                && hasMember!(ParentAllocator, "deallocate"))
191         ~this() @nogc
192         {
193             parent.deallocate(_begin[0 .. _end - _begin]);
194         }
195 
196     /**
       Rounds the given size up to a multiple of `alignment`.
198     */
199     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
200     {
201         return n.roundUpToAlignment(alignment);
202     }
203 
204     /**
205        Alignment offered.
206     */
207     alias alignment = minAlign;
208 
209     /**
210        Allocates `n` bytes of memory. The shortest path involves an alignment
211        adjustment (if $(D alignment > 1)), an increment, and a comparison.
212 
213        Params:
214        n = number of bytes to allocate
215 
216        Returns:
217        A properly-aligned buffer of size `n` or `null` if request could not
218        be satisfied.
219     */
220     void[] allocate(size_t n) pure nothrow @trusted @nogc
221     {
222         const rounded = goodAllocSize(n);
223         if (n == 0 || rounded < n || available < rounded) return null;
224 
225         static if (growDownwards)
226         {
227             assert(available >= rounded);
228             auto result = (_current - rounded)[0 .. n];
229             assert(result.ptr >= _begin);
230             _current = result.ptr;
231             assert(owns(result) == Ternary.yes);
232         }
233         else
234         {
235             auto result = _current[0 .. n];
236             _current += rounded;
237         }
238 
239         return result;
240     }
241 
242     /**
243        Allocates `n` bytes of memory aligned at alignment `a`.
244 
245        Params:
246        n = number of bytes to allocate
247        a = alignment for the allocated block
248 
249        Returns:
250        Either a suitable block of `n` bytes aligned at `a`, or `null`.
251     */
252     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
253     {
254         import std.math : isPowerOf2;
255         assert(a.isPowerOf2);
256 
257         const rounded = goodAllocSize(n);
258         if (n == 0 || rounded < n || available < rounded) return null;
259 
260         static if (growDownwards)
261         {
262             auto tmpCurrent = _current - rounded;
263             auto result = tmpCurrent.alignDownTo(a);
264             if (result <= tmpCurrent && result >= _begin)
265             {
266                 _current = result;
267                 return cast(void[]) result[0 .. n];
268             }
269         }
270         else
271         {
272             // Just bump the pointer to the next good allocation
273             auto newCurrent = _current.alignUpTo(a);
274             if (newCurrent < _current || newCurrent > _end)
275                 return null;
276 
277             auto save = _current;
278             _current = newCurrent;
279             auto result = allocate(n);
280             if (result.ptr)
281             {
282                 assert(result.length == n);
283                 return result;
284             }
285             // Failed, rollback
286             _current = save;
287         }
288         return null;
289     }
290 
291     /// Allocates and returns all memory available to this region.
292     void[] allocateAll() pure nothrow @trusted @nogc
293     {
294         static if (growDownwards)
295         {
296             auto result = _begin[0 .. available];
297             _current = _begin;
298         }
299         else
300         {
301             auto result = _current[0 .. available];
302             _current = _end;
303         }
304         return result;
305     }
306 
307     /**
308        Expands an allocated block in place. Expansion will succeed only if the
309        block is the last allocated. Defined only if `growDownwards` is
310        `No.growDownwards`.
311     */
312     static if (growDownwards == No.growDownwards)
313         bool expand(ref void[] b, size_t delta) pure nothrow @safe @nogc
314         {
315             assert(owns(b) == Ternary.yes || b is null);
316             assert((() @trusted => b.ptr + b.length <= _current)() || b is null);
317             if (b is null || delta == 0) return delta == 0;
318             auto newLength = b.length + delta;
319             if ((() @trusted => _current < b.ptr + b.length + alignment)())
320             {
321                 immutable currentGoodSize = this.goodAllocSize(b.length);
322                 immutable newGoodSize = this.goodAllocSize(newLength);
323                 immutable goodDelta = newGoodSize - currentGoodSize;
324                 // This was the last allocation! Allocate some more and we're done.
325                 if (goodDelta == 0
326                     || (() @trusted => allocate(goodDelta).length == goodDelta)())
327                 {
328                     b = (() @trusted => b.ptr[0 .. newLength])();
329                     assert((() @trusted => _current < b.ptr + b.length + alignment)());
330                     return true;
331                 }
332             }
333             return false;
334         }
335 
336     /**
337        Deallocates `b`. This works only if `b` was obtained as the last call
338        to `allocate`; otherwise (i.e. another allocation has occurred since) it
339        does nothing.
340 
341        Params:
342        b = Block previously obtained by a call to `allocate` against this
343        allocator (`null` is allowed).
344     */
345     bool deallocate(void[] b) pure nothrow @nogc
346     {
347         assert(owns(b) == Ternary.yes || b.ptr is null);
348         auto rounded = goodAllocSize(b.length);
349         static if (growDownwards)
350         {
351             if (b.ptr == _current)
352             {
353                 _current += rounded;
354                 return true;
355             }
356         }
357         else
358         {
359             if (b.ptr + rounded == _current)
360             {
361                 assert(b.ptr !is null || _current is null);
362                 _current = b.ptr;
363                 return true;
364             }
365         }
366         return false;
367     }
368 
369     /**
370        Deallocates all memory allocated by this region, which can be subsequently
371        reused for new allocations.
372     */
373     bool deallocateAll() @safe pure nothrow @nogc
374     {
375         static if (growDownwards)
376         {
377             _current = roundedEnd();
378         }
379         else
380         {
381             _current = roundedBegin();
382         }
383         return true;
384     }
385 
386     /**
387        Queries whether `b` has been allocated with this region.
388 
389        Params:
390        b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
391        `false`).
392 
393        Returns:
394        `true` if `b` has been allocated with this region, `false` otherwise.
395     */
396     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
397     {
398         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
399     }
400 
401     /**
402        Returns `Ternary.yes` if no memory has been allocated in this region,
403        `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
404     */
405     Ternary empty() const pure nothrow @safe @nogc
406     {
407         static if (growDownwards)
408             return Ternary(_current == roundedEnd());
409         else
410             return Ternary(_current == roundedBegin());
411     }
412 
413     /// Nonstandard property that returns bytes available for allocation.
414     size_t available() const @safe pure nothrow @nogc
415     {
416         static if (growDownwards)
417         {
418             return _current - _begin;
419         }
420         else
421         {
422             return _end - _current;
423         }
424     }
425 }
426 
427 ///
428 @system nothrow unittest
429 {
430     import std.algorithm.comparison : max;
431     import std.experimental.allocator.building_blocks.allocator_list
432     : AllocatorList;
433     import std.experimental.allocator.mallocator : Mallocator;
434     import std.typecons : Ternary;
435     // Create a scalable list of regions. Each gets at least 1MB at a time by
436     // using malloc.
437     auto batchAllocator = AllocatorList!(
438         (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
439         )();
    assert(batchAllocator.empty == Ternary.yes);
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    assert(batchAllocator.empty == Ternary.no);
444     // This will cause a second allocation
445     b = batchAllocator.allocate(2 * 1024 * 1024);
446     assert(b.length == 2 * 1024 * 1024);
447     // Destructor will free the memory
448 }
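
// Illustrative sketch (added): a `Region` over a user-provided stack buffer
// (default `NullAllocator` parent), exercising allocate/owns/deallocate/deallocateAll.
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;
    ubyte[1024] buf;
    auto r = Region!()(buf[]);
    assert(r.empty == Ternary.yes);
    auto a = r.allocate(64);
    assert(a.length == 64);
    assert(r.owns(a) == Ternary.yes);
    assert(r.deallocate(a)); // succeeds: `a` was the most recent allocation
    assert(r.deallocateAll());
    assert(r.empty == Ternary.yes);
}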
449 
450 // TODO activate
451 // @system nothrow @nogc unittest
452 // {
453 //     import std.experimental.allocator.mallocator : Mallocator;
454 //     import std.typecons : Ternary;
455 
456 //     static void testAlloc(Allocator)(ref Allocator a)
457 //     {
458 //         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.yes);
459 //         const b = a.allocate(101);
460 //         assert(b.length == 101);
461 //         assert((() nothrow @safe @nogc => a.owns(b))() == Ternary.yes);
462 
463 //         // Ensure deallocate inherits from parent allocators
464 //         auto c = a.allocate(42);
465 //         assert(c.length == 42);
466 //         assert((() nothrow @nogc => a.deallocate(c))());
467 //         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.no);
468 //     }
469 
470 //     // Create a 64 KB region allocated with malloc
471 //     auto reg = Region!(Mallocator, Mallocator.alignment,
472 //                        Yes.growDownwards)(1024 * 64);
473 //     testAlloc(reg);
474 
475 //     // Create a 64 KB shared region allocated with malloc
476 //     auto sharedReg = SharedRegion!(Mallocator, Mallocator.alignment,
477 //                                    Yes.growDownwards)(1024 * 64);
478 //     testAlloc(sharedReg);
479 // }
480 
481 @system nothrow @nogc unittest
482 {
483     import std.experimental.allocator.mallocator : AlignedMallocator;
484     import std.typecons : Ternary;
485 
486     ubyte[] buf = cast(ubyte[]) AlignedMallocator.instance.alignedAllocate(64, 64);
487     auto reg = Region!(NullAllocator, 64, Yes.growDownwards)(buf);
488     assert(reg.alignedAllocate(10, 32).length == 10);
489     assert(!reg.available);
490 }
491 
492 // TODO activate
493 // @system nothrow @nogc unittest
494 // {
495 //     // test 'this(ubyte[] store)' constructed regions properly clean up
496 //     // their inner storage after destruction
497 //     import std.experimental.allocator.mallocator : Mallocator;
498 
499 //     static shared struct LocalAllocator
500 //     {
501 //         nothrow @nogc:
502 //         enum alignment = Mallocator.alignment;
503 //         void[] buf;
504 //         bool deallocate(void[] b)
505 //         {
506 //             assert(buf.ptr == b.ptr && buf.length == b.length);
507 //             return true;
508 //         }
509 
510 //         void[] allocate(size_t n)
511 //         {
512 //             return null;
513 //         }
514 
515 //     }
516 
517 //     enum bufLen = 10 * Mallocator.alignment;
518 //     void[] tmp = Mallocator.instance.allocate(bufLen);
519 
520 //     LocalAllocator a;
521 //     a.buf = cast(typeof(a.buf)) tmp[1 .. $];
522 
523 //     auto reg = Region!(LocalAllocator, Mallocator.alignment,
524 //                        Yes.growDownwards)(cast(ubyte[]) a.buf);
525 //     auto sharedReg = SharedRegion!(LocalAllocator, Mallocator.alignment,
526 //                                    Yes.growDownwards)(cast(ubyte[]) a.buf);
527 //     reg.parent = a;
528 //     sharedReg.parent = a;
529 
530 //     Mallocator.instance.deallocate(tmp);
531 // }
532 
533 @system nothrow @nogc unittest
534 {
535     import std.experimental.allocator.mallocator : Mallocator;
536 
537     auto reg = Region!(Mallocator)(1024 * 64);
538     auto b = reg.allocate(101);
539     assert(b.length == 101);
540     assert((() pure nothrow @safe @nogc => reg.expand(b, 20))());
541     assert((() pure nothrow @safe @nogc => reg.expand(b, 73))());
542     assert((() pure nothrow @safe @nogc => !reg.expand(b, 1024 * 64))());
543     assert((() nothrow @nogc => reg.deallocateAll())());
544 }
545 
546 /**
547 
548    `InSituRegion` is a convenient region that carries its storage within itself
549    (in the form of a statically-sized array).
550 
551    The first template argument is the size of the region and the second is the
552    needed alignment. Depending on the alignment requested and platform details,
553    the actual available storage may be smaller than the compile-time parameter. To
554    make sure that at least `n` bytes are available in the region, use
555    $(D InSituRegion!(n + a - 1, a)).
556 
   Given that the most frequent use of `InSituRegion` is as a stack allocator, it
   allocates starting at the end on systems where the stack grows downwards, so
   that hot memory is used first.
560 
561 */
562 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
563 {
564     import std.algorithm.comparison : max;
565     import std.conv : to;
566     import std.traits : hasMember;
567     import std.typecons : Ternary;
568 
569     static assert(minAlign.isGoodStaticAlignment);
570     static assert(size >= minAlign);
571 
572     version (X86) enum growDownwards = Yes.growDownwards;
573     else version (X86_64) enum growDownwards = Yes.growDownwards;
574     else version (ARM) enum growDownwards = Yes.growDownwards;
575     else version (AArch64) enum growDownwards = Yes.growDownwards;
576     else version (PPC) enum growDownwards = Yes.growDownwards;
577     else version (PPC64) enum growDownwards = Yes.growDownwards;
578     else version (MIPS32) enum growDownwards = Yes.growDownwards;
579     else version (MIPS64) enum growDownwards = Yes.growDownwards;
580     else version (SPARC) enum growDownwards = Yes.growDownwards;
581     else version (SystemZ) enum growDownwards = Yes.growDownwards;
582     else static assert(0, "Dunno how the stack grows on this architecture.");
583 
584     @disable this(this);
585 
586     // state {
587     private Region!(NullAllocator, minAlign, growDownwards) _impl;
588     union
589     {
590         private ubyte[size] _store = void;
591         private double _forAlignmentOnly1 = void;
592     }
593     // }
594 
595     /**
596        An alias for `minAlign`, which must be a valid alignment (nonzero power
597        of 2). The start of the region and all allocation requests will be rounded
598        up to a multiple of the alignment.
599 
600        ----
601        InSituRegion!(4096) a1;
602        assert(a1.alignment == platformAlignment);
603        InSituRegion!(4096, 64) a2;
604        assert(a2.alignment == 64);
605        ----
606     */
607     alias alignment = minAlign;
608 
609     private void lazyInit()
610     {
611         assert(!_impl._current);
612         _impl = typeof(_impl)(_store);
613         assert(_impl._current.alignedAt(alignment));
614     }
615 
616     /**
       Allocates `n` bytes and returns them, or `null` if the region cannot
       accommodate the request.
620     */
621     void[] allocate(size_t n)
622     {
623         // Fast path
624     entry:
625         auto result = _impl.allocate(n);
626         if (result.length == n) return result;
627         // Slow path
628         if (_impl._current) return null; // no more room
629         lazyInit;
630         assert(_impl._current);
631         goto entry;
632     }
633 
634     /**
635        As above, but the memory allocated is aligned at `a` bytes.
636     */
637     void[] alignedAllocate(size_t n, uint a)
638     {
639         // Fast path
640     entry:
641         auto result = _impl.alignedAllocate(n, a);
642         if (result.length == n) return result;
643         // Slow path
644         if (_impl._current) return null; // no more room
645         lazyInit;
646         assert(_impl._current);
647         goto entry;
648     }
649 
650     /**
651        Deallocates `b`. This works only if `b` was obtained as the last call
652        to `allocate`; otherwise (i.e. another allocation has occurred since) it
       does nothing.
656 
657        Params:
658        b = Block previously obtained by a call to `allocate` against this
659        allocator (`null` is allowed).
660     */
661     bool deallocate(void[] b)
662     {
663         if (!_impl._current) return b is null;
664         return _impl.deallocate(b);
665     }
666 
667     /**
668        Returns `Ternary.yes` if `b` is the result of a previous allocation,
669        `Ternary.no` otherwise.
670     */
671     Ternary owns(const void[] b) pure nothrow @safe @nogc
672     {
673         if (!_impl._current) return Ternary.no;
674         return _impl.owns(b);
675     }
676 
677     /**
678        Expands an allocated block in place. Expansion will succeed only if the
679        block is the last allocated.
680     */
681     static if (hasMember!(typeof(_impl), "expand"))
682         bool expand(ref void[] b, size_t delta)
683         {
684             if (!_impl._current) lazyInit;
685             return _impl.expand(b, delta);
686         }
687 
688     /**
689        Deallocates all memory allocated with this allocator.
690     */
691     bool deallocateAll()
692     {
693         // We don't care to lazily init the region
694         return _impl.deallocateAll;
695     }
696 
697     /**
698        Allocates all memory available with this allocator.
699     */
700     void[] allocateAll()
701     {
702         if (!_impl._current) lazyInit;
703         return _impl.allocateAll;
704     }
705 
706     /**
707        Nonstandard function that returns the bytes available for allocation.
708     */
709     size_t available()
710     {
711         if (!_impl._current) lazyInit;
712         return _impl.available;
713     }
714 }
715 
716 ///
717 @system unittest
718 {
    // 128KB region, aligned at 16 bytes
720     InSituRegion!(128 * 1024, 16) r1;
721     auto a1 = r1.allocate(101);
722     assert(a1.length == 101);
723 
724     // 128KB region, with fallback to the garbage collector.
725     import std.experimental.allocator.building_blocks.fallback_allocator
726     : FallbackAllocator;
727     import std.experimental.allocator.building_blocks.free_list
728     : FreeList;
729     import std.experimental.allocator.building_blocks.bitmapped_block
730     : BitmappedBlock;
731     import std.experimental.allocator.gc_allocator : GCAllocator;
732     FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
733     const a2 = r2.allocate(102);
734     assert(a2.length == 102);
735 
736     // Reap with GC fallback.
737     InSituRegion!(128 * 1024, 8) tmp3;
738     FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
739     r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[]) (tmp3.allocateAll()));
740     const a3 = r3.allocate(103);
741     assert(a3.length == 103);
742 
743     // Reap/GC with a freelist for small objects up to 16 bytes.
744     InSituRegion!(128 * 1024, 64) tmp4;
745     FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
746     r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[]) (tmp4.allocateAll()));
747     const a4 = r4.allocate(104);
748     assert(a4.length == 104);
749 }
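
// Illustrative sketch (added): `InSituRegion` initializes its inner region
// lazily on first use.
@system unittest
{
    InSituRegion!(1024, 16) r;
    assert(r.available <= 1024); // first call triggers the lazy initialization
    auto b = r.allocate(100);
    assert(b.length == 100);
    assert(r.deallocate(b));
    assert(r.deallocateAll());
}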
750 
751 @system pure nothrow unittest
752 {
753     import std.typecons : Ternary;
754 
755     InSituRegion!(4096, 1) r1;
756     auto a = r1.allocate(2001);
757     assert(a.length == 2001);
758     import std.conv : text;
759     assert(r1.available == 2095, text(r1.available));
760     // Ensure deallocate inherits from parent
761     assert((() nothrow @nogc => r1.deallocate(a))());
762     assert((() nothrow @nogc => r1.deallocateAll())());
763 
764     InSituRegion!(65_536, 1024*4) r2;
765     assert(r2.available <= 65_536);
766     a = r2.allocate(2001);
767     assert(a.length == 2001);
768     const void[] buff = r2.allocate(42);
769     assert((() nothrow @safe @nogc => r2.owns(buff))() == Ternary.yes);
770     assert((() nothrow @nogc => r2.deallocateAll())());
771 }
772 
773 version(CRuntime_Musl)
774 {
775     // sbrk and brk are disabled in Musl:
776     // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
777     // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
}
else:

private extern(C) void* sbrk(long) nothrow @nogc;
780 private extern(C) int brk(shared void*) nothrow @nogc;
781 
782 /**
783 
   Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
   for Posix systems. Because `sbrk` is not thread-safe
   $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
   `SbrkRegion` uses a mutex internally. This implies that uncontrolled calls
   to `brk` and `sbrk` may adversely affect the workings of `SbrkRegion`.
790 
791 */
792 version(Posix) struct SbrkRegion(uint minAlign = platformAlignment)
793 {
794     import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
795         pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
796         PTHREAD_MUTEX_INITIALIZER;
797     private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
798     import std.typecons : Ternary;
799 
800     static assert(minAlign.isGoodStaticAlignment);
801     static assert(size_t.sizeof == (void*).sizeof);
802     private shared void* _brkInitial, _brkCurrent;
803 
804     /**
805        Instance shared by all callers.
806     */
807     static shared SbrkRegion instance;
808 
809     /**
810        Standard allocator primitives.
811     */
812     enum uint alignment = minAlign;
813 
814     /**
       Rounds the given size up to a multiple of `alignment`.
816     */
817     size_t goodAllocSize(size_t n) shared const pure nothrow @safe @nogc
818     {
819         return n.roundUpToMultipleOf(alignment);
820     }
821 
    /// Allocates `bytes` bytes by advancing the program break.
823     void[] allocate(size_t bytes) shared @trusted nothrow @nogc
824     {
825         // Take alignment rounding into account
826         const rounded = goodAllocSize(bytes);
827 
828         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
829         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
830                     || assert(0);
831         // Assume sbrk returns the old break. Most online documentation confirms
832         // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
833         // which claims the returned value is not portable.
834         auto p = sbrk(rounded);
835         if (p == cast(void*) -1)
836         {
837             return null;
838         }
839         if (!_brkInitial)
840         {
841             _brkInitial = cast(shared) p;
842             assert(cast(size_t) _brkInitial % minAlign == 0,
843                    "Too large alignment chosen for " ~ typeof(this).stringof);
844         }
845         _brkCurrent = cast(shared) (p + rounded);
846         return p[0 .. bytes];
847     }
848 
849     /// Ditto
850     void[] alignedAllocate(size_t bytes, uint a) shared @trusted nothrow @nogc
851     {
852         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
853         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
854                     || assert(0);
855         if (!_brkInitial)
856         {
857             // This is one extra call, but it'll happen only once.
858             _brkInitial = cast(shared) sbrk(0);
859             assert(cast(size_t) _brkInitial % minAlign == 0,
860                    "Too large alignment chosen for " ~ typeof(this).stringof);
861             (_brkInitial != cast(void*) -1) || assert(0);
862             _brkCurrent = _brkInitial;
863         }
864         immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
865             cast(size_t) _brkCurrent, a) - _brkCurrent;
866         // Still must make sure the total size is aligned to the allocator's
867         // alignment.
868         immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
869 
870         auto p = sbrk(rounded);
871         if (p == cast(void*) -1)
872         {
873             return null;
874         }
875         _brkCurrent = cast(shared) (p + rounded);
876         return p[delta .. delta + bytes];
877     }
878 
879     /**
880 
881        The `expand` method may only succeed if the argument is the last block
882        allocated. In that case, `expand` attempts to push the break pointer to
883        the right.
884 
885     */
886     bool expand(ref void[] b, size_t delta) shared nothrow @trusted @nogc
887     {
888         if (b is null || delta == 0) return delta == 0;
889         assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
890         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
891         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
892                     || assert(0);
893 
894         // Take alignment rounding into account
895         const rounded = goodAllocSize(b.length);
896 
897         const slack = rounded - b.length;
898         if (delta <= slack)
899         {
900             b = b.ptr[0 .. b.length + delta];
901             return true;
902         }
903 
904         if (_brkCurrent != b.ptr + rounded) return false;
905         // Great, can expand the last block
906         delta -= slack;
907 
908         const roundedDelta = goodAllocSize(delta);
909         auto p = sbrk(roundedDelta);
910         if (p == cast(void*) -1)
911         {
912             return false;
913         }
914         _brkCurrent = cast(shared) (p + roundedDelta);
915         b = b.ptr[0 .. b.length + slack + delta];
916         return true;
917     }
918 
919     /// Ditto
920     Ternary owns(const void[] b) shared pure nothrow @trusted @nogc
921     {
922         // No need to lock here.
923         assert(!_brkCurrent || !b || &b[0] + b.length <= _brkCurrent);
924         return Ternary(_brkInitial && b && (&b[0] >= _brkInitial));
925     }
926 
927     /**
928 
       The `deallocate` method only works (and returns `true`) on systems that
       support reducing the break address (i.e. accept calls to `sbrk` with
       negative offsets). OSX does not accept such calls. In addition, the
       argument must be the last block allocated.
933 
934     */
935     bool deallocate(void[] b) shared nothrow @nogc
936     {
937         // Take alignment rounding into account
938         const rounded = goodAllocSize(b.length);
939         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
940         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
941                     || assert(0);
942         if (_brkCurrent != b.ptr + rounded) return false;
943         assert(b.ptr >= _brkInitial);
944         if (sbrk(-rounded) == cast(void*) -1)
945             return false;
946         _brkCurrent = cast(shared) b.ptr;
947         return true;
948     }
949 
950     /**
       The `deallocateAll` method only works (and returns `true`) on systems
       that support reducing the break address (i.e. accept calls to `sbrk`
       with negative offsets). OSX does not accept such calls.
954     */
955     nothrow @nogc
956     bool deallocateAll() shared
957     {
958         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
959         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
960                     || assert(0);
961         return !_brkInitial || brk(_brkInitial) == 0;
962     }
963 
964     /// Standard allocator API.
965     Ternary empty() shared pure nothrow @safe @nogc
966     {
967         // Also works when they're both null.
968         return Ternary(_brkCurrent == _brkInitial);
969     }
970 }
971 
972 version(Posix) @system nothrow @nogc unittest
973 {
974     // Let's test the assumption that sbrk(n) returns the old address
975     const p1 = sbrk(0);
976     const p2 = sbrk(4096);
977     assert(p1 == p2);
978     const p3 = sbrk(0);
979     assert(p3 == p2 + 4096);
980     // Try to reset brk, but don't make a fuss if it doesn't work
981     sbrk(-4096);
982 }
983 
984 version(Posix) @system nothrow @nogc unittest
985 {
986     import std.typecons : Ternary;
987     import std.algorithm.comparison : min;
988     alias alloc = SbrkRegion!(min(8, platformAlignment)).instance;
989     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.yes);
990     auto a = alloc.alignedAllocate(2001, 4096);
991     assert(a.length == 2001);
992     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.no);
993     auto oldBrkCurr = alloc._brkCurrent;
994     auto b = alloc.allocate(2001);
995     assert(b.length == 2001);
996     assert((() nothrow @safe @nogc => alloc.expand(b, 0))());
997     assert(b.length == 2001);
998     // Expand with a small size to fit the rounded slack due to alignment
999     assert((() nothrow @safe @nogc => alloc.expand(b, 1))());
1000     assert(b.length == 2002);
1001     // Exceed the rounded slack due to alignment
1002     assert((() nothrow @safe @nogc => alloc.expand(b, 10))());
1003     assert(b.length == 2012);
1004     assert((() nothrow @safe @nogc => alloc.owns(a))() == Ternary.yes);
1005     assert((() nothrow @safe @nogc => alloc.owns(b))() == Ternary.yes);
1006     // reducing the brk does not work on OSX
    version(OSX) {} else
    {
        assert((() nothrow @nogc => alloc.deallocate(b))());
        // Check that expand and deallocate work well
        assert(oldBrkCurr == alloc._brkCurrent);
        assert((() nothrow @nogc => alloc.deallocate(a))());
        assert((() nothrow @nogc => alloc.deallocateAll())());
    }
1015     const void[] c = alloc.allocate(2001);
1016     assert(c.length == 2001);
1017     assert((() nothrow @safe @nogc => alloc.owns(c))() == Ternary.yes);
1018     assert((() nothrow @safe @nogc => alloc.owns(null))() == Ternary.no);
1019 }
1020 
1021 /**
   The thread-safe version of the `Region` allocator.
   Allocations and deallocations are lock-free, implemented using $(REF cas, core,atomic).
1024 */
1025 shared struct SharedRegion(ParentAllocator = NullAllocator,
1026                            uint minAlign = platformAlignment,
1027                            Flag!"growDownwards" growDownwards = No.growDownwards)
1028 {
1029     nothrow @nogc:
1030     static assert(minAlign.isGoodStaticAlignment);
1031     static assert(ParentAllocator.alignment >= minAlign);
1032 
1033     import std.traits : hasMember;
1034     import std.typecons : Ternary;
1035 
1036     // state
1037     /**
1038        The _parent allocator. Depending on whether `ParentAllocator` holds state
1039        or not, this is a member variable or an alias for
1040        `ParentAllocator.instance`.
1041     */
1042     static if (stateSize!ParentAllocator)
1043     {
1044         ParentAllocator parent;
1045     }
1046     else
1047     {
1048         alias parent = ParentAllocator.instance;
1049     }
1050     private shared void* _current, _begin, _end;
1051 
1052     private void* roundedBegin() const pure nothrow @trusted @nogc
1053     {
1054         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
1055     }
1056 
1057     private void* roundedEnd() const pure nothrow @trusted @nogc
1058     {
1059         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
1060     }
1061 
1062 
1063     /**
1064        Constructs a region backed by a user-provided store.
1065        Assumes the memory was allocated with `ParentAllocator`
1066        (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
1067 
1068        Params:
1069        store = User-provided store backing up the region. If `ParentAllocator`
1070        is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
1071        have been allocated with `ParentAllocator`.
1072        n = Bytes to allocate using `ParentAllocator`. This constructor is only
       defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
1074        `parent.allocate(n)` returns `null`, the region will be initialized
1075        as empty (correctly initialized but unable to allocate).
1076     */
1077     this(ubyte[] store) pure nothrow @nogc
1078     {
1079         _begin = cast(typeof(_begin)) store.ptr;
1080         _end = cast(typeof(_end)) (store.ptr + store.length);
1081         static if (growDownwards)
1082             _current = cast(typeof(_current)) roundedEnd();
1083         else
1084             _current = cast(typeof(_current)) roundedBegin();
1085     }
1086 
1087     /// Ditto
1088     static if (!is(ParentAllocator == NullAllocator))
1089         this(size_t n)
1090         {
1091             this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
1092         }
1093 
1094     /**
       Rounds the given size up to a multiple of `alignment`.
1096     */
1097     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
1098     {
1099         return n.roundUpToAlignment(alignment);
1100     }
1101 
1102     /**
1103        Alignment offered.
1104     */
1105     alias alignment = minAlign;
1106 
1107     /**
1108        Allocates `n` bytes of memory. The allocation is served by atomically incrementing
1109        a pointer which keeps track of the current used space.
1110 
1111        Params:
1112        n = number of bytes to allocate
1113 
1114        Returns:
1115        A properly-aligned buffer of size `n`, or `null` if request could not
1116        be satisfied.
1117     */
1118     void[] allocate(size_t n) pure nothrow @trusted @nogc
1119     {
1120         import core.atomic : cas, atomicLoad;
1121 
1122         if (n == 0) return null;
1123         const rounded = goodAllocSize(n);
1124 
1125         shared void* localCurrent, localNewCurrent;
1126         static if (growDownwards)
1127         {
1128             do
1129             {
1130                 localCurrent = atomicLoad(_current);
1131                 localNewCurrent = localCurrent - rounded;
1132                 if (localNewCurrent > localCurrent || localNewCurrent < _begin)
1133                     return null;
1134             } while (!cas(&_current, localCurrent, localNewCurrent));
1135 
1136             return cast(void[]) localNewCurrent[0 .. n];
1137         }
1138         else
1139         {
1140             do
1141             {
1142                 localCurrent = atomicLoad(_current);
1143                 localNewCurrent = localCurrent + rounded;
1144                 if (localNewCurrent < localCurrent || localNewCurrent > _end)
1145                     return null;
1146             } while (!cas(&_current, localCurrent, localNewCurrent));
1147 
1148             return cast(void[]) localCurrent[0 .. n];
1149         }
1150 
1151         assert(0, "Unexpected error in SharedRegion.allocate");
1152     }
1153 
1154     /**
1155        Deallocates `b`. This works only if `b` was obtained as the last call
1156        to `allocate`; otherwise (i.e. another allocation has occurred since) it
1157        does nothing.
1158 
1159        Params:
1160        b = Block previously obtained by a call to `allocate` against this
1161        allocator (`null` is allowed).
1162     */
1163     bool deallocate(void[] b) pure nothrow @nogc
1164     {
1165         import core.atomic : cas, atomicLoad;
1166 
1167         const rounded = goodAllocSize(b.length);
1168         shared void* localCurrent, localNewCurrent;
1169 
1170         // The cas is done only once, because only the last allocation can be reverted
1171         localCurrent = atomicLoad(_current);
1172         static if (growDownwards)
1173         {
1174             localNewCurrent = localCurrent + rounded;
1175             if (b.ptr == localCurrent)
1176                 return cas(&_current, localCurrent, localNewCurrent);
1177         }
1178         else
1179         {
1180             localNewCurrent = localCurrent - rounded;
1181             if (b.ptr == localNewCurrent)
1182                 return cas(&_current, localCurrent, localNewCurrent);
1183         }
1184 
1185         return false;
1186     }
1187 
1188     /**
1189        Allocates `n` bytes of memory aligned at alignment `a`.
1190        Params:
1191        n = number of bytes to allocate
1192        a = alignment for the allocated block
1193 
1194        Returns:
1195        Either a suitable block of `n` bytes aligned at `a`, or `null`.
1196     */
1197     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
1198     {
1199         import core.atomic : cas, atomicLoad;
1200         import std.math : isPowerOf2;
1201 
1202         assert(a.isPowerOf2);
1203         if (n == 0) return null;
1204 
1205         const rounded = goodAllocSize(n);
1206         shared void* localCurrent, localNewCurrent;
1207 
1208         static if (growDownwards)
1209         {
1210             do
1211             {
1212                 localCurrent = atomicLoad(_current);
1213                 auto alignedCurrent = cast(void*)(localCurrent - rounded);
1214                 localNewCurrent = cast(shared(void*)) alignedCurrent.alignDownTo(a);
1215                 if (alignedCurrent > localCurrent || localNewCurrent > alignedCurrent ||
1216                     localNewCurrent < _begin)
1217                     return null;
1218             } while (!cas(&_current, localCurrent, localNewCurrent));
1219 
1220             return cast(void[]) localNewCurrent[0 .. n];
1221         }
1222         else
1223         {
1224             do
1225             {
1226                 localCurrent = atomicLoad(_current);
1227                 auto alignedCurrent = alignUpTo(cast(void*) localCurrent, a);
1228                 localNewCurrent = cast(shared(void*)) (alignedCurrent + rounded);
1229                 if (alignedCurrent < localCurrent || localNewCurrent < alignedCurrent ||
1230                     localNewCurrent > _end)
1231                     return null;
1232             } while (!cas(&_current, localCurrent, localNewCurrent));
1233 
1234             return cast(void[]) (localNewCurrent - rounded)[0 .. n];
1235         }
1236 
1237         assert(0, "Unexpected error in SharedRegion.alignedAllocate");
1238     }
1239 
1240     /**
1241        Queries whether `b` has been allocated with this region.
1242 
1243        Params:
1244        b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
1245        `false`).
1246 
1247        Returns:
1248        `true` if `b` has been allocated with this region, `false` otherwise.
1249     */
1250     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
1251     {
1252         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
1253     }
1254 
1255     /**
1256        Returns `Ternary.yes` if no memory has been allocated in this region,
1257        `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
1258     */
1259     Ternary empty() const pure nothrow @safe @nogc
1260     {
1261         import core.atomic : atomicLoad;
1262 
1263         auto localCurrent = atomicLoad(_current);
1264         static if (growDownwards)
1265             return Ternary(localCurrent == roundedEnd());
1266         else
1267             return Ternary(localCurrent == roundedBegin());
1268     }
1269 
1270     /**
1271        If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
1272        the region defines a destructor that uses `ParentAllocator.deallocate` to free the
1273        memory chunk.
1274     */
1275     static if (!is(ParentAllocator == NullAllocator)
1276                && hasMember!(ParentAllocator, "deallocate"))
1277         ~this() @nogc
1278         {
1279             parent.deallocate(cast(void[]) _begin[0 .. _end - _begin]);
1280         }
1281 }
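
// Illustrative sketch (added): single-threaded smoke test of `SharedRegion`
// over a static buffer (default `NullAllocator` parent); the threaded tests
// remain TODO below.
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;
    static ubyte[256] buf;
    auto r = SharedRegion!()(buf[]);
    assert(r.empty == Ternary.yes);
    auto b = r.allocate(32);
    assert(b.length == 32);
    assert(r.owns(b) == Ternary.yes);
    assert(r.deallocate(b)); // last (only) allocation, so this succeeds
    assert(r.empty == Ternary.yes);
}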
1282 
1283 // TODO activate
1284 // @system unittest
1285 // {
1286 //     import std.experimental.allocator.mallocator : Mallocator;
1287 
1288 //     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1289 //     {
1290 //         import core.thread : ThreadGroup;
1291 //         import std.algorithm.sorting : sort;
1292 //         import core.internal.spinlock : SpinLock;
1293 
1294 //         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1295 //         enum numThreads = 100;
1296 //         void[][numThreads] buf;
1297 //         size_t count = 0;
1298 
1299 //         void fun()
1300 //         {
1301 //             void[] b = a.allocate(63);
1302 //             assert(b.length == 63);
1303 
1304 //             lock.lock();
1305 //             buf[count] = b;
1306 //             count++;
1307 //             lock.unlock();
1308 //         }
1309 
1310 //         auto tg = new ThreadGroup;
1311 //         foreach (i; 0 .. numThreads)
1312 //         {
1313 //             tg.create(&fun);
1314 //         }
1315 //         tg.joinAll();
1316 
1317 //         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. numThreads]);
1318 //         foreach (i; 0 .. numThreads - 1)
1319 //         {
1320 //             assert(buf[i].ptr + a.goodAllocSize(buf[i].length) == buf[i + 1].ptr);
1321 //         }
1322 
1323 //         assert(!a.deallocate(buf[1]));
1324 
1325 //         foreach (i; 0 .. numThreads)
1326 //         {
1327 //             if (!growDownwards)
1328 //                 assert(a.deallocate(buf[numThreads - 1 - i]));
1329 //             else
1330 //                 assert(a.deallocate(buf[i]));
1331 //         }
1332 //     }
1333 
1334 //     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1335 //                             Yes.growDownwards)(1024 * 64);
1336 
1337 //     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1338 //                             No.growDownwards)(1024 * 64);
1339 
1340 //     testAlloc(a1, true);
1341 //     testAlloc(a2, false);
1342 // }
1343 
1344 // TODO activate
1345 // @system unittest
1346 // {
1347 //     import std.experimental.allocator.mallocator : Mallocator;
1348 
1349 //     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1350 //     {
1351 //         import core.thread : ThreadGroup;
1352 //         import std.algorithm.sorting : sort;
1353 //         import core.internal.spinlock : SpinLock;
1354 
1355 //         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1356 //         enum numThreads = 100;
1357 //         void[][2 * numThreads] buf;
1358 //         size_t count = 0;
1359 
1360 //         void fun()
1361 //         {
1362 //             void[] b = a.allocate(63);
1363 //             assert(b.length == 63);
1364 
1365 //             lock.lock();
1366 //             buf[count] = b;
1367 //             count++;
1368 //             lock.unlock();
1369 
1370 //             b = a.alignedAllocate(63, 32);
1371 //             assert(b.length == 63);
1372 //             assert(cast(size_t) b.ptr % 32 == 0);
1373 
1374 //             lock.lock();
1375 //             buf[count] = b;
1376 //             count++;
1377 //             lock.unlock();
1378 //         }
1379 
1380 //         auto tg = new ThreadGroup;
1381 //         foreach (i; 0 .. numThreads)
1382 //         {
1383 //             tg.create(&fun);
1384 //         }
1385 //         tg.joinAll();
1386 
1387 //         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. 2 * numThreads]);
1388 //         foreach (i; 0 .. 2 * numThreads - 1)
1389 //         {
1390 //             assert(buf[i].ptr + buf[i].length <= buf[i + 1].ptr);
1391 //         }
1392 
1393 //         assert(!a.deallocate(buf[1]));
1394 //     }
1395 
1396 //     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1397 //                             Yes.growDownwards)(1024 * 64);
1398 
1399 //     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1400 //                             No.growDownwards)(1024 * 64);
1401 
1402 //     testAlloc(a1, true);
1403 //     testAlloc(a2, false);
1404 // }