1 module nxt.region_allocator;
2 
3 // static if (__VERSION__ >= 2087)
4 // {
5 //     version(LDC) static assert(0, "TODO: Use std.experimental.allocator.building_blocks.region instead of this module");
6 // }
7 
8 import std.experimental.allocator.building_blocks.null_allocator;
9 import std.experimental.allocator.common;
10 import std.typecons : Flag, Yes, No;
11 
12 /**
13    Returns `true` if `ptr` is aligned at `alignment`.
14 */
15 @nogc nothrow pure
16 bool alignedAt(T)(T* ptr, uint alignment)
17 {
18     return cast(size_t) ptr % alignment == 0;
19 }
20 
21 /**
   Returns `s` rounded up to a multiple of `base`.
23 */
24 @safe @nogc nothrow pure
25 size_t roundUpToMultipleOf(size_t s, uint base)
26 {
27     assert(base);
28     auto rem = s % base;
29     return rem ? s + base - rem : s;
30 }
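
// A brief, illustrative sanity check of the two helpers above (values follow
// directly from their definitions; the buffer is only used to obtain a pointer).
@system @nogc nothrow pure unittest
{
    assert(roundUpToMultipleOf(0, 8) == 0);
    assert(roundUpToMultipleOf(1, 8) == 8);
    assert(roundUpToMultipleOf(8, 8) == 8);
    assert(roundUpToMultipleOf(9, 8) == 16);

    ubyte[32] buf;
    auto p = cast(void*) roundUpToMultipleOf(cast(size_t) buf.ptr, 16);
    assert(p.alignedAt(16));
    assert(!(p + 1).alignedAt(16));
}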
31 
32 @safe @nogc nothrow pure
33 bool isGoodStaticAlignment(uint x)
34 {
35     import std.math : isPowerOf2;
36     return x.isPowerOf2;
37 }
38 
39 /**
40    Returns `n` rounded up to a multiple of alignment, which must be a power of 2.
41 */
42 @safe @nogc nothrow pure
43 size_t roundUpToAlignment(size_t n, uint alignment)
44 {
45     import std.math : isPowerOf2;
46     assert(alignment.isPowerOf2);
47     immutable uint slack = cast(uint) n & (alignment - 1);
    const result = slack
        ? n + alignment - slack
        : n;
51     assert(result >= n);
52     return result;
53 }
54 
55 /**
56    Returns `n` rounded down to a multiple of alignment, which must be a power of 2.
57 */
58 @safe @nogc nothrow pure
59 size_t roundDownToAlignment(size_t n, uint alignment)
60 {
61     import std.math : isPowerOf2;
62     assert(alignment.isPowerOf2);
63     return n & ~size_t(alignment - 1);
64 }
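
// Illustrative values for the power-of-two rounding helpers above.
@safe @nogc nothrow pure unittest
{
    assert(roundUpToAlignment(17, 16) == 32);
    assert(roundUpToAlignment(32, 16) == 32);
    assert(roundDownToAlignment(17, 16) == 16);
    assert(roundDownToAlignment(32, 16) == 32);
}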
65 
66 /**
67    Aligns a pointer up to a specified alignment. The resulting pointer is greater
68    than or equal to the given pointer.
69 */
70 @nogc nothrow pure
71 void* alignUpTo(void* ptr, uint alignment)
72 {
73     import std.math : isPowerOf2;
74     assert(alignment.isPowerOf2);
75     immutable uint slack = cast(size_t) ptr & (alignment - 1U);
76     return slack ? ptr + alignment - slack : ptr;
77 }
78 
79 /**
80    Aligns a pointer down to a specified alignment. The resulting pointer is less
81    than or equal to the given pointer.
82 */
83 @nogc nothrow pure
84 void* alignDownTo(void* ptr, uint alignment)
85 {
86     import std.math : isPowerOf2;
87     assert(alignment.isPowerOf2);
88     return cast(void*) (cast(size_t) ptr & ~(alignment - 1UL));
89 }
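
// Pointer-level counterparts of the rounding helpers: a short illustrative
// check that aligning up never moves a pointer down, and vice versa.
@system @nogc nothrow pure unittest
{
    ubyte[64] buf;
    void* p = buf.ptr + 1;
    auto up = p.alignUpTo(16);
    auto down = p.alignDownTo(16);
    assert(up >= p && cast(size_t) up % 16 == 0);
    assert(down <= p && cast(size_t) down % 16 == 0);
}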
90 
91 /**
92    A `Region` allocator allocates memory straight from one contiguous chunk.
93    There is no deallocation, and once the region is full, allocation requests
94    return `null`. Therefore, `Region`s are often used (a) in conjunction with
95    more sophisticated allocators; or (b) for batch-style very fast allocations
96    that deallocate everything at once.
97 
98    The region only stores three pointers, corresponding to the current position in
99    the store and the limits. One allocation entails rounding up the allocation
100    size for alignment purposes, bumping the current pointer, and comparing it
101    against the limit.
102 
103    If `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), `Region`
104    deallocates the chunk of memory during destruction.
105 
106    The `minAlign` parameter establishes alignment. If $(D minAlign > 1), the
107    sizes of all allocation requests are rounded up to a multiple of `minAlign`.
108    Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
109    control alignment externally.
110 
111 */
112 struct Region(ParentAllocator = NullAllocator,
113               uint minAlign = platformAlignment,
114               Flag!"growDownwards" growDownwards = No.growDownwards)
115 {
116     static assert(minAlign.isGoodStaticAlignment);
117     static assert(ParentAllocator.alignment >= minAlign);
118 
119     import std.typecons : Ternary;
120 
121     // state
122     /**
123        The _parent allocator. Depending on whether `ParentAllocator` holds state
124        or not, this is a member variable or an alias for
125        `ParentAllocator.instance`.
126     */
127     static if (stateSize!ParentAllocator)
128     {
129         ParentAllocator parent;
130     }
131     else
132     {
133         alias parent = ParentAllocator.instance;
134     }
135 
136     private void* _current, _begin, _end;
137 
138     private void* roundedBegin() const pure nothrow @trusted @nogc
139     {
140         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
141     }
142 
143     private void* roundedEnd() const pure nothrow @trusted @nogc
144     {
145         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
146     }
147     /**
148        Constructs a region backed by a user-provided store.
149        Assumes the memory was allocated with `ParentAllocator`
150        (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
151 
152        Params:
153        store = User-provided store backing up the region. If $(D
154        ParentAllocator) is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
155        have been allocated with `ParentAllocator`.
156        n = Bytes to allocate using `ParentAllocator`. This constructor is only
       defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
158        `parent.allocate(n)` returns `null`, the region will be initialized
159        as empty (correctly initialized but unable to allocate).
160     */
161     this(ubyte[] store) pure nothrow @nogc
162     {
163         _begin = store.ptr;
164         _end = store.ptr + store.length;
165         static if (growDownwards)
166             _current = roundedEnd();
167         else
168             _current = roundedBegin();
169     }
170 
171     /// Ditto
172     static if (!is(ParentAllocator == NullAllocator))
173         this(size_t n)
174         {
175             this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
176         }
177 
178     /*
      TODO: The postblit of `Region` should be disabled because such objects
180       should not be copied around naively.
181     */
182 
183     /**
184        If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
185        the region defines a destructor that uses `ParentAllocator.deallocate` to free the
186        memory chunk.
187     */
188     static if (!is(ParentAllocator == NullAllocator)
189                && __traits(hasMember, ParentAllocator, "deallocate"))
190         ~this() nothrow @nogc
191         {
192             parent.deallocate(_begin[0 .. _end - _begin]);
193         }
194 
195     /**
       Rounds the given size to a multiple of the `alignment`.
197     */
198     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
199     {
200         return n.roundUpToAlignment(alignment);
201     }
202 
203     /**
204        Alignment offered.
205     */
206     alias alignment = minAlign;
207 
208     /**
209        Allocates `n` bytes of memory. The shortest path involves an alignment
210        adjustment (if $(D alignment > 1)), an increment, and a comparison.
211 
212        Params:
213        n = number of bytes to allocate
214 
215        Returns:
216        A properly-aligned buffer of size `n` or `null` if request could not
217        be satisfied.
218     */
219     void[] allocate(size_t n) pure nothrow @trusted @nogc
220     {
221         const rounded = goodAllocSize(n);
222         if (n == 0 || rounded < n || available < rounded) return null;
223 
224         static if (growDownwards)
225         {
226             assert(available >= rounded);
227             auto result = (_current - rounded)[0 .. n];
228             assert(result.ptr >= _begin);
229             _current = result.ptr;
230             assert(owns(result) == Ternary.yes);
231         }
232         else
233         {
234             auto result = _current[0 .. n];
235             _current += rounded;
236         }
237 
238         return result;
239     }
240 
241     /**
242        Allocates `n` bytes of memory aligned at alignment `a`.
243 
244        Params:
245        n = number of bytes to allocate
246        a = alignment for the allocated block
247 
248        Returns:
249        Either a suitable block of `n` bytes aligned at `a`, or `null`.
250     */
251     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
252     {
253         import std.math : isPowerOf2;
254         assert(a.isPowerOf2);
255 
256         const rounded = goodAllocSize(n);
257         if (n == 0 || rounded < n || available < rounded) return null;
258 
259         static if (growDownwards)
260         {
261             auto tmpCurrent = _current - rounded;
262             auto result = tmpCurrent.alignDownTo(a);
263             if (result <= tmpCurrent && result >= _begin)
264             {
265                 _current = result;
266                 return cast(void[]) result[0 .. n];
267             }
268         }
269         else
270         {
271             // Just bump the pointer to the next good allocation
272             auto newCurrent = _current.alignUpTo(a);
273             if (newCurrent < _current || newCurrent > _end)
274                 return null;
275 
276             auto save = _current;
277             _current = newCurrent;
278             auto result = allocate(n);
279             if (result.ptr)
280             {
281                 assert(result.length == n);
282                 return result;
283             }
284             // Failed, rollback
285             _current = save;
286         }
287         return null;
288     }
289 
290     /// Allocates and returns all memory available to this region.
291     void[] allocateAll() pure nothrow @trusted @nogc
292     {
293         static if (growDownwards)
294         {
295             auto result = _begin[0 .. available];
296             _current = _begin;
297         }
298         else
299         {
300             auto result = _current[0 .. available];
301             _current = _end;
302         }
303         return result;
304     }
305 
306     /**
307        Expands an allocated block in place. Expansion will succeed only if the
308        block is the last allocated. Defined only if `growDownwards` is
309        `No.growDownwards`.
310     */
311     static if (growDownwards == No.growDownwards)
312         bool expand(ref void[] b, size_t delta) pure nothrow @safe @nogc
313         {
314             assert(owns(b) == Ternary.yes || b is null);
315             assert((() @trusted => b.ptr + b.length <= _current)() || b is null);
316             if (b is null || delta == 0) return delta == 0;
317             auto newLength = b.length + delta;
318             if ((() @trusted => _current < b.ptr + b.length + alignment)())
319             {
320                 immutable currentGoodSize = this.goodAllocSize(b.length);
321                 immutable newGoodSize = this.goodAllocSize(newLength);
322                 immutable goodDelta = newGoodSize - currentGoodSize;
323                 // This was the last allocation! Allocate some more and we're done.
324                 if (goodDelta == 0
325                     || (() @trusted => allocate(goodDelta).length == goodDelta)())
326                 {
327                     b = (() @trusted => b.ptr[0 .. newLength])();
328                     assert((() @trusted => _current < b.ptr + b.length + alignment)());
329                     return true;
330                 }
331             }
332             return false;
333         }
334 
335     /**
336        Deallocates `b`. This works only if `b` was obtained as the last call
337        to `allocate`; otherwise (i.e. another allocation has occurred since) it
338        does nothing.
339 
340        Params:
341        b = Block previously obtained by a call to `allocate` against this
342        allocator (`null` is allowed).
343     */
344     bool deallocate(void[] b) pure nothrow @nogc
345     {
346         assert(owns(b) == Ternary.yes || b.ptr is null);
347         auto rounded = goodAllocSize(b.length);
348         static if (growDownwards)
349         {
350             if (b.ptr == _current)
351             {
352                 _current += rounded;
353                 return true;
354             }
355         }
356         else
357         {
358             if (b.ptr + rounded == _current)
359             {
360                 assert(b.ptr !is null || _current is null);
361                 _current = b.ptr;
362                 return true;
363             }
364         }
365         return false;
366     }
367 
368     /**
369        Deallocates all memory allocated by this region, which can be subsequently
370        reused for new allocations.
371     */
372     bool deallocateAll() @safe pure nothrow @nogc
373     {
374         static if (growDownwards)
375         {
376             _current = roundedEnd();
377         }
378         else
379         {
380             _current = roundedBegin();
381         }
382         return true;
383     }
384 
385     /**
386        Queries whether `b` has been allocated with this region.
387 
388        Params:
389        b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
390        `false`).
391 
392        Returns:
393        `true` if `b` has been allocated with this region, `false` otherwise.
394     */
395     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
396     {
397         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
398     }
399 
400     /**
401        Returns `Ternary.yes` if no memory has been allocated in this region,
402        `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
403     */
404     Ternary empty() const pure nothrow @safe @nogc
405     {
406         static if (growDownwards)
407             return Ternary(_current == roundedEnd());
408         else
409             return Ternary(_current == roundedBegin());
410     }
411 
412     /// Nonstandard property that returns bytes available for allocation.
413     size_t available() const @safe pure nothrow @nogc
414     {
415         static if (growDownwards)
416         {
417             return _current - _begin;
418         }
419         else
420         {
421             return _end - _current;
422         }
423     }
424 }
425 
426 ///
427 @system nothrow unittest
428 {
429     import std.algorithm.comparison : max;
430     import std.experimental.allocator.building_blocks.allocator_list
431     : AllocatorList;
432     import std.experimental.allocator.mallocator : Mallocator;
433     import std.typecons : Ternary;
434     // Create a scalable list of regions. Each gets at least 1MB at a time by
435     // using malloc.
436     auto batchAllocator = AllocatorList!(
437         (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
438         )();
    assert(batchAllocator.empty == Ternary.yes);
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    assert(batchAllocator.empty == Ternary.no);
443     // This will cause a second allocation
444     b = batchAllocator.allocate(2 * 1024 * 1024);
445     assert(b.length == 2 * 1024 * 1024);
446     // Destructor will free the memory
447 }
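
// A minimal sketch of a `Region` backed by a caller-provided buffer (no parent
// allocator): only the most recently allocated block can be deallocated, and
// `deallocateAll` resets the whole region. Buffer size is arbitrary.
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;

    ubyte[1024] store;
    auto reg = Region!(NullAllocator)(store[]);
    assert(reg.empty == Ternary.yes);
    auto a = reg.allocate(17);
    auto b = reg.allocate(8);
    assert(a.length == 17 && b.length == 8);
    assert(reg.owns(a) == Ternary.yes);
    assert(!reg.deallocate(a)); // not the last allocation
    assert(reg.deallocate(b));  // last allocation, succeeds
    assert(reg.deallocateAll());
    assert(reg.empty == Ternary.yes);
}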
448 
449 // TODO: activate
450 // @system nothrow @nogc unittest
451 // {
452 //     import std.experimental.allocator.mallocator : Mallocator;
453 //     import std.typecons : Ternary;
454 
455 //     static void testAlloc(Allocator)(ref Allocator a)
456 //     {
457 //         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.yes);
458 //         const b = a.allocate(101);
459 //         assert(b.length == 101);
460 //         assert((() nothrow @safe @nogc => a.owns(b))() == Ternary.yes);
461 
462 //         // Ensure deallocate inherits from parent allocators
463 //         auto c = a.allocate(42);
464 //         assert(c.length == 42);
465 //         assert((() nothrow @nogc => a.deallocate(c))());
466 //         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.no);
467 //     }
468 
469 //     // Create a 64 KB region allocated with malloc
470 //     auto reg = Region!(Mallocator, Mallocator.alignment,
471 //                        Yes.growDownwards)(1024 * 64);
472 //     testAlloc(reg);
473 
474 //     // Create a 64 KB shared region allocated with malloc
475 //     auto sharedReg = SharedRegion!(Mallocator, Mallocator.alignment,
476 //                                    Yes.growDownwards)(1024 * 64);
477 //     testAlloc(sharedReg);
478 // }
479 
480 @system nothrow @nogc unittest
481 {
482     import std.experimental.allocator.mallocator : AlignedMallocator;
483     import std.typecons : Ternary;
484 
485     ubyte[] buf = cast(ubyte[]) AlignedMallocator.instance.alignedAllocate(64, 64);
486     auto reg = Region!(NullAllocator, 64, Yes.growDownwards)(buf);
487     assert(reg.alignedAllocate(10, 32).length == 10);
488     assert(!reg.available);
489 }
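
// An illustrative sketch of a downward-growing region: blocks are handed out at
// decreasing addresses and can be freed in reverse (stack) order.
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;

    ubyte[256] store;
    auto reg = Region!(NullAllocator, 16, Yes.growDownwards)(store[]);
    auto a = reg.allocate(16);
    auto b = reg.allocate(16);
    assert(b.ptr < a.ptr); // grows towards lower addresses
    assert(reg.deallocate(b));
    assert(reg.deallocate(a));
    assert(reg.empty == Ternary.yes);
}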
490 
491 // TODO: activate
492 // @system nothrow @nogc unittest
493 // {
494 //     // test 'this(ubyte[] store)' constructed regions properly clean up
495 //     // their inner storage after destruction
496 //     import std.experimental.allocator.mallocator : Mallocator;
497 
498 //     static shared struct LocalAllocator
499 //     {
500 //         nothrow @nogc:
501 //         enum alignment = Mallocator.alignment;
502 //         void[] buf;
503 //         bool deallocate(void[] b)
504 //         {
505 //             assert(buf.ptr == b.ptr && buf.length == b.length);
506 //             return true;
507 //         }
508 
509 //         void[] allocate(size_t n)
510 //         {
511 //             return null;
512 //         }
513 
514 //     }
515 
516 //     enum bufLen = 10 * Mallocator.alignment;
517 //     void[] tmp = Mallocator.instance.allocate(bufLen);
518 
519 //     LocalAllocator a;
520 //     a.buf = cast(typeof(a.buf)) tmp[1 .. $];
521 
522 //     auto reg = Region!(LocalAllocator, Mallocator.alignment,
523 //                        Yes.growDownwards)(cast(ubyte[]) a.buf);
524 //     auto sharedReg = SharedRegion!(LocalAllocator, Mallocator.alignment,
525 //                                    Yes.growDownwards)(cast(ubyte[]) a.buf);
526 //     reg.parent = a;
527 //     sharedReg.parent = a;
528 
529 //     Mallocator.instance.deallocate(tmp);
530 // }
531 
532 @system nothrow @nogc unittest
533 {
534     import std.experimental.allocator.mallocator : Mallocator;
535 
536     auto reg = Region!(Mallocator)(1024 * 64);
537     auto b = reg.allocate(101);
538     assert(b.length == 101);
539     assert((() pure nothrow @safe @nogc => reg.expand(b, 20))());
540     assert((() pure nothrow @safe @nogc => reg.expand(b, 73))());
541     assert((() pure nothrow @safe @nogc => !reg.expand(b, 1024 * 64))());
542     assert((() nothrow @nogc => reg.deallocateAll())());
543 }
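
// Sketch of `allocateAll`: grab whatever capacity is left in a single slice;
// afterwards the region is exhausted until `deallocateAll` is called.
@system nothrow @nogc unittest
{
    ubyte[128] store;
    auto reg = Region!(NullAllocator)(store[]);
    auto whole = reg.allocateAll();
    assert(whole.length > 0 && whole.length <= store.length);
    assert(!reg.available);
    assert(reg.allocate(1) is null); // region is exhausted
    assert(reg.deallocateAll());
}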
544 
545 /**
546 
547    `InSituRegion` is a convenient region that carries its storage within itself
548    (in the form of a statically-sized array).
549 
550    The first template argument is the size of the region and the second is the
551    needed alignment. Depending on the alignment requested and platform details,
552    the actual available storage may be smaller than the compile-time parameter. To
553    make sure that at least `n` bytes are available in the region, use
554    $(D InSituRegion!(n + a - 1, a)).
555 
   Given that the most frequent use of `InSituRegion` is as a stack allocator, it
   allocates starting at the end on systems where the stack grows downwards, such
   that hot memory is used first.
559 
560 */
561 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
562 {
563     import std.algorithm.comparison : max;
564     import std.conv : to;
565     import std.typecons : Ternary;
566 
567     static assert(minAlign.isGoodStaticAlignment);
568     static assert(size >= minAlign);
569 
570     version (X86) enum growDownwards = Yes.growDownwards;
571     else version (X86_64) enum growDownwards = Yes.growDownwards;
572     else version (ARM) enum growDownwards = Yes.growDownwards;
573     else version (AArch64) enum growDownwards = Yes.growDownwards;
574     else version (PPC) enum growDownwards = Yes.growDownwards;
575     else version (PPC64) enum growDownwards = Yes.growDownwards;
576     else version (MIPS32) enum growDownwards = Yes.growDownwards;
577     else version (MIPS64) enum growDownwards = Yes.growDownwards;
578     else version (SPARC) enum growDownwards = Yes.growDownwards;
579     else version (SystemZ) enum growDownwards = Yes.growDownwards;
580     else static assert(0, "Dunno how the stack grows on this architecture.");
581 
582     @disable this(this);
583 
584     // state {
585     private Region!(NullAllocator, minAlign, growDownwards) _impl;
586     union
587     {
588         private ubyte[size] _store = void;
589         private double _forAlignmentOnly1 = void;
590     }
591     // }
592 
593     /**
594        An alias for `minAlign`, which must be a valid alignment (nonzero power
595        of 2). The start of the region and all allocation requests will be rounded
596        up to a multiple of the alignment.
597 
598        ----
599        InSituRegion!(4096) a1;
600        assert(a1.alignment == platformAlignment);
601        InSituRegion!(4096, 64) a2;
602        assert(a2.alignment == 64);
603        ----
604     */
605     alias alignment = minAlign;
606 
607     private void lazyInit()
608     {
609         assert(!_impl._current);
610         _impl = typeof(_impl)(_store);
611         assert(_impl._current.alignedAt(alignment));
612     }
613 
614     /**
       Allocates `n` bytes and returns them, or `null` if the region cannot
       accommodate the request. For efficiency reasons, if $(D n == 0) the
       function returns an empty slice.
618     */
619     void[] allocate(size_t n)
620     {
621         // Fast path
622     entry:
623         auto result = _impl.allocate(n);
624         if (result.length == n) return result;
625         // Slow path
626         if (_impl._current) return null; // no more room
627         lazyInit;
628         assert(_impl._current);
629         goto entry;
630     }
631 
632     /**
633        As above, but the memory allocated is aligned at `a` bytes.
634     */
635     void[] alignedAllocate(size_t n, uint a)
636     {
637         // Fast path
638     entry:
639         auto result = _impl.alignedAllocate(n, a);
640         if (result.length == n) return result;
641         // Slow path
642         if (_impl._current) return null; // no more room
643         lazyInit;
644         assert(_impl._current);
645         goto entry;
646     }
647 
648     /**
       Deallocates `b`. This works only if `b` was obtained as the last call
       to `allocate`; otherwise (i.e. another allocation has occurred since) it
       does nothing.
654 
655        Params:
656        b = Block previously obtained by a call to `allocate` against this
657        allocator (`null` is allowed).
658     */
659     bool deallocate(void[] b)
660     {
661         if (!_impl._current) return b is null;
662         return _impl.deallocate(b);
663     }
664 
665     /**
666        Returns `Ternary.yes` if `b` is the result of a previous allocation,
667        `Ternary.no` otherwise.
668     */
669     Ternary owns(const void[] b) pure nothrow @safe @nogc
670     {
671         if (!_impl._current) return Ternary.no;
672         return _impl.owns(b);
673     }
674 
675     /**
676        Expands an allocated block in place. Expansion will succeed only if the
677        block is the last allocated.
678     */
679     static if (__traits(hasMember, typeof(_impl), "expand"))
680         bool expand(ref void[] b, size_t delta)
681         {
682             if (!_impl._current) lazyInit;
683             return _impl.expand(b, delta);
684         }
685 
686     /**
687        Deallocates all memory allocated with this allocator.
688     */
689     bool deallocateAll()
690     {
691         // We don't care to lazily init the region
692         return _impl.deallocateAll;
693     }
694 
695     /**
696        Allocates all memory available with this allocator.
697     */
698     void[] allocateAll()
699     {
700         if (!_impl._current) lazyInit;
701         return _impl.allocateAll;
702     }
703 
704     /**
705        Nonstandard function that returns the bytes available for allocation.
706     */
707     size_t available()
708     {
709         if (!_impl._current) lazyInit;
710         return _impl.available;
711     }
712 }
713 
714 ///
715 @system unittest
716 {
    // 128KB region, aligned at 16 bytes
718     InSituRegion!(128 * 1024, 16) r1;
719     auto a1 = r1.allocate(101);
720     assert(a1.length == 101);
721 
722     // 128KB region, with fallback to the garbage collector.
723     import std.experimental.allocator.building_blocks.fallback_allocator
724     : FallbackAllocator;
725     import std.experimental.allocator.building_blocks.free_list
726     : FreeList;
727     import std.experimental.allocator.building_blocks.bitmapped_block
728     : BitmappedBlock;
729     import std.experimental.allocator.gc_allocator : GCAllocator;
730     FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
731     const a2 = r2.allocate(102);
732     assert(a2.length == 102);
733 
734     // Reap with GC fallback.
735     InSituRegion!(128 * 1024, 8) tmp3;
736     FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
737     r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[]) (tmp3.allocateAll()));
738     const a3 = r3.allocate(103);
739     assert(a3.length == 103);
740 
741     // Reap/GC with a freelist for small objects up to 16 bytes.
742     InSituRegion!(128 * 1024, 64) tmp4;
743     FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
744     r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[]) (tmp4.allocateAll()));
745     const a4 = r4.allocate(104);
746     assert(a4.length == 104);
747 }
748 
749 @system pure nothrow unittest
750 {
751     import std.typecons : Ternary;
752 
753     InSituRegion!(4096, 1) r1;
754     auto a = r1.allocate(2001);
755     assert(a.length == 2001);
756     import std.conv : text;
757     assert(r1.available == 2095, text(r1.available));
758     // Ensure deallocate inherits from parent
759     assert((() nothrow @nogc => r1.deallocate(a))());
760     assert((() nothrow @nogc => r1.deallocateAll())());
761 
762     InSituRegion!(65_536, 1024*4) r2;
763     assert(r2.available <= 65_536);
764     a = r2.allocate(2001);
765     assert(a.length == 2001);
766     const void[] buff = r2.allocate(42);
767     assert((() nothrow @safe @nogc => r2.owns(buff))() == Ternary.yes);
768     assert((() nothrow @nogc => r2.deallocateAll())());
769 }
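
// A small illustrative sketch of aligned allocation from an `InSituRegion`;
// the requested alignment may exceed the region's own `minAlign`.
@system unittest
{
    InSituRegion!(4096, 16) r;
    auto b = r.alignedAllocate(100, 256);
    assert(b.length == 100);
    assert(cast(size_t) b.ptr % 256 == 0);
}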
770 
771 version(CRuntime_Musl)
772 {
773     // sbrk and brk are disabled in Musl:
774     // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
775     // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
}
else:

private extern(C) void* sbrk(long) nothrow @nogc;
private extern(C) int brk(shared void*) nothrow @nogc;
779 
780 /**
781 
782    Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
   for Posix systems. Because `sbrk` is not thread-safe
   $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
   `SbrkRegion` uses a mutex internally. This implies that uncontrolled calls
   to `brk` and `sbrk` may adversely affect the workings of `SbrkRegion`.
788 
789 */
790 version(Posix) struct SbrkRegion(uint minAlign = platformAlignment)
791 {
792     import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
793         pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
794         PTHREAD_MUTEX_INITIALIZER;
795     private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
796     import std.typecons : Ternary;
797 
798     static assert(minAlign.isGoodStaticAlignment);
799     static assert(size_t.sizeof == (void*).sizeof);
800     private shared void* _brkInitial, _brkCurrent;
801 
802     /**
803        Instance shared by all callers.
804     */
805     static shared SbrkRegion instance;
806 
807     /**
808        Standard allocator primitives.
809     */
810     enum uint alignment = minAlign;
811 
812     /**
       Rounds the given size to a multiple of the `alignment`.
814     */
815     size_t goodAllocSize(size_t n) shared const pure nothrow @safe @nogc
816     {
817         return n.roundUpToMultipleOf(alignment);
818     }
819 
820     /// Ditto
821     void[] allocate(size_t bytes) shared @trusted nothrow @nogc
822     {
823         // Take alignment rounding into account
824         const rounded = goodAllocSize(bytes);
825 
826         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
827         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
828                     || assert(0);
829         // Assume sbrk returns the old break. Most online documentation confirms
830         // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
831         // which claims the returned value is not portable.
832         auto p = sbrk(rounded);
833         if (p == cast(void*) -1)
834         {
835             return null;
836         }
837         if (!_brkInitial)
838         {
839             _brkInitial = cast(shared) p;
840             assert(cast(size_t) _brkInitial % minAlign == 0,
841                    "Too large alignment chosen for " ~ typeof(this).stringof);
842         }
843         _brkCurrent = cast(shared) (p + rounded);
844         return p[0 .. bytes];
845     }
846 
847     /// Ditto
848     void[] alignedAllocate(size_t bytes, uint a) shared @trusted nothrow @nogc
849     {
850         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
851         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
852                     || assert(0);
853         if (!_brkInitial)
854         {
855             // This is one extra call, but it'll happen only once.
856             _brkInitial = cast(shared) sbrk(0);
857             assert(cast(size_t) _brkInitial % minAlign == 0,
858                    "Too large alignment chosen for " ~ typeof(this).stringof);
859             (_brkInitial != cast(void*) -1) || assert(0);
860             _brkCurrent = _brkInitial;
861         }
862         immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
863             cast(size_t) _brkCurrent, a) - _brkCurrent;
864         // Still must make sure the total size is aligned to the allocator's
865         // alignment.
866         immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
867 
868         auto p = sbrk(rounded);
869         if (p == cast(void*) -1)
870         {
871             return null;
872         }
873         _brkCurrent = cast(shared) (p + rounded);
874         return p[delta .. delta + bytes];
875     }
876 
877     /**
878 
879        The `expand` method may only succeed if the argument is the last block
880        allocated. In that case, `expand` attempts to push the break pointer to
881        the right.
882 
883     */
884     bool expand(ref void[] b, size_t delta) shared nothrow @trusted @nogc
885     {
886         if (b is null || delta == 0) return delta == 0;
887         assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
888         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
889         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
890                     || assert(0);
891 
892         // Take alignment rounding into account
893         const rounded = goodAllocSize(b.length);
894 
895         const slack = rounded - b.length;
896         if (delta <= slack)
897         {
898             b = b.ptr[0 .. b.length + delta];
899             return true;
900         }
901 
902         if (_brkCurrent != b.ptr + rounded) return false;
903         // Great, can expand the last block
904         delta -= slack;
905 
906         const roundedDelta = goodAllocSize(delta);
907         auto p = sbrk(roundedDelta);
908         if (p == cast(void*) -1)
909         {
910             return false;
911         }
912         _brkCurrent = cast(shared) (p + roundedDelta);
913         b = b.ptr[0 .. b.length + slack + delta];
914         return true;
915     }
916 
917     /// Ditto
918     Ternary owns(const void[] b) shared pure nothrow @trusted @nogc
919     {
920         // No need to lock here.
921         assert(!_brkCurrent || !b || &b[0] + b.length <= _brkCurrent);
922         return Ternary(_brkInitial && b && (&b[0] >= _brkInitial));
923     }
924 
925     /**
926 
       The `deallocate` method only works (and returns `true`) on systems
       that support reducing the break address (i.e. accept calls to `sbrk`
       with negative offsets). OSX does not accept such calls. In addition,
       the argument must be the last block allocated.
931 
932     */
933     bool deallocate(void[] b) shared nothrow @nogc
934     {
935         // Take alignment rounding into account
936         const rounded = goodAllocSize(b.length);
937         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
938         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
939                     || assert(0);
940         if (_brkCurrent != b.ptr + rounded) return false;
941         assert(b.ptr >= _brkInitial);
942         if (sbrk(-rounded) == cast(void*) -1)
943             return false;
944         _brkCurrent = cast(shared) b.ptr;
945         return true;
946     }
947 
948     /**
       The `deallocateAll` method only works (and returns `true`) on systems
       that support reducing the break address (i.e. accept calls to `sbrk`
       with negative offsets). OSX does not accept such calls.
952     */
953     nothrow @nogc
954     bool deallocateAll() shared
955     {
956         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
957         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
958                     || assert(0);
959         return !_brkInitial || brk(_brkInitial) == 0;
960     }
961 
962     /// Standard allocator API.
963     Ternary empty() shared pure nothrow @safe @nogc
964     {
965         // Also works when they're both null.
966         return Ternary(_brkCurrent == _brkInitial);
967     }
968 }
969 
970 version(Posix) @system nothrow @nogc unittest
971 {
972     // Let's test the assumption that sbrk(n) returns the old address
973     const p1 = sbrk(0);
974     const p2 = sbrk(4096);
975     assert(p1 == p2);
976     const p3 = sbrk(0);
977     assert(p3 == p2 + 4096);
978     // Try to reset brk, but don't make a fuss if it doesn't work
979     sbrk(-4096);
980 }
981 
982 version(Posix) @system nothrow @nogc unittest
983 {
984     import std.typecons : Ternary;
985     import std.algorithm.comparison : min;
986     alias alloc = SbrkRegion!(min(8, platformAlignment)).instance;
987     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.yes);
988     auto a = alloc.alignedAllocate(2001, 4096);
989     assert(a.length == 2001);
990     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.no);
991     auto oldBrkCurr = alloc._brkCurrent;
992     auto b = alloc.allocate(2001);
993     assert(b.length == 2001);
994     assert((() nothrow @safe @nogc => alloc.expand(b, 0))());
995     assert(b.length == 2001);
996     // Expand with a small size to fit the rounded slack due to alignment
997     assert((() nothrow @safe @nogc => alloc.expand(b, 1))());
998     assert(b.length == 2002);
999     // Exceed the rounded slack due to alignment
1000     assert((() nothrow @safe @nogc => alloc.expand(b, 10))());
1001     assert(b.length == 2012);
1002     assert((() nothrow @safe @nogc => alloc.owns(a))() == Ternary.yes);
1003     assert((() nothrow @safe @nogc => alloc.owns(b))() == Ternary.yes);
1004     // reducing the brk does not work on OSX
    version(OSX) {} else
    {
        assert((() nothrow @nogc => alloc.deallocate(b))());
        // Check that expand and deallocate work well
        assert(oldBrkCurr == alloc._brkCurrent);
        assert((() nothrow @nogc => alloc.deallocate(a))());
        assert((() nothrow @nogc => alloc.deallocateAll())());
    }
1013     const void[] c = alloc.allocate(2001);
1014     assert(c.length == 2001);
1015     assert((() nothrow @safe @nogc => alloc.owns(c))() == Ternary.yes);
1016     assert((() nothrow @safe @nogc => alloc.owns(null))() == Ternary.no);
1017 }
1018 
1019 /**
   The thread-safe version of the `Region` allocator.
   Allocations and deallocations are lock-free and implemented using $(REF cas, core,atomic).
1022 */
1023 shared struct SharedRegion(ParentAllocator = NullAllocator,
1024                            uint minAlign = platformAlignment,
1025                            Flag!"growDownwards" growDownwards = No.growDownwards)
1026 {
1027     nothrow @nogc:
1028     static assert(minAlign.isGoodStaticAlignment);
1029     static assert(ParentAllocator.alignment >= minAlign);
1030 
1031     import std.traits : hasMember;
1032     import std.typecons : Ternary;
1033 
1034     // state
1035     /**
1036        The _parent allocator. Depending on whether `ParentAllocator` holds state
1037        or not, this is a member variable or an alias for
1038        `ParentAllocator.instance`.
1039     */
1040     static if (stateSize!ParentAllocator)
1041     {
1042         ParentAllocator parent;
1043     }
1044     else
1045     {
1046         alias parent = ParentAllocator.instance;
1047     }
1048     private shared void* _current, _begin, _end;
1049 
1050     private void* roundedBegin() const pure nothrow @trusted @nogc
1051     {
1052         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
1053     }
1054 
1055     private void* roundedEnd() const pure nothrow @trusted @nogc
1056     {
1057         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
1058     }
1059 
1060 
1061     /**
1062        Constructs a region backed by a user-provided store.
1063        Assumes the memory was allocated with `ParentAllocator`
1064        (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
1065 
1066        Params:
1067        store = User-provided store backing up the region. If `ParentAllocator`
1068        is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
1069        have been allocated with `ParentAllocator`.
1070        n = Bytes to allocate using `ParentAllocator`. This constructor is only
       defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
1072        `parent.allocate(n)` returns `null`, the region will be initialized
1073        as empty (correctly initialized but unable to allocate).
1074     */
1075     this(ubyte[] store) pure nothrow @nogc
1076     {
1077         _begin = cast(typeof(_begin)) store.ptr;
1078         _end = cast(typeof(_end)) (store.ptr + store.length);
1079         static if (growDownwards)
1080             _current = cast(typeof(_current)) roundedEnd();
1081         else
1082             _current = cast(typeof(_current)) roundedBegin();
1083     }
1084 
1085     /// Ditto
1086     static if (!is(ParentAllocator == NullAllocator))
1087         this(size_t n)
1088         {
1089             this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
1090         }
1091 
1092     /**
       Rounds the given size to a multiple of the `alignment`.
1094     */
1095     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
1096     {
1097         return n.roundUpToAlignment(alignment);
1098     }
1099 
1100     /**
1101        Alignment offered.
1102     */
1103     alias alignment = minAlign;
1104 
1105     /**
1106        Allocates `n` bytes of memory. The allocation is served by atomically incrementing
1107        a pointer which keeps track of the current used space.
1108 
1109        Params:
1110        n = number of bytes to allocate
1111 
1112        Returns:
1113        A properly-aligned buffer of size `n`, or `null` if request could not
1114        be satisfied.
1115     */
1116     void[] allocate(size_t n) pure nothrow @trusted @nogc
1117     {
1118         import core.atomic : cas, atomicLoad;
1119 
1120         if (n == 0) return null;
1121         const rounded = goodAllocSize(n);
1122 
1123         shared void* localCurrent, localNewCurrent;
1124         static if (growDownwards)
1125         {
1126             do
1127             {
1128                 localCurrent = atomicLoad(_current);
1129                 localNewCurrent = localCurrent - rounded;
1130                 if (localNewCurrent > localCurrent || localNewCurrent < _begin)
1131                     return null;
1132             } while (!cas(&_current, localCurrent, localNewCurrent));
1133 
1134             return cast(void[]) localNewCurrent[0 .. n];
1135         }
1136         else
1137         {
1138             do
1139             {
1140                 localCurrent = atomicLoad(_current);
1141                 localNewCurrent = localCurrent + rounded;
1142                 if (localNewCurrent < localCurrent || localNewCurrent > _end)
1143                     return null;
1144             } while (!cas(&_current, localCurrent, localNewCurrent));
1145 
1146             return cast(void[]) localCurrent[0 .. n];
1147         }
1148 
1149         assert(0, "Unexpected error in SharedRegion.allocate");
1150     }
1151 
1152     /**
1153        Deallocates `b`. This works only if `b` was obtained as the last call
1154        to `allocate`; otherwise (i.e. another allocation has occurred since) it
1155        does nothing.
1156 
1157        Params:
1158        b = Block previously obtained by a call to `allocate` against this
1159        allocator (`null` is allowed).
1160     */
1161     bool deallocate(void[] b) pure nothrow @nogc
1162     {
1163         import core.atomic : cas, atomicLoad;
1164 
1165         const rounded = goodAllocSize(b.length);
1166         shared void* localCurrent, localNewCurrent;
1167 
1168         // The cas is done only once, because only the last allocation can be reverted
1169         localCurrent = atomicLoad(_current);
1170         static if (growDownwards)
1171         {
1172             localNewCurrent = localCurrent + rounded;
1173             if (b.ptr == localCurrent)
1174                 return cas(&_current, localCurrent, localNewCurrent);
1175         }
1176         else
1177         {
1178             localNewCurrent = localCurrent - rounded;
1179             if (b.ptr == localNewCurrent)
1180                 return cas(&_current, localCurrent, localNewCurrent);
1181         }
1182 
1183         return false;
1184     }
1185 
1186     /**
1187        Allocates `n` bytes of memory aligned at alignment `a`.
1188        Params:
1189        n = number of bytes to allocate
1190        a = alignment for the allocated block
1191 
1192        Returns:
1193        Either a suitable block of `n` bytes aligned at `a`, or `null`.
1194     */
1195     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
1196     {
1197         import core.atomic : cas, atomicLoad;
1198         import std.math : isPowerOf2;
1199 
1200         assert(a.isPowerOf2);
1201         if (n == 0) return null;
1202 
1203         const rounded = goodAllocSize(n);
1204         shared void* localCurrent, localNewCurrent;
1205 
1206         static if (growDownwards)
1207         {
1208             do
1209             {
1210                 localCurrent = atomicLoad(_current);
1211                 auto alignedCurrent = cast(void*)(localCurrent - rounded);
1212                 localNewCurrent = cast(shared(void*)) alignedCurrent.alignDownTo(a);
1213                 if (alignedCurrent > localCurrent || localNewCurrent > alignedCurrent ||
1214                     localNewCurrent < _begin)
1215                     return null;
1216             } while (!cas(&_current, localCurrent, localNewCurrent));
1217 
1218             return cast(void[]) localNewCurrent[0 .. n];
1219         }
1220         else
1221         {
1222             do
1223             {
1224                 localCurrent = atomicLoad(_current);
1225                 auto alignedCurrent = alignUpTo(cast(void*) localCurrent, a);
1226                 localNewCurrent = cast(shared(void*)) (alignedCurrent + rounded);
1227                 if (alignedCurrent < localCurrent || localNewCurrent < alignedCurrent ||
1228                     localNewCurrent > _end)
1229                     return null;
1230             } while (!cas(&_current, localCurrent, localNewCurrent));
1231 
1232             return cast(void[]) (localNewCurrent - rounded)[0 .. n];
1233         }
1234 
1235         assert(0, "Unexpected error in SharedRegion.alignedAllocate");
1236     }
1237 
1238     /**
1239        Queries whether `b` has been allocated with this region.
1240 
1241        Params:
1242        b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
1243        `false`).
1244 
1245        Returns:
1246        `true` if `b` has been allocated with this region, `false` otherwise.
1247     */
1248     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
1249     {
1250         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
1251     }
1252 
1253     /**
1254        Returns `Ternary.yes` if no memory has been allocated in this region,
1255        `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
1256     */
1257     Ternary empty() const pure nothrow @safe @nogc
1258     {
1259         import core.atomic : atomicLoad;
1260 
1261         auto localCurrent = atomicLoad(_current);
1262         static if (growDownwards)
1263             return Ternary(localCurrent == roundedEnd());
1264         else
1265             return Ternary(localCurrent == roundedBegin());
1266     }
1267 
1268     /**
1269        If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
1270        the region defines a destructor that uses `ParentAllocator.deallocate` to free the
1271        memory chunk.
1272     */
1273     static if (!is(ParentAllocator == NullAllocator)
1274                && __traits(hasMember, ParentAllocator, "deallocate"))
1275         ~this() nothrow @nogc
1276         {
1277             parent.deallocate(cast(void[]) _begin[0 .. _end - _begin]);
1278         }
1279 }
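
// A single-threaded sketch of `SharedRegion` on a caller-provided buffer; the
// concurrent stress tests below remain disabled (see the TODOs).
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;

    ubyte[256] store;
    auto reg = SharedRegion!(NullAllocator, 16)(store[]);
    assert(reg.empty == Ternary.yes);
    auto a = reg.allocate(10);
    assert(a.length == 10);
    assert(reg.owns(a) == Ternary.yes);
    assert(reg.deallocate(a)); // the last allocation can be reverted
    assert(reg.empty == Ternary.yes);
}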
1280 
1281 // TODO: activate
1282 // @system unittest
1283 // {
1284 //     import std.experimental.allocator.mallocator : Mallocator;
1285 
1286 //     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1287 //     {
1288 //         import core.thread : ThreadGroup;
1289 //         import std.algorithm.sorting : sort;
1290 //         import core.internal.spinlock : SpinLock;
1291 
1292 //         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1293 //         enum numThreads = 100;
1294 //         void[][numThreads] buf;
1295 //         size_t count = 0;
1296 
1297 //         void fun()
1298 //         {
1299 //             void[] b = a.allocate(63);
1300 //             assert(b.length == 63);
1301 
1302 //             lock.lock();
1303 //             buf[count] = b;
1304 //             count++;
1305 //             lock.unlock();
1306 //         }
1307 
1308 //         auto tg = new ThreadGroup;
1309 //         foreach (i; 0 .. numThreads)
1310 //         {
1311 //             tg.create(&fun);
1312 //         }
1313 //         tg.joinAll();
1314 
1315 //         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. numThreads]);
1316 //         foreach (i; 0 .. numThreads - 1)
1317 //         {
1318 //             assert(buf[i].ptr + a.goodAllocSize(buf[i].length) == buf[i + 1].ptr);
1319 //         }
1320 
1321 //         assert(!a.deallocate(buf[1]));
1322 
1323 //         foreach (i; 0 .. numThreads)
1324 //         {
1325 //             if (!growDownwards)
1326 //                 assert(a.deallocate(buf[numThreads - 1 - i]));
1327 //             else
1328 //                 assert(a.deallocate(buf[i]));
1329 //         }
1330 //     }
1331 
1332 //     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1333 //                             Yes.growDownwards)(1024 * 64);
1334 
1335 //     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1336 //                             No.growDownwards)(1024 * 64);
1337 
1338 //     testAlloc(a1, true);
1339 //     testAlloc(a2, false);
1340 // }
1341 
1342 // TODO: activate
1343 // @system unittest
1344 // {
1345 //     import std.experimental.allocator.mallocator : Mallocator;
1346 
1347 //     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1348 //     {
1349 //         import core.thread : ThreadGroup;
1350 //         import std.algorithm.sorting : sort;
1351 //         import core.internal.spinlock : SpinLock;
1352 
1353 //         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1354 //         enum numThreads = 100;
1355 //         void[][2 * numThreads] buf;
1356 //         size_t count = 0;
1357 
1358 //         void fun()
1359 //         {
1360 //             void[] b = a.allocate(63);
1361 //             assert(b.length == 63);
1362 
1363 //             lock.lock();
1364 //             buf[count] = b;
1365 //             count++;
1366 //             lock.unlock();
1367 
1368 //             b = a.alignedAllocate(63, 32);
1369 //             assert(b.length == 63);
1370 //             assert(cast(size_t) b.ptr % 32 == 0);
1371 
1372 //             lock.lock();
1373 //             buf[count] = b;
1374 //             count++;
1375 //             lock.unlock();
1376 //         }
1377 
1378 //         auto tg = new ThreadGroup;
1379 //         foreach (i; 0 .. numThreads)
1380 //         {
1381 //             tg.create(&fun);
1382 //         }
1383 //         tg.joinAll();
1384 
1385 //         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. 2 * numThreads]);
1386 //         foreach (i; 0 .. 2 * numThreads - 1)
1387 //         {
1388 //             assert(buf[i].ptr + buf[i].length <= buf[i + 1].ptr);
1389 //         }
1390 
1391 //         assert(!a.deallocate(buf[1]));
1392 //     }
1393 
1394 //     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1395 //                             Yes.growDownwards)(1024 * 64);
1396 
1397 //     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1398 //                             No.growDownwards)(1024 * 64);
1399 
1400 //     testAlloc(a1, true);
1401 //     testAlloc(a2, false);
1402 // }