/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
    Admin.cpp
    Automatic Garbage Collection
    Copyright (c) 2004-2011 Apple Inc. All rights reserved.
*/

#include "Admin.h"
#include "Bitmap.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Zone.h"
#include "Locks.h"
#include "BlockRef.h"
#include <stdio.h>
#include <sys/mman.h>

namespace Auto {

    //----- Admin -----//


    //
    // initialize
    //
    // Set up the admin for initial use.  Provided the owning zone, the log2 of the quantum size
    // used in the managed area, and the block layout.
    //
    void Admin::initialize(Zone *zone, const usword_t quantum_log2, const usword_t layout) {
        _zone = zone;
        _quantum_log2 = quantum_log2;
        _active_subzone = NULL;
        _admin_lock = 0;
        _freelist_search_cap = 0;
        _layout = layout;
    }


    //
    // unused_count
    //
    // Returns the number of quanta in the active subzone that have not yet been handed out (and so
    // are not on any free list).  We shrink the heap when we can and let allocations battle it out
    // on the free lists first.
    //
    usword_t Admin::unused_count() {
        return _active_subzone->allocation_limit() - _active_subzone->allocation_count();
    }

    //
    // free_space()
    //
    // Sums the free lists.
    //
    usword_t Admin::free_space() {
        SpinLock lock(&_admin_lock);
        usword_t empty_space = 0;

        for (usword_t m = 0; m < AllocationCache::cache_size; m++) {
            for (FreeListNode *node = _cache[m].head(); node != NULL; node = node->next()) {
                empty_space += node->size();
            }
        }

        return empty_space;
    }

    // The smallest sized block of memory to pass to uncommit_memory().
    const usword_t min_purgeable_size = Auto::page_size;

    //
    // visit_purgeable_nodes
    //
    // Visits all unpurged free list nodes larger than min_purgeable_size, and all unpurged purgeable subzones.
    //
    template <typename PurgeVisitor> void Admin::visit_purgeable_nodes(PurgeVisitor &visitor) {
        const usword_t quantum_size = (1 << _quantum_log2);

        // Visit medium sized free list nodes that are larger than min_purgeable_size.
        if (is_medium()) {
            for (usword_t m = maximum_quanta, block_size = quantum_size * maximum_quanta; m > 0 && block_size > min_purgeable_size; --m, block_size -= quantum_size) {
                for (FreeListNode *node = _cache[m].head(); node != NULL; node = node->next()) {
                    if (!node->is_purged()) {
                        visitor(node);
                    }
                }
            }
        }

        // Visit 0th bucket nodes, which are all >= maximum_quanta * quantum_size.
        for (FreeListNode *node = _cache[0].head(); node != NULL; node = node->next()) {
            if (node->size() > min_purgeable_size && !node->is_purged()) {
                visitor(node);
            }
        }

        // Visit unused portions of subzones.
        for (Subzone *subzone = _purgeable_subzones; subzone != NULL; subzone = subzone->nextSubzone()) {
            if (!subzone->is_purged()) {
                visitor(subzone);
            }
        }
    }

    //
    // purgeable_free_space()
    //
    // Returns how much free list space could be recovered by purge_free_space().
    //
    usword_t Admin::purgeable_free_space() {
        SpinLock lock(&_admin_lock);
        return purgeable_free_space_no_lock();
    }

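    // Visitor used with visit_purgeable_nodes() to tally how many bytes could be purged.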
    struct PurgeableFreeSpaceVisitor {
        usword_t purgeable_bytes;

        PurgeableFreeSpaceVisitor() : purgeable_bytes(0) {}

        // Sum sizes of medium sized free list nodes that are at least min_purgeable_size in size.
        void operator() (FreeListNode *node) {
            Range r(node->purgeable_range());
            usword_t r_size = r.size();
            if (r_size >= min_purgeable_size) {
                purgeable_bytes += r_size;
            }
        }

        void operator() (Subzone *subzone) {
            Range r(subzone->purgeable_range());
            usword_t r_size = r.size();
            if (r_size >= min_purgeable_size) {
                purgeable_bytes += r_size;
            }
        }
    };

    usword_t Admin::purgeable_free_space_no_lock() {
        PurgeableFreeSpaceVisitor visitor;
        visit_purgeable_nodes(visitor);
        return visitor.purgeable_bytes;
    }

    //
    // purge_free_space()
    //
    // Relinquish free space pages to the system where possible.
    //
    usword_t Admin::purge_free_space() {
        SpinLock lock(&_admin_lock);
        return purge_free_space_no_lock();
    }

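    // Visitor used with visit_purgeable_nodes() to hand purgeable ranges back to the VM system via madvise().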
    struct PurgingVisitor {
        usword_t bytes_purged;
        usword_t subzone_bytes_purged;

        PurgingVisitor() : bytes_purged(0), subzone_bytes_purged(0) {}

        // Purge medium sized free list nodes that are at least min_purgeable_size in size.
        void operator() (FreeListNode *node) {
            Range r(node->purgeable_range());
            usword_t r_size = r.size();
            if (r_size >= min_purgeable_size) {
                madvise(r.address(), r_size, MADV_FREE_REUSABLE);
                node->set_purged(true);
                bytes_purged += r_size;
            }
        }

        // Purge unused portions of subzones.
        void operator() (Subzone *subzone) {
            Range r(subzone->purgeable_range());
            usword_t r_size = r.size();
            if (r_size >= min_purgeable_size) {
                madvise(r.address(), r_size, MADV_FREE_REUSABLE);
                subzone->set_purged(true);
                bytes_purged += r_size;
                subzone_bytes_purged += r_size;
            }
        }
    };

    usword_t Admin::purge_free_space_no_lock() {
        PurgingVisitor visitor;
        visit_purgeable_nodes(visitor);
        return visitor.bytes_purged;
    }

    // Before a free node is handed out, we must make sure its memory is marked for reuse with the VM system.

    static void reuse_node(FreeListNode *node) {
        if (node->is_purged()) {
            Range r(node->purgeable_range());
            madvise(r.address(), r.size(), MADV_FREE_REUSE);
            node->set_purged(false);
        }
    }

    //
    // empty_space()
    //
    // Returns the total size of the holes on the large-block (0th) free list.
    //
    usword_t Admin::empty_space() {
        SpinLock lock(&_admin_lock);
        usword_t empty_space = 0;

        // iterate through the large-block (0th) free list
        for (FreeListNode *node = _cache[0].head(); node != NULL; node = node->next()) {
            empty_space += node->size();
        }

        return empty_space;
    }

#if DEBUG
    bool Admin::test_node_integrity(FreeListNode *node) {
        bool node_is_valid = false;
        const Range &coverage = _zone->coverage();
        do {
            // make sure the node is a plausible address.
            if (!coverage.in_range(node)) break;

            Subzone *subzone = Subzone::subzone((void *)node);

            // get quantum number
            usword_t q = subzone->quantum_index(node->address());

            // make sure quantum number is in range
            if (q >= subzone->allocation_limit()) break;

            // make sure that address is exact quantum
            if (subzone->quantum_address(q) != node->address()) break;

            // make sure it is free
            if (!subzone->is_free(q)) break;

            // check plausibility of next and previous pointers.
            FreeListNode *next = node->next();
            if (next && !coverage.in_range(next)) break;
            FreeListNode *prev = node->prev();
            if (prev && !coverage.in_range(prev)) break;

            // make sure of size redundancy
            if (node->size() != node->size_again()) break;

            node_is_valid = true;
        } while (0);

        if (!node_is_valid) {
            static char buffer[256];
            if (coverage.in_range(node)) {
                snprintf(buffer, sizeof(buffer), "test_node_integrity:  FreeListNode %p { _prev = %p, _next = %p, _size = %lu } failed integrity check.\n",
                         node, node->prev(), node->next(), node->size());
            } else {
                snprintf(buffer, sizeof(buffer), "test_node_integrity:  FreeListNode %p failed integrity check.\n", node);
            }
            auto_fatal("%s", buffer);
        }

        return node_is_valid;
    }
#else
    bool Admin::test_node_integrity(FreeListNode *node) { return true; }
#endif

    //
    // test_freelist_integrity
    //
    // Returns true if the free lists appear to be intact.
    //
    bool Admin::test_freelist_integrity() {
        SpinLock lock(&_admin_lock);

        // iterate through each free list
        for (usword_t m = 0; m < AllocationCache::cache_size; m++) {
            // iterate through each node of this free list
            for (FreeListNode *node = _cache[m].head(), *prev_node = NULL; node; node = node->next()) {
                Subzone *subzone = Subzone::subzone((void *)node);

                // get quantum number
                usword_t q = subzone->quantum_index(node->address());

                // make sure quantum number is in range
                if (q >= subzone->allocation_limit()) return false;

                // make sure that address is exact quantum
                if (subzone->quantum_address(q) != node->address()) return false;

                // make sure it is free
                if (subzone->is_used(q)) return false;

                // make sure the previous pointer is accurate
                if (node->prev() != prev_node) return false;

                // make sure of size redundancy
                if (node->size() != node->size_again()) return false;

                // update previous for next round
                prev_node = node;
            }
        }

        return true;
    }


    //
    // pop_node
    //
    // Pops a node from the specified FreeList. Also
    // performs node consistency checks.
    //
    inline FreeListNode *Admin::pop_node(usword_t index) {
        FreeListNode *node = _cache[index].pop();
        if (node && test_node_integrity(node)) {
            // bigger nodes can be "purged"
            if (is_medium()) reuse_node(node);
            return node;
        }
        return NULL;
    }


    //
    // push_node
    //
    // Pushes a new node on to the specified FreeList.
    // Also tracks the range of active FreeList entries.
    //
    inline void Admin::push_node(usword_t index, void *address, usword_t size) {
        _cache[index].push(address, size);
        if (index > _freelist_search_cap || _freelist_search_cap == 0)
            _freelist_search_cap = index;
    }

    //
    // insert_node
    //
    // Inserts a new node on to the specified FreeList (keeping it sorted).
    // Also tracks the range of active FreeList entries.
    //
    inline void Admin::insert_node(usword_t index, void *address, usword_t size) {
        _cache[index].insert(address, size);
        if (index > _freelist_search_cap || _freelist_search_cap == 0)
            _freelist_search_cap = index;
    }

    //
    // append_node
    //
    // Appends a new node on to the tail of the specified FreeList.
    // Also tracks the range of active FreeList entries.
    //
    void Admin::append_node(FreeListNode *node) {
        usword_t index = cache_slot(node->size());
        _cache[index].append(node);
        if (index > _freelist_search_cap || _freelist_search_cap == 0)
            _freelist_search_cap = index;
    }


    //
    // mark_allocated
    //
    // Set tables with information for new allocation.
    //
    void Admin::mark_allocated(void *address, const usword_t n, const usword_t layout, const bool refcount_is_one, const bool is_local) {
        Subzone *subzone = Subzone::subzone(address);
        // always ZERO the first word before marking an object as allocated, to avoid a race with the scanner.
        // TODO:  consider doing the bzero here, to keep the scanner from seeing stale pointers altogether.
        // TODO:  for the medium admin, might want to release the lock during block clearing, and reacquire it
        // before allocation.
        // XXX Perhaps this only works by virtue of Intel's total memory order: the scanner/collector thread
        // might have stale just-freed-link data in its L2/L3 cache and might see the new admin byte together
        // with the old data, i.e. this NULL isn't really guaranteed to be visible to other processors.
        *(void **)address = NULL;
        subzone->allocate(subzone->quantum_index(address), n, layout, refcount_is_one, is_local);
    }

    //
    // mark_cached
    //
    // Set tables with information for a cached block (marked as thread local garbage).
    //
    void Admin::mark_cached(Subzone *subzone, usword_t q, const usword_t n) {
        // mark as thread local garbage while in the cache.
        subzone->cache(q, n);
    }

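    //
    // mark_cached_range
    //
    // Marks a range of quanta as cached, carving it into chunks of at most maximum_quanta.
    // Used to consume slop and guard pages when guard pages are enabled.
    //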
    void Admin::mark_cached_range(void *address, usword_t n) {
        Subzone *subzone = Subzone::subzone(address);
        const usword_t maximum_block_size = maximum_quanta << _quantum_log2;
        while (n >= maximum_quanta) {
            subzone->cache(subzone->quantum_index(address), maximum_quanta);
            address = displace(address, maximum_block_size);
            n -= maximum_quanta;
        }
        if (n) subzone->cache(subzone->quantum_index(address), n);
    }

    // try to reuse a subzone from the purgeable list. only choose a subzone with enough space to make it worth reusing.
    void Admin::activate_purged_subzone() {
        ASSERTION(_active_subzone == NULL);
        Subzone *subzone = _purgeable_subzones, *prev_subzone = NULL;
        while (subzone != NULL) {
            usword_t top = subzone->allocation_count();
            usword_t unused = subzone->allocation_limit() - top;
            if (unused > AllocationCache::cache_size) {
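                // unlink this subzone from the purgeable list and make it the active subzone.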
                if (prev_subzone)
                    prev_subzone->setNextSubzone(subzone->nextSubzone());
                else
                    _purgeable_subzones = subzone->nextSubzone();
                subzone->setNextSubzone(NULL);
                subzone->set_purgeable(false);
                _active_subzone = subzone;
                if (subzone->is_purged()) {
                    Range r(subzone->purgeable_range());
                    madvise(r.address(), r.size(), MADV_FREE_REUSE);
                    subzone->set_purged(false);
                }
                break;
            }
            prev_subzone = subzone;
            subzone = subzone->nextSubzone();
        }
    }

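    //
    // find_allocation_no_lock
    //
    // Finds a block of n quanta on the free lists, or carves one out of the active subzone.
    // Returns NULL if no space is available.  Assumes the admin lock is held.
    //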
    inline void *Admin::find_allocation_no_lock(usword_t n) {
        // Fast case, exact fit from the free list. The free list will contain guarded blocks.
        FreeListNode *node = pop_node(n);
        if (node) {
            return node->address();
        }

        if (Environment::guard_pages) {
            if (_active_subzone == NULL) return NULL;

            // allocate directly from the subzone, since there will be no meaningful coalescing.
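            // The block is laid out so that it ends immediately before a protected guard page;
            // the leading slop and the guard page itself are permanently consumed (marked as cached).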
            const usword_t quantum_size = (1 << _quantum_log2);
            Subzone *subzone = _active_subzone;
            usword_t first_quantum = subzone->allocation_count(), last_quantum = subzone->allocation_limit();
            usword_t available_size = (last_quantum - first_quantum) * quantum_size;

            void *slop_address = subzone->quantum_address(first_quantum);
            void *guard_address = align_up(slop_address);
            usword_t block_size = n << _quantum_log2;
            usword_t slop_size = (uintptr_t)guard_address - (uintptr_t)slop_address;
            while (block_size > slop_size) {
                guard_address = displace(guard_address, page_size);
                slop_size += page_size;
            }
            void *end_address = displace(guard_address, page_size);
            usword_t total_size = (uintptr_t)end_address - (uintptr_t)slop_address;
            // make sure this allocation will actually fit.
            if (available_size < total_size) {
                // this subzone is "full", add another.
                // TODO:  look at free list again, and steal slop away from free blocks. NMOS.
                set_active_subzone(NULL);
                return NULL;
            }
            // account for the number of quanta we're allocating.
            subzone->raise_allocation_count(total_size >> _quantum_log2);
            // permanently allocate the slop (1 quantum at a time for simplicity FIXME later).
            usword_t slop_count = ((slop_size - block_size) >> _quantum_log2);
            if (slop_count) mark_cached_range(slop_address, slop_count);
            // protect the guard page.
            guard_page(guard_address);
            // also cache the guard page itself.
            mark_cached_range(guard_address, (page_size >> _quantum_log2));
            // check to see if there's still enough space in the subzone.
            usword_t remainder_size = available_size - total_size;
            if (remainder_size < (2 * page_size)) {
                // we need another subzone.
                set_active_subzone(NULL);
            }
            return displace(guard_address, -block_size);
        }

        // Find a bigger block to use, then chop off the remainder as appropriate.
        void *address = NULL;

        // if no block, iterate up through sizes greater than n (best fit)
        for (usword_t i = n + 1; node == NULL && i <= _freelist_search_cap; i++) {
            node = pop_node(i);
        }

        // Grab a free block from the big chunk free list
        if (!node) {
            node = pop_node(0);
            if (_freelist_search_cap >= n)
                _freelist_search_cap = n-1; // nothing on the free lists of size n or more
        }

        if (node) {
            // Got one.  Now return extra to free list.

            // get the address of the free block
            address = node->address();

            // get the full size of the allocation
            Subzone *subzone = Subzone::subzone(address);
            usword_t allocation_size = subzone->quantum_size(n);

            // see what's left over
            ASSERTION(node->size() >= allocation_size);
            usword_t remainder_size = node->size() - allocation_size;

            // if there is some left over
            if (remainder_size) {
                // determine the address of the remainder
                void *remainder_address = displace(address, allocation_size);
                // figure out which cache slot it should go in
                usword_t m = cache_slot(remainder_size);
                // push the remainder onto the free list
                push_node(m, remainder_address, remainder_size);
            }
        } else if (_active_subzone) {
            // See if we can get a free block from unused territory
            usword_t top = _active_subzone->allocation_count();
            usword_t unused = _active_subzone->allocation_limit() - top;

            ASSERTION(unused >= n);
            address = _active_subzone->quantum_address(top);
            *(void **)address = NULL;
            _active_subzone->raise_allocation_count(n);

            // if remainder fits on non-0 free list, put it there now.  That way we're guaranteed
            // to be able to satisfy any request.
            unused -= n;
            if (unused == 0) {
                set_active_subzone(NULL);
            } else if (unused < AllocationCache::cache_size) {
                push_node(unused, _active_subzone->quantum_address(top+n), _active_subzone->quantum_size(unused));
                _active_subzone->raise_allocation_count(unused);
                set_active_subzone(NULL);
            }

            // try to reuse a subzone from the purgeable list. only choose a subzone with enough space to make it worth reusing.
            if (_active_subzone == NULL) {
                activate_purged_subzone();
            }
        } else {
            return NULL;
        }

        return address;
    }

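    //
    // batch_allocate_from_freelist_slot_no_lock
    //
    // Pops nodes from the free list at cache_index and carves them into blocks of requested_size
    // bytes, storing up to num_requested pointers into results.  Any remainder is pushed back onto
    // the appropriate free list.  Returns the number of blocks produced.  Assumes the admin lock is held.
    //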
    unsigned Admin::batch_allocate_from_freelist_slot_no_lock(usword_t cache_index, usword_t requested_size, const bool clear, void **results, unsigned num_requested) {
        unsigned count = 0;
        FreeListNode *node;
        assert(num_requested > 0);

        do {
            node = pop_node(cache_index);
            if (node) {
                void *addr = node->address();
                usword_t node_size = node->size();

                // calculate how many blocks we can allocate from this node
                usword_t alloc_count = node_size / requested_size;

                // cap alloc_count to the number requested
                if (alloc_count > num_requested) alloc_count = num_requested;

                // zero the allocated blocks if requested
                usword_t alloc_size = alloc_count * requested_size;
                if (clear)
                    bzero(addr, alloc_size);

                // calculate the block pointers
                for (usword_t i=0; i<alloc_count; i++) {
                    results[count++] = addr;
                    addr = displace(addr, requested_size);
                }

                // if any remainder, put it back on the appropriate free list
                if (node_size > alloc_size) {
                    usword_t remainder_size = node_size - alloc_size;
                    // figure out which cache slot it should go in
                    usword_t m = cache_slot(remainder_size);
                    // push the remainder onto the free list
                    push_node(m, addr, remainder_size);
                }

                num_requested -= alloc_count;
            }
        } while (node && num_requested);
        return count;
    }

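    //
    // batch_allocate_from_subzone_no_lock
    //
    // Carves up to num_requested blocks of the given size out of the unused tail of the subzone,
    // storing the pointers into results.  Returns the number of blocks produced.
    // Assumes the admin lock is held.
    //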
    unsigned Admin::batch_allocate_from_subzone_no_lock(Subzone *subzone, size_t size, const bool clear, void **results, unsigned num_requested) {
        // See if we can get blocks from unused territory
        usword_t top = subzone->allocation_count();
        usword_t unused = subzone->allocation_limit() - top;
        usword_t quantum_size = quantum_count(size);
        usword_t available = unused / quantum_size;
        unsigned count = 0;

        if (available > 0) {
            if (available > num_requested)
                available = num_requested;
            void *address = subzone->quantum_address(top);
            do {
                results[count++] = address;
                address = displace(address, size);
            } while (count < available);
            if (clear)
                bzero(results[0], count * size);
            subzone->raise_allocation_count(quantum_size*count);
            // if remainder fits on non-0 free list, put it there.
            unused -= quantum_size * count;
            if ((unused > 0) && (unused < AllocationCache::cache_size)) {
                push_node(unused, address, subzone->quantum_size(unused));
                subzone->raise_allocation_count(unused);
                unused = 0;
            }
            if (unused == 0 && subzone == _active_subzone) {
                set_active_subzone(NULL);
                activate_purged_subzone();
            }
        }
        return count;
    }

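    //
    // batch_allocate
    //
    // Allocates up to num_requested blocks of the given size, drawing first from the free lists
    // and then from subzone memory.  Each block is marked allocated and, when required, enlivened.
    // Returns the number of blocks allocated.
    //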
    unsigned Admin::batch_allocate(Thread &thread, size_t &size, const usword_t layout, const bool refcount_is_one, const bool clear, void **results, unsigned num_requested) {
        // if AUTO_USE_GUARDS is on, always take the slow path.
        if (Environment::guard_pages) return 0;
        usword_t n = quantum_count(size);
        size = (n << _quantum_log2);
        SpinLock lock(&_admin_lock);

        unsigned count = 0;

        // we might try to reduce fragmentation by checking for exact multiple free list slots first
        // we could also try to improve locality by using larger block sizes first
        // but for now we just use the same strategy as the non-batch allocator
        for (usword_t cache_index = n; cache_index < AllocationCache::cache_size && count < num_requested; ++cache_index) {
            count += batch_allocate_from_freelist_slot_no_lock(cache_index, size, clear, &results[count], num_requested - count);
        }

        // if we still don't have enough, try the big chunks list
        if (count < num_requested) {
            count += batch_allocate_from_freelist_slot_no_lock(0, size, clear, &results[count], num_requested - count);
        }

        // try purged memory
        if (count < num_requested) {
            if (!_active_subzone)
                activate_purged_subzone();
            while (count < num_requested && _active_subzone) {
                count += batch_allocate_from_subzone_no_lock(_active_subzone, size, clear, &results[count], num_requested - count);
                if (!_active_subzone)
                    activate_purged_subzone();
            }
        }

        // mark all the blocks allocated, and enliven
        UnconditionalBarrier barrier(thread.needs_enlivening());
        for (usword_t i=0; i<count; i++) {
            mark_allocated(results[i], n, layout, refcount_is_one, false);
            if (barrier) SubzoneBlockRef(results[i]).enliven();
        }
        _zone->set_write_barrier_range(results, count * sizeof(void *));
        _zone->add_blocks_and_bytes(count, count * size);
        return count;
    }

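    //
    // thread_cache_allocate
    //
    // Allocates a small block from the per-thread free list cache, refilling the cache from this
    // admin when it is empty.  On return, size holds the actual block size and is_local reports
    // whether the block is thread local.
    //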
    void *Admin::thread_cache_allocate(Thread &thread, usword_t &size, const usword_t layout, const bool refcount_is_one, bool &is_local) {
        // size is inout.  on input it is the requested size.
        usword_t n = partition2(size, allocate_quantum_small_log2);
        FreeList &list = thread.allocation_cache(layout)[n];
        if (!list.head()) {
            // per thread-cache queue is empty
            // amortize cost of admin lock by finding several
            SpinLock lock(&_admin_lock);
            usword_t node_size = (n << allocate_quantum_small_log2);
            ConditionBarrier barrier(thread.needs_enlivening());
            int alloc_count;
            for (alloc_count = 0; alloc_count < cached_list_node_initial_count; ++alloc_count) {
                void *node = find_allocation_no_lock(n);
                if (!node) break;    // ran out
                Subzone *subzone = Subzone::subzone(node);
                usword_t q = subzone->quantum_index_unchecked(node);
                mark_cached(subzone, q, n);
                // skip write-barrier since nodes are allocated refcount==1
                list.push(node, node_size);
                if (barrier) SubzoneBlockRef(subzone, q).enliven();
            }
            zone()->add_blocks_and_bytes(alloc_count, alloc_count * (1L << allocate_quantum_small_log2) * n);
        }
        // grab first node off the cache "line"
        void *address = list.pop()->address();
        if (!address) return NULL;
        size = n << allocate_quantum_small_log2;   // return actual size (out param)
        // mark with requested layout and refcount
        // XXX only have to write the first byte - size is already OKAY
        if (refcount_is_one || !Environment::thread_collections) {
            // mark as a global (heap) object
            mark_allocated(address, n, layout, refcount_is_one, false);
            is_local = false;
        }
        else {
            // thread local allocation
            mark_allocated(address, n, layout, false, true);
            thread.add_local_allocation(address);
            is_local = true;
        }

        return address;
    }

    //
    // find_allocation
    //
    // Find the next available quanta for the allocation.  Returns NULL if none found;
    // otherwise allocates and returns the block.
    //
    void *Admin::find_allocation(Thread &thread, usword_t &size, const usword_t layout, const bool refcount_is_one, bool &is_local) {

        SpinLock lock(&_admin_lock);

        // determine the number of quanta we need
        usword_t n = quantum_count(size);
        ASSERTION(n < AllocationCache::cache_size);
        void *address = find_allocation_no_lock(n);

        if (address) {
            // mark as allocated
            size = n << _quantum_log2;
            ConditionBarrier barrier(thread.needs_enlivening());
            if (refcount_is_one || !Environment::thread_collections) {
                // mark as a global (heap) object
                mark_allocated(address, n, layout, refcount_is_one, false);
                is_local = false;
            } else {
                // thread local allocation
                mark_allocated(address, n, layout, false, true);
                thread.add_local_allocation(address);
                is_local = true;
            }
            if (barrier) SubzoneBlockRef(address).enliven();
            zone()->add_blocks_and_bytes(1, (1L << _quantum_log2) * n);
        }
        return address;
    }

    //
    // lowest_available_list
    //
    // Searches the heads of all free lists for a node with size >= n, and returns the list with the lowest head.
    //
    FreeList *Admin::lowest_available_list(usword_t n) {
        // start with bucket 0, which should always contain some free space.
        FreeList *candidate_list = &_cache[0];
        FreeListNode *candidate_node = candidate_list->head();

        for (usword_t i = n; i <= _freelist_search_cap; i++) {
            FreeListNode *node = _cache[i].head();
            if (node != NULL && (candidate_node == NULL || node->address() < candidate_node->address())) {
                candidate_list = &_cache[i];
                candidate_node = node;
            }
        }

        return candidate_list;
    }

    //
    // allocate_lower_block_no_lock
    //
    // Allocates a block of the same size and layout as the block identified by (subzone, q, block_address), at a lower heap address.
    // If no lower block can be found, returns block_address.
    //
    void *Admin::allocate_lower_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
        // determine the number of quanta we need
        const usword_t size = subzone->size(q);
        const unsigned layout = subzone->layout(q);
        usword_t n = quantum_count(size);
        ASSERTION(n < AllocationCache::cache_size);

        // check to see if we'll be able to lower the block's address.
        FreeList *list = lowest_available_list(n);
        FreeListNode *node = list->head();
        if (node == NULL || node->address() > block_address) {
            return block_address;
        }
        if (is_medium()) reuse_node(node);
        list->pop();
        void *address = node->address();

        // see if there is any room left over in this node.
        ASSERTION(node->size() >= size);
        usword_t remainder_size = node->size() - size;
        if (remainder_size) {
            // determine the address of the remainder
            void *remainder_address = displace(address, size);
            // figure out which cache slot it should go in
            usword_t m = cache_slot(remainder_size);
            // insert the remainder onto the free list
            insert_node(m, remainder_address, remainder_size);
        }

        // mark as allocated.
        Subzone *copySubzone = Subzone::subzone(address);
        usword_t copyQ = copySubzone->quantum_index_unchecked(address);
        copySubzone->allocate(copyQ, n, layout, false, false);

        return address;
    }

    //
    // allocate_different_block_no_lock
    //
    // Allocates a block of the same size and layout as the block identified by (subzone, q, block_address), at a different heap address.
    // If no other block can be found, returns block_address. Used by AUTO_COMPACTION_SCRAMBLE mode.
    //
    void *Admin::allocate_different_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
        // determine the number of quanta we need
        const usword_t size = subzone->size(q);
        const unsigned layout = subzone->layout(q);
        usword_t n = quantum_count(size);
        ASSERTION(n < AllocationCache::cache_size);
        void *address = find_allocation_no_lock(n);

        if (address) {
            // mark as allocated
            Subzone *copySubzone = Subzone::subzone(address);
            usword_t copyQ = copySubzone->quantum_index_unchecked(address);
            copySubzone->allocate(copyQ, n, layout, false, false);

            return address;
        }

        // couldn't find a block to allocate, simply return the original block address.
        return block_address;
    }

    //
    // allocate_destination_block_no_lock
    //
    // Allocates a block of the same size and layout as the block identified by (subzone, q, block_address), at a different heap address.
    // Calls either allocate_lower_block_no_lock or allocate_different_block_no_lock, depending on the compaction mode.
    //
    void *Admin::allocate_destination_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
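        // the compaction mode is latched the first time this runs and reused for subsequent calls.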
        static void *(Admin::*block_allocator)(Subzone *subzone, usword_t q, void *block_address) = (Environment::scramble_heap ? &Admin::allocate_different_block_no_lock : &Admin::allocate_lower_block_no_lock);
        return (this->*block_allocator)(subzone, q, block_address);
    }

    //
    // deallocate
    //
    // Clear tables of information after deallocation.
    //
    void Admin::deallocate(Subzone *subzone, usword_t q, void *address) {
        SpinLock lock(&_admin_lock);
        deallocate_no_lock(subzone, q, address);
    }

    //
    // deallocate_no_lock
    //
    // Clear tables of information after deallocation.  Assumes _admin_lock held.
    //
    void Admin::deallocate_no_lock(Subzone *subzone, usword_t q, void *address) {
        AUTO_PROBE(auto_probe_admin_deallocate(address));
        usword_t n = subzone->length(q);

        // detect double-frees.
        ASSERTION(!subzone->is_free(q));
        if (subzone->is_free(q)) {
            malloc_printf("Admin::deallocate:  attempting to free already freed block %p\n", address);
            return;
        }

        // assume that just this block is free
        void *free_address = address;
        usword_t free_size = subzone->quantum_size(n);

        // coalescing seems detrimental to deallocation time, but it improves memory utilization.
        // determine next block
        usword_t next_q = q + n;
        usword_t highwater = subzone->allocation_count();

        // don't bother with coalescing when using guard pages.
        if (!Environment::guard_pages) {
            // if not past end of in use bits and the quantum is not in use
            if (next_q < highwater && subzone->is_free(next_q)) {
                // get the free block following in memory
                FreeListNode *next_node = (FreeListNode *)displace(free_address, free_size);
                if (test_node_integrity(next_node)) {
                    // before coalescing, mark node as potentially reused.
                    if (is_medium()) reuse_node(next_node);
                    // determine its size
                    usword_t next_size = next_node->size();
                    // which cache slot is it in
                    usword_t m = cache_slot(next_size);
                    // remove it from the free list
                    _cache[m].remove(next_node);
                    // add space to current free block
                    free_size += next_size;
                }
            }

            // check to see if prior quantum is free
            if (q && subzone->is_free(q - 1)) {
                // determine the prior free node
                FreeListNode *this_node = (FreeListNode *)address;
                FreeListNode *prev_node = this_node->prior_node();
                if (test_node_integrity(prev_node)) {
                    // before coalescing, mark node as potentially reused.
                    if (is_medium()) reuse_node(prev_node);
                    // update the current free address to use the prior node address
                    free_address = prev_node->address();
                    // get the prior's size
                    usword_t prev_size = prev_node->size();
                    // add space to current free block
                    free_size += prev_size;
                    // which cache slot is the prior free block in
                    usword_t m = cache_slot(prev_size);
                    // remove it from the free list
                    _cache[m].remove(prev_node);
                }
            }
        }

        // scribble on blocks as they are deleted.
        if (Environment::dirty_all_deleted) {
            memset(free_address, 0x55, free_size);
        }

        // If the block we're freeing is at the end of the subzone, lower the allocation count and add the
        // subzone to a list of subzones known to have free space at the end. The subzones are reused if the
        // active subzone is exhausted, and there is enough free space to satisfy any requested size.
        // When the system indicates there is memory pressure, purge_free_space() is called which checks to
        // see if there are subzones in the purgeable list.

        if (next_q == highwater) {
            subzone->lower_allocation_count(quantum_count(free_size));
            if (subzone != _active_subzone && !subzone->is_purgeable()) {
                // make this subzone eligible for purging in purge_free_space().
                subzone->setNextSubzone(_purgeable_subzones);
                _purgeable_subzones = subzone;
                subzone->set_purgeable(true);
            }
        } else {
            // determine which free list the free space should go upon
            usword_t m = cache_slot(free_size);
            // add free space to free lists
            push_node(m, free_address, free_size);
        }

        // clear side data
        subzone->deallocate(q, n);
    }

};