1/*
2 * Copyright (c) 2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20/*
21    Large.cpp
22    Large Block Support
23    Copyright (c) 2004-2011 Apple Inc. All rights reserved.
24 */
25
26#include "Large.h"
27#include "Subzone.h"
28#include "Zone.h"
29
30
31namespace Auto {
32
33    //----- Large -----//
34
35    /*
     The in_large bitmap tracks the starting quantum of large allocations.
     On 32-bit systems the quantum size is 64K; on 64-bit it is 128K.
     We allocate the number of pages needed to satisfy the request, which can
     leave small holes in the address space.  We always allocate on a quantum boundary.
40
41     At the quantum boundary we establish the "Large" data structure.
42     Rounding up to the next small_quanta boundary we provide that as the address
43     of the allocation.  It is a constant.
44     Only pointers that have the in_large bit set and that have an address of that rounded up
45     small quanta size are in fact our pointers.
46
47     Beyond the requested size (also rounded) are the write-barrier cards.
48    */
49
    //
    // Large (constructor)
    //
    // Records the bookkeeping state for a large block.  The backing memory and
    // the WriteBarrier have already been prepared by Large::allocate; this only
    // initializes the header fields.
    //
    //   zone     - owning Zone
    //   vm_size  - total vm memory reserved (header + data + guard + cards)
    //   size     - quantum-rounded size of the block data
    //   layout   - AUTO_* layout flags
    //   refcount - initial reference count (0 or 1, from Large::allocate)
    //   age      - initial age value for the block (initial_age in allocate)
    //   wb       - write barrier covering the block's data
    //
    Large::Large(Zone *zone, usword_t vm_size, usword_t size, usword_t layout, usword_t refcount, usword_t age, const WriteBarrier &wb)
        : _prev(NULL), _next(NULL), _zone(zone), _vm_size(vm_size), _size(size), _layout(layout), _refcount(refcount), _age(age),
          _is_pending(false), _is_marked(false), _is_garbage(false), _checking_count(0), _write_barrier(wb)
    {
        // no body: all state is established in the initializer list.
    }
55
56    //
57    // allocate
58    //
59    // Allocate memory used for the large block.
60    //
61    Large *Large::allocate(Zone *zone, const usword_t size, usword_t layout, bool refcount_is_one) {
62        // determine the size of the block header
63        usword_t header_size = side_data_size();
64
65        // determine memory needed for allocation, guarantee minimum quantum alignment
66        usword_t allocation_size = align2(size, allocate_quantum_small_log2);
67
68        // determine the extra space to allocate for a guard page.
69        usword_t guard_size = 0;
70        if (Environment::guard_pages) {
71            // allocate enough extra space to page-align the guard page.
72            usword_t slop_size = align2(header_size + allocation_size, page_size_log2) - (header_size + allocation_size);
73            guard_size = slop_size + page_size;
74        }
75
76        // determine memory for the write barrier, guarantee minimum quantum alignment
77        usword_t wb_size = (layout & AUTO_UNSCANNED) ? 0 : align2(WriteBarrier::bytes_needed(allocation_size), allocate_quantum_small_log2);
78
79        // determine total allocation
80        usword_t vm_size = align2(header_size + allocation_size + guard_size + wb_size, page_size_log2);
81
82        // allocate memory, construct with placement new.
83        void *space = zone->arena_allocate_large(vm_size);
84        if (!space) return NULL;
85
86        if (Environment::guard_pages) {
87            // protect the guard page
88            void *guard_address = align_up(displace(space, header_size + allocation_size));
89            guard_page(guard_address);
90        }
91
92        // construct the WriteBarrier here, to simplify the Large constructor.
93        void *wb_base = wb_size ? displace(space, side_data_size()) : NULL; // start of area managed by the WriteBarrier itself.
94        unsigned char* wb_cards = wb_size ? (unsigned char *)displace(space, header_size + allocation_size + guard_size) : NULL;
95        WriteBarrier wb(wb_base, wb_cards, wb_size);
96        return new (space) Large(zone, vm_size, allocation_size, layout, refcount_is_one ? 1 : 0, initial_age, wb);
97    }
98
99
100    //
101    // deallocate
102    //
103    // Release memory used by the large block.
104    //
105    void Large::deallocate(Zone *zone) {
106        if (Environment::guard_pages) {
107            // unprotect the guard page.
108            usword_t header_size = side_data_size();
109            void *guard_address = align_up(displace((void *)this, header_size + _size));
110            unguard_page(guard_address);
111        }
112
113        // release large data
114        zone->arena_deallocate((void *)this, _vm_size);
115    }
116};
117