/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_general.h"
#include "apr_rmm.h"
#include "apr_errno.h"
#include "apr_lib.h"
#include "apr_strings.h"

/* The RMM region is made up of two doubly-linked lists of blocks: the
 * list of used blocks and the list of free blocks (either list may be
 * empty).  The base pointer, rmm->base, points at the beginning of
 * the shmem region in use.  Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer.  The term "address" is used here to mean such a value; an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block.  The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked list respectively, or otherwise hold the
 * address of the next and previous blocks in the list.  ("Address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address.)
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus the header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as
 * appropriate. */
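
/* Illustrative usage sketch (not compiled): a caller typically lays an
 * RMM over a shared-memory segment obtained elsewhere (here via the
 * apr_shm API; "/tmp/example-shm" is a made-up name, and error handling
 * is omitted), hands out offsets with apr_rmm_malloc(), and converts
 * them to pointers on each use:
 *
 *     apr_shm_t *shm;
 *     apr_rmm_t *rmm;
 *     apr_rmm_off_t off;
 *
 *     apr_shm_create(&shm, 65536, "/tmp/example-shm", pool);
 *     apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm),
 *                  apr_shm_size_get(shm), pool);
 *
 *     off = apr_rmm_malloc(rmm, 100);   // an offset, usable in any process
 *     if (off) {
 *         char *p = apr_rmm_addr_get(rmm, off);
 *         memset(p, 0, 100);
 *         apr_rmm_free(rmm, off);
 *     }
 *     apr_rmm_destroy(rmm);
 */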

typedef struct rmm_block_t {
    apr_size_t size;
    apr_rmm_off_t prev;
    apr_rmm_off_t next;
} rmm_block_t;

/* Always at our apr_rmm_off(0):
 */
typedef struct rmm_hdr_block_t {
    apr_size_t abssize;
    apr_rmm_off_t /* rmm_block_t */ firstused;
    apr_rmm_off_t /* rmm_block_t */ firstfree;
} rmm_hdr_block_t;

#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))

struct apr_rmm_t {
    apr_pool_t *p;
    rmm_hdr_block_t *base;
    apr_size_t size;
    apr_anylock_t lock;
};

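/* Walk the list that starts at "next" looking for the block at offset
 * "find".  Returns "find" itself if it is on the list.  Otherwise, with
 * "includes" nonzero, returns the offset of the last block preceding
 * "find" (0 if there is none); with "includes" zero, returns 0.
 */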
static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
                                          apr_rmm_off_t find, int includes)
{
    apr_rmm_off_t prev = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (find == next)
            return next;

        /* Overshot? */
        if (find < next)
            return includes ? prev : 0;

        prev = next;
        next = blk->next;
    }
    return includes ? prev : 0;
}

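/* Best-fit search of the free list for a block whose size is at least
 * "size" (which already includes the rmm_block_t header).  An exact match
 * is returned immediately; otherwise the smallest sufficient block is
 * chosen, and if its surplus is large enough the surplus is split off as
 * a new free block.  Returns 0 if nothing fits.
 */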
static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
{
    apr_rmm_off_t next = rmm->base->firstfree;
    apr_rmm_off_t best = 0;
    apr_rmm_off_t bestsize = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (blk->size == size)
            return next;

        if (blk->size >= size) {
            /* XXX: suboptimal algorithm
             * We need the most thorough best-fit logic, since we can
             * never grow our rmm; once we hit the wall we are out of
             * room for good.
             */
            if (!bestsize || (blk->size < bestsize)) {
                bestsize = blk->size;
                best = next;
            }
        }

        next = blk->next;
    }

    if (bestsize > RMM_BLOCK_SIZE + size) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
        struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);

        new->size = blk->size - size;
        new->next = blk->next;
        new->prev = best;

        blk->size = size;
        blk->next = best + size;

        if (new->next) {
            blk = (rmm_block_t*)((char*)rmm->base + new->next);
            blk->prev = best + size;
        }
    }

    return best;
}

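/* Move the block at offset "this" from the used list to the free list
 * (free != 0) or vice versa (free == 0).  The block is unlinked from its
 * current list, re-inserted into the other list in ascending-offset
 * order, and, when freeing, coalesced with an adjacent predecessor or
 * successor whose offsets show the blocks are contiguous.
 */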
static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
{
    struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);

    /* close the gap */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        prev->next = blk->next;
    }
    else {
        if (free) {
            rmm->base->firstused = blk->next;
        }
        else {
            rmm->base->firstfree = blk->next;
        }
    }
    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        next->prev = blk->prev;
    }

    /* now find it in the other list, pushing it to the head if required */
    if (free) {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstfree;
            rmm->base->firstfree = this;
        }
    }
    else {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstused;
            rmm->base->firstused = this;
        }
    }

    /* and open it up */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (free && (blk->prev + prev->size == this)) {
            /* Collapse us into our predecessor */
            prev->size += blk->size;
            this = blk->prev;
            blk = prev;
        }
        else {
            blk->next = prev->next;
            prev->next = this;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (free && (this + blk->size == blk->next)) {
            /* Collapse us into our successor */
            blk->size += next->size;
            blk->next = next->next;
            if (blk->next) {
                next = (rmm_block_t*)((char*)rmm->base + blk->next);
                next->prev = this;
            }
        }
        else {
            next->prev = this;
        }
    }
}

APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                       void *base, apr_size_t size,
                                       apr_pool_t *p)
{
    apr_status_t rv;
    rmm_block_t *blk;
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }
    if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
        return rv;

    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = size;
    (*rmm)->lock = *lock;

    (*rmm)->base->abssize = size;
    (*rmm)->base->firstused = 0;
    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;

    blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);

    blk->size = size - (*rmm)->base->firstfree;
    blk->prev = 0;
    blk->next = 0;

    return APR_ANYLOCK_UNLOCK(lock);
}

APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
{
    apr_status_t rv;
    rmm_block_t *blk;

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    /* Blast it all --- no going back :) */
    if (rmm->base->firstused) {
        apr_rmm_off_t this = rmm->base->firstused;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstused = 0;
    }
    if (rmm->base->firstfree) {
        apr_rmm_off_t this = rmm->base->firstfree;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstfree = 0;
    }
    rmm->base->abssize = 0;
    rmm->size = 0;

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
                                         void *base, apr_pool_t *p)
{
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }

    /* some sanity checking would be good here */
    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = (*rmm)->base->abssize;
    (*rmm)->lock = *lock;
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
{
    /* A no-op until we introduce locking/refcounts */
    return APR_SUCCESS;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_size_t size;
    apr_rmm_off_t this;

    size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
    if (size < reqsize) {
        return 0;
    }

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, size);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_size_t size;
    apr_rmm_off_t this;

    size = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;
    if (size < reqsize) {
        return 0;
    }

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, size);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
        memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
                                           apr_size_t reqsize)
{
    apr_rmm_off_t this;
    apr_rmm_off_t old;
    struct rmm_block_t *blk;
    apr_size_t size, oldsize;

    if (!entity) {
        return apr_rmm_malloc(rmm, reqsize);
    }

    size = APR_ALIGN_DEFAULT(reqsize);
    if (size < reqsize) {
        return 0;
    }
    old = apr_rmm_offset_get(rmm, entity);

    if ((this = apr_rmm_malloc(rmm, size)) == 0) {
        return 0;
    }

    blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
    oldsize = blk->size;

    memcpy(apr_rmm_addr_get(rmm, this),
           apr_rmm_addr_get(rmm, old), oldsize < size ? oldsize : size);
    apr_rmm_free(rmm, old);

    return this;
}

APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
{
    apr_status_t rv;
    struct rmm_block_t *blk;

    /* A little sanity check is always healthy, especially here.
     * If we really cared, we could make this compile-time.
     */
    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
        return APR_EINVAL;
    }

    this -= RMM_BLOCK_SIZE;

    blk = (rmm_block_t*)((char*)rmm->base + this);

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (prev->next != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }
    else {
        if (rmm->base->firstused != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (next->prev != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    /* Ok, it remained [apparently] sane, so unlink it
     */
    move_block(rmm, this, 1);

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
{
    /* debug-time sanity checking here would be good
     */
    return (void*)((char*)rmm->base + entity);
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
{
    /* debug (or even unconditional) sanity checking here would be good;
     * since the primitive type is apr_rmm_off_t, I don't mind penalizing
     * inverse conversions for safety, unless someone can prove that
     * there is no choice in some cases.
     */
    return ((char*)entity - (char*)rmm->base);
}

APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
{
    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
     * for alignment overhead, plus the size of the rmm_block_t
     * structure. */
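    /* For example, on a typical LP64 build (assuming 8-byte apr_size_t and
     * apr_rmm_off_t, so an 8-byte default alignment boundary), both header
     * structures come to 24 bytes and APR_ALIGN_DEFAULT(1) is 8, so n = 10
     * blocks cost at most 24 + 10 * (24 + 8) = 344 bytes of overhead.
     */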
    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
}