/* apr_pools.c -- revision 289166 */
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr.h"
#include "apr_private.h"

#include "apr_atomic.h"
#include "apr_portable.h" /* for get_os_proc */
#include "apr_strings.h"
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_allocator.h"
#include "apr_lib.h"
#include "apr_thread_mutex.h"
#include "apr_hash.h"
#include "apr_time.h"
#define APR_WANT_MEMFUNC
#include "apr_want.h"
#include "apr_env.h"

#if APR_HAVE_STDLIB_H
#include <stdlib.h>     /* for malloc, free and abort */
#endif

#if APR_HAVE_UNISTD_H
#include <unistd.h>     /* for getpid and sysconf */
#endif

#if APR_ALLOCATOR_USES_MMAP
#include <sys/mman.h>
#endif

/*
 * Magic numbers
 */

/*
 * XXX: This is not optimal when using --enable-allocator-uses-mmap on
 * XXX: machines with large pagesize, but currently the sink is assumed
 * XXX: to be index 0, so MIN_ALLOC must be at least two pages.
 */
#define MIN_ALLOC (2 * BOUNDARY_SIZE)
#define MAX_INDEX   20

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
static unsigned int boundary_index;
static unsigned int boundary_size;
#define BOUNDARY_INDEX  boundary_index
#define BOUNDARY_SIZE   boundary_size
#else
#define BOUNDARY_INDEX 12
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
#endif

/*
 * Timing constants for killing subprocesses
 * There is a total 3-second delay between sending a SIGINT
 * and sending the final SIGKILL.
 * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64
 * for the exponential timeout algorithm.
 */
#define TIMEOUT_USECS    3000000
#define TIMEOUT_INTERVAL   46875
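
/* Sanity check of the relationship stated above (illustrative):
 * TIMEOUT_USECS / 64 == 3000000 / 64 == 46875 == TIMEOUT_INTERVAL.
 */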

/*
 * Allocator
 *
 * @note The max_free_index and current_free_index fields are not really
 * indices, but quantities of BOUNDARY_SIZE big memory blocks.
 */

struct apr_allocator_t {
    /** largest used index into free[], always < MAX_INDEX */
    apr_uint32_t        max_index;
    /** Total size (in BOUNDARY_SIZE multiples) of unused memory before
     * blocks are given back. @see apr_allocator_max_free_set().
     * @note Initialized to APR_ALLOCATOR_MAX_FREE_UNLIMITED,
     * which means to never give back blocks.
     */
    apr_uint32_t        max_free_index;
    /**
     * Memory size (in BOUNDARY_SIZE multiples) that currently must be freed
     * before blocks are given back. Range: 0..max_free_index
     */
    apr_uint32_t        current_free_index;
#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;
#endif /* APR_HAS_THREADS */
    apr_pool_t         *owner;
    /**
     * Lists of free nodes. Slot 0 is used for oversized nodes,
     * and slot i (for 1 <= i < MAX_INDEX) contains nodes of size
     * (i+1) * BOUNDARY_SIZE. Example for BOUNDARY_INDEX == 12:
     * slot  0: nodes larger than 81920
     * slot  1: size  8192
     * slot  2: size 12288
     * ...
     * slot 19: size 81920
     */
    apr_memnode_t      *free[MAX_INDEX];
};

#define SIZEOF_ALLOCATOR_T  APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))


/*
 * Allocator
 */

APR_DECLARE(apr_status_t) apr_allocator_create(apr_allocator_t **allocator)
{
    apr_allocator_t *new_allocator;

    *allocator = NULL;

    if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL)
        return APR_ENOMEM;

    memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
    new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;

    *allocator = new_allocator;

    return APR_SUCCESS;
}
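
/*
 * Minimal allocator lifecycle sketch (illustrative only, not part of
 * this file; the pool code below normally drives these calls):
 *
 *     apr_allocator_t *a;
 *     if (apr_allocator_create(&a) == APR_SUCCESS) {
 *         apr_allocator_max_free_set(a, 1024 * 1024); // retain <= 1 MiB
 *         ...
 *         apr_allocator_destroy(a); // frees nodes still on free lists
 *     }
 */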

APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
{
    apr_uint32_t index;
    apr_memnode_t *node, **ref;

    for (index = 0; index < MAX_INDEX; index++) {
        ref = &allocator->free[index];
        while ((node = *ref) != NULL) {
            *ref = node->next;
#if APR_ALLOCATOR_USES_MMAP
            munmap(node, (node->index+1) << BOUNDARY_INDEX);
#else
            free(node);
#endif
        }
    }

    free(allocator);
}

#if APR_HAS_THREADS
APR_DECLARE(void) apr_allocator_mutex_set(apr_allocator_t *allocator,
                                          apr_thread_mutex_t *mutex)
{
    allocator->mutex = mutex;
}

APR_DECLARE(apr_thread_mutex_t *) apr_allocator_mutex_get(
                                      apr_allocator_t *allocator)
{
    return allocator->mutex;
}
#endif /* APR_HAS_THREADS */

APR_DECLARE(void) apr_allocator_owner_set(apr_allocator_t *allocator,
                                          apr_pool_t *pool)
{
    allocator->owner = pool;
}

APR_DECLARE(apr_pool_t *) apr_allocator_owner_get(apr_allocator_t *allocator)
{
    return allocator->owner;
}

APR_DECLARE(void) apr_allocator_max_free_set(apr_allocator_t *allocator,
                                             apr_size_t in_size)
{
    apr_uint32_t max_free_index;
    apr_uint32_t size = (APR_UINT32_TRUNC_CAST)in_size;

#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;

    mutex = apr_allocator_mutex_get(allocator);
    if (mutex != NULL)
        apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

    max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX;
    allocator->current_free_index += max_free_index;
    allocator->current_free_index -= allocator->max_free_index;
    allocator->max_free_index = max_free_index;
    if (allocator->current_free_index > max_free_index)
        allocator->current_free_index = max_free_index;

#if APR_HAS_THREADS
    if (mutex != NULL)
        apr_thread_mutex_unlock(mutex);
#endif
}
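
/*
 * Worked example for the arithmetic above (illustrative): with
 * BOUNDARY_SIZE == 4096 (BOUNDARY_INDEX == 12), a call such as
 * apr_allocator_max_free_set(a, 1048576) yields
 * max_free_index == 1048576 >> 12 == 256, i.e. at most 256
 * boundary-sized blocks stay on the free lists before nodes are
 * given back to the system.
 */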

static APR_INLINE
apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
{
    apr_memnode_t *node, **ref;
    apr_uint32_t max_index;
    apr_size_t size, i, index;

    /* Round up the block size to the next boundary, but always
     * allocate at least a certain size (MIN_ALLOC).
     */
    size = APR_ALIGN(in_size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
    if (size < in_size) {
        return NULL;
    }
    if (size < MIN_ALLOC)
        size = MIN_ALLOC;

    /* Find the index for this node size by
     * dividing its size by the boundary size
     */
    index = (size >> BOUNDARY_INDEX) - 1;
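    /* e.g. with BOUNDARY_INDEX == 12: size 8192 -> index 1, size
     * 12288 -> index 2, matching the free[] table described above
     * (illustrative values).
     */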

    if (index > APR_UINT32_MAX) {
        return NULL;
    }

    /* First see if there are any nodes in the area we know
     * our node will fit into.
     */
    if (index <= allocator->max_index) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         *
         * NOTE: an optimization would be to check
         * allocator->free[index] first and if no
         * node is present, directly use
         * allocator->free[max_index].  This seems
         * like overkill though and could cause
         * memory waste.
         */
        max_index = allocator->max_index;
        ref = &allocator->free[index];
        i = index;
        while (*ref == NULL && i < max_index) {
           ref++;
           i++;
        }

        if ((node = *ref) != NULL) {
            /* If we have found a node and it doesn't have any
             * nodes waiting in line behind it _and_ we are on
             * the highest available index, find the new highest
             * available index
             */
            if ((*ref = node->next) == NULL && i >= max_index) {
                do {
                    ref--;
                    max_index--;
                }
                while (*ref == NULL && max_index > 0);

                allocator->max_index = max_index;
            }

            allocator->current_free_index += node->index + 1;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            node->next = NULL;
            node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;

            return node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we found nothing, seek the sink (at index 0), if
     * it is not empty.
     */
    else if (allocator->free[0]) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         */
        ref = &allocator->free[0];
        while ((node = *ref) != NULL && index > node->index)
            ref = &node->next;

        if (node) {
            *ref = node->next;

            allocator->current_free_index += node->index + 1;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            node->next = NULL;
            node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;

            return node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we haven't got a suitable node, malloc a new one
     * and initialize it.
     */
#if APR_ALLOCATOR_USES_MMAP
    if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
#else
    if ((node = malloc(size)) == NULL)
#endif
        return NULL;

    node->next = NULL;
    node->index = (APR_UINT32_TRUNC_CAST)index;
    node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
    node->endp = (char *)node + size;

    return node;
}

static APR_INLINE
void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
{
    apr_memnode_t *next, *freelist = NULL;
    apr_uint32_t index, max_index;
    apr_uint32_t max_free_index, current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    max_index = allocator->max_index;
    max_free_index = allocator->max_free_index;
    current_free_index = allocator->current_free_index;

    /* Walk the list of submitted nodes and free them one by one,
     * shoving them in the right 'size' buckets as we go.
     */
    do {
        next = node->next;
        index = node->index;

        if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED
            && index + 1 > current_free_index) {
            node->next = freelist;
            freelist = node;
        }
        else if (index < MAX_INDEX) {
            /* Add the node to the appropriate 'size' bucket.  Adjust
             * the max_index when appropriate.
             */
            if ((node->next = allocator->free[index]) == NULL
                && index > max_index) {
                max_index = index;
            }
            allocator->free[index] = node;
            if (current_free_index >= index + 1)
                current_free_index -= index + 1;
            else
                current_free_index = 0;
        }
        else {
            /* This node is too large to keep in a specific size bucket,
             * just add it to the sink (at index 0).
             */
            node->next = allocator->free[0];
            allocator->free[0] = node;
            if (current_free_index >= index + 1)
                current_free_index -= index + 1;
            else
                current_free_index = 0;
        }
    } while ((node = next) != NULL);

    allocator->max_index = max_index;
    allocator->current_free_index = current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    while (freelist != NULL) {
        node = freelist;
        freelist = node->next;
#if APR_ALLOCATOR_USES_MMAP
        munmap(node, (node->index+1) << BOUNDARY_INDEX);
#else
        free(node);
#endif
    }
}

APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(apr_allocator_t *allocator,
                                                 apr_size_t size)
{
    return allocator_alloc(allocator, size);
}

APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator,
                                     apr_memnode_t *node)
{
    allocator_free(allocator, node);
}
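
/*
 * Illustrative use of the public allocator API (a sketch, not part of
 * this file).  Requests are rounded up internally, so the node can be
 * larger than asked for; usable space runs from first_avail to endp:
 *
 *     apr_memnode_t *n = apr_allocator_alloc(a, 1000); // >= MIN_ALLOC
 *     if (n != NULL)
 *         apr_allocator_free(a, n); // back onto a free[] bucket
 */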



/*
 * Debug level
 */

#define APR_POOL_DEBUG_GENERAL  0x01
#define APR_POOL_DEBUG_VERBOSE  0x02
#define APR_POOL_DEBUG_LIFETIME 0x04
#define APR_POOL_DEBUG_OWNER    0x08
#define APR_POOL_DEBUG_VERBOSE_ALLOC 0x10

#define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \
                                    | APR_POOL_DEBUG_VERBOSE_ALLOC)


/*
 * Structures
 */

typedef struct cleanup_t cleanup_t;

/** A list of processes */
struct process_chain {
    /** The process ID */
    apr_proc_t *proc;
    apr_kill_conditions_e kill_how;
    /** The next process in the list */
    struct process_chain *next;
};


#if APR_POOL_DEBUG

typedef struct debug_node_t debug_node_t;

struct debug_node_t {
    debug_node_t *next;
    apr_uint32_t  index;
    void         *beginp[64];
    void         *endp[64];
};

#define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))

#endif /* APR_POOL_DEBUG */

/* The ref field in the apr_pool_t struct holds a
 * pointer to the pointer referencing this pool.
 * It is used for parent, child, sibling management.
 * Look at apr_pool_create_ex() and apr_pool_destroy()
 * to see how it is used.
 */
struct apr_pool_t {
    apr_pool_t           *parent;
    apr_pool_t           *child;
    apr_pool_t           *sibling;
    apr_pool_t          **ref;
    cleanup_t            *cleanups;
    cleanup_t            *free_cleanups;
    apr_allocator_t      *allocator;
    struct process_chain *subprocesses;
    apr_abortfunc_t       abort_fn;
    apr_hash_t           *user_data;
    const char           *tag;

#if !APR_POOL_DEBUG
    apr_memnode_t        *active;
    apr_memnode_t        *self; /* The node containing the pool itself */
    char                 *self_first_avail;

#else /* APR_POOL_DEBUG */
    apr_pool_t           *joined; /* the caller has guaranteed that this pool
                                   * will survive as long as ->joined */
    debug_node_t         *nodes;
    const char           *file_line;
    apr_uint32_t          creation_flags;
    unsigned int          stat_alloc;
    unsigned int          stat_total_alloc;
    unsigned int          stat_clear;
#if APR_HAS_THREADS
    apr_os_thread_t       owner;
    apr_thread_mutex_t   *mutex;
#endif /* APR_HAS_THREADS */
#endif /* APR_POOL_DEBUG */
#ifdef NETWARE
    apr_os_proc_t         owner_proc;
#endif /* defined(NETWARE) */
    cleanup_t            *pre_cleanups;
};

#define SIZEOF_POOL_T       APR_ALIGN_DEFAULT(sizeof(apr_pool_t))


/*
 * Variables
 */

static apr_byte_t   apr_pools_initialized = 0;
static apr_pool_t  *global_pool = NULL;

#if !APR_POOL_DEBUG
static apr_allocator_t *global_allocator = NULL;
#endif /* !APR_POOL_DEBUG */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static apr_file_t *file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

/*
 * Local functions
 */

static void run_cleanups(cleanup_t **c);
static void free_proc_chain(struct process_chain *procs);

#if APR_POOL_DEBUG
static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
#endif

#if !APR_POOL_DEBUG
/*
 * Initialization
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv;

    if (apr_pools_initialized++)
        return APR_SUCCESS;

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
    boundary_size = sysconf(_SC_PAGESIZE);
    boundary_index = 12;
    while ( (1 << boundary_index) < boundary_size)
        boundary_index++;
    boundary_size = (1 << boundary_index);
#endif

    if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
        apr_pools_initialized = 0;
        return rv;
    }

    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 global_allocator)) != APR_SUCCESS) {
        apr_allocator_destroy(global_allocator);
        global_allocator = NULL;
        apr_pools_initialized = 0;
        return rv;
    }

    apr_pool_tag(global_pool, "apr_global_pool");

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     *
     * Warning: apr_atomic_init() must always be called, by any
     * means possible, from apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if APR_HAS_THREADS
    {
        apr_thread_mutex_t *mutex;

        if ((rv = apr_thread_mutex_create(&mutex,
                                          APR_THREAD_MUTEX_DEFAULT,
                                          global_pool)) != APR_SUCCESS) {
            return rv;
        }

        apr_allocator_mutex_set(global_allocator, mutex);
    }
#endif /* APR_HAS_THREADS */

    apr_allocator_owner_set(global_allocator, global_pool);

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

    global_allocator = NULL;
}


/* Node list management helper macros; list_insert() inserts 'node'
 * before 'point'. */
#define list_insert(node, point) do {           \
    node->ref = point->ref;                     \
    *node->ref = node;                          \
    node->next = point;                         \
    point->ref = &node->next;                   \
} while (0)

/* list_remove() removes 'node' from its list. */
#define list_remove(node) do {                  \
    *node->ref = node->next;                    \
    node->next->ref = node->ref;                \
} while (0)
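
/* Invariant behind the two macros above (descriptive note): within a
 * pool's ring of nodes, each memnode's ref field points at the pointer
 * that currently points to that node (the previous node's next field).
 * The ring is seeded in apr_pool_create_ex(), which sets
 * node->next = node and node->ref = &node->next, so neither macro has
 * to handle a NULL neighbour.
 */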

/* Returns the amount of free space in the given node. */
#define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))

/*
 * Memory allocation
 */

APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t in_size)
{
    apr_memnode_t *active, *node;
    void *mem;
    apr_size_t size, free_index;

    size = APR_ALIGN_DEFAULT(in_size);
    if (size < in_size) {
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }
    active = pool->active;

    /* If the active node has enough bytes left, use it. */
    if (size <= node_free_space(active)) {
        mem = active->first_avail;
        active->first_avail += size;

        return mem;
    }

    node = active->next;
    if (size <= node_free_space(node)) {
        list_remove(node);
    }
    else {
        if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }
    }

    node->free_index = 0;

    mem = node->first_avail;
    node->first_avail += size;

    list_insert(node, active);

    pool->active = node;

    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
    node = active->next;
    if (free_index >= node->free_index)
        return mem;

    do {
        node = node->next;
    }
    while (free_index < node->free_index);

    list_remove(active);
    list_insert(active, node);

    return mem;
}
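
/*
 * Illustrative use of apr_palloc (a sketch, not part of this file).
 * There is no per-allocation free(); the memory lives until the pool
 * is cleared or destroyed:
 *
 *     char *buf = apr_palloc(pool, 100);
 *     if (buf == NULL)
 *         ...;   // allocation failed (any abort_fn has already run)
 */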

/* Provide an implementation of apr_pcalloc for backward compatibility
 * with code built before apr_pcalloc was a macro
 */

#ifdef apr_pcalloc
#undef apr_pcalloc
#endif

APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
    void *mem;

    if ((mem = apr_palloc(pool, size)) != NULL) {
        memset(mem, 0, size);
    }

    return mem;
}


/*
 * Pool creation/destruction
 */

APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
    apr_memnode_t *active;

    /* Run pre destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool, so this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);
    pool->subprocesses = NULL;

    /* Clear the user data. */
    pool->user_data = NULL;

    /* Find the node attached to the pool structure, reset it, make
     * it the active node and free the rest of the nodes.
     */
    active = pool->active = pool->self;
    active->first_avail = pool->self_first_avail;

    if (active->next == active)
        return;

    *active->ref = NULL;
    allocator_free(pool->allocator, active->next);
    active->next = active;
    active->ref = &active->next;
}
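
/*
 * Lifetime note (descriptive): apr_pool_clear() runs cleanups,
 * destroys subpools and recycles all nodes but leaves the pool valid
 * for further allocation; apr_pool_destroy() below additionally
 * unlinks the pool from its parent and returns every node, including
 * the one holding the pool struct itself, to the allocator.
 */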

APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
    apr_memnode_t *active;
    apr_allocator_t *allocator;

    /* Run pre destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool, so this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);

    /* Remove the pool from the parent's child list */
    if (pool->parent) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((*pool->ref = pool->sibling) != NULL)
            pool->sibling->ref = pool->ref;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }

    /* Find the block attached to the pool structure.  Save a copy of the
     * allocator pointer, because the pool struct soon will be no more.
     */
    allocator = pool->allocator;
    active = pool->self;
    *active->ref = NULL;

#if APR_HAS_THREADS
    if (apr_allocator_owner_get(allocator) == pool) {
        /* Make sure to remove the lock, since it is highly likely to
         * be invalid now.
         */
        apr_allocator_mutex_set(allocator, NULL);
    }
#endif /* APR_HAS_THREADS */

    /* Free all the nodes in the pool (including the node holding the
     * pool struct), by giving them back to the allocator.
     */
    allocator_free(allocator, active);

    /* If this pool happens to be the owner of the allocator, free
     * everything in the allocator (that includes the pool struct
     * and the allocator).  Don't worry about destroying the optional mutex
     * in the allocator, it will have been destroyed by the cleanup function.
     */
    if (apr_allocator_owner_get(allocator) == pool) {
        apr_allocator_destroy(allocator);
    }
}

APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;

    *newpool = NULL;

    if (!parent)
        parent = global_pool;

    /* parent will always be non-NULL here except the first time a
     * pool is created, in which case allocator is guaranteed to be
     * non-NULL. */

    if (!abort_fn && parent)
        abort_fn = parent->abort_fn;

    if (allocator == NULL)
        allocator = parent->allocator;

    if ((node = allocator_alloc(allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

    pool = (apr_pool_t *)node->first_avail;
    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;

    pool->allocator = allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;

#ifdef NETWARE
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if ((pool->parent = parent) != NULL) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((pool->sibling = parent->child) != NULL)
            pool->sibling->ref = &pool->sibling;

        parent->child = pool;
        pool->ref = &parent->child;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }
    else {
        pool->sibling = NULL;
        pool->ref = NULL;
    }

    *newpool = pool;

    return APR_SUCCESS;
}
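
/*
 * Illustrative subpool creation (a sketch, not part of this file).
 * NULL arguments inherit the parent's abort_fn and allocator, and the
 * parent's own destruction destroys the child first:
 *
 *     apr_pool_t *child;
 *     if (apr_pool_create_ex(&child, parent, NULL, NULL) == APR_SUCCESS)
 *         apr_pool_destroy(child);
 */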

/* Deprecated. Renamed to apr_pool_create_unmanaged_ex
 */
APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;
    apr_allocator_t *pool_allocator;

    *newpool = NULL;

    if (!apr_pools_initialized)
        return APR_ENOPOOL;
    if ((pool_allocator = allocator) == NULL) {
        if ((pool_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL) {
            if (abort_fn)
                abort_fn(APR_ENOMEM);

            return APR_ENOMEM;
        }
        memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
        pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
    }
    if ((node = allocator_alloc(pool_allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

    pool = (apr_pool_t *)node->first_avail;
    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;

    pool->allocator = pool_allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;
    pool->parent = NULL;
    pool->sibling = NULL;
    pool->ref = NULL;

#ifdef NETWARE
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */
    if (!allocator)
        pool_allocator->owner = pool;
    *newpool = pool;

    return APR_SUCCESS;
}
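
/*
 * Unmanaged pools have no parent, so no other pool's destruction will
 * reach them; the caller must destroy them explicitly (illustrative
 * sketch, not part of this file):
 *
 *     apr_pool_t *p;
 *     if (apr_pool_create_unmanaged_ex(&p, NULL, NULL) == APR_SUCCESS)
 *         apr_pool_destroy(p); // also frees the private allocator
 */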

/*
 * "Print" functions
 */

/*
 * apr_psprintf is implemented by writing directly into the current
 * block of the pool, starting right at first_avail.  If there's
 * insufficient room, then a new block is allocated and the earlier
 * output is copied over.  The new block isn't linked into the pool
 * until all the output is done.
 *
 * Note that this is completely safe because nothing else can
 * allocate in this apr_pool_t while apr_psprintf is running.  Alarms
 * are blocked, and the only thing outside of apr_pools.c that's
 * invoked is apr_vformatter -- which was purposefully written to be
 * self-contained with no callouts.
 */

struct psprintf_data {
    apr_vformatter_buff_t vbuff;
    apr_memnode_t   *node;
    apr_pool_t      *pool;
    apr_byte_t       got_a_new_node;
    apr_memnode_t   *free;
};

#define APR_PSPRINTF_MIN_STRINGSIZE 32

static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
    struct psprintf_data *ps = (struct psprintf_data *)vbuff;
    apr_memnode_t *node, *active;
    apr_size_t cur_len, size;
    char *strp;
    apr_pool_t *pool;
    apr_size_t free_index;

    pool = ps->pool;
    active = ps->node;
    strp = ps->vbuff.curpos;
    cur_len = strp - active->first_avail;
    size = cur_len << 1;

    /* Make sure that we don't try to use a block that has less
     * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it.  This
     * also catches the case where size == 0, which would result
     * in reusing a block that can't even hold the NUL byte.
     */
    if (size < APR_PSPRINTF_MIN_STRINGSIZE)
        size = APR_PSPRINTF_MIN_STRINGSIZE;

    node = active->next;
    if (!ps->got_a_new_node && size <= node_free_space(node)) {

        list_remove(node);
        list_insert(node, active);

        node->free_index = 0;

        pool->active = node;

        free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                                BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

        active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
        node = active->next;
        if (free_index < node->free_index) {
            do {
                node = node->next;
            }
            while (free_index < node->free_index);

            list_remove(active);
            list_insert(active, node);
        }

        node = pool->active;
    }
    else {
        if ((node = allocator_alloc(pool->allocator, size)) == NULL)
            return -1;

        if (ps->got_a_new_node) {
            active->next = ps->free;
            ps->free = active;
        }

        ps->got_a_new_node = 1;
    }

    memcpy(node->first_avail, active->first_avail, cur_len);

    ps->node = node;
    ps->vbuff.curpos = node->first_avail + cur_len;
    ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */

    return 0;
}

APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
{
    struct psprintf_data ps;
    char *strp;
    apr_size_t size;
    apr_memnode_t *active, *node;
    apr_size_t free_index;

    ps.node = active = pool->active;
    ps.pool = pool;
    ps.vbuff.curpos  = ps.node->first_avail;

    /* Save a byte for the NUL terminator */
    ps.vbuff.endpos = ps.node->endp - 1;
    ps.got_a_new_node = 0;
    ps.free = NULL;

    /* Make sure that the first node passed to apr_vformatter has at least
     * room to hold the NUL terminator.
     */
    if (ps.node->first_avail == ps.node->endp) {
        if (psprintf_flush(&ps.vbuff) == -1)
           goto error;
    }

    if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1)
        goto error;

    strp = ps.vbuff.curpos;
    *strp++ = '\0';

    size = strp - ps.node->first_avail;
    size = APR_ALIGN_DEFAULT(size);
    strp = ps.node->first_avail;
    ps.node->first_avail += size;

    if (ps.free)
        allocator_free(pool->allocator, ps.free);

    /*
     * Link the node in if it's a new one
     */
    if (!ps.got_a_new_node)
        return strp;

    active = pool->active;
    node = ps.node;

    node->free_index = 0;

    list_insert(node, active);

    pool->active = node;

    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
    node = active->next;

    if (free_index >= node->free_index)
        return strp;

    do {
        node = node->next;
    }
    while (free_index < node->free_index);

    list_remove(active);
    list_insert(active, node);

    return strp;

error:
    if (pool->abort_fn)
        pool->abort_fn(APR_ENOMEM);
    if (ps.got_a_new_node) {
        ps.node->next = ps.free;
        allocator_free(pool->allocator, ps.node);
    }
    return NULL;
}
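
/*
 * The varargs wrapper apr_psprintf() builds on this function roughly
 * as follows (an illustrative sketch, not the definitive
 * implementation):
 *
 *     char *apr_psprintf(apr_pool_t *p, const char *fmt, ...)
 *     {
 *         va_list ap;
 *         char *res;
 *
 *         va_start(ap, fmt);
 *         res = apr_pvsprintf(p, fmt, ap);
 *         va_end(ap);
 *         return res;
 *     }
 */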


#else /* APR_POOL_DEBUG */
/*
 * Debug helper functions
 */


/*
 * Walk the pool tree rooted at pool, depth first.  When fn returns
 * anything other than 0, abort the traversal and return the value
 * returned by fn.
 */
static int apr_pool_walk_tree(apr_pool_t *pool,
                              int (*fn)(apr_pool_t *pool, void *data),
                              void *data)
{
    int rv;
    apr_pool_t *child;

    rv = fn(pool, data);
    if (rv)
        return rv;

#if APR_HAS_THREADS
    if (pool->mutex) {
        apr_thread_mutex_lock(pool->mutex);
    }
#endif /* APR_HAS_THREADS */

    child = pool->child;
    while (child) {
        rv = apr_pool_walk_tree(child, fn, data);
        if (rv)
            break;

        child = child->sibling;
    }

#if APR_HAS_THREADS
    if (pool->mutex) {
        apr_thread_mutex_unlock(pool->mutex);
    }
#endif /* APR_HAS_THREADS */

    return rv;
}
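
/* Illustrative walker callback (a sketch, not part of this file):
 * counts the pools in a tree when passed to apr_pool_walk_tree().
 *
 *     static int count_pools(apr_pool_t *pool, void *data)
 *     {
 *         (*(unsigned int *)data)++;
 *         return 0;  // keep walking; non-zero would stop the walk
 *     }
 */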

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static void apr_pool_log_event(apr_pool_t *pool, const char *event,
                               const char *file_line, int deref)
{
    if (file_stderr) {
        if (deref) {
            apr_file_printf(file_stderr,
                "POOL DEBUG: "
                "[%lu"
#if APR_HAS_THREADS
                "/%lu"
#endif /* APR_HAS_THREADS */
                "] "
                "%7s "
                "(%10lu/%10lu/%10lu) "
                "0x%pp \"%s\" "
                "<%s> "
                "(%u/%u/%u) "
                "\n",
                (unsigned long)getpid(),
#if APR_HAS_THREADS
                (unsigned long)apr_os_thread_current(),
#endif /* APR_HAS_THREADS */
                event,
                (unsigned long)apr_pool_num_bytes(pool, 0),
                (unsigned long)apr_pool_num_bytes(pool, 1),
                (unsigned long)apr_pool_num_bytes(global_pool, 1),
                pool, pool->tag,
                file_line,
                pool->stat_alloc, pool->stat_total_alloc, pool->stat_clear);
        }
        else {
            apr_file_printf(file_stderr,
                "POOL DEBUG: "
                "[%lu"
#if APR_HAS_THREADS
                "/%lu"
#endif /* APR_HAS_THREADS */
                "] "
                "%7s "
                "                                   "
                "0x%pp "
                "<%s> "
                "\n",
                (unsigned long)getpid(),
#if APR_HAS_THREADS
                (unsigned long)apr_os_thread_current(),
#endif /* APR_HAS_THREADS */
                event,
                pool,
                file_line);
        }
    }
}
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
static int pool_is_child_of(apr_pool_t *parent, void *data)
{
    apr_pool_t *pool = (apr_pool_t *)data;

    return (pool == parent);
}

static int apr_pool_is_child_of(apr_pool_t *pool, apr_pool_t *parent)
{
    if (parent == NULL)
        return 0;

    return apr_pool_walk_tree(parent, pool_is_child_of, pool);
}
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */

static void apr_pool_check_integrity(apr_pool_t *pool)
{
    /* Rule of thumb: use of the global pool is always
     * ok, since the only user is apr_pools.c.  Unless
     * people have searched for the top level parent and
     * started to use that...
     */
    if (pool == global_pool || global_pool == NULL)
        return;

    /* Lifetime
     * This basically checks to see if the pool being used is still
     * a relative of the global pool.  If not, it was previously
     * destroyed, in which case we abort().
     */
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
    if (!apr_pool_is_child_of(pool, global_pool)) {
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
        apr_pool_log_event(pool, "LIFE",
                           __FILE__ ":apr_pool_integrity check", 0);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
        abort();
    }
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER)
#if APR_HAS_THREADS
    if (!apr_os_thread_equal(pool->owner, apr_os_thread_current())) {
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
        apr_pool_log_event(pool, "THREAD",
                           __FILE__ ":apr_pool_integrity check", 0);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
        abort();
    }
#endif /* APR_HAS_THREADS */
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) */
}


/*
 * Initialization (debug)
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv;
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    char *logpath;
    apr_file_t *debug_log = NULL;
#endif

    if (apr_pools_initialized++)
        return APR_SUCCESS;

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
    boundary_size = sysconf(_SC_PAGESIZE);
    boundary_index = 12;
    while ( (1 << boundary_index) < boundary_size)
        boundary_index++;
    boundary_size = (1 << boundary_index);
#endif

    /* Since the debug code works a bit differently than the
     * regular pools code, we ask for a lock here.  The regular
     * pools code has got this lock embedded in the global
     * allocator, a concept unknown to debug mode.
     */
    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 NULL)) != APR_SUCCESS) {
        return rv;
    }

    apr_pool_tag(global_pool, "APR global pool");

    apr_pools_initialized = 1;

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    rv = apr_env_get(&logpath, "APR_POOL_DEBUG_LOG", global_pool);

    /* Don't pass file_stderr directly to apr_file_open() here, since
     * apr_file_open() can call back to apr_pool_log_event() and that
     * may attempt to use the then non-NULL but partially set up file
     * object. */
    if (rv == APR_SUCCESS) {
        apr_file_open(&debug_log, logpath, APR_APPEND|APR_WRITE|APR_CREATE,
                      APR_OS_DEFAULT, global_pool);
    }
    else {
        apr_file_open_stderr(&debug_log, global_pool);
    }

    /* debug_log is now a file handle. */
    file_stderr = debug_log;

    if (file_stderr) {
        apr_file_printf(file_stderr,
            "POOL DEBUG: [PID"
#if APR_HAS_THREADS
            "/TID"
#endif /* APR_HAS_THREADS */
            "] ACTION  (SIZE      /POOL SIZE /TOTAL SIZE) "
            "POOL       \"TAG\" <__FILE__:__LINE__> (ALLOCS/TOTAL ALLOCS/CLEARS)\n");

        apr_pool_log_event(global_pool, "GLOBAL", __FILE__ ":apr_pool_initialize", 0);
    }
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
}


/*
 * Memory allocation (debug)
 */

static void *pool_alloc(apr_pool_t *pool, apr_size_t size)
{
    debug_node_t *node;
    void *mem;

    if ((mem = malloc(size)) == NULL) {
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }

    node = pool->nodes;
    if (node == NULL || node->index == 64) {
        if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
            free(mem);
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }

        memset(node, 0, SIZEOF_DEBUG_NODE_T);

        node->next = pool->nodes;
        pool->nodes = node;
        node->index = 0;
    }

    node->beginp[node->index] = mem;
    node->endp[node->index] = (char *)mem + size;
    node->index++;

    pool->stat_alloc++;
    pool->stat_total_alloc++;

    return mem;
}

APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
                                     const char *file_line)
{
    void *mem;

    apr_pool_check_integrity(pool);

    mem = pool_alloc(pool, size);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
    apr_pool_log_event(pool, "PALLOC", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */

    return mem;
}

APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
                                      const char *file_line)
{
    void *mem;

    apr_pool_check_integrity(pool);

    mem = pool_alloc(pool, size);
    memset(mem, 0, size);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
    apr_pool_log_event(pool, "PCALLOC", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */

    return mem;
}


/*
 * Pool creation/destruction (debug)
 */

#define POOL_POISON_BYTE 'A'

static void pool_clear_debug(apr_pool_t *pool, const char *file_line)
{
    debug_node_t *node;
    apr_uint32_t index;

    /* Run pre destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool, so this loop is safe and easy.
     */
    while (pool->child)
        pool_destroy_debug(pool->child, file_line);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);
    pool->free_cleanups = NULL;
    pool->cleanups = NULL;

    /* If new child pools showed up, this is a reason to raise a flag */
    if (pool->child)
        abort();

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);
    pool->subprocesses = NULL;

    /* Clear the user data. */
    pool->user_data = NULL;

    /* Free the blocks, scribbling over them first to help highlight
     * use-after-free issues. */
    while ((node = pool->nodes) != NULL) {
        pool->nodes = node->next;

        for (index = 0; index < node->index; index++) {
            memset(node->beginp[index], POOL_POISON_BYTE,
                   (char *)node->endp[index] - (char *)node->beginp[index]);
            free(node->beginp[index]);
        }

        memset(node, POOL_POISON_BYTE, SIZEOF_DEBUG_NODE_T);
        free(node);
    }

    pool->stat_alloc = 0;
    pool->stat_clear++;
}

APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
                                       const char *file_line)
{
#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex = NULL;
#endif

    apr_pool_check_integrity(pool);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "CLEAR", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

#if APR_HAS_THREADS
    if (pool->parent != NULL)
        mutex = pool->parent->mutex;

    /* Lock the parent mutex before clearing so that if we have our
     * own mutex it won't be accessed by apr_pool_walk_tree after
     * it has been destroyed.
     */
    if (mutex != NULL && mutex != pool->mutex) {
        apr_thread_mutex_lock(mutex);
    }
#endif

    pool_clear_debug(pool, file_line);

#if APR_HAS_THREADS
    /* If we had our own mutex, it will have been destroyed by
     * the registered cleanups.  Recreate the mutex.  Unlock
     * the mutex we obtained above.
     */
    if (mutex != pool->mutex) {
        (void)apr_thread_mutex_create(&pool->mutex,
                                      APR_THREAD_MUTEX_NESTED, pool);

        if (mutex != NULL)
            (void)apr_thread_mutex_unlock(mutex);
    }
#endif /* APR_HAS_THREADS */
}

static void pool_destroy_debug(apr_pool_t *pool, const char *file_line)
{
    apr_pool_check_integrity(pool);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "DESTROY", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

    pool_clear_debug(pool, file_line);

    /* Remove the pool from the parent's child list */
1641    if (pool->parent) {
1642#if APR_HAS_THREADS
1643        apr_thread_mutex_t *mutex;
1644
1645        if ((mutex = pool->parent->mutex) != NULL)
1646            apr_thread_mutex_lock(mutex);
1647#endif /* APR_HAS_THREADS */
1648
1649        if ((*pool->ref = pool->sibling) != NULL)
1650            pool->sibling->ref = pool->ref;
1651
1652#if APR_HAS_THREADS
1653        if (mutex)
1654            apr_thread_mutex_unlock(mutex);
1655#endif /* APR_HAS_THREADS */
1656    }
1657
1658    if (pool->allocator != NULL
1659        && apr_allocator_owner_get(pool->allocator) == pool) {
1660        apr_allocator_destroy(pool->allocator);
1661    }
1662
1663    /* Free the pool itself */
1664    free(pool);
1665}
1666
1667APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
1668                                         const char *file_line)
1669{
1670    if (pool->joined) {
1671        /* Joined pools must not be explicitly destroyed; the caller
1672         * has broken the guarantee. */
1673#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1674        apr_pool_log_event(pool, "LIFE",
1675                           __FILE__ ":apr_pool_destroy abort on joined", 0);
1676#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1677
1678        abort();
1679    }
1680    pool_destroy_debug(pool, file_line);
1681}
1682
1683APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
1684                                                   apr_pool_t *parent,
1685                                                   apr_abortfunc_t abort_fn,
1686                                                   apr_allocator_t *allocator,
1687                                                   const char *file_line)
1688{
1689    apr_pool_t *pool;
1690
1691    *newpool = NULL;
1692
1693    if (!parent) {
1694        parent = global_pool;
1695    }
1696    else {
1697       apr_pool_check_integrity(parent);
1698
1699       if (!allocator)
1700           allocator = parent->allocator;
1701    }
1702
1703    if (!abort_fn && parent)
1704        abort_fn = parent->abort_fn;
1705
1706    if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
1707        if (abort_fn)
1708            abort_fn(APR_ENOMEM);
1709
1710         return APR_ENOMEM;
1711    }
1712
1713    memset(pool, 0, SIZEOF_POOL_T);
1714
1715    pool->allocator = allocator;
1716    pool->abort_fn = abort_fn;
1717    pool->tag = file_line;
1718    pool->file_line = file_line;
1719
1720    if ((pool->parent = parent) != NULL) {
1721#if APR_HAS_THREADS
1722        if (parent->mutex)
1723            apr_thread_mutex_lock(parent->mutex);
1724#endif /* APR_HAS_THREADS */
1725        if ((pool->sibling = parent->child) != NULL)
1726            pool->sibling->ref = &pool->sibling;
1727
1728        parent->child = pool;
1729        pool->ref = &parent->child;
1730
1731#if APR_HAS_THREADS
1732        if (parent->mutex)
1733            apr_thread_mutex_unlock(parent->mutex);
1734#endif /* APR_HAS_THREADS */
1735    }
1736    else {
1737        pool->sibling = NULL;
1738        pool->ref = NULL;
1739    }
1740
1741#if APR_HAS_THREADS
1742    pool->owner = apr_os_thread_current();
1743#endif /* APR_HAS_THREADS */
1744#ifdef NETWARE
1745    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
1746#endif /* defined(NETWARE) */
1747
1748
1749    if (parent == NULL || parent->allocator != allocator) {
1750#if APR_HAS_THREADS
1751        apr_status_t rv;
1752
1753        /* No matter what the creation flags say, always create
1754         * a lock.  Without it integrity_check and apr_pool_num_bytes
1755         * blow up (because they traverse pools child lists that
1756         * possibly belong to another thread, in combination with
1757         * the pool having no lock).  However, this might actually
1758         * hide problems like creating a child pool of a pool
1759         * belonging to another thread.
1760         */
1761        if ((rv = apr_thread_mutex_create(&pool->mutex,
1762                APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) {
1763            free(pool);
1764            return rv;
1765        }
1766#endif /* APR_HAS_THREADS */
1767    }
1768    else {
1769#if APR_HAS_THREADS
1770        if (parent)
1771            pool->mutex = parent->mutex;
1772#endif /* APR_HAS_THREADS */
1773    }
1774
1775    *newpool = pool;
1776
1777#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
1778    apr_pool_log_event(pool, "CREATE", file_line, 1);
1779#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1780
1781    return APR_SUCCESS;
1782}
1783
APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator,
                                              file_line);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    apr_pool_t *pool;
    apr_allocator_t *pool_allocator;

    *newpool = NULL;

    if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    memset(pool, 0, SIZEOF_POOL_T);

    pool->abort_fn = abort_fn;
    pool->tag = file_line;
    pool->file_line = file_line;

#if APR_HAS_THREADS
    pool->owner = apr_os_thread_current();
#endif /* APR_HAS_THREADS */
#ifdef NETWARE
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if ((pool_allocator = allocator) == NULL) {
        apr_status_t rv;
        if ((rv = apr_allocator_create(&pool_allocator)) != APR_SUCCESS) {
            if (abort_fn)
                abort_fn(rv);
            return rv;
        }
        pool_allocator->owner = pool;
    }
    pool->allocator = pool_allocator;

    if (pool->allocator != allocator) {
#if APR_HAS_THREADS
        apr_status_t rv;

        /* No matter what the creation flags say, always create
         * a lock.  Without it integrity_check and apr_pool_num_bytes
         * blow up (because they traverse pools' child lists that
         * possibly belong to another thread, in combination with
         * the pool having no lock).  However, this might actually
         * hide problems like creating a child pool of a pool
         * belonging to another thread.
         */
        if ((rv = apr_thread_mutex_create(&pool->mutex,
                APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) {
            /* pool->allocator != allocator means we created the
             * allocator just above; don't leak it on failure. */
            apr_allocator_destroy(pool_allocator);
            free(pool);
            return rv;
        }
#endif /* APR_HAS_THREADS */
    }

    *newpool = pool;

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "CREATE", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

    return APR_SUCCESS;
}

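/* Illustrative sketch (not part of APR, compiled out): an unmanaged pool
 * has no parent, so nothing destroys it implicitly; the caller owns it
 * and must destroy it explicitly.
 */
#if 0
static apr_status_t example_unmanaged(void)
{
    apr_pool_t *p;
    apr_status_t rv;

    if ((rv = apr_pool_create_unmanaged_ex(&p, NULL, NULL)) != APR_SUCCESS)
        return rv;

    /* ... allocate from p with apr_palloc(p, ...) ... */

    apr_pool_destroy(p); /* required: no parent will ever do this for us */
    return APR_SUCCESS;
}
#endif
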
/*
 * "Print" functions (debug)
 */

struct psprintf_data {
    apr_vformatter_buff_t vbuff;
    char      *mem;
    apr_size_t size;
};

static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
    struct psprintf_data *ps = (struct psprintf_data *)vbuff;
    char *mem;
    apr_size_t size;

    size = ps->vbuff.curpos - ps->mem;

    /* Double the buffer; on failure, free the old block so the
     * caller does not leak it when it bails out. */
    ps->size <<= 1;
    if ((mem = realloc(ps->mem, ps->size)) == NULL) {
        free(ps->mem);
        ps->mem = NULL;
        return -1;
    }
    ps->mem = mem;

    ps->vbuff.curpos = ps->mem + size;
    ps->vbuff.endpos = ps->mem + ps->size - 1;

    return 0;
}

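/* The formatted string below is built in a buffer that starts at 64 bytes
 * and doubles on every flush above.  A 200-byte result, for example, grows
 * the buffer 64 -> 128 -> 256 before the terminating NUL is written.
 */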
APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
{
    struct psprintf_data ps;
    debug_node_t *node;

    apr_pool_check_integrity(pool);

    ps.size = 64;
    if ((ps.mem = malloc(ps.size)) == NULL) {
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }
    ps.vbuff.curpos  = ps.mem;

    /* Save a byte for the NUL terminator */
    ps.vbuff.endpos = ps.mem + ps.size - 1;

    if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
        /* psprintf_flush() has already freed ps.mem on failure */
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }

    *ps.vbuff.curpos++ = '\0';

    /*
     * Link the node in
     */
    node = pool->nodes;
    if (node == NULL || node->index == 64) {
        if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
            free(ps.mem); /* don't leak the formatted string */
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }

        node->next = pool->nodes;
        pool->nodes = node;
        node->index = 0;
    }

    node->beginp[node->index] = ps.mem;
    node->endp[node->index] = ps.mem + ps.size;
    node->index++;

    return ps.mem;
}


/*
 * Debug functions
 */

APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
{
#if APR_POOL_DEBUG
    if (sub->parent != p) {
        abort();
    }
    sub->joined = p;
#endif
}

static int pool_find(apr_pool_t *pool, void *data)
{
    void **pmem = (void **)data;
    debug_node_t *node;
    apr_uint32_t index;

    node = pool->nodes;

    while (node) {
        for (index = 0; index < node->index; index++) {
            if (node->beginp[index] <= *pmem
                && node->endp[index] > *pmem) {
                *pmem = pool;
                return 1;
            }
        }

        node = node->next;
    }

    return 0;
}

APR_DECLARE(apr_pool_t *) apr_pool_find(const void *mem)
{
    void *pool = (void *)mem;

    if (apr_pool_walk_tree(global_pool, pool_find, &pool))
        return pool;

    return NULL;
}

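/* Illustrative sketch (not part of APR, compiled out): in APR_POOL_DEBUG
 * builds, apr_pool_find() maps an address back to the pool that owns it.
 */
#if 0
static void example_find(apr_pool_t *pool)
{
    void *mem = apr_palloc(pool, 128);
    apr_pool_t *owner = apr_pool_find(mem);

    /* owner == pool here; an address no pool tracks yields NULL */
}
#endif
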
static int pool_num_bytes(apr_pool_t *pool, void *data)
{
    apr_size_t *psize = (apr_size_t *)data;
    debug_node_t *node;
    apr_uint32_t index;

    node = pool->nodes;

    while (node) {
        for (index = 0; index < node->index; index++) {
            *psize += (char *)node->endp[index] - (char *)node->beginp[index];
        }

        node = node->next;
    }

    return 0;
}

APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse)
{
    apr_size_t size = 0;

    if (!recurse) {
        pool_num_bytes(pool, &size);

        return size;
    }

    apr_pool_walk_tree(pool, pool_num_bytes, &size);

    return size;
}

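/* Illustrative sketch (not part of APR, compiled out): byte counts for a
 * single pool versus a whole subtree.
 */
#if 0
static void example_num_bytes(apr_pool_t *pool)
{
    apr_size_t own  = apr_pool_num_bytes(pool, 0); /* this pool only */
    apr_size_t tree = apr_pool_num_bytes(pool, 1); /* plus all descendants */

    /* tree >= own always holds */
}
#endif
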
APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag)
{
}

#endif /* !APR_POOL_DEBUG */

#ifdef NETWARE
void netware_pool_proc_cleanup(void)
{
    apr_pool_t *pool = global_pool->child;
    apr_os_proc_t owner_proc = (apr_os_proc_t)getnlmhandle();

    while (pool) {
        if (pool->owner_proc == owner_proc) {
            apr_pool_destroy(pool);
            pool = global_pool->child;
        }
        else {
            pool = pool->sibling;
        }
    }
}
#endif /* defined(NETWARE) */


/*
 * "Print" functions (common)
 */

APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
{
    va_list ap;
    char *res;

    va_start(ap, fmt);
    res = apr_pvsprintf(p, fmt, ap);
    va_end(ap);
    return res;
}

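/* Illustrative sketch (not part of APR, compiled out): the result of
 * apr_psprintf() is allocated from the pool, so it is released with the
 * pool and must not be passed to free().
 */
#if 0
static const char *example_psprintf(apr_pool_t *pool, int port)
{
    return apr_psprintf(pool, "listening on port %d", port);
}
#endif
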
/*
 * Pool Properties
 */

APR_DECLARE(void) apr_pool_abort_set(apr_abortfunc_t abort_fn,
                                     apr_pool_t *pool)
{
    pool->abort_fn = abort_fn;
}

APR_DECLARE(apr_abortfunc_t) apr_pool_abort_get(apr_pool_t *pool)
{
    return pool->abort_fn;
}

APR_DECLARE(apr_pool_t *) apr_pool_parent_get(apr_pool_t *pool)
{
#ifdef NETWARE
    /* On NetWare, don't return the global_pool; return the application
       pool as the topmost pool */
    if (pool->parent == global_pool)
        return pool;
    else
#endif
    return pool->parent;
}

APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool)
{
    return pool->allocator;
}

/* Return TRUE if a is an ancestor of b;
 * NULL is considered an ancestor of all pools.
 */
APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
{
    if (a == NULL)
        return 1;

#if APR_POOL_DEBUG
    /* Find the pool with the longest lifetime guaranteed by the
     * caller: */
    while (a->joined) {
        a = a->joined;
    }
#endif

    while (b) {
        if (a == b)
            return 1;

        b = b->parent;
    }

    return 0;
}

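/* Illustrative sketch (not part of APR, compiled out): a pool is an
 * ancestor of its children (and of itself); NULL is an ancestor of all.
 */
#if 0
static void example_ancestor(apr_pool_t *parent)
{
    apr_pool_t *child;

    (void)apr_pool_create(&child, parent);
    /* apr_pool_is_ancestor(parent, child) == 1 */
    /* apr_pool_is_ancestor(child, parent) == 0 */
    /* apr_pool_is_ancestor(NULL, parent)  == 1 */
    apr_pool_destroy(child);
}
#endif
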
APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)
{
    pool->tag = tag;
}


/*
 * User data management
 */

APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data, const char *key,
                                                apr_status_t (*cleanup) (void *),
                                                apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL)
        pool->user_data = apr_hash_make(pool);

    if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING) == NULL) {
        char *new_key = apr_pstrdup(pool, key);
        apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING, data);
    }
    else {
        apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
    }

    if (cleanup)
        apr_pool_cleanup_register(pool, data, cleanup, cleanup);

    return APR_SUCCESS;
}

APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data,
                              const char *key,
                              apr_status_t (*cleanup)(void *),
                              apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL)
        pool->user_data = apr_hash_make(pool);

    apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);

    if (cleanup)
        apr_pool_cleanup_register(pool, data, cleanup, cleanup);

    return APR_SUCCESS;
}

APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key,
                                                apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL) {
        *data = NULL;
    }
    else {
        *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING);
    }

    return APR_SUCCESS;
}

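/* Illustrative sketch (not part of APR, compiled out): stashing a value
 * on a pool and reading it back.  apr_pool_userdata_set() copies the key
 * the first time it is seen; the _setn() variant stores the caller's key
 * pointer, which must then live at least as long as the pool.
 */
#if 0
static void example_userdata(apr_pool_t *pool)
{
    void *val;
    static int answer = 42;

    apr_pool_userdata_set(&answer, "example-key", apr_pool_cleanup_null,
                          pool);
    apr_pool_userdata_get(&val, "example-key", pool);
    /* val == &answer */
}
#endif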

/*
 * Cleanup
 */

struct cleanup_t {
    struct cleanup_t *next;
    const void *data;
    apr_status_t (*plain_cleanup_fn)(void *data);
    apr_status_t (*child_cleanup_fn)(void *data);
};

APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
                      apr_status_t (*plain_cleanup_fn)(void *data),
                      apr_status_t (*child_cleanup_fn)(void *data))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p != NULL) {
        if (p->free_cleanups) {
            /* reuse a cleanup structure */
            c = p->free_cleanups;
            p->free_cleanups = c->next;
        } else {
            c = apr_palloc(p, sizeof(cleanup_t));
        }
        c->data = data;
        c->plain_cleanup_fn = plain_cleanup_fn;
        c->child_cleanup_fn = child_cleanup_fn;
        c->next = p->cleanups;
        p->cleanups = c;
    }
}

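/* Illustrative sketch (not part of APR, compiled out): a typical
 * registration.  example_close is a hypothetical resource destructor;
 * apr_pool_cleanup_null is passed as the child cleanup so the resource
 * is left alone in forked children.
 */
#if 0
static apr_status_t example_close(void *data)
{
    /* release whatever data refers to */
    return APR_SUCCESS;
}

static void example_register(apr_pool_t *pool, void *res)
{
    apr_pool_cleanup_register(pool, res, example_close,
                              apr_pool_cleanup_null);
}
#endif
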
APR_DECLARE(void) apr_pool_pre_cleanup_register(apr_pool_t *p, const void *data,
                      apr_status_t (*plain_cleanup_fn)(void *data))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p != NULL) {
        if (p->free_cleanups) {
            /* reuse a cleanup structure */
            c = p->free_cleanups;
            p->free_cleanups = c->next;
        } else {
            c = apr_palloc(p, sizeof(cleanup_t));
        }
        c->data = data;
        c->plain_cleanup_fn = plain_cleanup_fn;
        c->next = p->pre_cleanups;
        p->pre_cleanups = c;
    }
}

APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
                      apr_status_t (*cleanup_fn)(void *))
{
    cleanup_t *c, **lastp;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p == NULL)
        return;

    c = p->cleanups;
    lastp = &p->cleanups;
    while (c) {
#if APR_POOL_DEBUG
        /* Some cheap loop detection to catch a corrupt list: */
        if (c == c->next
            || (c->next && c == c->next->next)
            || (c->next && c->next->next && c == c->next->next->next)) {
            abort();
        }
#endif

        if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
            *lastp = c->next;
            /* move to freelist */
            c->next = p->free_cleanups;
            p->free_cleanups = c;
            break;
        }

        lastp = &c->next;
        c = c->next;
    }

    /* Remove any pre-cleanup as well */
    c = p->pre_cleanups;
    lastp = &p->pre_cleanups;
    while (c) {
#if APR_POOL_DEBUG
        /* Some cheap loop detection to catch a corrupt list: */
        if (c == c->next
            || (c->next && c == c->next->next)
            || (c->next && c->next->next && c == c->next->next->next)) {
            abort();
        }
#endif

        if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
            *lastp = c->next;
            /* move to freelist */
            c->next = p->free_cleanups;
            p->free_cleanups = c;
            break;
        }

        lastp = &c->next;
        c = c->next;
    }
}

APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
                      apr_status_t (*plain_cleanup_fn)(void *),
                      apr_status_t (*child_cleanup_fn)(void *))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p == NULL)
        return;

    c = p->cleanups;
    while (c) {
        if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
            c->child_cleanup_fn = child_cleanup_fn;
            break;
        }

        c = c->next;
    }
}

APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
                              apr_status_t (*cleanup_fn)(void *))
{
    apr_pool_cleanup_kill(p, data, cleanup_fn);
    return (*cleanup_fn)(data);
}

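/* Illustrative sketch (not part of APR, compiled out): releasing a
 * resource early.  apr_pool_cleanup_run() unregisters the cleanup first,
 * so it cannot run a second time when the pool is destroyed.
 * example_close is the hypothetical destructor from the sketch above.
 */
#if 0
static void example_close_early(apr_pool_t *pool, void *res)
{
    apr_pool_cleanup_run(pool, res, example_close);
}
#endif
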
static void run_cleanups(cleanup_t **cref)
{
    cleanup_t *c = *cref;

    while (c) {
        *cref = c->next;
        (*c->plain_cleanup_fn)((void *)c->data);
        c = *cref;
    }
}

#if !defined(WIN32) && !defined(OS2)

static void run_child_cleanups(cleanup_t **cref)
{
    cleanup_t *c = *cref;

    while (c) {
        *cref = c->next;
        (*c->child_cleanup_fn)((void *)c->data);
        c = *cref;
    }
}

static void cleanup_pool_for_exec(apr_pool_t *p)
{
    run_child_cleanups(&p->cleanups);

    for (p = p->child; p; p = p->sibling)
        cleanup_pool_for_exec(p);
}

APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
    cleanup_pool_for_exec(global_pool);
}

#else /* !defined(WIN32) && !defined(OS2) */

APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
    /*
     * Nothing to do on NT or OS/2, because those platforms
     * spawn the new process rather than fork for exec, and
     * all handles that are not inheritable are closed
     * automatically.  The only remaining problem is with file
     * handles that are open, but there isn't much that can be
     * done about that (short of the child closing them, or the
     * developer no longer opening them shared).
     */
    return;
}

#endif /* !defined(WIN32) && !defined(OS2) */

APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
{
    /* do nothing cleanup routine */
    return APR_SUCCESS;
}

/* Subprocesses don't use the generic cleanup interface because
 * we don't want multiple subprocesses to result in multiple
 * three-second pauses; the subprocesses have to be "freed" all
 * at once.  If other resources are introduced with the same property,
 * we might want to fold support for that into the generic interface.
 * For now, it's a special case.
 */
APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *proc,
                                           apr_kill_conditions_e how)
{
    struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain));

    pc->proc = proc;
    pc->kill_how = how;
    pc->next = pool->subprocesses;
    pool->subprocesses = pc;
}

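/* Illustrative sketch (not part of APR, compiled out): after spawning a
 * child with apr_proc_create(), tie its lifetime to the pool.  When the
 * pool is cleared or destroyed, free_proc_chain() below reaps it
 * according to the chosen apr_kill_conditions_e policy.
 */
#if 0
static void example_note_subprocess(apr_pool_t *pool, apr_proc_t *proc)
{
    /* SIGTERM first, then SIGKILL if it ignores the 3-second timeout */
    apr_pool_note_subprocess(pool, proc, APR_KILL_AFTER_TIMEOUT);
}
#endif
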
static void free_proc_chain(struct process_chain *procs)
{
    /* Dispose of the subprocesses we've spawned off in the course of
     * whatever it was we're cleaning up now.  This may involve killing
     * some of them off...
     */
    struct process_chain *pc;
    int need_timeout = 0;
    apr_time_t timeout_interval;

    if (!procs)
        return; /* No work.  Whew! */

    /* First, check to see if we need to do the SIGTERM, sleep, SIGKILL
     * dance with any of the processes we're cleaning up.  If we've got
     * any kill-on-sight subprocesses, ditch them now as well, so they
     * don't waste any more cycles doing whatever it is that they shouldn't
     * be doing anymore.
     */

#ifndef NEED_WAITPID
    /* Pick up all defunct processes */
    for (pc = procs; pc; pc = pc->next) {
        if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE)
            pc->kill_how = APR_KILL_NEVER;
    }
#endif /* !defined(NEED_WAITPID) */

    for (pc = procs; pc; pc = pc->next) {
#ifndef WIN32
        if ((pc->kill_how == APR_KILL_AFTER_TIMEOUT)
            || (pc->kill_how == APR_KILL_ONLY_ONCE)) {
            /*
             * Subprocess may be dead already.  Only need the timeout if not.
             * Note: apr_proc_kill on Windows is TerminateProcess(), which is
             * similar to a SIGKILL, so always give the process a timeout
             * under Windows before killing it.
             */
            if (apr_proc_kill(pc->proc, SIGTERM) == APR_SUCCESS)
                need_timeout = 1;
        }
        else if (pc->kill_how == APR_KILL_ALWAYS) {
#else /* WIN32 knows only one fast, clean method of killing processes today */
        if (pc->kill_how != APR_KILL_NEVER) {
            need_timeout = 1;
            pc->kill_how = APR_KILL_ALWAYS;
#endif
            apr_proc_kill(pc->proc, SIGKILL);
        }
    }

    /* Sleep only if we have to. The sleep algorithm grows
     * by a factor of two on each iteration. TIMEOUT_INTERVAL
     * is equal to TIMEOUT_USECS / 64.
     */
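    /* With TIMEOUT_INTERVAL = 46875 usec, the worst case is one initial
     * sleep plus six doubling sleeps: 46875 * (1 + 1 + 2 + 4 + 8 + 16 + 32)
     * = 3000000 usec, i.e. the full TIMEOUT_USECS budget of 3 seconds
     * before the interval reaches TIMEOUT_USECS and the loop gives up.
     */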
    if (need_timeout) {
        timeout_interval = TIMEOUT_INTERVAL;
        apr_sleep(timeout_interval);

        do {
            /* check the status of the subprocesses */
            need_timeout = 0;
            for (pc = procs; pc; pc = pc->next) {
                if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) {
                    if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT)
                            == APR_CHILD_NOTDONE)
                        need_timeout = 1;       /* subprocess is still active */
                    else
                        pc->kill_how = APR_KILL_NEVER;  /* subprocess has exited */
                }
            }
            if (need_timeout) {
                if (timeout_interval >= TIMEOUT_USECS) {
                    break;
                }
                apr_sleep(timeout_interval);
                timeout_interval *= 2;
            }
        } while (need_timeout);
    }

    /* OK, the scripts we just timed out for have had a chance to clean up
     * --- now, just get rid of them, and also clean up the system accounting
     * goop...
     */
    for (pc = procs; pc; pc = pc->next) {
        if (pc->kill_how == APR_KILL_AFTER_TIMEOUT)
            apr_proc_kill(pc->proc, SIGKILL);
    }

    /* Now wait for all the signaled processes to die */
    for (pc = procs; pc; pc = pc->next) {
        if (pc->kill_how != APR_KILL_NEVER)
            (void)apr_proc_wait(pc->proc, NULL, NULL, APR_WAIT);
    }
}


/*
 * Pool creation/destruction stubs, for people who are running
 * mixed release/debug environments.
 */

#if !APR_POOL_DEBUG
APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
                                     const char *file_line)
{
    return apr_palloc(pool, size);
}

APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
                                      const char *file_line)
{
    return apr_pcalloc(pool, size);
}

APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
                                       const char *file_line)
{
    apr_pool_clear(pool);
}

APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
                                         const char *file_line)
{
    apr_pool_destroy(pool);
}

APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
                                                   apr_pool_t *parent,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    return apr_pool_create_ex(newpool, parent, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

#else /* APR_POOL_DEBUG */

#undef apr_palloc
APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size);

APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
{
    return apr_palloc_debug(pool, size, "undefined");
}

#undef apr_pcalloc
APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);

APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
    return apr_pcalloc_debug(pool, size, "undefined");
}

#undef apr_pool_clear
APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool);

APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
    apr_pool_clear_debug(pool, "undefined");
}

#undef apr_pool_destroy
APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool);

APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
    apr_pool_destroy_debug(pool, "undefined");
}

#undef apr_pool_create_ex
APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator)
{
    return apr_pool_create_ex_debug(newpool, parent,
                                    abort_fn, allocator,
                                    "undefined");
}

#undef apr_pool_create_core_ex
APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
                                              allocator, "undefined");
}

#undef apr_pool_create_unmanaged_ex
APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
                                              allocator, "undefined");
}

#endif /* APR_POOL_DEBUG */
