Posted to commits@apr.apache.org by st...@apache.org on 2001/12/14 03:16:55 UTC
cvs commit: apr/misc/unix start.c
striker 01/12/13 18:16:55
Modified: include apr_pools.h
memory/unix apr_pools.c
misc/unix start.c
Log:
Move us over to the new pools code. The debug code isn't in place yet,
so hold off on APR_POOL_DEBUG for a day.
No API changes.
Reviewed by: Justin Erenkrantz, Brian Pane
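
For reference, a minimal sketch of how the new creation API is meant to be
used (illustrative only; it assumes apr_initialize() has been called and
ignores the apr_status_t return values for brevity):

    #include "apr_pools.h"

    apr_pool_t *root, *sub;

    /* Unchanged for existing callers: apr_pool_create() is now a macro
     * that expands to apr_pool_create_ex(..., NULL, APR_POOL_FDEFAULT).
     */
    apr_pool_create(&root, NULL);

    /* New: give a subpool its own allocator, protected by a mutex. */
    apr_pool_create_ex(&sub, root, NULL,
                       APR_POOL_FNEW_ALLOCATOR | APR_POOL_FLOCK);

    apr_pool_destroy(sub);   /* gives sub's nodes back to its allocator */
    apr_pool_destroy(root);
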
Revision Changes Path
1.64 +138 -98 apr/include/apr_pools.h
Index: apr_pools.h
===================================================================
RCS file: /home/cvs/apr/include/apr_pools.h,v
retrieving revision 1.63
retrieving revision 1.64
diff -u -r1.63 -r1.64
--- apr_pools.h 2001/11/09 17:50:48 1.63
+++ apr_pools.h 2001/12/14 02:16:55 1.64
@@ -82,6 +82,8 @@
*/
#include "apr.h"
#include "apr_errno.h"
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
/* Memory allocation/Pool debugging options...
*
@@ -90,11 +92,15 @@
* NB These should ALL normally be commented out unless you REALLY
* need them!!
*/
-
-/*
+/*
#define APR_POOL_DEBUG
*/
+#define APR_POOL_STRINGIZE(x) APR_POOL__STRINGIZE(x)
+#define APR_POOL__STRINGIZE(x) #x
+#define APR_POOL__FILELINE__ __FILE__ ":" APR_POOL_STRINGIZE(__LINE__)
+
+
/** The fundamental pool type */
typedef struct apr_pool_t apr_pool_t;
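
As an aside, the two-level stringize added above is what lets __LINE__
expand to its line number before the # operator turns it into a string.
A worked expansion, assuming the macro is used at line 42 of a file named
foo.c (hypothetical):

    APR_POOL__FILELINE__
      -> __FILE__ ":" APR_POOL_STRINGIZE(42)
      -> "foo.c" ":" "42"        /* adjacent string literals concatenate */
      -> "foo.c:42"
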
@@ -174,13 +180,37 @@
*/
APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void);
+/**
+ * Tag a pool (give it a name)
+ * @param pool The pool to tag
+ * @param tag The tag
+ */
+APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag);
+
+/**
+ * Lock a pool
+ * @param pool The pool to lock
+ * @param flag The flag
+ */
+APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag);
+
/* @} */
#else
-# ifdef apr_pool_join
-# undef apr_pool_join
-# endif
-# define apr_pool_join(a,b)
+# ifdef apr_pool_join
+# undef apr_pool_join
+# endif
+# define apr_pool_join(a,b)
+
+# ifdef apr_pool_tag
+# undef apr_pool_tag
+# endif
+# define apr_pool_tag(pool, tag)
+
+# ifdef apr_pool_lock
+# undef apr_pool_lock
+# endif
+# define apr_pool_lock(pool, lock)
#endif
/**
@@ -199,40 +229,114 @@
/**
* Setup all of the internal structures required to use pools
- * @param globalp The APR global pool, used to allocate APR structures
- * before any other pools are created. This pool should not
- * ever be used outside of APR.
* @remark Programs do NOT need to call this directly. APR will call this
* automatically from apr_initialize.
* @internal
*/
-APR_DECLARE(apr_status_t) apr_pool_alloc_init(apr_pool_t *globalp);
+APR_DECLARE(apr_status_t) apr_pool_initialize(void);
/**
* Tear down all of the internal structures required to use pools
- * @param globalp The APR global pool, used to allocate APR structures
- * before any other pools are created. This pool should not
- * ever be used outside of APR.
* @remark Programs do NOT need to call this directly. APR will call this
* automatically from apr_terminate.
* @internal
*/
-APR_DECLARE(void) apr_pool_alloc_term(apr_pool_t *globalp);
+APR_DECLARE(void) apr_pool_terminate(void);
/* pool functions */
+#define APR_POOL_FDEFAULT 0x0
+#define APR_POOL_FNEW_ALLOCATOR 0x1
+#define APR_POOL_FLOCK 0x2
+
/**
* Create a new pool.
- * @param newcont The pool we have just created.
- * @param cont The parent pool. If this is NULL, the new pool is a root
+ * @param newpool The pool we have just created.
+ * @param parent The parent pool. If this is NULL, the new pool is a root
* pool. If it is non-NULL, the new pool will inherit all
* of its parent pool's attributes, except the apr_pool_t will
* be a sub-pool.
+ * @param abort_fn A function to use if the pool cannot allocate more memory.
+ * @param flags Flags indicating how the pool should be created:
+ * - APR_POOL_FNEW_ALLOCATOR will create a new allocator for the pool
+ * instead of using the allocator of the parent.
+ * - APR_POOL_FLOCK will create a mutex for the newly created allocator
+ * (this flag only makes sense in combination with APR_POOL_FNEW_ALLOCATOR)
+ *
*/
-APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newcont,
- apr_pool_t *cont);
+APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
+ apr_pool_t *parent,
+ apr_abortfunc_t abort_fn,
+ apr_uint32_t flags);
/**
+ * Create a new pool.
+ * @param newpool The pool we have just created.
+ * @param parent The parent pool. If this is NULL, the new pool is a root
+ * pool. If it is non-NULL, the new pool will inherit all
+ * of its parent pool's attributes, except the apr_pool_t will
+ * be a sub-pool.
+ */
+#if defined(DOXYGEN)
+APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newpool,
+ apr_pool_t *parent);
+#else
+#define apr_pool_create(newpool, parent) \
+ apr_pool_create_ex(newpool, parent, NULL, APR_POOL_FDEFAULT)
+#endif
+
+/**
+ * This function is deprecated. Use apr_pool_create_ex.
+ * @param newpool The new sub-pool
+ * @param parent The pool to use as a parent pool
+ * @param apr_abort A function to use if the pool cannot allocate more memory.
+ * @deffunc void apr_pool_sub_make(apr_pool_t **p, apr_pool_t *parent, int (*apr_abort)(int retcode), const char *created)
+ * @remark The @a apr_abort function provides a way to quit the program if the
+ * machine is out of memory. By default, APR will return on error.
+ */
+#if defined(DOXYGEN)
+APR_DECLARE(void) apr_pool_sub_make(apr_pool_t **newpool,
+ apr_pool_t *parent,
+ int (*apr_abort)(int retcode));
+#else
+#define apr_pool_sub_make(newpool, parent, abort_fn) \
+ (void)apr_pool_create_ex(newpool, parent, abort_fn, APR_POOL_FDEFAULT);
+#endif
+
+/**
+ * Allocate a block of memory from a pool
+ * @param p The pool to allocate from
+ * @param reqsize The amount of memory to allocate
+ * @return The allocated memory
+ */
+APR_DECLARE(void *) apr_palloc(apr_pool_t *p, apr_size_t reqsize);
+
+/**
+ * Allocate a block of memory from a pool and set all of the memory to 0
+ * @param p The pool to allocate from
+ * @param size The amount of memory to allocate
+ * @return The allocated memory
+ */
+APR_DECLARE(void *) apr_pcalloc(apr_pool_t *p, apr_size_t size);
+
+/**
+ * Clear all memory in the pool and run all the cleanups. This also clears all
+ * subpools.
+ * @param p The pool to clear
+ * @remark This does not actually free the memory, it just allows the pool
+ * to re-use this memory for the next allocation.
+ * @see apr_pool_destroy()
+ */
+APR_DECLARE(void) apr_pool_clear(apr_pool_t *p);
+
+/**
+ * Destroy the pool. This runs apr_pool_clear() and then frees all the memory.
+ * @param p The pool to destroy
+ * @remark This will actually free the memory
+ */
+APR_DECLARE(void) apr_pool_destroy(apr_pool_t *p);
+
+/**
* Set the function to be called when an allocation failure occurs.
* @tip If the program wants APR to exit on a memory allocation error,
* then this function can be called to set the callback to use (for
@@ -265,7 +369,7 @@
* @param data The user data associated with the pool.
* @param key The key to use for association
* @param cleanup The cleanup program to use to cleanup the data (NULL if none)
- * @param cont The current pool
+ * @param pool The current pool
* @warning The data to be attached to the pool should have a life span
* at least as long as the pool it is being attached to.
*
@@ -277,16 +381,16 @@
* @bug Specify how to ensure this uniqueness!
*/
APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data,
- const char *key,
- apr_status_t (*cleanup)(void *),
- apr_pool_t *cont);
+ const char *key,
+ apr_status_t (*cleanup)(void *),
+ apr_pool_t *pool);
/**
* Set the data associated with the current pool
* @param data The user data associated with the pool.
* @param key The key to use for association
* @param cleanup The cleanup program to use to cleanup the data (NULL if none)
- * @param cont The current pool
+ * @param pool The current pool
* @note same as apr_pool_userdata_set(), except that this version doesn't
* make a copy of the key (this function is useful, for example, when
* the key is a string literal)
@@ -297,73 +401,18 @@
APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data,
const char *key,
apr_status_t (*cleanup)(void *),
- apr_pool_t *cont);
+ apr_pool_t *pool);
/**
* Return the data associated with the current pool.
* @param data The user data associated with the pool.
* @param key The key for the data to retrieve
- * @param cont The current pool.
+ * @param pool The current pool.
*/
APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key,
- apr_pool_t *cont);
-
-/**
- * Lock the pool. All the memory is write protected against changes.
- * @param p The pool to lock
- * @param writeprotect If true the pool's memory is locked read-only,
- * otherwise the lock is released
- * @remark This is a no-op if the program isn't built with appropriate flags
- * on a platform that supports page locking.
- */
-APR_DECLARE(void) apr_pool_lock(apr_pool_t *p, int writeprotect);
-
-/**
- * Clear all memory in the pool and run all the cleanups. This also clears all
- * subpools.
- * @param p The pool to clear
- * @remark This does not actually free the memory, it just allows the pool
- * to re-use this memory for the next allocation.
- * @see apr_pool_destroy()
- */
-APR_DECLARE(void) apr_pool_clear(apr_pool_t *p);
-
-/**
- * Destroy the pool. This runs apr_pool_clear() and then frees all the memory.
- * @param p The pool to destroy
- * @remark This will actually free the memory
- */
-APR_DECLARE(void) apr_pool_destroy(apr_pool_t *p);
-
-/**
- * Allocate a block of memory from a pool
- * @param c The pool to allocate from
- * @param reqsize The amount of memory to allocate
- * @return The allocated memory
- */
-APR_DECLARE(void *) apr_palloc(apr_pool_t *c, apr_size_t reqsize);
-
-/**
- * Allocate a block of memory from a pool and set all of the memory to 0
- * @param p The pool to allocate from
- * @param size The amount of memory to allocate
- * @return The allocated memory
- */
-APR_DECLARE(void *) apr_pcalloc(apr_pool_t *p, apr_size_t size);
+ apr_pool_t *pool);
/**
- * @param p The new sub-pool
- * @param parent The pool to use as a parent pool
- * @param apr_abort A function to use if the pool cannot allocate more memory.
- * @deffunc void apr_pool_sub_make(apr_pool_t **p, apr_pool_t *parent, int (*apr_abort)(int retcode), const char *created)
- * @remark The @a apr_abort function provides a way to quit the program if the
- * machine is out of memory. By default, APR will return on error.
- */
-APR_DECLARE(void) apr_pool_sub_make(apr_pool_t **p,
- apr_pool_t *pparent,
- int (*apr_abort)(int retcode));
-
-/**
* Register a function to be called when a pool is cleared or destroyed
* @param p The pool register the cleanup with
* @param data The data to pass to the cleanup function.
@@ -395,8 +444,8 @@
* @param child_cleanup The function to register as the child cleanup
*/
APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
- apr_status_t (*plain_cleanup) (void *),
- apr_status_t (*child_cleanup) (void *));
+ apr_status_t (*plain_cleanup)(void *),
+ apr_status_t (*child_cleanup)(void *));
/**
* Run the specified cleanup function immediately and unregister it. Use
@@ -445,27 +494,18 @@
* the macros to support other linkages.
*/
#define APR_POOL_DECLARE_ACCESSOR(typename) \
- APR_DECLARE(apr_pool_t *) apr_##typename##_pool_get \
- (const apr_##typename##_t *ob)
+ APR_DECLARE(apr_pool_t *) apr_##typename##_pool_get \
+ (const apr_##typename##_t *ob)
#define APR_POOL_IMPLEMENT_ACCESSOR(typename) \
- APR_POOL_IMPLEMENT_ACCESSOR_X(typename, pool)
+ APR_POOL_IMPLEMENT_ACCESSOR_X(typename, pool)
#define APR_POOL_IMPLEMENT_ACCESSOR_X(typename, fieldname) \
- APR_DECLARE(apr_pool_t *) apr_##typename##_pool_get \
- (const apr_##typename##_t *ob) { return ob->fieldname; }
-
-/* used to guarantee to the apr_pool_t debugging code that the sub apr_pool_t
- * will not be destroyed before the parent pool */
-#ifndef APR_POOL_DEBUG
-# ifdef apr_pool_join
-# undef apr_pool_join
-# endif /* apr_pool_join */
-# define apr_pool_join(a,b)
-#endif /* APR_POOL_DEBUG */
+ APR_DECLARE(apr_pool_t *) apr_##typename##_pool_get \
+ (const apr_##typename##_t *ob) { return ob->fieldname; }
/** @} */
#ifdef __cplusplus
}
#endif
-#endif /* !APR_POOLS_H */
+#endif /* !APR_POOLS_H */
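
The re-indented accessor macros above expand exactly as before; for
example, APR_POOL_IMPLEMENT_ACCESSOR(file) -- using apr_file_t purely for
illustration -- produces:

    APR_DECLARE(apr_pool_t *) apr_file_pool_get(const apr_file_t *ob)
        { return ob->pool; }
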
1.118 +647 -1109 apr/memory/unix/apr_pools.c
Index: apr_pools.c
===================================================================
RCS file: /home/cvs/apr/memory/unix/apr_pools.c,v
retrieving revision 1.117
retrieving revision 1.118
diff -u -r1.117 -r1.118
--- apr_pools.c 2001/11/23 16:47:52 1.117
+++ apr_pools.c 2001/12/14 02:16:55 1.118
@@ -52,23 +52,20 @@
* <http://www.apache.org/>.
*/
-/*
- * Resource allocation code... the code here is responsible for making
- * sure that nothing leaks.
- *
- * rst --- 4/95 --- 6/95
- */
-
#include "apr.h"
#include "apr_private.h"
+/* TODO: Clean out the #includes */
+
#include "apr_portable.h" /* for get_os_proc */
#include "apr_strings.h"
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_lib.h"
-#include "apr_lock.h"
+#include "apr_thread_mutex.h"
#include "apr_hash.h"
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
#if APR_HAVE_STDIO_H
#include <stdio.h>
@@ -94,7 +91,6 @@
#if APR_HAVE_FCNTL_H
#include <fcntl.h>
#endif
-
#if APR_HAVE_STRING_H
#include <string.h>
#endif
@@ -105,646 +101,552 @@
#include <malloc.h>
#endif
-/* Details of the debugging options can now be found in the developer
- * section of the documentaion.
- * ### gjs: where the hell is that?
- *
- * DEBUG_WITH_MPROTECT:
- * This is known to work on Linux systems. It can only be used in
- * conjunction with ALLOC_USE_MALLOC (for now). ALLOC_USE_MALLOC will
- * use malloc() for *each* allocation, and then free it when the pool
- * is cleared. When DEBUG_WITH_MPROTECT is used, the allocation is
- * performed using an anonymous mmap() call to get page-aligned memory.
- * Rather than free'ing the memory, an mprotect() call is made to make
- * the memory non-accessible. Thus, if the memory is referred to *after*
- * the pool is cleared, an immediate segfault occurs. :-)
- *
- * WARNING: Since every allocation creates a new mmap, aligned on a new
- * page, this debugging option chews memory. A **LOT** of
- * memory. Linux "recovered" the memory from my X Server process
- * the first time I ran a "largish" sequence of operations.
- *
- * ### it should be possible to use this option without ALLOC_USE_MALLOC
- * ### and simply mprotect the blocks at clear time (rather than put them
- * ### into the free block list).
+
+
+/*
+ * Magic numbers
*/
+
+#define MIN_ALLOC 8192
+#define MAX_INDEX 20
+
+#define BOUNDARY_INDEX 12
+#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
+
/*
-#define ALLOC_DEBUG
-#define ALLOC_STATS
-#define ALLOC_USE_MALLOC
-#define DEBUG_WITH_MPROTECT
-*/
+ * Macros and defines
+ */
-/* magic numbers --- min free bytes to consider a free apr_pool_t block useable,
- * and the min amount to allocate if we have to go to malloc() */
+/* ALIGN() is only to be used to align on a power of 2 boundary */
+#define ALIGN(size, boundary) \
+ (((size) + ((boundary) - 1)) & ~((boundary) - 1))
-#ifndef BLOCK_MINFREE
-#define BLOCK_MINFREE 4096
-#endif
-#ifndef BLOCK_MINALLOC
-#define BLOCK_MINALLOC 8192
-#endif
-
-#ifdef APR_POOL_DEBUG
-/* first do some option checking... */
-#ifdef ALLOC_USE_MALLOC
-#error "sorry, no support for ALLOC_USE_MALLOC and APR_POOL_DEBUG at the same time"
-#endif /* ALLOC_USE_MALLOC */
-
-#ifdef MULTITHREAD
-# error "sorry, no support for MULTITHREAD and APR_POOL_DEBUG at the same time"
-#endif /* MULTITHREAD */
-
-#endif /* APR_POOL_DEBUG */
-
-#ifdef ALLOC_USE_MALLOC
-#undef BLOCK_MINFREE
-#undef BLOCK_MINALLOC
-#define BLOCK_MINFREE 0
-#define BLOCK_MINALLOC 0
-#endif /* ALLOC_USE_MALLOC */
-
-#ifdef DEBUG_WITH_MPROTECT
-#ifndef ALLOC_USE_MALLOC
-#error "ALLOC_USE_MALLOC must be enabled to use DEBUG_WITH_MPROTECT"
-#endif
-#ifndef WIN32
-#include <sys/mman.h>
-#endif
+#define ALIGN_DEFAULT(size) ALIGN(size, 8)
+
+#if APR_HAS_THREADS
+#define LOCK(mutex) \
+ do { \
+ if (mutex) \
+ apr_thread_mutex_lock(mutex); \
+ } while(0)
+
+#define UNLOCK(mutex) \
+ do { \
+ if (mutex) \
+ apr_thread_mutex_unlock(mutex); \
+ } while(0)
+#else
+#define LOCK(mutex)
+#define UNLOCK(mutex)
#endif
+/*
+ * Structures
+ */
-/** The memory allocation structure
+typedef struct cleanup_t cleanup_t;
+typedef struct allocator_t allocator_t;
+typedef struct node_t node_t;
+
+struct node_t {
+ node_t *next;
+ apr_uint32_t index;
+ char *first_avail;
+ char *endp;
+};
+
+struct allocator_t {
+ apr_uint32_t max_index;
+ apr_thread_mutex_t *mutex;
+ apr_pool_t *owner;
+ node_t *free[MAX_INDEX];
+};
+
+/* The ref field in the apr_pool_t struct holds a
+ * pointer to the pointer referencing this pool.
+ * It is used for parent, child, sibling management.
+ * Look at apr_pool_create_ex() and apr_pool_destroy()
+ * to see how it is used.
*/
struct apr_pool_t {
- /** The first block in this pool. */
- union block_hdr *first;
- /** The last block in this pool. */
- union block_hdr *last;
- /** The list of cleanups to run on pool cleanup. */
- struct cleanup *cleanups;
- /** A list of processes to kill when this pool is cleared */
+ allocator_t *allocator;
+ node_t *active;
+ node_t *self; /* The node containing the pool itself */
+ char *self_first_avail;
+ apr_pool_t *parent;
+ apr_pool_t *child;
+ apr_pool_t *sibling;
+ apr_pool_t **ref;
+ cleanup_t *cleanups;
struct process_chain *subprocesses;
- /** The first sub_pool of this pool */
- struct apr_pool_t *sub_pools;
- /** The next sibling pool */
- struct apr_pool_t *sub_next;
- /** The previous sibling pool */
- struct apr_pool_t *sub_prev;
- /** The parent pool of this pool */
- struct apr_pool_t *parent;
- /** The first free byte in this pool */
- char *free_first_avail;
-#ifdef ALLOC_USE_MALLOC
- /** The allocation list if using malloc */
- void *allocation_list;
-#endif
-#ifdef APR_POOL_DEBUG
- /** a list of joined pools */
- struct apr_pool_t *joined;
+ apr_abortfunc_t abort_fn;
+ apr_hash_t *user_data;
+#if defined(APR_POOL_DEBUG)
+ const char *tag;
#endif
- /** A function to control how pools behave when they receive ENOMEM */
- int (*apr_abort)(int retcode);
- /** A place to hold user data associated with this pool */
- struct apr_hash_t *prog_data;
};
+#define SIZEOF_NODE_T ALIGN_DEFAULT(sizeof(node_t))
+#define SIZEOF_ALLOCATOR_T ALIGN_DEFAULT(sizeof(allocator_t))
+#define SIZEOF_POOL_T ALIGN_DEFAULT(sizeof(apr_pool_t))
-/*****************************************************************
- *
- * Managing free storage blocks...
+/*
+ * Variables
*/
-
-union align {
- /*
- * Types which are likely to have the longest RELEVANT alignment
- * restrictions...
- */
- char *cp;
- void (*f) (void);
- long l;
- FILE *fp;
- double d;
+static apr_pool_t *global_pool = NULL;
+static apr_byte_t global_allocator_initialized = 0;
+static allocator_t global_allocator = {
+ 0, /* max_index */
+ NULL, /* mutex */
+ NULL, /* owner */
+ { NULL } /* free[0] */
};
-#define CLICK_SZ (sizeof(union align))
+/*
+ * Memory allocation
+ */
-union block_hdr {
- union align a;
+static APR_INLINE node_t *node_malloc(allocator_t *allocator, apr_size_t size)
+{
+ node_t *node, **ref;
+ apr_uint32_t i, index, max_index;
- /* Actual header... */
+ /* Round up the block size to the next boundary, but always
+ * allocate at least a certain size (MIN_ALLOC).
+ */
+ size = ALIGN(size + SIZEOF_NODE_T, BOUNDARY_SIZE);
+ if (size < MIN_ALLOC)
+ size = MIN_ALLOC;
- struct {
- char *endp;
- union block_hdr *next;
- char *first_avail;
-#ifdef APR_POOL_DEBUG
- union block_hdr *global_next;
- apr_pool_t *owning_pool;
-#endif /* APR_POOL_DEBUG */
- } h;
-};
+ /* Find the index for this node size by
+ * dividing its size by the boundary size
+ */
+ index = (size >> BOUNDARY_INDEX) - 1;
+ /* First see if there are any nodes in the area we know
+ * our node will fit into.
+ */
+ if (index <= allocator->max_index) {
+ LOCK(allocator->mutex);
-/*
- * Static cells for managing our internal synchronisation.
- */
-static union block_hdr *block_freelist = NULL;
+ /* Walk the free list to see if there are
+ * any nodes on it of the requested size
+ *
+ * NOTE: an optimization would be to check
+ * allocator->free[index] first and if no
+ * node is present, directly use
+ * allocator->free[max_index]. This seems
+ * like overkill though and could cause
+ * memory waste.
+ */
+ max_index = allocator->max_index;
+ ref = &allocator->free[index];
+ i = index;
+ while (*ref == NULL && i < max_index) {
+ ref++;
+ i++;
+ }
-#if APR_HAS_THREADS
-static apr_lock_t *alloc_mutex;
-#endif
+ if ((node = *ref) != NULL) {
+ /* If we have found a node and it doesn't have any
+ * nodes waiting in line behind it _and_ we are on
+ * the highest available index, find the new highest
+ * available index
+ */
+ if ((*ref = node->next) == NULL && i >= max_index) {
+ do {
+ ref--;
+ max_index--;
+ }
+ while (*ref == NULL && max_index > 0);
-#ifdef APR_POOL_DEBUG
-static char *known_stack_point;
-static int stack_direction;
-static union block_hdr *global_block_list;
-#define FREE_POOL ((apr_pool_t *)(-1))
-#endif /* APR_POOL_DEBUG */
+ allocator->max_index = max_index;
+ }
+
+ node->next = NULL;
-#ifdef ALLOC_STATS
-static apr_uint64_t num_free_blocks_calls;
-static apr_uint64_t num_blocks_freed;
-static unsigned max_blocks_in_one_free;
-static unsigned num_malloc_calls;
-static unsigned num_malloc_bytes;
-#endif /* ALLOC_STATS */
+ UNLOCK(allocator->mutex);
-#ifdef ALLOC_DEBUG
-#define FILL_BYTE ((char)(0xa5))
-#define debug_fill(ptr,size) ((void)memset((ptr), FILL_BYTE, (size)))
+ return node;
+ }
-static APR_INLINE void debug_verify_filled(const char *ptr, const char *endp,
- const char *error_msg)
-{
- for ( ; ptr < endp; ++ptr) {
- if (*ptr != FILL_BYTE) {
- fputs(error_msg, stderr);
- abort();
- exit(1);
- }
+ UNLOCK(allocator->mutex);
}
-}
-
-#else /* ALLOC_DEBUG */
-#define debug_fill(a,b)
-#define debug_verify_filled(a,b,c)
-#endif /* ALLOC_DEBUG */
-
-#ifdef DEBUG_WITH_MPROTECT
-
-#define SIZEOF_BLOCK(p) (((union block_hdr *)(p) - 1)->a.l)
-#ifndef WIN32
+ /* If we found nothing, seek the sink (at index 0), if
+ * it is not empty.
+ */
+ else if (allocator->free[0]) {
+ LOCK(allocator->mutex);
-static void *mprotect_malloc(apr_size_t size)
-{
- union block_hdr * addr;
+ /* Walk the free list to see if there are
+ * any nodes on it of the requested size
+ */
+ ref = &allocator->free[0];
+ while ((node = *ref) != NULL && index > node->index)
+ ref = &node->next;
+
+ if (node) {
+ *ref = node->next;
+ node->next = NULL;
+
+ UNLOCK(allocator->mutex);
- size += sizeof(union block_hdr);
+ return node;
+ }
+
+ UNLOCK(allocator->mutex);
+ }
- addr = mmap(NULL, size,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0);
- if (addr == MAP_FAILED)
+ /* If we haven't got a suitable node, malloc a new one
+ * and initialize it.
+ */
+ if ((node = malloc(size)) == NULL)
return NULL;
- addr->a.l = size;
- return addr + 1;
+
+ node->next = NULL;
+ node->index = index;
+ node->first_avail = (char *)node + SIZEOF_NODE_T;
+ node->endp = (char *)node + size;
+
+ return node;
}
-static void mprotect_free(void *addr)
+static APR_INLINE void node_free(allocator_t *allocator, node_t *node)
{
- apr_size_t size = SIZEOF_BLOCK(addr);
- int rv = mprotect((union block_hdr *)addr - 1, size, PROT_NONE);
- if (rv != 0) {
- fprintf(stderr, "could not protect. errno=%d\n", errno);
- abort();
- }
-}
+ node_t *next;
+ apr_uint32_t index, max_index;
-#else /* WIN32 */
+ LOCK(allocator->mutex);
-/* return the number insignificant bits in size, e.g. the typical page
- * size of 4096 on x86/WinNT will return 12, as the 12 low-order bits
- * in the size aren't relevant to the number of pages.
- */
-static int mprotect_pageshift()
-{
- static int savesize = 0;
- if (!savesize) {
- SYSTEM_INFO sysinfo;
- GetSystemInfo(&sysinfo);
- --sysinfo.dwPageSize;
- while (sysinfo.dwPageSize) {
- ++savesize;
- sysinfo.dwPageSize >>= 1;
+ max_index = allocator->max_index;
+
+ /* Walk the list of submitted nodes and free them one by one,
+ * shoving them in the right 'size' buckets as we go.
+ */
+ do {
+ next = node->next;
+ index = node->index;
+
+ if (index < MAX_INDEX) {
+ /* Add the node to the appropriate 'size' bucket. Adjust
+ * the max_index when appropriate.
+ */
+ if ((node->next = allocator->free[index]) == NULL && index > max_index) {
+ max_index = index;
+ }
+ allocator->free[index] = node;
}
+ else {
+ /* This node is too large to keep in a specific size bucket,
+ * just add it to the sink (at index 0).
+ */
+ node->next = allocator->free[0];
+ allocator->free[0] = node;
+ }
}
- return savesize;
-}
+ while ((node = next) != NULL);
-static void *mprotect_malloc(apr_size_t size)
-{
- union block_hdr * addr;
- int pageshift = mprotect_pageshift();
- size += sizeof(union block_hdr);
- size = (((size - 1) >> pageshift) + 1) << pageshift;
- addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- if (!addr)
- return NULL;
- addr->a.l = size;
- return addr + 1;
-}
-
-static void mprotect_free(void *addr)
-{
- apr_size_t size = SIZEOF_BLOCK(addr);
- BOOL rv = VirtualFree((union block_hdr *)addr - 1, size, MEM_DECOMMIT);
- if (!rv) {
- fprintf(stderr, "could not protect. errno=%d\n", errno);
- abort();
- }
-}
+ allocator->max_index = max_index;
-static void mprotect_lock(void *addr, int lock)
-{
- size_t size = SIZEOF_BLOCK(addr);
- DWORD prot = (lock ? PAGE_READONLY : PAGE_READWRITE);
- BOOL rv = VirtualProtect((union block_hdr *)addr - 1, size, prot, &prot);
- if (!rv) {
- fprintf(stderr, "could not protect. errno=%d\n", errno);
- abort();
- }
+ UNLOCK(allocator->mutex);
}
-#define DO_LOCK(p,l) mprotect_lock(p,l)
-#endif
-
-static void *mprotect_realloc(void *addr, apr_size_t size)
+APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
{
- void *new_addr = mprotect_malloc(size);
- apr_size_t old_size = SIZEOF_BLOCK(addr);
-
- if (size < old_size)
- old_size = size;
- memcpy(new_addr, addr, old_size);
- mprotect_free(addr);
- return new_addr;
-}
+ node_t *active, *node;
+ void *mem;
+ char *endp;
-#define DO_MALLOC(s) mprotect_malloc(s)
-#define DO_FREE(p) mprotect_free(p)
-#define DO_REALLOC(p,s) mprotect_realloc(p,s)
+ size = ALIGN_DEFAULT(size);
+ active = pool->active;
-#else /* DEBUG_WITH_MPROTECT */
+ /* If the active node has enough bytes left, use it. */
+ endp = active->first_avail + size;
+ if (endp < active->endp) {
+ mem = active->first_avail;
+ active->first_avail = endp;
+
+ return mem;
+ }
-#define DO_MALLOC(s) malloc(s)
-#define DO_FREE(p) free(p)
-#define DO_REALLOC(p,s) realloc(p,s)
+ /* Reset the active node, get ourselves a new one and activate it. */
+ active->first_avail = (char *)active + SIZEOF_NODE_T;
-#endif /* DEBUG_WITH_MPROTECT */
-
-/*
- * Get a completely new block from the system pool. Note that we rely on
- * malloc() to provide aligned memory.
- */
-static union block_hdr *malloc_block(apr_size_t size, apr_abortfunc_t abortfunc)
-{
- union block_hdr *blok;
+ if ((node = node_malloc(pool->allocator, size)) == NULL) {
+ active->first_avail = active->endp;
-#ifdef ALLOC_DEBUG
- /* make some room at the end which we'll fill and expect to be
- * always filled
- */
- size += CLICK_SZ;
-#endif /* ALLOC_DEBUG */
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
-#ifdef ALLOC_STATS
- ++num_malloc_calls;
- num_malloc_bytes += size + sizeof(union block_hdr);
-#endif /* ALLOC_STATS */
-
- blok = (union block_hdr *) DO_MALLOC(size + sizeof(union block_hdr));
- if (blok == NULL) {
- /* ### keep this fprintf here? */
- fprintf(stderr, "Ouch! malloc failed in malloc_block()\n");
- if (abortfunc != NULL) {
- (void) (*abortfunc)(APR_ENOMEM);
- }
return NULL;
}
-
- debug_fill(blok, size + sizeof(union block_hdr));
- blok->h.next = NULL;
- blok->h.first_avail = (char *) (blok + 1);
- blok->h.endp = size + blok->h.first_avail;
+ active->next = pool->active = node;
-#ifdef ALLOC_DEBUG
- blok->h.endp -= CLICK_SZ;
-#endif /* ALLOC_DEBUG */
-
-#ifdef APR_POOL_DEBUG
- blok->h.global_next = global_block_list;
- global_block_list = blok;
- blok->h.owning_pool = NULL;
-#endif /* APR_POOL_DEBUG */
-
- return blok;
+ mem = node->first_avail;
+ node->first_avail += size;
+
+ return mem;
}
-
-
-#if defined(ALLOC_DEBUG) && !defined(ALLOC_USE_MALLOC)
-static void chk_on_blk_list(union block_hdr *blok, union block_hdr *free_blk)
+APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
- debug_verify_filled(blok->h.endp, blok->h.endp + CLICK_SZ,
- "[chk_on_blk_list] Ouch! Someone trounced the padding "
- "at the end of a block!\n");
- while (free_blk) {
- if (free_blk == blok) {
- fprintf(stderr, "Ouch! Freeing free block\n");
- abort();
- exit(1);
- }
- free_blk = free_blk->h.next;
- }
-}
-#else /* defined(ALLOC_DEBUG) && !defined(ALLOC_USE_MALLOC) */
-#define chk_on_blk_list(_x, _y)
-#endif /* defined(ALLOC_DEBUG) && !defined(ALLOC_USE_MALLOC) */
+ node_t *active, *node;
+ void *mem;
+ char *endp;
-/* Free a chain of blocks --- must be called with alarms blocked. */
+ size = ALIGN_DEFAULT(size);
+ active = pool->active;
-static void free_blocks(union block_hdr *blok)
-{
-#ifdef ALLOC_USE_MALLOC
- union block_hdr *next;
+ /* If the active node has enough bytes left, use it. */
+ endp = active->first_avail + size;
+ if (endp < active->endp) {
+ mem = active->first_avail;
+ active->first_avail = endp;
- for ( ; blok; blok = next) {
- next = blok->h.next;
- DO_FREE(blok);
+ memset(mem, 0, size);
+
+ return mem;
}
-#else /* ALLOC_USE_MALLOC */
-#ifdef ALLOC_STATS
- unsigned num_blocks;
-#endif /* ALLOC_STATS */
+ /* Reset the active node, get ourselves a new one and activate it. */
+ active->first_avail = (char *)active + SIZEOF_NODE_T;
- /*
- * First, put new blocks at the head of the free list ---
- * we'll eventually bash the 'next' pointer of the last block
- * in the chain to point to the free blocks we already had.
- */
+ if ((node = node_malloc(pool->allocator, size)) == NULL) {
+ active->first_avail = active->endp;
- union block_hdr *old_free_list;
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
- if (blok == NULL) {
- return; /* Sanity check --- freeing empty pool? */
+ return NULL;
}
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
- old_free_list = block_freelist;
- block_freelist = blok;
+ active->next = pool->active = node;
- /*
- * Next, adjust first_avail pointers of each block --- have to do it
- * sooner or later, and it simplifies the search in new_block to do it
- * now.
- */
-
-#ifdef ALLOC_STATS
- num_blocks = 1;
-#endif /* ALLOC_STATS */
-
- while (blok->h.next != NULL) {
-
-#ifdef ALLOC_STATS
- ++num_blocks;
-#endif /* ALLOC_STATS */
-
- chk_on_blk_list(blok, old_free_list);
- blok->h.first_avail = (char *) (blok + 1);
- debug_fill(blok->h.first_avail, blok->h.endp - blok->h.first_avail);
-#ifdef APR_POOL_DEBUG
- blok->h.owning_pool = FREE_POOL;
-#endif /* APR_POOL_DEBUG */
- blok = blok->h.next;
- }
-
- chk_on_blk_list(blok, old_free_list);
- blok->h.first_avail = (char *) (blok + 1);
- debug_fill(blok->h.first_avail, blok->h.endp - blok->h.first_avail);
-#ifdef APR_POOL_DEBUG
- blok->h.owning_pool = FREE_POOL;
-#endif /* APR_POOL_DEBUG */
-
- /* Finally, reset next pointer to get the old free blocks back */
-
- blok->h.next = old_free_list;
-
-#ifdef ALLOC_STATS
- if (num_blocks > max_blocks_in_one_free) {
- max_blocks_in_one_free = num_blocks;
- }
- ++num_free_blocks_calls;
- num_blocks_freed += num_blocks;
-#endif /* ALLOC_STATS */
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif /* APR_HAS_THREADS */
-#endif /* ALLOC_USE_MALLOC */
+ mem = node->first_avail;
+ node->first_avail += size;
+
+ memset(mem, 0, size);
+
+ return mem;
}
/*
- * Get a new block, from our own free list if possible, from the system
- * if necessary. Must be called with alarms blocked.
+ * Pool management
*/
-static union block_hdr *new_block(apr_size_t min_size, apr_abortfunc_t abortfunc)
+
+static void run_cleanups(cleanup_t *c);
+static void free_proc_chain(struct process_chain *procs);
+
+APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
- union block_hdr **lastptr = &block_freelist;
- union block_hdr *blok = block_freelist;
+ node_t *active;
- /* First, see if we have anything of the required size
- * on the free list...
+ /* Destroy the subpools. The subpools will detach themselves from
+ * this pool, thus this loop is safe and easy.
*/
+ while (pool->child)
+ apr_pool_destroy(pool->child);
- while (blok != NULL) {
- if ((apr_ssize_t)min_size + BLOCK_MINFREE <= blok->h.endp - blok->h.first_avail) {
- *lastptr = blok->h.next;
- blok->h.next = NULL;
- debug_verify_filled(blok->h.first_avail, blok->h.endp,
- "[new_block] Ouch! Someone trounced a block "
- "on the free list!\n");
- return blok;
- }
- else {
- lastptr = &blok->h.next;
- blok = blok->h.next;
- }
+ /* Run cleanups */
+ run_cleanups(pool->cleanups);
+ pool->cleanups = NULL;
+
+ /* Free subprocesses */
+ free_proc_chain(pool->subprocesses);
+ pool->subprocesses = NULL;
+
+ /* Clear the user data. */
+ pool->user_data = NULL;
+
+ /* Reset the active node */
+ if ((active = pool->active) == pool->self) {
+ active->first_avail = pool->self_first_avail;
+ return;
}
- /* Nope. */
+ active->first_avail = (char *)active + SIZEOF_NODE_T;
- min_size += BLOCK_MINFREE;
- blok = malloc_block((min_size > BLOCK_MINALLOC)
- ? min_size : BLOCK_MINALLOC, abortfunc);
- return blok;
+ /* Find the node attached to the pool structure, make
+ * it the active node and free the rest of the nodes.
+ */
+ active = pool->active = pool->self;
+ active->first_avail = pool->self_first_avail;
+ node_free(pool->allocator, active->next);
+ active->next = NULL;
}
-
-/* Accounting */
-#ifdef APR_POOL_DEBUG
-static apr_size_t bytes_in_block_list(union block_hdr *blok)
+APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
- apr_size_t size = 0;
+ node_t *node, *active, **ref;
+ allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
+ apr_uint32_t index;
- while (blok) {
- size += blok->h.endp - (char *) (blok + 1);
- blok = blok->h.next;
- }
+ /* Destroy the subpools. The subpools will detach themselves from
+ * this pool, thus this loop is safe and easy.
+ */
+ while (pool->child)
+ apr_pool_destroy(pool->child);
- return size;
-}
-#endif
+ /* Run cleanups */
+ run_cleanups(pool->cleanups);
-/*****************************************************************
- *
- * Pool internals and management...
- * NB that subprocesses are not handled by the generic cleanup code,
- * basically because we don't want cleanups for multiple subprocesses
- * to result in multiple three-second pauses.
- */
+ /* Free subprocesses */
+ free_proc_chain(pool->subprocesses);
-struct process_chain;
-struct cleanup;
+ /* Remove the pool from the parents child list */
+ if (pool->parent) {
+ mutex = pool->parent->allocator->mutex;
-static void run_cleanups(struct cleanup *c);
-static void free_proc_chain(struct process_chain *p);
+ LOCK(mutex);
-static apr_pool_t *permanent_pool;
+ if ((*pool->ref = pool->sibling) != NULL)
+ pool->sibling->ref = pool->ref;
-/* Each pool structure is allocated in the start of its own first block,
- * so we need to know how many bytes that is (once properly aligned...).
- * This also means that when a pool's sub-pool is destroyed, the storage
- * associated with it is *completely* gone, so we have to make sure it
- * gets taken off the parent's sub-pool list...
- */
+ UNLOCK(mutex);
+ }
+
+ /* Reset the active block */
+ active = pool->active;
+ active->first_avail = (char *)active + SIZEOF_NODE_T;
-#define POOL_HDR_CLICKS (1 + ((sizeof(struct apr_pool_t) - 1) / CLICK_SZ))
-#define POOL_HDR_BYTES (POOL_HDR_CLICKS * CLICK_SZ)
+ /* Find the block attached to the pool structure. Save a copy of the
+ * allocator pointer, because the pool struct soon will be no more.
+ */
+ allocator = pool->allocator;
+ active = pool->self;
+ active->first_avail = (char *)active + SIZEOF_NODE_T;
+
+ /* If this pool happens to be the owner of the allocator, free
+ * everything in the allocator (that includes the pool struct
+ * and the allocator). Don't worry about destroying the optional mutex
+ * in the allocator; it will have been destroyed by the cleanup function.
+ */
+ if (allocator->owner == pool) {
+ for (index = 0; index < MAX_INDEX; index++) {
+ ref = &allocator->free[index];
+ while ((node = *ref) != NULL) {
+ *ref = node->next;
+ free(node);
+ }
+ }
+
+ ref = &active;
+ while ((node = *ref) != NULL) {
+ *ref = node->next;
+ free(node);
+ }
+
+ return;
+ }
-APR_DECLARE(void) apr_pool_sub_make(apr_pool_t **p,
- apr_pool_t *parent,
- apr_abortfunc_t abortfunc)
-{
- union block_hdr *blok;
- apr_pool_t *new_pool;
+ /* Free all the nodes in the pool (including the node holding the
+ * pool struct), by giving them back to the allocator.
+ */
+ node_free(allocator, active);
+}
+APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
+ apr_pool_t *parent,
+ apr_abortfunc_t abort_fn,
+ apr_uint32_t flags)
+{
+ apr_pool_t *pool;
+ node_t *node;
+ allocator_t *allocator, *new_allocator;
+ apr_status_t rv;
+
+ *newpool = NULL;
+
+ if (!parent)
+ parent = global_pool;
+
+ allocator = parent ? parent->allocator : &global_allocator;
+ if ((node = node_malloc(allocator, MIN_ALLOC - SIZEOF_NODE_T)) == NULL) {
+ if (abort_fn)
+ abort_fn(APR_ENOMEM);
+
+ return APR_ENOMEM;
+ }
+
+ if ((flags & APR_POOL_FNEW_ALLOCATOR) == APR_POOL_FNEW_ALLOCATOR) {
+ new_allocator = (allocator_t *)node->first_avail;
+ pool = (apr_pool_t *)((char *)new_allocator + SIZEOF_ALLOCATOR_T);
+ node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
+
+ memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
+ new_allocator->owner = pool;
+
+ pool->allocator = new_allocator;
+ pool->active = pool->self = node;
+ pool->abort_fn = abort_fn;
+ pool->child = NULL;
+ pool->cleanups = NULL;
+ pool->subprocesses = NULL;
+ pool->user_data = NULL;
+#if defined(APR_POOL_DEBUG)
+ pool->tag = NULL;
+#endif
#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
+ if ((flags & APR_POOL_FLOCK) == APR_POOL_FLOCK) {
+ if ((rv = apr_thread_mutex_create(&allocator->mutex,
+ APR_THREAD_MUTEX_DEFAULT, pool)) != APR_SUCCESS) {
+ node_free(allocator, node);
+ return rv;
+ }
+ }
#endif
-
- blok = new_block(POOL_HDR_BYTES, abortfunc);
- new_pool = (apr_pool_t *) blok->h.first_avail;
- blok->h.first_avail += POOL_HDR_BYTES;
-#ifdef APR_POOL_DEBUG
- blok->h.owning_pool = new_pool;
+ }
+ else {
+ pool = (apr_pool_t *)node->first_avail;
+ node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
+
+ pool->allocator = allocator;
+ pool->active = pool->self = node;
+ pool->abort_fn = abort_fn;
+ pool->child = NULL;
+ pool->cleanups = NULL;
+ pool->subprocesses = NULL;
+ pool->user_data = NULL;
+#if defined(APR_POOL_DEBUG)
+ pool->tag = NULL;
#endif
-
- memset((char *) new_pool, '\0', sizeof(struct apr_pool_t));
- new_pool->free_first_avail = blok->h.first_avail;
- new_pool->first = new_pool->last = blok;
-
- if (parent) {
- new_pool->parent = parent;
- new_pool->sub_next = parent->sub_pools;
- if (new_pool->sub_next) {
- new_pool->sub_next->sub_prev = new_pool;
- }
- parent->sub_pools = new_pool;
}
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
+ if ((pool->parent = parent) != NULL) {
+ LOCK(allocator->mutex);
- *p = new_pool;
-}
+ if ((pool->sibling = parent->child) != NULL)
+ pool->sibling->ref = &pool->sibling;
-#ifdef APR_POOL_DEBUG
-static void stack_var_init(char *s)
-{
- char t;
+ parent->child = pool;
+ pool->ref = &parent->child;
- if (s < &t) {
- stack_direction = 1; /* stack grows up */
+ UNLOCK(allocator->mutex);
}
else {
- stack_direction = -1; /* stack grows down */
+ pool->sibling = NULL;
+ pool->ref = NULL;
}
-}
-#endif
-
-#ifdef ALLOC_STATS
-static void dump_stats(void)
-{
- fprintf(stderr,
- "alloc_stats: [%d] #free_blocks %" APR_INT64_T_FMT
- " #blocks %" APR_INT64_T_FMT
- " max %u #malloc %u #bytes %u\n",
- (int) getpid(),
- num_free_blocks_calls,
- num_blocks_freed,
- max_blocks_in_one_free,
- num_malloc_calls,
- num_malloc_bytes);
-}
-#endif
-/* ### why do we have this, in addition to apr_pool_sub_make? */
-APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newpool,
- apr_pool_t *parent_pool)
-{
- apr_abortfunc_t abortfunc;
- apr_pool_t *ppool;
-
- abortfunc = parent_pool ? parent_pool->apr_abort : NULL;
- ppool = parent_pool ? parent_pool : permanent_pool;
-
- apr_pool_sub_make(newpool, ppool, abortfunc);
- if (*newpool == NULL) {
- return APR_ENOPOOL;
- }
+ *newpool = pool;
- (*newpool)->prog_data = NULL;
- (*newpool)->apr_abort = abortfunc;
-
return APR_SUCCESS;
}
-APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abortfunc,
+APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abort_fn,
apr_pool_t *pool)
{
- pool->apr_abort = abortfunc;
+ pool->abort_fn = abort_fn;
}
APR_DECLARE(apr_abortfunc_t) apr_pool_get_abort(apr_pool_t *pool)
{
- return pool->apr_abort;
+ return pool->abort_fn;
}
APR_DECLARE(apr_pool_t *) apr_pool_get_parent(apr_pool_t *pool)
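
To make the free-list index math above concrete, a worked example (the
exact node_t size is platform dependent; 32 bytes is assumed here):

    /* apr_palloc() needs a new node for a 5000 byte allocation:
     *
     *   size  = ALIGN(5000 + 32, BOUNDARY_SIZE)  ->  8192 (two 4KB boundaries)
     *   index = (8192 >> BOUNDARY_INDEX) - 1     ->  1
     *
     * so node_malloc() checks free[1] first, then larger buckets up to
     * max_index.  Because MIN_ALLOC guarantees size >= 8192, a regular
     * node never gets index 0, which is why free[0] can double as the
     * sink for oversized nodes (index >= MAX_INDEX, i.e. 84KB and up).
     */
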
@@ -752,46 +654,106 @@
return pool->parent;
}
-/*****************************************************************
- *
- * Managing generic cleanups.
+/* return TRUE if a is an ancestor of b
+ * NULL is considered an ancestor of all pools
+ */
+APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
+{
+ if (a == NULL)
+ return 1;
+
+ while (b) {
+ if (a == b)
+ return 1;
+
+ b = b->parent;
+ }
+
+ return 0;
+}
+
+/*
+ * Initialization
*/
+
+APR_DECLARE(apr_status_t) apr_pool_initialize(void)
+{
+ apr_status_t rv;
+
+ if (global_allocator_initialized++)
+ return APR_SUCCESS;
+
+ memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
+
+ if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL, APR_POOL_FDEFAULT)) != APR_SUCCESS) {
+ return rv;
+ }
+
+#if APR_HAS_THREADS
+ if ((rv = apr_thread_mutex_create(&global_allocator.mutex,
+ APR_THREAD_MUTEX_DEFAULT, global_pool)) != APR_SUCCESS) {
+ return rv;
+ }
+#endif
+
+ global_allocator.owner = global_pool;
+ global_allocator_initialized = 1;
+
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(void) apr_pool_terminate(void)
+{
+ if (!global_allocator_initialized)
+ return;
-struct cleanup {
+ global_allocator_initialized = 0;
+
+ apr_pool_destroy(global_pool); /* This will also destroy the mutex */
+ global_pool = NULL;
+
+ memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
+}
+
+/*
+ * Cleanup
+ */
+
+struct cleanup_t {
+ struct cleanup_t *next;
const void *data;
- apr_status_t (*plain_cleanup) (void *);
- apr_status_t (*child_cleanup) (void *);
- struct cleanup *next;
+ apr_status_t (*plain_cleanup_fn)(void *data);
+ apr_status_t (*child_cleanup_fn)(void *data);
};
APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
- apr_status_t (*plain_cleanup) (void *),
- apr_status_t (*child_cleanup) (void *))
+ apr_status_t (*plain_cleanup_fn)(void *data),
+ apr_status_t (*child_cleanup_fn)(void *data))
{
- struct cleanup *c;
+ cleanup_t *c;
if (p != NULL) {
- c = (struct cleanup *) apr_palloc(p, sizeof(struct cleanup));
+ c = (cleanup_t *) apr_palloc(p, sizeof(cleanup_t));
c->data = data;
- c->plain_cleanup = plain_cleanup;
- c->child_cleanup = child_cleanup;
+ c->plain_cleanup_fn = plain_cleanup_fn;
+ c->child_cleanup_fn = child_cleanup_fn;
c->next = p->cleanups;
p->cleanups = c;
}
}
APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
- apr_status_t (*cleanup) (void *))
+ apr_status_t (*cleanup_fn)(void *))
{
- struct cleanup *c;
- struct cleanup **lastp;
+ cleanup_t *c, **lastp;
if (p == NULL)
return;
+
c = p->cleanups;
lastp = &p->cleanups;
while (c) {
- if (c->data == data && c->plain_cleanup == cleanup) {
+ if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
*lastp = c->next;
break;
}
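
The cleanup contract itself is unchanged by the renames; a small usage
sketch (close_handle and my_handle are hypothetical):

    static apr_status_t close_handle(void *data)
    {
        /* release whatever resource 'data' refers to */
        return APR_SUCCESS;
    }

    apr_pool_cleanup_register(pool, my_handle,
                              close_handle,           /* plain_cleanup_fn */
                              apr_pool_cleanup_null); /* child_cleanup_fn */

    /* ...or run it early; this also unregisters it: */
    apr_pool_cleanup_run(pool, my_handle, close_handle);
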
@@ -802,17 +764,18 @@
}
APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
- apr_status_t (*plain_cleanup) (void *),
- apr_status_t (*child_cleanup) (void *))
+ apr_status_t (*plain_cleanup_fn) (void *),
+ apr_status_t (*child_cleanup_fn) (void *))
{
- struct cleanup *c;
+ cleanup_t *c;
if (p == NULL)
return;
+
c = p->cleanups;
while (c) {
- if (c->data == data && c->plain_cleanup == plain_cleanup) {
- c->child_cleanup = child_cleanup;
+ if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
+ c->child_cleanup_fn = child_cleanup_fn;
break;
}
@@ -821,25 +784,25 @@
}
APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
- apr_status_t (*cleanup) (void *))
+ apr_status_t (*cleanup_fn) (void *))
{
- apr_pool_cleanup_kill(p, data, cleanup);
- return (*cleanup) (data);
+ apr_pool_cleanup_kill(p, data, cleanup_fn);
+ return (*cleanup_fn)(data);
}
-static void run_cleanups(struct cleanup *c)
+static void run_cleanups(cleanup_t *c)
{
while (c) {
- (*c->plain_cleanup) ((void *)c->data);
- c = c->next;
+ (*c->plain_cleanup_fn)((void *)c->data);
+ c = c->next;
}
}
-static void run_child_cleanups(struct cleanup *c)
+static void run_child_cleanups(cleanup_t *c)
{
while (c) {
- (*c->child_cleanup) ((void *)c->data);
- c = c->next;
+ (*c->child_cleanup_fn)((void *)c->data);
+ c = c->next;
}
}
@@ -848,9 +811,8 @@
run_child_cleanups(p->cleanups);
p->cleanups = NULL;
- for (p = p->sub_pools; p; p = p->sub_next) {
- cleanup_pool_for_exec(p);
- }
+ for (p = p->child; p; p = p->sibling)
+ cleanup_pool_for_exec(p);
}
APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
@@ -865,8 +827,8 @@
* I can do about that (except if the child decides
* to go out and close them
*/
- cleanup_pool_for_exec(permanent_pool);
-#endif /* ndef WIN32 */
+ cleanup_pool_for_exec(global_pool);
+#endif /* !defined(WIN32) && !defined(OS2) */
}
APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
@@ -874,446 +836,75 @@
/* do nothing cleanup routine */
return APR_SUCCESS;
}
-
-APR_DECLARE(apr_status_t) apr_pool_alloc_init(apr_pool_t *globalp)
-{
-#if APR_HAS_THREADS
- apr_status_t status;
-#endif
-#ifdef APR_POOL_DEBUG
- char s;
-
- known_stack_point = &s;
- stack_var_init(&s);
-#endif
-#if APR_HAS_THREADS
- status = apr_lock_create(&alloc_mutex, APR_MUTEX, APR_INTRAPROCESS,
- NULL, globalp);
- if (status != APR_SUCCESS) {
- return status;
- }
-#endif
- permanent_pool = globalp;
-
-#ifdef ALLOC_STATS
- atexit(dump_stats);
-#endif
-
- return APR_SUCCESS;
-}
-APR_DECLARE(void) apr_pool_alloc_term(apr_pool_t *globalp)
-{
-#if APR_HAS_THREADS
- apr_lock_destroy(alloc_mutex);
- alloc_mutex = NULL;
-#endif
- apr_pool_destroy(globalp);
-}
-
-APR_DECLARE(void) apr_pool_lock(apr_pool_t *a, int l)
-{
-#ifdef ALLOC_USE_MALLOC
-#ifdef DO_LOCK
- /* lock the subpools. */
- apr_pool_t *s;
- void *c, *n;
-
- for (s = a->sub_pools; s; s = s->sub_next) {
- apr_pool_lock(s, l);
- }
-
- for (c = a->allocation_list; c; c = n) {
- n = *(void **)c;
- DO_LOCK(c, l);
- }
-#endif
-#endif
-}
-
-/* We only want to lock the mutex if we are being called from apr_pool_clear.
- * This is because if we also call this function from apr_destroy_real_pool,
- * which also locks the same mutex, and recursive locks aren't portable.
- * This way, we are garaunteed that we only lock this mutex once when calling
- * either one of these functions.
+/*
+ * Debug functions
*/
-APR_DECLARE(void) apr_pool_clear(apr_pool_t *a)
-{
- /* free the subpools. we can just loop -- the subpools will detach
- themselve from us, so this is easy. */
- while (a->sub_pools) {
- apr_pool_destroy(a->sub_pools);
- }
-
- /* run cleanups and free any subprocesses. */
- run_cleanups(a->cleanups);
- a->cleanups = NULL;
- free_proc_chain(a->subprocesses);
- a->subprocesses = NULL;
-
- /* free the pool's blocks, *except* for the first one. the actual pool
- structure is contained in the first block. this also gives us some
- ready memory for reallocating within this pool. */
- free_blocks(a->first->h.next);
- a->first->h.next = NULL;
-
- /* this was allocated in self, or a subpool of self. it simply
- disappears, so forget the hash table. */
- a->prog_data = NULL;
-
- /* no other blocks, so the last block is the first. */
- a->last = a->first;
-
- /* "free_first_avail" is the original first_avail when the pool was
- constructed. (kind of a misnomer, but it means "when freeing, use
- this as the first available ptr)
-
- restore the first/only block avail pointer, effectively resetting
- the block to empty (except for the pool structure). */
- a->first->h.first_avail = a->free_first_avail;
- debug_fill(a->first->h.first_avail,
- a->first->h.endp - a->first->h.first_avail);
-
-#ifdef ALLOC_USE_MALLOC
- {
- void *c, *n;
- for (c = a->allocation_list; c; c = n) {
- n = *(void **)c;
- DO_FREE(c);
- }
- a->allocation_list = NULL;
- }
-#endif
-}
-
-APR_DECLARE(void) apr_pool_destroy(apr_pool_t *a)
+#if defined(APR_POOL_DEBUG)
+APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)
{
- union block_hdr *blok;
-
- /* toss everything in the pool. */
- apr_pool_clear(a);
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
- /* detach this pool from its parent. */
- if (a->parent) {
- if (a->parent->sub_pools == a) {
- a->parent->sub_pools = a->sub_next;
- }
- if (a->sub_prev) {
- a->sub_prev->sub_next = a->sub_next;
- }
- if (a->sub_next) {
- a->sub_next->sub_prev = a->sub_prev;
- }
- }
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
- /* freeing the first block will include the pool structure. to prevent
- a double call to apr_pool_destroy, we want to fill a NULL into
- a->first so that the second call (or any attempted usage of the
- pool) will segfault on a deref.
-
- Note: when ALLOC_DEBUG is on, the free'd blocks are filled with
- 0xa5. That will cause future use of this pool to die since the pool
- structure resides within the block's 0xa5 overwrite area. However,
- we want this to fail much more regularly, so stash the NULL.
- */
- blok = a->first;
- a->first = NULL;
- free_blocks(blok);
+ pool->tag = tag;
}
-
-/*****************************************************************
- * APR_POOL_DEBUG support
- */
-#ifdef APR_POOL_DEBUG
-
APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *p, int recurse)
{
- apr_size_t total_bytes = bytes_in_block_list(p->first);
-
- if (recurse)
- for (p = p->sub_pools; p != NULL; p = p->sub_next)
- total_bytes += apr_pool_num_bytes(p, 1);
-
- return total_bytes;
-}
-
-APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void)
-{
- return bytes_in_block_list(block_freelist);
-}
-
-/* the unix linker defines this symbol as the last byte + 1 of
- * the executable... so it includes TEXT, BSS, and DATA
- */
-#ifdef HAVE__END
-extern char _end;
-#endif
-
-/* is ptr in the range [lo,hi) */
-#define is_ptr_in_range(ptr, lo, hi) \
- (((unsigned long)(ptr) - (unsigned long)(lo)) \
- < (unsigned long)(hi) - (unsigned long)(lo))
-
-/* Find the pool that ts belongs to, return NULL if it doesn't
- * belong to any pool.
- */
-APR_DECLARE(apr_pool_t *) apr_find_pool(const void *ts)
-{
- const char *s = ts;
- union block_hdr **pb;
- union block_hdr *b;
-
-#ifdef HAVE__END
- /* short-circuit stuff which is in TEXT, BSS, or DATA */
- if (is_ptr_in_range(s, 0, &_end)) {
- return NULL;
- }
-#endif
- /* consider stuff on the stack to also be in the NULL pool...
- * XXX: there's cases where we don't want to assume this
- */
- if ((stack_direction == -1 && is_ptr_in_range(s, &ts, known_stack_point))
- || (stack_direction == 1 && is_ptr_in_range(s, known_stack_point, &ts))) {
-#ifdef HAVE__END
- abort();
-#endif
- return NULL;
- }
- /* search the global_block_list */
- for (pb = &global_block_list; *pb; pb = &b->h.global_next) {
- b = *pb;
- if (is_ptr_in_range(s, b, b->h.endp)) {
- if (b->h.owning_pool == FREE_POOL) {
- fprintf(stderr,
- "Ouch! find_pool() called on pointer in a free block\n");
- abort();
- exit(1);
- }
- if (b != global_block_list) {
- /*
- * promote b to front of list, this is a hack to speed
- * up the lookup
- */
- *pb = b->h.global_next;
- b->h.global_next = global_block_list;
- global_block_list = b;
- }
- return b->h.owning_pool;
- }
- }
- return NULL;
-}
-
-/*
- * All blocks belonging to sub will be changed to point to p
- * instead. This is a guarantee by the caller that sub will not
- * be destroyed before p is.
- */
-APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
-{
- union block_hdr *b;
-
- /* We could handle more general cases... but this is it for now. */
- if (sub->parent != p) {
- fprintf(stderr, "pool_join: p is not parent of sub\n");
- abort();
- }
- while (p->joined) {
- p = p->joined;
- }
- sub->joined = p;
- for (b = global_block_list; b; b = b->h.global_next) {
- if (b->h.owning_pool == sub) {
- b->h.owning_pool = p;
- }
- }
-}
-#endif
-
-/* return TRUE iff a is an ancestor of b
- * NULL is considered an ancestor of all pools
- */
-APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
-{
- if (a == NULL) {
- return 1;
- }
-#ifdef APR_POOL_DEBUG
- while (a && a->joined) {
- a = a->joined;
- }
-#endif
- while (b) {
- if (a == b) {
- return 1;
- }
- b = b->parent;
- }
return 0;
}
-
-/*****************************************************************
- *
- * Allocating stuff...
- */
-
-APR_DECLARE(void*) apr_palloc(apr_pool_t *a, apr_size_t reqsize)
-{
-#ifdef ALLOC_USE_MALLOC
- apr_size_t size = reqsize + CLICK_SZ;
- void *ptr;
-
- ptr = DO_MALLOC(size);
- if (ptr == NULL) {
- fputs("Ouch! Out of memory!\n", stderr);
- exit(1);
- }
- debug_fill(ptr, size); /* might as well get uninitialized protection */
- *(void **)ptr = a->allocation_list;
- a->allocation_list = ptr;
- return (char *)ptr + CLICK_SZ;
-#else
-
- /*
- * Round up requested size to an even number of alignment units
- * (core clicks)
- */
- apr_size_t nclicks;
- apr_size_t size;
-
- /* First, see if we have space in the block most recently
- * allocated to this pool
- */
-
- union block_hdr *blok;
- char *first_avail;
- char *new_first_avail;
-
- nclicks = 1 + ((reqsize - 1) / CLICK_SZ);
- size = nclicks * CLICK_SZ;
-
- /* First, see if we have space in the block most recently
- * allocated to this pool
- */
-
- blok = a->last;
- first_avail = blok->h.first_avail;
-
- if (reqsize <= 0) {
- return NULL;
- }
-
- new_first_avail = first_avail + size;
-
- if (new_first_avail <= blok->h.endp) {
- debug_verify_filled(first_avail, blok->h.endp,
- "[apr_palloc] Ouch! Someone trounced past the end "
- "of their allocation!\n");
- blok->h.first_avail = new_first_avail;
- return (void *) first_avail;
- }
-
- /* Nope --- get a new one that's guaranteed to be big enough */
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
- blok = new_block(size, a->apr_abort);
- a->last->h.next = blok;
- a->last = blok;
-#ifdef APR_POOL_DEBUG
- blok->h.owning_pool = a;
-#endif
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
#endif
- first_avail = blok->h.first_avail;
- blok->h.first_avail += size;
-
- return (void *) first_avail;
-#endif
-}
-
-APR_DECLARE(void *) apr_pcalloc(apr_pool_t *a, apr_size_t size)
-{
- void *res = apr_palloc(a, size);
- memset(res, '\0', size);
- return res;
-}
-
-/*****************************************************************
- *
- * User data management functions
+/*
+ * User data management
*/
APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data, const char *key,
- apr_status_t (*cleanup) (void *),
- apr_pool_t *cont)
+ apr_status_t (*cleanup) (void *),
+ apr_pool_t *pool)
{
- if (cont->prog_data == NULL)
- cont->prog_data = apr_hash_make(cont);
+ if (pool->user_data == NULL)
+ pool->user_data = apr_hash_make(pool);
- if (apr_hash_get(cont->prog_data, key, APR_HASH_KEY_STRING) == NULL){
- char *new_key = apr_pstrdup(cont, key);
- apr_hash_set(cont->prog_data, new_key, APR_HASH_KEY_STRING, data);
+ if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING) == NULL) {
+ char *new_key = apr_pstrdup(pool, key);
+ apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING, data);
}
else {
- apr_hash_set(cont->prog_data, key, APR_HASH_KEY_STRING, data);
+ apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
}
- if (cleanup) {
- apr_pool_cleanup_register(cont, data, cleanup, cleanup);
- }
+ if (cleanup)
+ apr_pool_cleanup_register(pool, data, cleanup, cleanup);
+
return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data, const char *key,
- apr_status_t (*cleanup) (void *),
- apr_pool_t *cont)
+ apr_status_t (*cleanup) (void *),
+ apr_pool_t *pool)
{
- if (cont->prog_data == NULL)
- cont->prog_data = apr_hash_make(cont);
+ if (pool->user_data == NULL)
+ pool->user_data = apr_hash_make(pool);
- apr_hash_set(cont->prog_data, key, APR_HASH_KEY_STRING, data);
+ apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
- if (cleanup) {
- apr_pool_cleanup_register(cont, data, cleanup, cleanup);
- }
+ if (cleanup)
+ apr_pool_cleanup_register(pool, data, cleanup, cleanup);
+
return APR_SUCCESS;
}
-APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key, apr_pool_t *cont)
+APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key, apr_pool_t *pool)
{
- if (cont->prog_data == NULL)
+ if (pool->user_data == NULL)
*data = NULL;
else
- *data = apr_hash_get(cont->prog_data, key, APR_HASH_KEY_STRING);
+ *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING);
+
return APR_SUCCESS;
}
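For reference, a minimal sketch of how a caller uses the user-data API
(the my_state type and the key string are made up for illustration):

    struct my_state { int value; };

    void example(apr_pool_t *pool)
    {
        struct my_state *state = apr_palloc(pool, sizeof(*state));
        void *found;

        state->value = 42;

        /* apr_pool_userdata_set() copies the key into the pool; the
         * _setn variant stores the key pointer as-is, so its key must
         * live at least as long as the pool.  A NULL cleanup means
         * nothing extra runs at pool destruction. */
        apr_pool_userdata_set(state, "my-app:state", NULL, pool);

        apr_pool_userdata_get(&found, "my-app:state", pool);
        /* found now points at state */
    }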
-/*****************************************************************
- *
+
+/*
* "Print" functions
*/
@@ -1333,133 +924,87 @@
struct psprintf_data {
apr_vformatter_buff_t vbuff;
-#ifdef ALLOC_USE_MALLOC
- char *base;
-#else
- union block_hdr *blok;
- int got_a_new_block;
-#endif
+ node_t *node;
+ allocator_t *allocator;
+ apr_byte_t got_a_new_node;
+ node_t *free;
};
static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
struct psprintf_data *ps = (struct psprintf_data *)vbuff;
-#ifdef ALLOC_USE_MALLOC
- apr_size_t size;
- char *ptr;
-
- size = (char *)ps->vbuff.curpos - ps->base;
- ptr = DO_REALLOC(ps->base, 2*size);
- if (ptr == NULL) {
- fputs("Ouch! Out of memory!\n", stderr);
- exit(1);
- }
- ps->base = ptr;
- ps->vbuff.curpos = ptr + size;
- ps->vbuff.endpos = ptr + 2*size - 1;
- return 0;
-#else
- union block_hdr *blok;
- union block_hdr *nblok;
+ node_t *node, *active;
apr_size_t cur_len;
char *strp;
+ allocator_t *allocator;
- blok = ps->blok;
+ allocator = ps->allocator;
+ node = ps->node;
strp = ps->vbuff.curpos;
- cur_len = strp - blok->h.first_avail;
+ cur_len = strp - node->first_avail;
- /* must try another blok */
-#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
-#endif
- nblok = new_block(2 * cur_len, NULL);
-#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
-#endif
- memcpy(nblok->h.first_avail, blok->h.first_avail, cur_len);
- ps->vbuff.curpos = nblok->h.first_avail + cur_len;
- /* save a byte for the NUL terminator */
- ps->vbuff.endpos = nblok->h.endp - 1;
-
- /* did we allocate the current blok? if so free it up */
- if (ps->got_a_new_block) {
- debug_fill(blok->h.first_avail, blok->h.endp - blok->h.first_avail);
-#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
-#endif
- blok->h.next = block_freelist;
- block_freelist = blok;
-#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
-#endif
+ if ((active = node_malloc(allocator, cur_len << 1)) == NULL)
+ return -1;
+
+ memcpy(active->first_avail, node->first_avail, cur_len);
+
+ /* Reset the previous active node */
+ node->first_avail = (char *)node + SIZEOF_NODE_T;
+
+ if (ps->got_a_new_node) {
+ node->next = ps->free;
+ ps->free = node;
}
- ps->blok = nblok;
- ps->got_a_new_block = 1;
- /* note that we've deliberately not linked the new block onto
- * the pool yet... because we may need to flush again later, and
- * we'd have to spend more effort trying to unlink the block.
- */
+
+ ps->node = active;
+ ps->vbuff.curpos = active->first_avail + cur_len;
+ ps->vbuff.endpos = active->endp - 1; /* Save a byte for NUL terminator */
+ ps->got_a_new_node = 1;
+
return 0;
-#endif
}
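psprintf_flush() is an apr_vformatter() flush callback: apr_vformatter()
fills the region between vbuff.curpos and vbuff.endpos and invokes the
callback when it runs out of room; the callback either makes more room and
resets curpos/endpos, or returns -1, which apr_vformatter() hands back to
its caller (that is the -1 apr_pvsprintf() checks for below). A minimal
sketch of the same contract against a fixed buffer (the file_buff and
file_flush names are made up):

    #include <stdio.h>
    #include "apr_lib.h"   /* apr_vformatter(), apr_vformatter_buff_t */

    struct file_buff {
        apr_vformatter_buff_t vbuff;  /* first member, so the cast works */
        FILE *out;
        char buf[128];
    };

    static int file_flush(apr_vformatter_buff_t *vbuff)
    {
        struct file_buff *fb = (struct file_buff *)vbuff;
        size_t len = fb->vbuff.curpos - fb->buf;

        if (fwrite(fb->buf, 1, len, fb->out) != len)
            return -1;                /* aborts the apr_vformatter() run */

        fb->vbuff.curpos = fb->buf;   /* refill the buffer from the top */
        return 0;
    }

Before calling apr_vformatter(file_flush, &fb.vbuff, fmt, ap) the caller
points curpos at the start of buf and endpos at its end, just as
apr_pvsprintf() does with the pool's active node.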
-APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *p, const char *fmt, va_list ap)
+APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
{
-#ifdef ALLOC_USE_MALLOC
- struct psprintf_data ps;
- void *ptr;
-
- ps.base = DO_MALLOC(512);
- if (ps.base == NULL) {
- fputs("Ouch! Out of memory!\n", stderr);
- exit(1);
- }
- /* need room at beginning for allocation_list */
- ps.vbuff.curpos = ps.base + CLICK_SZ;
- ps.vbuff.endpos = ps.base + 511;
- apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap);
- *ps.vbuff.curpos++ = '\0';
- ptr = ps.base;
- /* shrink */
- ptr = DO_REALLOC(ptr, (char *)ps.vbuff.curpos - (char *)ptr);
- if (ptr == NULL) {
- fputs("Ouch! Out of memory!\n", stderr);
- exit(1);
- }
- *(void **)ptr = p->allocation_list;
- p->allocation_list = ptr;
- return (char *)ptr + CLICK_SZ;
-#else
struct psprintf_data ps;
char *strp;
apr_size_t size;
+ node_t *active;
- ps.blok = p->last;
- ps.vbuff.curpos = ps.blok->h.first_avail;
- ps.vbuff.endpos = ps.blok->h.endp - 1; /* save one for NUL */
- ps.got_a_new_block = 0;
+ ps.node = active = pool->active;
+ ps.allocator = pool->allocator;
+ ps.vbuff.curpos = ps.node->first_avail;
+ /* Save a byte for the NUL terminator */
+ ps.vbuff.endpos = ps.node->endp - 1;
+ ps.got_a_new_node = 0;
+ ps.free = NULL;
+
+ if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
- apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap);
+ return NULL;
+ }
strp = ps.vbuff.curpos;
*strp++ = '\0';
- size = strp - ps.blok->h.first_avail;
- size = (1 + ((size - 1) / CLICK_SZ)) * CLICK_SZ;
- strp = ps.blok->h.first_avail; /* save away result pointer */
- ps.blok->h.first_avail += size;
-
- /* have to link the block in if it's a new one */
- if (ps.got_a_new_block) {
- p->last->h.next = ps.blok;
- p->last = ps.blok;
-#ifdef APR_POOL_DEBUG
- ps.blok->h.owning_pool = p;
-#endif
+ size = strp - ps.node->first_avail;
+ size = ALIGN_DEFAULT(size);
+ strp = ps.node->first_avail;
+ ps.node->first_avail += size;
+
+ /*
+ * Link the node in if it's a new one
+ */
+ if (ps.got_a_new_node) {
+ active->next = pool->active = ps.node;
}
+ if (ps.free)
+ node_free(ps.allocator, ps.free);
+
return strp;
-#endif
}
APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
@@ -1473,7 +1018,6 @@
return res;
}
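The wrapper keeps the usual calling convention: the formatted string is
allocated from the pool and needs no explicit free. For example (pool, pid
and status are assumed to exist):

    char *msg = apr_psprintf(pool, "child %d exited with status %d",
                             (int)pid, status);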
-
/*****************************************************************
*
* More grotty system stuff... subprocesses. Frump. These don't use
@@ -1485,16 +1029,15 @@
* generic interface, but for now, it's a special case
*/
-APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *a, apr_proc_t *pid,
+APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *pid,
enum kill_conditions how)
{
- struct process_chain *new =
- (struct process_chain *) apr_palloc(a, sizeof(struct process_chain));
+ struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain));
- new->pid = pid;
- new->kill_how = how;
- new->next = a->subprocesses;
- a->subprocesses = new;
+ pc->pid = pid;
+ pc->kill_how = how;
+ pc->next = pool->subprocesses;
+ pool->subprocesses = pc;
}
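A registered subprocess is tied to the pool's lifetime. A sketch, assuming
proc was just started through the APR process API and should not outlive
pool:

    apr_pool_note_subprocess(pool, &proc, kill_after_timeout);

With kill_after_timeout, free_proc_chain() below sends SIGTERM at pool
destruction, sleeps, then SIGKILLs and reaps whatever is still running.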
static void free_proc_chain(struct process_chain *procs)
@@ -1503,12 +1046,11 @@
* whatever it was we're cleaning up now. This may involve killing
* some of them off...
*/
- struct process_chain *p;
+ struct process_chain *pc;
int need_timeout = 0;
- if (procs == NULL) {
- return; /* No work. Whew! */
- }
+ if (!procs)
+ return; /* No work. Whew! */
/* First, check to see if we need to do the SIGTERM, sleep, SIGKILL
* dance with any of the processes we're cleaning up. If we've got
@@ -1519,16 +1061,15 @@
#ifndef NEED_WAITPID
/* Pick up all defunct processes */
- for (p = procs; p; p = p->next) {
- if (apr_proc_wait(p->pid, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE) {
- p->kill_how = kill_never;
- }
+ for (pc = procs; pc; pc = pc->next) {
+ if (apr_proc_wait(pc->pid, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE)
+ pc->kill_how = kill_never;
}
#endif
- for (p = procs; p; p = p->next) {
- if ((p->kill_how == kill_after_timeout)
- || (p->kill_how == kill_only_once)) {
+ for (pc = procs; pc; pc = pc->next) {
+ if ((pc->kill_how == kill_after_timeout) ||
+ (pc->kill_how == kill_only_once)) {
/*
* Subprocess may be dead already. Only need the timeout if not.
* Note: apr_proc_kill on Windows is TerminateProcess(), which is
@@ -1538,35 +1079,32 @@
#ifdef WIN32
need_timeout = 1;
#else
- if (apr_proc_kill(p->pid, SIGTERM) == APR_SUCCESS) {
- need_timeout = 1;
- }
+ if (apr_proc_kill(pc->pid, SIGTERM) == APR_SUCCESS)
+ need_timeout = 1;
#endif
- }
- else if (p->kill_how == kill_always) {
- apr_proc_kill(p->pid, SIGKILL);
- }
+ }
+ else if (pc->kill_how == kill_always) {
+ apr_proc_kill(pc->pid, SIGKILL);
+ }
}
/* Sleep only if we have to... */
- if (need_timeout) {
- sleep(3);
- }
+ if (need_timeout)
+ sleep(3);
/* OK, the scripts we just timed out for have had a chance to clean up
* --- now, just get rid of them, and also clean up the system accounting
* goop...
*/
- for (p = procs; p; p = p->next) {
- if (p->kill_how == kill_after_timeout) {
- apr_proc_kill(p->pid, SIGKILL);
- }
+ for (pc = procs; pc; pc = pc->next) {
+ if (pc->kill_how == kill_after_timeout)
+ apr_proc_kill(pc->pid, SIGKILL);
}
+
/* Now wait for all the signaled processes to die */
- for (p = procs; p; p = p->next) {
- if (p->kill_how != kill_never) {
- (void) apr_proc_wait(p->pid, NULL, NULL, APR_WAIT);
- }
+ for (pc = procs; pc; pc = pc->next) {
+ if (pc->kill_how != kill_never)
+ (void)apr_proc_wait(pc->pid, NULL, NULL, APR_WAIT);
}
#ifdef WIN32
/*
@@ -1583,7 +1121,7 @@
}
}
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
1.58 +22 -17 apr/misc/unix/start.c
Index: start.c
===================================================================
RCS file: /home/cvs/apr/misc/unix/start.c,v
retrieving revision 1.57
retrieving revision 1.58
diff -u -r1.57 -r1.58
--- start.c 2001/11/27 02:31:55 1.57
+++ start.c 2001/12/14 02:16:55 1.58
@@ -64,10 +64,10 @@
static int initialized = 0;
-static apr_pool_t *global_apr_pool;
APR_DECLARE(apr_status_t) apr_initialize(void)
{
+ apr_pool_t *pool;
apr_status_t status;
#if defined WIN32 || defined(NETWARE)
int iVersionRequested;
@@ -82,21 +82,31 @@
return APR_SUCCESS;
}
- if (apr_pool_create(&global_apr_pool, NULL) != APR_SUCCESS) {
+#if !defined(BEOS) && !defined(OS2) && !defined(WIN32) && !defined(NETWARE)
+ apr_unix_setup_lock();
+ apr_proc_mutex_unix_setup_lock();
+ apr_unix_setup_time();
+#endif
+
+#if defined(NETWARE)
+ apr_netware_setup_time();
+#endif
+
+ if ((status = apr_pool_initialize()) != APR_SUCCESS)
+ return status;
+
+ if (apr_pool_create(&pool, NULL) != APR_SUCCESS) {
return APR_ENOPOOL;
}
#ifdef WIN32
/* Initialize apr_os_level global */
- if (apr_get_oslevel(global_apr_pool, &osver) != APR_SUCCESS) {
+ if (apr_get_oslevel(pool, &osver) != APR_SUCCESS) {
return APR_EEXIST;
}
#endif
-#if !defined(BEOS) && !defined(OS2) && !defined(WIN32) && !defined(NETWARE)
- apr_unix_setup_lock();
- apr_proc_mutex_unix_setup_lock();
- apr_unix_setup_time();
-#elif defined WIN32 || defined(NETWARE)
+
+#if defined WIN32 || defined(NETWARE)
iVersionRequested = MAKEWORD(WSAHighByte, WSALowByte);
err = WSAStartup((WORD) iVersionRequested, &wsaData);
if (err) {
@@ -108,14 +118,8 @@
return APR_EEXIST;
}
#endif
-#if defined(NETWARE)
- apr_netware_setup_time();
-#endif
-
- if ((status = apr_pool_alloc_init(global_apr_pool)) != APR_SUCCESS)
- return status;
-
- apr_signal_init(global_apr_pool);
+
+ apr_signal_init(pool);
return APR_SUCCESS;
}
@@ -126,7 +130,8 @@
if (initialized) {
return;
}
- apr_pool_alloc_term(global_apr_pool);
+ apr_pool_terminate();
+
#if defined(NETWARE)
WSACleanup();
#endif
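The public lifecycle is unchanged; a sketch of what callers still write:

    #include "apr_general.h"
    #include "apr_pools.h"

    int main(void)
    {
        apr_pool_t *pool;

        apr_initialize();             /* now calls apr_pool_initialize() */
        apr_pool_create(&pool, NULL);

        /* ... allocate from the pool ... */

        apr_pool_destroy(pool);
        apr_terminate();              /* now calls apr_pool_terminate() */
        return 0;
    }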