Posted to dev@apr.apache.org by Ruediger Pluem <rp...@apache.org> on 2009/03/25 19:13:40 UTC

Re: svn commit: r758360 - in /apr/apr/trunk: include/apr_pools.h memory/unix/apr_pools.c

On 25.03.2009 18:32, pquerna@apache.org wrote:
> Author: pquerna
> Date: Wed Mar 25 17:32:01 2009
> New Revision: 758360
> 
> URL: http://svn.apache.org/viewvc?rev=758360&view=rev
> Log:
> - palloc now uses malloc underneath.
> - we keep a list of malloc'ed addresses in a new list object, and free them on clear/destroy
> - unmanaged/core pool APIs removed.
> 
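
The bookkeeping described in the log then presumably comes down to a structure roughly like this; the struct definition itself is not in the hunks quoted below, so the layout is only inferred from how it is used:

    typedef struct malloc_list_t malloc_list_t;
    struct malloc_list_t {
        void          *entries[MALLOC_LIST_ENTRIES_MAX]; /* blocks handed out by apr_palloc() & co. */
        apr_size_t     offset;                           /* next free slot in entries[]             */
        malloc_list_t *next;                             /* previous, already full, chunk           */
    };

As the helpers below show, clearing a pool free()s every entry while keeping the chunks, and destroying it free()s the chunks as well.
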
> Modified:
>     apr/apr/trunk/include/apr_pools.h
>     apr/apr/trunk/memory/unix/apr_pools.c
> 
> Modified: apr/apr/trunk/memory/unix/apr_pools.c
> URL: http://svn.apache.org/viewvc/apr/apr/trunk/memory/unix/apr_pools.c?rev=758360&r1=758359&r2=758360&view=diff
> ==============================================================================
> --- apr/apr/trunk/memory/unix/apr_pools.c (original)
> +++ apr/apr/trunk/memory/unix/apr_pools.c Wed Mar 25 17:32:01 2009
> @@ -495,7 +504,6 @@
>      unsigned int          stat_clear;
>  #if APR_HAS_THREADS
>      apr_os_thread_t       owner;
> -    apr_thread_mutex_t   *mutex;
>  #endif /* APR_HAS_THREADS */
>  #endif /* APR_POOL_DEBUG */
>  #ifdef NETWARE
> @@ -628,61 +636,58 @@
>   * Memory allocation
>   */
>  
> -APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
> +static void malloc_list_add_entry(apr_pool_t *pool, void *mem)
>  {
> -    apr_memnode_t *active, *node;
> -    void *mem;
> -    apr_size_t free_index;
> -
> -    size = APR_ALIGN_DEFAULT(size);
> -    active = pool->active;
> -
> -    /* If the active node has enough bytes left, use it. */
> -    if (size <= node_free_space(active)) {
> -        mem = active->first_avail;
> -        active->first_avail += size;
> -
> -        return mem;
> -    }
> -
> -    node = active->next;
> -    if (size <= node_free_space(node)) {
> -        list_remove(node);
> +    if (pool->malloced->offset == MALLOC_LIST_ENTRIES_MAX) {
> +        malloc_list_t *ml = pool->malloced;
> +        pool->malloced = calloc(1, sizeof(malloc_list_t));
> +        pool->malloced->next = ml;
>      }
> -    else {
> -        if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
> -            if (pool->abort_fn)
> -                pool->abort_fn(APR_ENOMEM);
> +    
> +    pool->malloced->entries[pool->malloced->offset] = mem;
> +    pool->malloced->offset++;
> +}
>  
> -            return NULL;
> -        }
> +static void malloc_list_clear(malloc_list_t *ml)
> +{
> +    apr_size_t i;
> +    for (i = 0; i < ml->offset; i++) {
> +        free(ml->entries[i]);
>      }
> +    ml->offset = 0;
> +}
>  
> -    node->free_index = 0;
> -
> -    mem = node->first_avail;
> -    node->first_avail += size;
> -
> -    list_insert(node, active);
> -
> -    pool->active = node;
> -
> -    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
> -                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
>  
> -    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
> -    node = active->next;
> -    if (free_index >= node->free_index)
> -        return mem;
> +static void malloc_list_clear_all(malloc_list_t *ml)
> +{
> +    while (ml != NULL) {
> +        malloc_list_clear(ml);
> +        ml = ml->next;
> +    }
> +}
>  
> -    do {
> -        node = node->next;
> +static void malloc_list_destroy(malloc_list_t *ml)
> +{
> +    apr_size_t i;
> +    for (i = 0; i < ml->offset; i++) {
> +        free(ml->entries[i]);
>      }
> -    while (free_index < node->free_index);
> +    free(ml);
> +}
>  
> -    list_remove(active);
> -    list_insert(active, node);
> +static void malloc_list_destroy_all(malloc_list_t *ml)
> +{
> +    malloc_list_t *i, *j;
> +    for (i = ml; i != NULL; i = j) {
> +        j = i->next;
> +        malloc_list_destroy(i);
> +    }
> +}
>  
> +APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
> +{
> +    void *mem = malloc(size);
> +    malloc_list_add_entry(pool, mem);

Although free() on NULL should be a no-op, we probably should not add NULL to the list.
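
Something along these lines would also keep the old out-of-memory behaviour (just a sketch against the new code, untested):

    APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
    {
        void *mem = malloc(size);

        if (mem == NULL) {
            /* mirror the abort path of the removed allocator_alloc() branch */
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);
            return NULL;
        }

        malloc_list_add_entry(pool, mem);
        return mem;
    }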

>      return mem;
>  }
>  
> @@ -697,13 +702,8 @@
>  APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
>  APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
>  {
> -    void *mem;
> -
> -    size = APR_ALIGN_DEFAULT(size);
> -    if ((mem = apr_palloc(pool, size)) != NULL) {
> -        memset(mem, 0, size);
> -    }
> -
> +    void *mem = calloc(1, size);
> +    malloc_list_add_entry(pool, mem);

Although free() on NULL should be a no-op, we probably should not add NULL to the list here either.

>      return mem;
>  }
>  

> @@ -907,261 +849,54 @@
>      return APR_SUCCESS;
>  }
>  
> -/* Deprecated. Renamed to apr_pool_create_unmanaged_ex
> - */
> -APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
> -                                                  apr_abortfunc_t abort_fn,
> -                                                  apr_allocator_t *allocator)
> -{
> -    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
> -}
> -
> -APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
> -                                                  apr_abortfunc_t abort_fn,
> -                                                  apr_allocator_t *allocator)
> -{
> -    apr_pool_t *pool;
> -    apr_memnode_t *node;
> -    apr_allocator_t *pool_allocator;
> -
> -    *newpool = NULL;
> -
> -    if (!apr_pools_initialized)
> -        return APR_ENOPOOL;
> -    if ((pool_allocator = allocator) == NULL) {
> -        if ((pool_allocator = malloc(MIN_ALLOC)) == NULL) {
> -            if (abort_fn)
> -                abort_fn(APR_ENOMEM);
> -
> -            return APR_ENOMEM;
> -        }
> -        memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
> -        pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
> -        node = (apr_memnode_t *)((char *)pool_allocator + SIZEOF_ALLOCATOR_T);
> -        node->next  = NULL;
> -        node->index = 1;
> -        node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
> -        node->endp = (char *)pool_allocator + MIN_ALLOC;
> -    }
> -    else if ((node = allocator_alloc(pool_allocator,
> -                                     MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
> -        if (abort_fn)
> -            abort_fn(APR_ENOMEM);
> -
> -        return APR_ENOMEM;
> -    }
> -
> -    node->next = node;
> -    node->ref = &node->next;
> -
> -    pool = (apr_pool_t *)node->first_avail;
> -    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
> -
> -    pool->allocator = pool_allocator;
> -    pool->active = pool->self = node;
> -    pool->abort_fn = abort_fn;
> -    pool->child = NULL;
> -    pool->cleanups = NULL;
> -    pool->free_cleanups = NULL;
> -    pool->pre_cleanups = NULL;
> -    pool->free_pre_cleanups = NULL;
> -    pool->subprocesses = NULL;
> -    pool->user_data = NULL;
> -    pool->tag = NULL;
> -    pool->parent = NULL;
> -    pool->sibling = NULL;
> -    pool->ref = NULL;
> -
> -#ifdef NETWARE
> -    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
> -#endif /* defined(NETWARE) */
> -    if (!allocator)
> -        pool_allocator->owner = pool;
> -    *newpool = pool;
> -
> -    return APR_SUCCESS;
> -}
> -
>  /*
>   * "Print" functions
>   */
> -
> -/*
> - * apr_psprintf is implemented by writing directly into the current
> - * block of the pool, starting right at first_avail.  If there's
> - * insufficient room, then a new block is allocated and the earlier
> - * output is copied over.  The new block isn't linked into the pool
> - * until all the output is done.
> - *
> - * Note that this is completely safe because nothing else can
> - * allocate in this apr_pool_t while apr_psprintf is running.  alarms are
> - * blocked, and the only thing outside of apr_pools.c that's invoked
> - * is apr_vformatter -- which was purposefully written to be
> - * self-contained with no callouts.
> - */
> -
>  struct psprintf_data {
>      apr_vformatter_buff_t vbuff;
> -    apr_memnode_t   *node;
> -    apr_pool_t      *pool;
> -    apr_byte_t       got_a_new_node;
> -    apr_memnode_t   *free;
> +    char      *mem;
> +    apr_size_t size;
>  };
>  
> -#define APR_PSPRINTF_MIN_STRINGSIZE 32
> -
>  static int psprintf_flush(apr_vformatter_buff_t *vbuff)
>  {
>      struct psprintf_data *ps = (struct psprintf_data *)vbuff;
> -    apr_memnode_t *node, *active;
> -    apr_size_t cur_len, size;
> -    char *strp;
> -    apr_pool_t *pool;
> -    apr_size_t free_index;
> -
> -    pool = ps->pool;
> -    active = ps->node;
> -    strp = ps->vbuff.curpos;
> -    cur_len = strp - active->first_avail;
> -    size = cur_len << 1;
> -
> -    /* Make sure that we don't try to use a block that has less
> -     * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it.  This
> -     * also catches the case where size == 0, which would result
> -     * in reusing a block that can't even hold the NUL byte.
> -     */
> -    if (size < APR_PSPRINTF_MIN_STRINGSIZE)
> -        size = APR_PSPRINTF_MIN_STRINGSIZE;
> -
> -    node = active->next;
> -    if (!ps->got_a_new_node && size <= node_free_space(node)) {
> -
> -        list_remove(node);
> -        list_insert(node, active);
> -
> -        node->free_index = 0;
> -
> -        pool->active = node;
> -
> -        free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
> -                                BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
> -
> -        active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
> -        node = active->next;
> -        if (free_index < node->free_index) {
> -            do {
> -                node = node->next;
> -            }
> -            while (free_index < node->free_index);
> -
> -            list_remove(active);
> -            list_insert(active, node);
> -        }
> -
> -        node = pool->active;
> -    }
> -    else {
> -        if ((node = allocator_alloc(pool->allocator, size)) == NULL)
> -            return -1;
> -
> -        if (ps->got_a_new_node) {
> -            active->next = ps->free;
> -            ps->free = active;
> -        }
> -
> -        ps->got_a_new_node = 1;
> -    }
> -
> -    memcpy(node->first_avail, active->first_avail, cur_len);
> -
> -    ps->node = node;
> -    ps->vbuff.curpos = node->first_avail + cur_len;
> -    ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
> -
> +    apr_size_t size;
> +    
> +    size = ps->vbuff.curpos - ps->mem;
> +    
> +    ps->size <<= 1;
> +    if ((ps->mem = realloc(ps->mem, ps->size)) == NULL)
> +        return -1;
> +    
> +    ps->vbuff.curpos = ps->mem + size;
> +    ps->vbuff.endpos = ps->mem + ps->size - 1;
> +    
>      return 0;
>  }
>  
>  APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
>  {
>      struct psprintf_data ps;
> -    char *strp;
> -    apr_size_t size;
> -    apr_memnode_t *active, *node;
> -    apr_size_t free_index;
> -
> -    ps.node = active = pool->active;
> -    ps.pool = pool;
> -    ps.vbuff.curpos  = ps.node->first_avail;
>  
> +    ps.size = 128;

We should make this a defined constant.
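
For example (the name is only a suggestion):

    #define PSPRINTF_INITIAL_SIZE 128

and then in apr_pvsprintf():

    ps.size = PSPRINTF_INITIAL_SIZE;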

> +    ps.mem = malloc(ps.size);
> +    ps.vbuff.curpos  = ps.mem;
> +    
>      /* Save a byte for the NUL terminator */
> -    ps.vbuff.endpos = ps.node->endp - 1;
> -    ps.got_a_new_node = 0;
> -    ps.free = NULL;
> -
> -    /* Make sure that the first node passed to apr_vformatter has at least
> -     * room to hold the NUL terminator.
> -     */
> -    if (ps.node->first_avail == ps.node->endp) {
> -        if (psprintf_flush(&ps.vbuff) == -1) {
> -            if (pool->abort_fn) {
> -                pool->abort_fn(APR_ENOMEM);
> -            }
> -
> -            return NULL;
> -        }
> -    }
> -
> +    ps.vbuff.endpos = ps.mem + ps.size - 1;
> +    
>      if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
>          if (pool->abort_fn)
>              pool->abort_fn(APR_ENOMEM);
> -
> +        
>          return NULL;
>      }
>  
> -    strp = ps.vbuff.curpos;
> -    *strp++ = '\0';
> -
> -    size = strp - ps.node->first_avail;
> -    size = APR_ALIGN_DEFAULT(size);
> -    strp = ps.node->first_avail;
> -    ps.node->first_avail += size;
> -
> -    if (ps.free)
> -        allocator_free(pool->allocator, ps.free);
> -
> -    /*
> -     * Link the node in if it's a new one
> -     */
> -    if (!ps.got_a_new_node)
> -        return strp;
> -
> -    active = pool->active;
> -    node = ps.node;
> -
> -    node->free_index = 0;
> -
> -    list_insert(node, active);
> -
> -    pool->active = node;
> -
> -    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
> -                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
> -
> -    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
> -    node = active->next;
> -
> -    if (free_index >= node->free_index)
> -        return strp;
> -
> -    do {
> -        node = node->next;
> -    }
> -    while (free_index < node->free_index);
> -
> -    list_remove(active);
> -    list_insert(active, node);
> -
> -    return strp;
> +    *ps.vbuff.curpos++ = '\0';

Why the ++? The incremented curpos is never used after this point.

> +    
> +    malloc_list_add_entry(pool, ps.mem);
> +    return ps.mem;
>  }
>  
>  

Regards

Rüdiger