Posted to cvs@httpd.apache.org by mt...@apache.org on 2004/09/10 10:16:26 UTC

cvs commit: httpd-2.0/modules/proxy proxy_balancer.c proxy_util.c mod_proxy.h

mturk       2004/09/10 01:16:26

  Modified:    modules/proxy proxy_balancer.c proxy_util.c mod_proxy.h
  Log:
  Move the shared scoreboard data initialization to the balancer module.
  The initialization is done on first request, as it is for the worker,
  because during the config phase there is no scoreboard to allocate
  from. Thanks to Christian von Roques for spotting that.
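
  A condensed sketch of the pattern the log describes, pieced together from the
  proxy_balancer.c diff below (not standalone code; locking and error handling
  are omitted):

      /* The first request to reach the balancer initializes it. */
      if (!(*balancer)->status)
          init_runtime_score(conf->pool, *balancer);

      /* Inside init_runtime_score(): for each worker, back the runtime
       * stats either by a shared scoreboard slot or, for prefork / a
       * single child, by plain pool memory.  The scoreboard only exists
       * at request time, which is why this cannot run at config time. */
      if (workers[i].w->hmax && mpm_daemons > 1)
          score = ap_get_scoreboard_lb(getpid(), workers[i].id);
      else
          score = apr_pcalloc(pool, sizeof(proxy_runtime_stat));
      workers[i].s = (proxy_runtime_stat *)score;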
  
  Revision  Changes    Path
  1.16      +83 -4     httpd-2.0/modules/proxy/proxy_balancer.c
  
  Index: proxy_balancer.c
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/modules/proxy/proxy_balancer.c,v
  retrieving revision 1.15
  retrieving revision 1.16
  diff -u -r1.15 -r1.16
  --- proxy_balancer.c	3 Sep 2004 16:15:03 -0000	1.15
  +++ proxy_balancer.c	10 Sep 2004 08:16:25 -0000	1.16
  @@ -19,6 +19,7 @@
   
   #include "mod_proxy.h"
   #include "ap_mpm.h"
  +#include "scoreboard.h"
   #include "apr_version.h"
   
   module AP_MODULE_DECLARE_DATA proxy_balancer_module;
  @@ -31,6 +32,75 @@
   #define PROXY_BALANCER_UNLOCK(b)    APR_SUCCESS
   #endif
   
  +static int init_runtime_score(apr_pool_t *pool, proxy_balancer *balancer)
  +{
  +    int i;
  +    double median, ffactor = 0.0;
  +    proxy_runtime_worker *workers;    
  +#if PROXY_HAS_SCOREBOARD
  +    lb_score *score;
  +    int mpm_daemons;
  +#else
  +    void *score;
  +#endif
  +
  +    workers = (proxy_runtime_worker *)balancer->workers->elts;
  +
  +    for (i = 0; i < balancer->workers->nelts; i++) {
  +#if PROXY_HAS_SCOREBOARD
  +        ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &mpm_daemons);
  +        /* Check if we are prefork or single child */
  +        if (workers[i].w->hmax && mpm_daemons > 1) {
  +            score = ap_get_scoreboard_lb(getpid(), workers[i].id);
  +        }
  +        else
  +#endif
  +        {
  +            /* Use the plain memory */
  +            score = apr_pcalloc(pool, sizeof(proxy_runtime_stat));
  +        }
  +        workers[i].s = (proxy_runtime_stat *)score;
  +    }
  +
  +    /* Recalculate lbfactors */
  +    for (i = 0; i < balancer->workers->nelts; i++) {
  +        /* Set to the original configuration */
  +        workers[i].s->lbfactor = workers[i].w->lbfactor;
  +        ffactor += workers[i].s->lbfactor;
  +    }
  +    if (ffactor < 100.0) {
  +        int z = 0;
  +        for (i = 0; i < balancer->workers->nelts; i++) {
  +            if (workers[i].s->lbfactor == 0.0) 
  +                ++z;
  +        }
  +        if (z) {
  +            median = (100.0 - ffactor) / z;
  +            for (i = 0; i < balancer->workers->nelts; i++) {
  +                if (workers[i].s->lbfactor == 0.0) 
  +                    workers[i].s->lbfactor = median;
  +            }
  +        }
  +        else {
  +            median = (100.0 - ffactor) / balancer->workers->nelts;
  +            for (i = 0; i < balancer->workers->nelts; i++)
  +                workers[i].s->lbfactor += median;
  +        }
  +    }
  +    else if (ffactor > 100.0) {
  +        median = (ffactor - 100.0) / balancer->workers->nelts;
  +        for (i = 0; i < balancer->workers->nelts; i++) {
  +            if (workers[i].s->lbfactor > median)
  +                workers[i].s->lbfactor -= median;
  +        }
  +    } 
  +    for (i = 0; i < balancer->workers->nelts; i++) {
  +        /* Update the status entires */
  +        workers[i].s->lbstatus = workers[i].s->lbfactor;
  +    }
  +    balancer->status = 1;
  +    return 0;
  +}
   
   /* Retrieve the parameter with the given name                                */
   static char *get_path_param(apr_pool_t *pool, char *url,
  @@ -273,6 +343,10 @@
       if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url)))
           return DECLINED;
       
  +    /* Initialize shared scoreboard data */ 
  +    if (!((*balancer)->status))
  +        init_runtime_score(conf->pool, *balancer);
  +
       /* Step 2: find the session route */
       
       runtime = find_session_route(*balancer, r, &route, url);
  @@ -325,8 +399,8 @@
           *worker = runtime->w;
       }
       /* Decrease the free channels number */
  -    if ((*worker)->cp->nfree)
  -        --(*worker)->cp->nfree;
  +    /* XXX: This should be the function of apr_reslist */
  +    --(*worker)->cp->nfree;
   
       PROXY_BALANCER_UNLOCK(*balancer);
       
  @@ -359,8 +433,7 @@
           return HTTP_INTERNAL_SERVER_ERROR;
       }
       /* increase the free channels number */
  -    if (worker->cp->nfree)
  -        worker->cp->nfree++;
  +    worker->cp->nfree++;
       /* TODO: calculate the bytes transfered */
   
       /* TODO: update the scoreboard status */
  @@ -483,6 +556,9 @@
       /* First set the params */
       if (bsel) {
           const char *val;
  +        if (!bsel->status)
  +            init_runtime_score(conf->pool, bsel);
  +
           if ((val = apr_table_get(params, "ss"))) {
               if (strlen(val))
                   bsel->sticky = apr_pstrdup(conf->pool, val);
  @@ -565,6 +641,9 @@
                     ap_get_server_built(), "\n</dt></dl>\n", NULL);
           balancer = (proxy_balancer *)conf->balancers->elts;
           for (i = 0; i < conf->balancers->nelts; i++) {
  +            if (!balancer->status)
  +                init_runtime_score(conf->pool, balancer);
  +
               ap_rputs("<hr />\n<h3>LoadBalancer Status for ", r);
               ap_rvputs(r, "<a href=\"", r->uri, "?b=",
                         balancer->name + sizeof("balancer://") - 1,
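
  To make the lbfactor rebalancing in init_runtime_score() above concrete,
  here is a small worked example; the figures are illustrative and not taken
  from the commit:

      Configured lbfactors 25, 25, 0   -> ffactor = 50, one zero entry,
                                          median = (100 - 50) / 1 = 50,
                                          factors become 25, 25, 50.
      Configured lbfactors 60, 60, 30  -> ffactor = 150,
                                          median = (150 - 100) / 3 = 16.67,
                                          factors become 43.33, 43.33, 13.33.

  In both cases the factors end up summing to (roughly) 100, and lbstatus is
  then seeded from lbfactor for every worker.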
  
  
  
  1.144     +8 -58     httpd-2.0/modules/proxy/proxy_util.c
  
  Index: proxy_util.c
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/modules/proxy/proxy_util.c,v
  retrieving revision 1.143
  retrieving revision 1.144
  diff -u -r1.143 -r1.144
  --- proxy_util.c	9 Sep 2004 19:57:29 -0000	1.143
  +++ proxy_util.c	10 Sep 2004 08:16:25 -0000	1.144
  @@ -1037,6 +1037,8 @@
   
       ap_str_tolower(uri);
       *balancer = apr_array_push(conf->balancers);
  +    memset(*balancer, 0, sizeof(proxy_balancer));
  +
       (*balancer)->name = uri;
       (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_runtime_worker));
       /* XXX Is this a right place to create mutex */
  @@ -1150,14 +1152,7 @@
   PROXY_DECLARE(void) 
   ap_proxy_add_worker_to_balancer(apr_pool_t *pool, proxy_balancer *balancer, proxy_worker *worker)
   {
  -    int i;
  -    double median, ffactor = 0.0;
  -    proxy_runtime_worker *runtime, *workers;    
  -#if PROXY_HAS_SCOREBOARD
  -    lb_score *score;
  -#else
  -    void *score;
  -#endif
  +    proxy_runtime_worker *runtime;
   
   #if PROXY_HAS_SCOREBOARD
       int mpm_daemons;
  @@ -1173,61 +1168,16 @@
                             worker->name, balancer->name);
               return;
           }
  -        score = ap_get_scoreboard_lb(getpid(), lb_workers);
       }
  -    else
   #endif
  -    {
  -        /* Use the plain memory */
  -        score = apr_pcalloc(pool, sizeof(proxy_runtime_stat));
  -    }
  -    if (!score)
  -        return;
       runtime = apr_array_push(balancer->workers);
  -    runtime->w = worker;
  -    runtime->s = (proxy_runtime_stat *)score;
  -    runtime->s->id = lb_workers;
  -    /* TODO: deal with the dynamic overflow */
  +    runtime->w  = worker;
  +    runtime->b  = balancer;
  +    runtime->id = lb_workers;
  +    runtime->s  = NULL;
  +    /* Increase the total runtime count */
       ++lb_workers;
   
  -    /* Recalculate lbfactors */
  -    workers = (proxy_runtime_worker *)balancer->workers->elts;
  -
  -    for (i = 0; i < balancer->workers->nelts; i++) {
  -        /* Set to the original configuration */
  -        workers[i].s->lbfactor = workers[i].w->lbfactor;
  -        ffactor += workers[i].s->lbfactor;
  -    }
  -    if (ffactor < 100.0) {
  -        int z = 0;
  -        for (i = 0; i < balancer->workers->nelts; i++) {
  -            if (workers[i].s->lbfactor == 0.0) 
  -                ++z;
  -        }
  -        if (z) {
  -            median = (100.0 - ffactor) / z;
  -            for (i = 0; i < balancer->workers->nelts; i++) {
  -                if (workers[i].s->lbfactor == 0.0) 
  -                    workers[i].s->lbfactor = median;
  -            }
  -        }
  -        else {
  -            median = (100.0 - ffactor) / balancer->workers->nelts;
  -            for (i = 0; i < balancer->workers->nelts; i++)
  -                workers[i].s->lbfactor += median;
  -        }
  -    }
  -    else if (ffactor > 100.0) {
  -        median = (ffactor - 100.0) / balancer->workers->nelts;
  -        for (i = 0; i < balancer->workers->nelts; i++) {
  -            if (workers[i].s->lbfactor > median)
  -                workers[i].s->lbfactor -= median;
  -        }
  -    } 
  -    for (i = 0; i < balancer->workers->nelts; i++) {
  -        /* Update the status entires */
  -        workers[i].s->lbstatus = workers[i].s->lbfactor;
  -    }
   }
   
   PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
  
  
  
  1.129     +3 -2      httpd-2.0/modules/proxy/mod_proxy.h
  
  Index: mod_proxy.h
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/modules/proxy/mod_proxy.h,v
  retrieving revision 1.128
  retrieving revision 1.129
  diff -u -r1.128 -r1.129
  --- mod_proxy.h	9 Sep 2004 13:15:40 -0000	1.128
  +++ mod_proxy.h	10 Sep 2004 08:16:25 -0000	1.129
  @@ -266,7 +266,6 @@
   
   /* Runtime worker status informations. Shared in scoreboard */
   typedef struct {
  -    int             id;         /* scoreboard id */
       double          lbstatus;   /* Current lbstatus */
       double          lbfactor;   /* dynamic lbfactor */
       apr_size_t      transfered; /* Number of bytes transfered to remote */
  @@ -276,12 +275,14 @@
   
   /* Runtime worker. */
   typedef struct {
  -    proxy_balancer     *b;         /* balancer containing this worker */
  +    int                id;      /* scoreboard id */
  +    proxy_balancer     *b;      /* balancer containing this worker */
       proxy_worker       *w;
       proxy_runtime_stat *s;
   } proxy_runtime_worker;
   
   struct proxy_balancer {
  +    int                status;
       apr_array_header_t *workers; /* array of proxy_runtime_workers */
       const char *name;            /* name of the load balancer */
       const char *sticky;          /* sticky session identifier */
  
  
  

Re: cvs commit: httpd-2.0/modules/proxy proxy_balancer.c proxy_util.c mod_proxy.h

Posted by Mladen Turk <mt...@apache.org>.
William A. Rowe, Jr. wrote:

> You introduce a race between multiple listening threads attempting
> to initialize the scoreboard together.  post_config or child_init
> should solve the problem, no?
> 

Yes, I see that (now that you've mentioned it :)).
I'll use child_init to initialize all balancers (see the sketch after this
message).

Regards,
MT.
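
A minimal sketch of what a child_init based fix along those lines might look
like, assuming init_runtime_score() stays reachable from proxy_balancer.c; the
names balancer_child_init and register_hooks are illustrative, and this is a
sketch of the approach, not the committed follow-up:

    #include "mod_proxy.h"

    /* Sketch: initialize every balancer once per child process, before any
     * request thread can race on the shared data.  Per-virtual-host proxy
     * configurations would need the same treatment via s->next. */
    static void balancer_child_init(apr_pool_t *p, server_rec *s)
    {
        void *sconf = s->module_config;
        proxy_server_conf *conf =
            (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
        proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
        int i;

        for (i = 0; i < conf->balancers->nelts; i++, balancer++)
            init_runtime_score(conf->pool, balancer);
    }

    static void register_hooks(apr_pool_t *p)
    {
        /* ... the module's existing hook registrations ... */
        ap_hook_child_init(balancer_child_init, NULL, NULL, APR_HOOK_MIDDLE);
    }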


Re: cvs commit: httpd-2.0/modules/proxy proxy_balancer.c proxy_util.c mod_proxy.h

Posted by "William A. Rowe, Jr." <wr...@rowe-clan.net>.
You introduce a race between multiple listening threads attempting
to initialize the scoreboard together.  post_config or child_init
should solve the problem, no?

At 03:16 AM 9/10/2004, mturk@apache.org wrote:
>mturk       2004/09/10 01:16:26
>
>  Modified:    modules/proxy proxy_balancer.c proxy_util.c mod_proxy.h
>  Log:
>  Move the shared scoreboard data initialization to the balancer module.
>  The initialization is done on first request, as it is for the worker,
>  because during the config phase there is no scoreboard to allocate
>  from. Thanks to Christian von Roques for spotting that.
>  
>  @@ -273,6 +343,10 @@
>       if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url)))
>           return DECLINED;
>       
>  +    /* Initialize shared scoreboard data */ 
>  +    if (!((*balancer)->status))
>  +        init_runtime_score(conf->pool, *balancer);
>  +
>       /* Step 2: find the session route */
>       
>       runtime = find_session_route(*balancer, r, &route, url);