Posted to cvs@httpd.apache.org by ji...@apache.org on 2011/01/13 16:58:51 UTC

svn commit: r1058622 - in /httpd/httpd/trunk: include/scoreboard.h modules/proxy/mod_proxy.c modules/proxy/mod_proxy.h modules/proxy/mod_proxy_balancer.c modules/proxy/proxy_util.c server/scoreboard.c

Author: jim
Date: Thu Jan 13 15:58:50 2011
New Revision: 1058622

URL: http://svn.apache.org/viewvc?rev=1058622&view=rev
Log:
OK... a good commit point (we don't quite compile yet though...)

Pull out the worker scoreboard cruft and start moving most
worker stuff to shm. Use slotmem for workers and provide
space for growth.

Redo logic:

  ap_proxy_define_*
  ap_proxy_create_*
  ap_proxy_initialize_*

Right now just for workers, but lay the framework for balancers
as well. The idea is to break out the functional parts
to make dynamic addition easier. Defining is simply describing
the worker and tucking that info away. When we create, we
go ahead and create the shared memory, etc... Initialize
simply allows the child process to access the shm...
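
For orientation, a rough illustrative sketch of the intended call order
(not part of the commit, and not compilable against this tree -- the commit
itself doesn't build yet). The define/create signatures and the slotmem
provider calls are taken from the diff below; the initialize call and the
variable names (conf, balancer, worker, shm, index, size, num) are
placeholders/assumptions:

    /* config time: describe the worker and tuck the info away (no shm yet) */
    const char *err = ap_proxy_define_worker(cmd->pool, &worker, balancer, conf, url);

    /* post_config: grab a shared-memory slot and fill it from the local data */
    unsigned int index;
    proxy_worker_shared *shm;
    storage->grab(balancer->slot, &index);
    storage->dptr(balancer->slot, index, (void *)&shm);
    ap_proxy_create_worker(worker, shm, index);

    /* child_init: attach the child process to the (possibly relocated) shm */
    storage->attach(&balancer->slot, balancer->name, &size, &num, p);
    ap_proxy_initialize_worker(worker, s, p);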

Modified:
    httpd/httpd/trunk/include/scoreboard.h
    httpd/httpd/trunk/modules/proxy/mod_proxy.c
    httpd/httpd/trunk/modules/proxy/mod_proxy.h
    httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c
    httpd/httpd/trunk/modules/proxy/proxy_util.c
    httpd/httpd/trunk/server/scoreboard.c

Modified: httpd/httpd/trunk/include/scoreboard.h
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/include/scoreboard.h?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/include/scoreboard.h (original)
+++ httpd/httpd/trunk/include/scoreboard.h Thu Jan 13 15:58:50 2011
@@ -126,7 +126,6 @@ typedef struct {
                                          * should still be serving requests.
                                          */
     apr_time_t restart_time;
-    int             lb_limit;
 } global_score;
 
 /* stuff which the parent generally writes and the children rarely read */
@@ -139,12 +138,6 @@ struct process_score {
                              */
 };
 
-/* stuff which is lb specific */
-typedef struct lb_score lb_score;
-struct lb_score {
-    unsigned char data[1024];
-};
-
 /* Scoreboard is now in 'local' memory, since it isn't updated once created,
  * even in forked architectures.  Child created-processes (non-fork) will
  * set up these indicies into the (possibly relocated) shmem records.
@@ -153,7 +146,6 @@ typedef struct {
     global_score *global;
     process_score *parent;
     worker_score **servers;
-    lb_score     *balancers;
 } scoreboard;
 
 typedef struct ap_sb_handle_t ap_sb_handle_t;
@@ -182,7 +174,6 @@ AP_DECLARE(worker_score *) ap_get_scoreb
                                                                 int thread_num);
 AP_DECLARE(process_score *) ap_get_scoreboard_process(int x);
 AP_DECLARE(global_score *) ap_get_scoreboard_global(void);
-AP_DECLARE(lb_score *) ap_get_scoreboard_lb(int lb_num);
 
 AP_DECLARE_DATA extern scoreboard *ap_scoreboard_image;
 AP_DECLARE_DATA extern const char *ap_scoreboard_fname;
@@ -206,19 +197,6 @@ const char *ap_set_reqtail(cmd_parms *cm
   */  
 AP_DECLARE_HOOK(int, pre_mpm, (apr_pool_t *p, ap_scoreboard_e sb_type))
 
-/**
-  * proxy load balancer
-  * @return the number of load balancer workers.
-  */  
-APR_DECLARE_OPTIONAL_FN(int, ap_proxy_lb_workers,
-                        (void));
-/**
-  * proxy load balancer
-  * @return the size of lb_workers.
-  */  
-APR_DECLARE_OPTIONAL_FN(int, ap_proxy_lb_worker_size,
-                        (void));
-
 /* for time_process_request() in http_main.c */
 #define START_PREQUEST 1
 #define STOP_PREQUEST  2

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy.c?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy.c (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy.c Thu Jan 13 15:58:50 2011
@@ -60,12 +60,12 @@ static int ap_proxy_lb_worker_size(void)
 
 #define PROXY_COPY_CONF_PARAMS(w, c) \
     do {                             \
-        (w)->timeout              = (c)->timeout;               \
-        (w)->timeout_set          = (c)->timeout_set;           \
-        (w)->recv_buffer_size     = (c)->recv_buffer_size;      \
-        (w)->recv_buffer_size_set = (c)->recv_buffer_size_set;  \
-        (w)->io_buffer_size       = (c)->io_buffer_size;        \
-        (w)->io_buffer_size_set   = (c)->io_buffer_size_set;    \
+        (w)->s->timeout              = (c)->timeout;               \
+        (w)->s->timeout_set          = (c)->timeout_set;           \
+        (w)->s->recv_buffer_size     = (c)->recv_buffer_size;      \
+        (w)->s->recv_buffer_size_set = (c)->recv_buffer_size_set;  \
+        (w)->s->io_buffer_size       = (c)->io_buffer_size;        \
+        (w)->s->io_buffer_size_set   = (c)->io_buffer_size_set;    \
     } while (0)
 
 static const char *set_worker_param(apr_pool_t *p,
@@ -81,8 +81,8 @@ static const char *set_worker_param(apr_
         /* Normalized load factor. Used with BalancerMamber,
          * it is a number between 1 and 100.
          */
-        worker->lbfactor = atoi(val);
-        if (worker->lbfactor < 1 || worker->lbfactor > 100)
+        worker->s->lbfactor = atoi(val);
+        if (worker->s->lbfactor < 1 || worker->s->lbfactor > 100)
             return "LoadFactor must be number between 1..100";
     }
     else if (!strcasecmp(key, "retry")) {
@@ -93,8 +93,8 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 0)
             return "Retry must be a positive value";
-        worker->retry = apr_time_from_sec(ival);
-        worker->retry_set = 1;
+        worker->s->retry = apr_time_from_sec(ival);
+        worker->s->retry_set = 1;
     }
     else if (!strcasecmp(key, "ttl")) {
         /* Time in seconds that will destroy all the connections
@@ -103,7 +103,7 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 1)
             return "TTL must be at least one second";
-        worker->ttl = apr_time_from_sec(ival);
+        worker->s->ttl = apr_time_from_sec(ival);
     }
     else if (!strcasecmp(key, "min")) {
         /* Initial number of connections to remote
@@ -111,7 +111,7 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 0)
             return "Min must be a positive number";
-        worker->min = ival;
+        worker->s->min = ival;
     }
     else if (!strcasecmp(key, "max")) {
         /* Maximum number of connections to remote
@@ -119,7 +119,7 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 0)
             return "Max must be a positive number";
-        worker->hmax = ival;
+        worker->s->hmax = ival;
     }
     /* XXX: More inteligent naming needed */
     else if (!strcasecmp(key, "smax")) {
@@ -129,7 +129,7 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 0)
             return "Smax must be a positive number";
-        worker->smax = ival;
+        worker->s->smax = ival;
     }
     else if (!strcasecmp(key, "acquire")) {
         /* Acquire timeout in given unit (default is milliseconds).
@@ -140,8 +140,8 @@ static const char *set_worker_param(apr_
             return "Acquire timeout has wrong format";
         if (timeout < 1000)
             return "Acquire must be at least one millisecond";
-        worker->acquire = timeout;
-        worker->acquire_set = 1;
+        worker->s->acquire = timeout;
+        worker->s->acquire_set = 1;
     }
     else if (!strcasecmp(key, "timeout")) {
         /* Connection timeout in seconds.
@@ -150,56 +150,56 @@ static const char *set_worker_param(apr_
         ival = atoi(val);
         if (ival < 1)
             return "Timeout must be at least one second";
-        worker->timeout = apr_time_from_sec(ival);
-        worker->timeout_set = 1;
+        worker->s->timeout = apr_time_from_sec(ival);
+        worker->s->timeout_set = 1;
     }
     else if (!strcasecmp(key, "iobuffersize")) {
         long s = atol(val);
         if (s < 512 && s) {
             return "IOBufferSize must be >= 512 bytes, or 0 for system default.";
         }
-        worker->io_buffer_size = (s ? s : AP_IOBUFSIZE);
-        worker->io_buffer_size_set = 1;
+        worker->s->io_buffer_size = (s ? s : AP_IOBUFSIZE);
+        worker->s->io_buffer_size_set = 1;
     }
     else if (!strcasecmp(key, "receivebuffersize")) {
         ival = atoi(val);
         if (ival < 512 && ival != 0) {
             return "ReceiveBufferSize must be >= 512 bytes, or 0 for system default.";
         }
-        worker->recv_buffer_size = ival;
-        worker->recv_buffer_size_set = 1;
+        worker->s->recv_buffer_size = ival;
+        worker->s->recv_buffer_size_set = 1;
     }
     else if (!strcasecmp(key, "keepalive")) {
         if (!strcasecmp(val, "on"))
-            worker->keepalive = 1;
+            worker->s->keepalive = 1;
         else if (!strcasecmp(val, "off"))
-            worker->keepalive = 0;
+            worker->s->keepalive = 0;
         else
             return "KeepAlive must be On|Off";
-        worker->keepalive_set = 1;
+        worker->s->keepalive_set = 1;
     }
     else if (!strcasecmp(key, "disablereuse")) {
         if (!strcasecmp(val, "on"))
-            worker->disablereuse = 1;
+            worker->s->disablereuse = 1;
         else if (!strcasecmp(val, "off"))
-            worker->disablereuse = 0;
+            worker->s->disablereuse = 0;
         else
             return "DisableReuse must be On|Off";
-        worker->disablereuse_set = 1;
+        worker->s->disablereuse_set = 1;
     }
     else if (!strcasecmp(key, "route")) {
         /* Worker route.
          */
-        if (strlen(val) > PROXY_WORKER_MAX_ROUTE_SIZ)
+        if (strlen(val) >= PROXY_WORKER_MAX_ROUTE_SIZE)
             return "Route length must be < 64 characters";
-        worker->route = apr_pstrdup(p, val);
+        PROXY_STRNCPY(worker->s->route, val);
     }
     else if (!strcasecmp(key, "redirect")) {
         /* Worker redirection route.
          */
-        if (strlen(val) > PROXY_WORKER_MAX_ROUTE_SIZ)
+        if (strlen(val) >= PROXY_WORKER_MAX_ROUTE_SIZE)
             return "Redirect length must be < 64 characters";
-        worker->redirect = apr_pstrdup(p, val);
+        PROXY_STRNCPY(worker->s->redirect, val);
     }
     else if (!strcasecmp(key, "status")) {
         const char *v;
@@ -217,33 +217,33 @@ static const char *set_worker_param(apr_
             }
             if (*v == 'D' || *v == 'd') {
                 if (mode)
-                    worker->status |= PROXY_WORKER_DISABLED;
+                    worker->s->status |= PROXY_WORKER_DISABLED;
                 else
-                    worker->status &= ~PROXY_WORKER_DISABLED;
+                    worker->s->status &= ~PROXY_WORKER_DISABLED;
             }
             else if (*v == 'S' || *v == 's') {
                 if (mode)
-                    worker->status |= PROXY_WORKER_STOPPED;
+                    worker->s->status |= PROXY_WORKER_STOPPED;
                 else
-                    worker->status &= ~PROXY_WORKER_STOPPED;
+                    worker->s->status &= ~PROXY_WORKER_STOPPED;
             }
             else if (*v == 'E' || *v == 'e') {
                 if (mode)
-                    worker->status |= PROXY_WORKER_IN_ERROR;
+                    worker->s->status |= PROXY_WORKER_IN_ERROR;
                 else
-                    worker->status &= ~PROXY_WORKER_IN_ERROR;
+                    worker->s->status &= ~PROXY_WORKER_IN_ERROR;
             }
             else if (*v == 'H' || *v == 'h') {
                 if (mode)
-                    worker->status |= PROXY_WORKER_HOT_STANDBY;
+                    worker->s->status |= PROXY_WORKER_HOT_STANDBY;
                 else
-                    worker->status &= ~PROXY_WORKER_HOT_STANDBY;
+                    worker->s->status &= ~PROXY_WORKER_HOT_STANDBY;
             }
             else if (*v == 'I' || *v == 'i') {
                 if (mode)
-                    worker->status |= PROXY_WORKER_IGNORE_ERRORS;
+                    worker->s->status |= PROXY_WORKER_IGNORE_ERRORS;
                 else
-                    worker->status &= ~PROXY_WORKER_IGNORE_ERRORS;
+                    worker->s->status &= ~PROXY_WORKER_IGNORE_ERRORS;
             }
             else {
                 return "Unknown status parameter option";
@@ -252,11 +252,11 @@ static const char *set_worker_param(apr_
     }
     else if (!strcasecmp(key, "flushpackets")) {
         if (!strcasecmp(val, "on"))
-            worker->flush_packets = flush_on;
+            worker->s->flush_packets = flush_on;
         else if (!strcasecmp(val, "off"))
-            worker->flush_packets = flush_off;
+            worker->s->flush_packets = flush_off;
         else if (!strcasecmp(val, "auto"))
-            worker->flush_packets = flush_auto;
+            worker->s->flush_packets = flush_auto;
         else
             return "flushpackets must be on|off|auto";
     }
@@ -266,9 +266,9 @@ static const char *set_worker_param(apr_
             return "flushwait must be <= 1000, or 0 for system default of 10 millseconds.";
         }
         if (ival == 0)
-            worker->flush_wait = PROXY_FLUSH_WAIT;
+            worker->s->flush_wait = PROXY_FLUSH_WAIT;
         else
-            worker->flush_wait = ival * 1000;    /* change to microseconds */
+            worker->s->flush_wait = ival * 1000;    /* change to microseconds */
     }
     else if (!strcasecmp(key, "ping")) {
         /* Ping/Pong timeout in given unit (default is second).
@@ -277,14 +277,14 @@ static const char *set_worker_param(apr_
             return "Ping/Pong timeout has wrong format";
         if (timeout < 1000)
             return "Ping/Pong timeout must be at least one millisecond";
-        worker->ping_timeout = timeout;
-        worker->ping_timeout_set = 1;
+        worker->s->ping_timeout = timeout;
+        worker->s->ping_timeout_set = 1;
     }
     else if (!strcasecmp(key, "lbset")) {
         ival = atoi(val);
         if (ival < 0 || ival > 99)
             return "lbset must be between 0 and 99";
-        worker->lbset = ival;
+        worker->s->lbset = ival;
     }
     else if (!strcasecmp(key, "connectiontimeout")) {
         /* Request timeout in given unit (default is second).
@@ -294,11 +294,13 @@ static const char *set_worker_param(apr_
             return "Connectiontimeout has wrong format";
         if (timeout < 1000)
             return "Connectiontimeout must be at least one millisecond.";
-        worker->conn_timeout = timeout;
-        worker->conn_timeout_set = 1;
+        worker->s->conn_timeout = timeout;
+        worker->s->conn_timeout_set = 1;
     }
     else if (!strcasecmp(key, "flusher")) {
-        worker->flusher = apr_pstrdup(p, val);
+        if (strlen(val) >= PROXY_WORKER_MAX_SCHEME_SIZE)
+            return "flusher name length must be < 16 characters";
+        PROXY_STRNCPY(worker->s->flusher, val);
     }
     else {
         return "unknown Worker parameter";
@@ -1447,10 +1449,10 @@ static const char *
     arr = apr_table_elts(params);
     elts = (const apr_table_entry_t *)arr->elts;
     /* Distinguish the balancer from worker */
-    if (strncasecmp(r, "balancer:", 9) == 0) {
+    if (ap_proxy_valid_balancer_name(r)) {
         proxy_balancer *balancer = ap_proxy_get_balancer(cmd->pool, conf, r);
         if (!balancer) {
-            const char *err = ap_proxy_add_balancer(&balancer,
+            const char *err = ap_proxy_alloc_balancer(&balancer,
                                                     cmd->pool,
                                                     conf, r);
             if (err)
@@ -1906,7 +1908,7 @@ static const char *add_member(cmd_parms 
     /* Try to find the balancer */
     balancer = ap_proxy_get_balancer(cmd->temp_pool, conf, path);
     if (!balancer) {
-        const char *err = ap_proxy_add_balancer(&balancer,
+        const char *err = ap_proxy_alloc_balancer(&balancer,
                                                 cmd->pool,
                                                 conf, path);
         if (err)
@@ -1949,11 +1951,11 @@ static const char *
         name = ap_getword_conf(cmd->temp_pool, &arg);
     }
 
-    if (strncasecmp(name, "balancer:", 9) == 0) {
+    if (ap_proxy_valid_balancer_name(name)) {
         balancer = ap_proxy_get_balancer(cmd->pool, conf, name);
         if (!balancer) {
             if (in_proxy_section) {
-                err = ap_proxy_add_balancer(&balancer,
+                err = ap_proxy_alloc_balancer(&balancer,
                                             cmd->pool,
                                             conf, name);
                 if (err)
@@ -2100,10 +2102,10 @@ static const char *proxysection(cmd_parm
             return apr_pstrcat(cmd->pool, thiscmd->name,
                                "> arguments are not supported for non url.",
                                NULL);
-        if (strncasecmp(conf->p, "balancer:", 9) == 0) {
+        if (ap_proxy_valid_balancer_name(conf->p)) {
             balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
             if (!balancer) {
-                err = ap_proxy_add_balancer(&balancer,
+                err = ap_proxy_alloc_balancer(&balancer,
                                             cmd->pool,
                                             sconf, conf->p);
                 if (err)

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy.h
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy.h?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy.h (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy.h Thu Jan 13 15:58:50 2011
@@ -57,6 +57,7 @@
 #include "util_filter.h"
 #include "util_ebcdic.h"
 #include "ap_provider.h"
+#include "ap_slotmem.h"
 
 #if APR_HAVE_NETINET_IN_H
 #include <netinet/in.h>
@@ -70,6 +71,8 @@ enum enctype {
     enc_path, enc_search, enc_user, enc_fpath, enc_parm
 };
 
+#define BALANCER_PREFIX "balancer://"
+
 #if APR_CHARSET_EBCDIC
 #define CRLF   "\r\n"
 #else /*APR_CHARSET_EBCDIC*/
@@ -229,9 +232,7 @@ typedef struct {
     int          close:1;      /* Close 'this' connection */
     int          need_flush:1; /* Flag to decide whether we need to flush the
                                 * filter chain or not */
-#if APR_HAS_THREADS
     int          inreslist:1;  /* connection in apr_reslist? */
-#endif
 } proxy_conn_rec;
 
 typedef struct {
@@ -243,9 +244,7 @@ typedef struct {
 struct proxy_conn_pool {
     apr_pool_t     *pool;   /* The pool used in constructor and destructor calls */
     apr_sockaddr_t *addr;   /* Preparsed remote address info */
-#if APR_HAS_THREADS
     apr_reslist_t  *res;    /* Connection resource list */
-#endif
     proxy_conn_rec *conn;   /* Single connection for prefork mpm */
 };
 
@@ -280,6 +279,7 @@ PROXY_WORKER_DISABLED | PROXY_WORKER_STO
 #define PROXY_WORKER_MAX_ROUTE_SIZE   64
 #define PROXY_WORKER_MAX_NAME_SIZE    96
 
+#define PROXY_STRNCPY(dst, src) apr_cpystrn((dst), (src), sizeof(dst))
 
 /* Runtime worker status informations. Shared in scoreboard */
 typedef struct {
@@ -299,8 +299,7 @@ typedef struct {
     int             hmax;       /* Hard maximum on the total number of connections */
     int             flush_wait; /* poll wait time in microseconds if flush_auto */
     int             index;      /* shm array index */
-    unsigned int    apr_hash;    /* hash #0 of worker name */
-    unsigned int    our_hash;    /* hash #1 of worker name. Why 2? hash collisions. */
+    unsigned int    hash;       /* hash of worker name */
     enum {
         flush_off,
         flush_on,
@@ -338,12 +337,13 @@ typedef struct {
 
 /* Worker configuration */
 struct proxy_worker {
+    int             index;      /* shm array index */
+    unsigned int    hash;       /* hash of worker name */
     proxy_conn_pool     *cp;    /* Connection pool to use */
     proxy_worker_shared   *s;   /* Shared data */
+    proxy_balancer  *balancer;  /* which balancer am I in? */
     void            *context;   /* general purpose storage */
-#if APR_HAS_THREADS
     apr_thread_mutex_t  *mutex; /* Thread lock for updating address cache */
-#endif
 };
 
 /*
@@ -353,12 +353,13 @@ struct proxy_worker {
 #define PROXY_FLUSH_WAIT 10000
 
 struct proxy_balancer {
-    apr_array_header_t *cw;      /* initially configured workers */
-    proxy_worker **workers;     /* array of proxy_workers - runtime*/
-    int max_workers;             /* maximum number of allowed workers */
-    const char *name;            /* name of the load balancer */
-    apr_interval_time_t timeout; /* Timeout for waiting on free connection */
-    const char *lbprovider;      /* name of the lbmethod provider to use */
+    apr_array_header_t *workers;  /* initially configured workers */
+    ap_slotmem_instance_t *slot;  /* worker shm data - runtime */
+    int growth;                   /* number of post-config workers that can be added */
+    int max_workers;              /* maximum number of allowed workers */
+    const char *name;             /* name of the load balancer */
+    apr_interval_time_t timeout;  /* Timeout for waiting on free connection */
+    const char *lbprovider;       /* name of the lbmethod provider to use */
     proxy_balancer_method *lbmethod;
 
     const char      *sticky_path;     /* URL sticky session identifier */
@@ -383,13 +384,8 @@ struct proxy_balancer_method {
     apr_status_t (*updatelbstatus)(proxy_balancer *balancer, proxy_worker *elected, server_rec *s); 
 };
 
-#if APR_HAS_THREADS
 #define PROXY_THREAD_LOCK(x)      apr_thread_mutex_lock((x)->mutex)
 #define PROXY_THREAD_UNLOCK(x)    apr_thread_mutex_unlock((x)->mutex)
-#else
-#define PROXY_THREAD_LOCK(x)      APR_SUCCESS
-#define PROXY_THREAD_UNLOCK(x)    APR_SUCCESS
-#endif
 
 #define PROXY_GLOBAL_LOCK(x)      apr_global_mutex_lock((x)->mutex)
 #define PROXY_GLOBAL_UNLOCK(x)    apr_global_mutex_unlock((x)->mutex)
@@ -517,41 +513,30 @@ typedef __declspec(dllimport) const char
 /* Connection pool API */
 /**
  * Get the worker from proxy configuration
- * @param p     memory pool used for finding worker
- * @param conf  current proxy server configuration
- * @param url   url to find the worker from
- * @return      proxy_worker or NULL if not found
+ * @param p        memory pool used for finding worker
+ * @param conf     current proxy server configuration
+ * @param balancer the balancer that the worker belongs to
+ * @param url      url to find the worker from
+ * @return         proxy_worker or NULL if not found
  */
 PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
                                                   proxy_server_conf *conf,
+                                                  proxy_balancer *balancer,
                                                   const char *url);
 /**
- * Add the worker to proxy configuration
- * @param worker the new worker
- * @param p      memory pool to allocate worker from 
- * @param conf   current proxy server configuration
- * @param url    url containing worker name
- * @param id     slotnumber id or -1 for auto allocation
- * @return       error message or NULL if successful
- */
-PROXY_DECLARE(const char *) ap_proxy_add_worker_wid(proxy_worker **worker,
-                                                apr_pool_t *p,
-                                                proxy_server_conf *conf,
-                                                const char *url,
-                                                int id);
-
-/**
- * Add the worker to proxy configuration
- * @param worker the new worker
- * @param p      memory pool to allocate worker from 
- * @param conf   current proxy server configuration
- * @param url    url containing worker name
- * @return       error message or NULL if successful
- */
-PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
-                                                apr_pool_t *p,
-                                                proxy_server_conf *conf,
-                                                const char *url);
+ * Define and allocate space for a worker in the proxy configuration
+ * @param p         memory pool to allocate worker from 
+ * @param worker    the new worker
+ * @param balancer  the balancer that the worker belongs to
+ * @param conf      current proxy server configuration
+ * @param url       url containing worker name
+ * @return          error message or NULL if successful (*worker is new worker)
+ */
+PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+                                                   proxy_worker **worker,
+                                                   proxy_balancer *balancer,
+                                                   proxy_server_conf *conf,
+                                                   const char *url);
 
 /**
  * Create new worker
@@ -579,7 +564,6 @@ PROXY_DECLARE(void) ap_proxy_initialize_
                                                      proxy_worker *worker,
                                                      server_rec *s);
 
-
 /**
  * Initialize the worker
  * @param worker worker to initialize
@@ -591,6 +575,14 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
                                                        server_rec *s,
                                                        apr_pool_t *p);
 /**
+ * Verifies valid balancer name (eg: balancer://foo)
+ * @param name  name to test
+ * @return      ptr to start of name or NULL if not valid
+ */
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name);
+
+
+/**
  * Get the balancer from proxy configuration
  * @param p     memory pool used for temporary storage while finding balancer
  * @param conf  current proxy server configuration
@@ -600,42 +592,21 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
 PROXY_DECLARE(proxy_balancer *) ap_proxy_get_balancer(apr_pool_t *p,
                                                       proxy_server_conf *conf,
                                                       const char *url);
+
 /**
- * Add the balancer to proxy configuration
- * @param balancer the new balancer
+ * Define and allocate space for a balancer in the proxy configuration
  * @param p      memory pool to allocate balancer from 
+ * @param balancer the new balancer
  * @param conf   current proxy server configuration
  * @param url    url containing balancer name
  * @return       error message or NULL if successfull
  */
-PROXY_DECLARE(const char *) ap_proxy_add_balancer(proxy_balancer **balancer,
-                                                  apr_pool_t *p,
-                                                  proxy_server_conf *conf,
-                                                  const char *url);
+PROXY_DECLARE(const char *) ap_proxy_define_balancer(apr_pool_t *p,
+                                                    proxy_balancer **balancer,
+                                                    proxy_server_conf *conf,
+                                                    const char *url);
 
 /**
- * Add the worker to the balancer
- * @param pool     memory pool for adding worker 
- * @param balancer balancer to add to
- * @param worker worker to add
- * @param id     slotnumber id or -1 for auto allocation
- * @note A single worker can be added to multiple balancers.
- */
-PROXY_DECLARE(void) ap_proxy_add_worker_to_balancer_wid(apr_pool_t *pool,
-                                                    proxy_balancer *balancer,
-                                                    proxy_worker *worker,
-                                                    int id);
-/**
- * Add the worker to the balancer
- * @param pool     memory pool for adding worker 
- * @param balancer balancer to add to
- * @param worker worker to add
- * @note A single worker can be added to multiple balancers.
- */
-PROXY_DECLARE(void) ap_proxy_add_worker_to_balancer(apr_pool_t *pool,
-                                                    proxy_balancer *balancer,
-                                                    proxy_worker *worker);
-/**
  * Get the most suitable worker and/or balancer for the request
  * @param worker   worker used for processing request
  * @param balancer balancer used for processing request

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c Thu Jan 13 15:58:50 2011
@@ -25,6 +25,7 @@
 #include "apr_date.h"
 
 static const char *balancer_mutex_type = "proxy-balancer-shm";
+ap_slotmem_provider_t *storage = NULL;
 
 module AP_MODULE_DECLARE_DATA proxy_balancer_module;
 
@@ -114,7 +115,7 @@ static int init_balancer_members(proxy_s
              * If the worker is not initialized check whether its scoreboard
              * slot is already initialized.
              */
-            slot = (proxy_worker_shared *) ap_get_scoreboard_lb((*workers)->id);
+            slot = (proxy_worker_shared *) XXXXXap_get_scoreboard_lb((*workers)->id);
             if (slot) {
                 worker_is_initialized = slot->status & PROXY_WORKER_INITIALIZED;
             }
@@ -367,7 +368,6 @@ static proxy_worker *find_best_worker(pr
          * By default the timeout is not set, and the server
          * returns SERVER_BUSY.
          */
-#if APR_HAS_THREADS
         if (balancer->timeout) {
             /* XXX: This can perhaps be build using some
              * smarter mechanism, like tread_cond.
@@ -391,7 +391,6 @@ static proxy_worker *find_best_worker(pr
             /* restore the timeout */
             balancer->timeout = timeout;
         }
-#endif
     }
 
     return candidate;
@@ -716,6 +715,53 @@ static int balancer_post_config(apr_pool
     if (rv != APR_SUCCESS) {
         return HTTP_INTERNAL_SERVER_ERROR;
     }
+
+    /*
+     * Get worker slotmem setup
+     */
+    storage = ap_lookup_provider(AP_SLOTMEM_PROVIDER_GROUP, "shared", "0");
+    if (!storage) {
+        ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s,
+                     "ap_lookup_provider %s failed", AP_SLOTMEM_PROVIDER_GROUP);
+        return !OK;
+    }
+    /*
+     * Go thru each Vhost and create the shared mem slotmem for
+     * each balancer's workers
+     */
+    while (s) {
+        int i,j;
+        sconf = s->module_config;
+        conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
+        
+        /* Initialize shared scoreboard data */
+        balancer = (proxy_balancer *)conf->balancers->elts;
+        for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+            proxy_worker *worker;
+            
+            balancer->max_workers = balancer->workers->nelts + balancer->growth;
+            storage->create(&balancer->slot, balancer->name, sizeof(proxy_worker_shared),
+                            balancer->max_workers, AP_SLOTMEM_TYPE_PREGRAB, pconf);
+            if (!balancer->slot) {
+                ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_create failed");
+                return !OK;
+            }
+            worker = (proxy_worker *)balancer->workers->elts;
+            for (j = 0; j < balancer->workers->nelts; j++, worker++) {
+                proxy_worker_shared *shm;
+                unsigned int index;
+                if ((storage->grab(balancer->slot, &index) != APR_SUCCESS) ||
+                    (storage->dptr(balancer->slot, index, (void *)&shm) != APR_SUCCESS)) {
+                    ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_grab/dptr failed");
+                    return !OK;
+                }
+                ap_proxy_create_worker(worker, shm, index);
+            }
+        }
+        s = s->next;
+    }
+    
     return OK;
 }
 
@@ -1015,6 +1061,13 @@ static void balancer_child_init(apr_pool
         /* Initialize shared scoreboard data */
         balancer = (proxy_balancer *)conf->balancers->elts;
         for (i = 0; i < conf->balancers->nelts; i++) {
+            apr_size_t size;
+            unsigned int num;
+            storage->attach(&balancer->slot, balancer->name, &size, &num, p);
+            if (!balancer->slot) {
+                ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_attach failed");
+                return;
+            }
             if (balancer->lbmethod && balancer->lbmethod->reset)
                balancer->lbmethod->reset(balancer, s);
             init_balancer_members(conf, s, balancer);

Modified: httpd/httpd/trunk/modules/proxy/proxy_util.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/proxy_util.c?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/proxy_util.c (original)
+++ httpd/httpd/trunk/modules/proxy/proxy_util.c Thu Jan 13 15:58:50 2011
@@ -1092,6 +1092,7 @@ PROXY_DECLARE(const char *) ap_proxy_loc
             ap_get_module_config(r->server->module_config, &proxy_module);
         proxy_balancer *balancer;
         const char *real = ent[i].real;
+        const char *bname;
         /*
          * First check if mapping against a balancer and see
          * if we have such a entity. If so, then we need to
@@ -1099,11 +1100,11 @@ PROXY_DECLARE(const char *) ap_proxy_loc
          * or may not be the right one... basically, we need
          * to find which member actually handled this request.
          */
-        if ((strncasecmp(real, "balancer://", 11) == 0) &&
-            (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
+        bname = ap_proxy_valid_balancer_name(real);
+        if (bname && (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
             int n, l3 = 0;
             proxy_worker **worker = (proxy_worker **)balancer->workers->elts;
-            const char *urlpart = ap_strchr_c(real + 11, '/');
+            const char *urlpart = ap_strchr_c(bname, '/');
             if (urlpart) {
                 if (!urlpart[1])
                     urlpart = NULL;
@@ -1284,25 +1285,42 @@ PROXY_DECLARE(const char *) ap_proxy_coo
     return ret;
 }
 
+/*
+ * BALANCER related...
+ */
+
+/*
+ * verifies that the balancer name conforms to standards. If
+ * so, returns ptr to the actual name (BALANCER_PREFIX removed),
+ * otherwise NULL
+ */
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name)
+{
+    if (strncasecmp(name, BALANCER_PREFIX, sizeof(BALANCER_PREFIX) - 1) == 0)
+        return (char *)(name + sizeof(BALANCER_PREFIX) - 1);
+    else
+        return NULL;
+}
+
+
 PROXY_DECLARE(proxy_balancer *) ap_proxy_get_balancer(apr_pool_t *p,
                                                       proxy_server_conf *conf,
                                                       const char *url)
 {
     proxy_balancer *balancer;
-    char *c, *uri = apr_pstrdup(p, url);
+    char *name, *q, *uri = apr_pstrdup(p, url);
     int i;
 
-    c = strchr(uri, ':');
-    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+    if (!(name = ap_proxy_valid_balancer_name(uri)))
        return NULL;
-    }
+
     /* remove path from uri */
-    if ((c = strchr(c + 3, '/'))) {
-        *c = '\0';
-    }
+    if ((q = strchr(name, '/')))
+        *q = '\0';
+
     balancer = (proxy_balancer *)conf->balancers->elts;
     for (i = 0; i < conf->balancers->nelts; i++) {
-        if (strcasecmp(balancer->name, uri) == 0) {
+        if (strcasecmp(balancer->name, name) == 0) {
             return balancer;
         }
         balancer++;
@@ -1310,22 +1328,24 @@ PROXY_DECLARE(proxy_balancer *) ap_proxy
     return NULL;
 }
 
-PROXY_DECLARE(const char *) ap_proxy_add_balancer(proxy_balancer **balancer,
+PROXY_DECLARE(const char *) ap_proxy_define_balancer(proxy_balancer **balancer,
                                                   apr_pool_t *p,
                                                   proxy_server_conf *conf,
                                                   const char *url)
 {
-    char *c, *q, *uri = apr_pstrdup(p, url);
+    char *name, *q, *uri = apr_pstrdup(p, url);
     proxy_balancer_method *lbmethod;
 
-    c = strchr(uri, ':');
-    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0')
-       return "Bad syntax for a balancer name";
+    /* We should never get here without a valid BALANCER_PREFIX... */
+
+    if (!(name = ap_proxy_valid_balancer_name(uri)))
+        return "Bad syntax for a balancer name";
+    
     /* remove path from uri */
-    if ((q = strchr(c + 3, '/')))
+    if ((q = strchr(name, '/')))
         *q = '\0';
-
-    ap_str_tolower(uri);
+    
+    ap_str_tolower(name);
     *balancer = apr_array_push(conf->balancers);
     memset(*balancer, 0, sizeof(proxy_balancer));
 
@@ -1338,86 +1358,33 @@ PROXY_DECLARE(const char *) ap_proxy_add
         return "Can't find 'byrequests' lb method";
     }
 
-    (*balancer)->name = uri;
+    (*balancer)->name = name;
     (*balancer)->lbmethod = lbmethod;
     (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
     (*balancer)->updated = apr_time_now();
-    /* XXX Is this a right place to create mutex */
-#if APR_HAS_THREADS
-    if (apr_thread_mutex_create(&((*balancer)->mutex),
-                APR_THREAD_MUTEX_DEFAULT, p) != APR_SUCCESS) {
-        /* XXX: Do we need to log something here */
-        return "can not create thread mutex";
-    }
-#endif
 
     return NULL;
 }
 
-PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
-                                                  proxy_server_conf *conf,
-                                                  const char *url)
+#if 0
+/*
+ * Create an already defined balancer and free up memory.
+ * Placeholder for when we make +/- of balancers runtime as well
+ */
+PROXY_DECLARE(void) ap_proxy_create_balancer(TODO)
 {
-    proxy_worker *worker;
-    proxy_worker *max_worker = NULL;
-    int max_match = 0;
-    int url_length;
-    int min_match;
-    int worker_name_length;
-    const char *c;
-    char *url_copy;
-    int i;
-
-    c = ap_strchr_c(url, ':');
-    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
-       return NULL;
-    }
-
-    url_copy = apr_pstrdup(p, url);
-    url_length = strlen(url);
+}
 
-    /*
-     * We need to find the start of the path and
-     * therefore we know the length of the scheme://hostname/
-     * part to we can force-lowercase everything up to
-     * the start of the path.
-     */
-    c = ap_strchr_c(c+3, '/');
-    if (c) {
-        char *pathstart;
-        pathstart = url_copy + (c - url);
-        *pathstart = '\0';
-        ap_str_tolower(url_copy);
-        min_match = strlen(url_copy);
-        *pathstart = '/';
-    }
-    else {
-        ap_str_tolower(url_copy);
-        min_match = strlen(url_copy);
-    }
+PROXY_DECLARE(void) ap_proxy_initialize_balancer(TODO)
+{
+}
 
-    worker = (proxy_worker *)conf->workers->elts;
+#endif
 
-    /*
-     * Do a "longest match" on the worker name to find the worker that
-     * fits best to the URL, but keep in mind that we must have at least
-     * a minimum matching of length min_match such that
-     * scheme://hostname[:port] matches between worker and url.
-     */
-    for (i = 0; i < conf->workers->nelts; i++) {
-        if ( ((worker_name_length = strlen(worker->name)) <= url_length)
-           && (worker_name_length >= min_match)
-           && (worker_name_length > max_match)
-           && (strncmp(url_copy, worker->name, worker_name_length) == 0) ) {
-            max_worker = worker;
-            max_match = worker_name_length;
-        }
-        worker++;
-    }
-    return max_worker;
-}
+/*
+ * CONNECTION related...
+ */
 
-#if APR_HAS_THREADS
 static apr_status_t conn_pool_cleanup(void *theworker)
 {
     proxy_worker *worker = (proxy_worker *)theworker;
@@ -1426,7 +1393,6 @@ static apr_status_t conn_pool_cleanup(vo
     }
     return APR_SUCCESS;
 }
-#endif
 
 static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
 {
@@ -1435,285 +1401,19 @@ static void init_conn_pool(apr_pool_t *p
 
     /*
      * Create a connection pool's subpool.
-     * This pool is used for connection recycling.
-     * Once the worker is added it is never removed but
-     * it can be disabled.
-     */
-    apr_pool_create(&pool, p);
-    apr_pool_tag(pool, "proxy_worker_cp");
-    /*
-     * Alloc from the same pool as worker.
-     * proxy_conn_pool is permanently attached to the worker.
-     */
-    cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
-    cp->pool = pool;
-    worker->cp = cp;
-}
-
-PROXY_DECLARE(const char *) ap_proxy_add_worker_wid(proxy_worker **worker,
-                                                apr_pool_t *p,
-                                                proxy_server_conf *conf,
-                                                const char *url,
-                                                int id)
-{
-    int rv;
-    apr_uri_t uri;
-
-    rv = apr_uri_parse(p, url, &uri);
-
-    if (rv != APR_SUCCESS) {
-        return "Unable to parse URL";
-    }
-    if (!uri.hostname || !uri.scheme) {
-        return "URL must be absolute!";
-    }
-
-    ap_str_tolower(uri.hostname);
-    ap_str_tolower(uri.scheme);
-    *worker = apr_array_push(conf->workers);
-    memset(*worker, 0, sizeof(proxy_worker));
-    (*worker)->name = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD);
-    (*worker)->scheme = uri.scheme;
-    (*worker)->hostname = uri.hostname;
-    (*worker)->port = uri.port;
-    if (id < 0) {
-        (*worker)->id   = proxy_lb_workers;
-        /* Increase the total worker count */
-        proxy_lb_workers++;
-    } else {
-        (*worker)->id   = id;
-    }
-    (*worker)->flush_packets = flush_off;
-    (*worker)->flush_wait = PROXY_FLUSH_WAIT;
-    (*worker)->smax = -1;
-    (*worker)->our_hash = ap_proxy_hashfunc((*worker)->name, PROXY_HASHFUNC_DEFAULT);
-    (*worker)->apr_hash = ap_proxy_hashfunc((*worker)->name, PROXY_HASHFUNC_APR);
-    (*worker)->cp = NULL;
-    (*worker)->mutex = NULL;
-
-    return NULL;
-}
-
-PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
-                                                apr_pool_t *p,
-                                                proxy_server_conf *conf,
-                                                const char *url)
-{
-    return ap_proxy_add_worker_wid(worker, p, conf, url, -1);
-}
-
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker_wid(apr_pool_t *p, int id)
-{
-
-    proxy_worker *worker;
-    worker = (proxy_worker *)apr_pcalloc(p, sizeof(proxy_worker));
-    if (id < 0) {
-        worker->id = proxy_lb_workers;
-        /* Increase the total worker count */
-        proxy_lb_workers++;
-    } else {
-        worker->id = id;
-    }
-    worker->smax = -1;
-    worker->cp = NULL;
-    worker->mutex = NULL;
-
-    return worker;
-}
-
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker(apr_pool_t *p)
-{
-    return ap_proxy_create_worker_wid(p, -1);
-}
-
-PROXY_DECLARE(void)
-ap_proxy_add_worker_to_balancer_wid(apr_pool_t *pool, proxy_balancer *balancer,
-                                proxy_worker *worker, int id)
-{
-    proxy_worker **runtime;
-
-    runtime = apr_array_push(balancer->workers);
-    *runtime = worker;
-    if (id < 0) {
-        (*runtime)->id = proxy_lb_workers;
-        /* Increase the total runtime count */
-        proxy_lb_workers++;
-    } else {
-        (*runtime)->id = id;
-    }
-    balancer->updated = apr_time_now();
-
-}
-
-PROXY_DECLARE(void)
-ap_proxy_add_worker_to_balancer(apr_pool_t *pool, proxy_balancer *balancer,
-                                proxy_worker *worker)
-{
-    ap_proxy_add_worker_to_balancer_wid(pool, balancer, worker, -1);
-}
-
-PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
-                                        proxy_balancer **balancer,
-                                        request_rec *r,
-                                        proxy_server_conf *conf, char **url)
-{
-    int access_status;
-
-    access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
-    if (access_status == DECLINED && *balancer == NULL) {
-        *worker = ap_proxy_get_worker(r->pool, conf, *url);
-        if (*worker) {
-            ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                          "proxy: %s: found worker %s for %s",
-                          (*worker)->scheme, (*worker)->name, *url);
-
-            *balancer = NULL;
-            access_status = OK;
-        }
-        else if (r->proxyreq == PROXYREQ_PROXY) {
-            if (conf->forward) {
-                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                              "proxy: *: found forward proxy worker for %s",
-                              *url);
-                *balancer = NULL;
-                *worker = conf->forward;
-                access_status = OK;
-                /*
-                 * The forward worker does not keep connections alive, so
-                 * ensure that mod_proxy_http does the correct thing
-                 * regarding the Connection header in the request.
-                 */
-                apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
-            }
-        }
-        else if (r->proxyreq == PROXYREQ_REVERSE) {
-            if (conf->reverse) {
-                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                              "proxy: *: found reverse proxy worker for %s",
-                               *url);
-                *balancer = NULL;
-                *worker = conf->reverse;
-                access_status = OK;
-                /*
-                 * The reverse worker does not keep connections alive, so
-                 * ensure that mod_proxy_http does the correct thing
-                 * regarding the Connection header in the request.
-                 */
-                apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
-            }
-        }
-    }
-    else if (access_status == DECLINED && *balancer != NULL) {
-        /* All the workers are busy */
-        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
-          "proxy: all workers are busy.  Unable to serve %s",
-          *url);
-        access_status = HTTP_SERVICE_UNAVAILABLE;
-    }
-    return access_status;
-}
-
-PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
-                                         proxy_balancer *balancer,
-                                         request_rec *r,
-                                         proxy_server_conf *conf)
-{
-    int access_status = OK;
-    if (balancer) {
-        access_status = proxy_run_post_request(worker, balancer, r, conf);
-        if (access_status == DECLINED) {
-            access_status = OK; /* no post_request handler available */
-            /* TODO: recycle direct worker */
-        }
-    }
-
-    return access_status;
-}
-
-/* DEPRECATED */
-PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
-                                               const char *proxy_function,
-                                               apr_sockaddr_t *backend_addr,
-                                               const char *backend_name,
-                                               proxy_server_conf *conf,
-                                               request_rec *r)
-{
-    apr_status_t rv;
-    int connected = 0;
-    int loglevel;
-
-    while (backend_addr && !connected) {
-        if ((rv = apr_socket_create(newsock, backend_addr->family,
-                                    SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
-            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
-            ap_log_rerror(APLOG_MARK, loglevel, rv, r,
-                          "proxy: %s: error creating fam %d socket for target %s",
-                          proxy_function,
-                          backend_addr->family,
-                          backend_name);
-            /*
-             * this could be an IPv6 address from the DNS but the
-             * local machine won't give us an IPv6 socket; hopefully the
-             * DNS returned an additional address to try
-             */
-            backend_addr = backend_addr->next;
-            continue;
-        }
-
-        if (conf->recv_buffer_size > 0 &&
-            (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
-                                     conf->recv_buffer_size))) {
-            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
-                          "apr_socket_opt_set(SO_RCVBUF): Failed to set "
-                          "ProxyReceiveBufferSize, using default");
-        }
-
-        rv = apr_socket_opt_set(*newsock, APR_TCP_NODELAY, 1);
-        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
-             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
-                           "apr_socket_opt_set(APR_TCP_NODELAY): "
-                           "Failed to set");
-        }
-
-        /* Set a timeout on the socket */
-        if (conf->timeout_set) {
-            apr_socket_timeout_set(*newsock, conf->timeout);
-        }
-        else {
-             apr_socket_timeout_set(*newsock, r->server->timeout);
-        }
-
-        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
-                      "proxy: %s: fam %d socket created to connect to %s",
-                      proxy_function, backend_addr->family, backend_name);
-
-        if (conf->source_address) {
-            rv = apr_socket_bind(*newsock, conf->source_address);
-            if (rv != APR_SUCCESS) {
-                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
-                    "proxy: %s: failed to bind socket to local address",
-                    proxy_function);
-            }
-        }
-
-        /* make the connection out of the socket */
-        rv = apr_socket_connect(*newsock, backend_addr);
-
-        /* if an error occurred, loop round and try again */
-        if (rv != APR_SUCCESS) {
-            apr_socket_close(*newsock);
-            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
-            ap_log_rerror(APLOG_MARK, loglevel, rv, r,
-                          "proxy: %s: attempt to connect to %pI (%s) failed",
-                          proxy_function,
-                          backend_addr,
-                          backend_name);
-            backend_addr = backend_addr->next;
-            continue;
-        }
-        connected = 1;
-    }
-    return connected ? 0 : 1;
+     * This pool is used for connection recycling.
+     * Once the worker is added it is never removed but
+     * it can be disabled.
+     */
+    apr_pool_create(&pool, p);
+    apr_pool_tag(pool, "proxy_worker_cp");
+    /*
+     * Alloc from the same pool as worker.
+     * proxy_conn_pool is permanently attached to the worker.
+     */
+    cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
+    cp->pool = pool;
+    worker->cp = cp;
 }
 
 static apr_status_t connection_cleanup(void *theconn)
@@ -1734,7 +1434,6 @@ static apr_status_t connection_cleanup(v
         conn->r = NULL;
     }
 
-#if APR_HAS_THREADS
     /* Sanity check: Did we already return the pooled connection? */
     if (conn->inreslist) {
         ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool,
@@ -1743,10 +1442,9 @@ static apr_status_t connection_cleanup(v
                       worker->name);
         return APR_SUCCESS;
     }
-#endif
 
     /* determine if the connection need to be closed */
-    if (conn->close || !worker->is_address_reusable || worker->disablereuse) {
+    if (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse) {
         apr_pool_t *p = conn->pool;
         apr_pool_clear(p);
         conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
@@ -1755,13 +1453,12 @@ static apr_status_t connection_cleanup(v
         apr_pool_create(&(conn->scpool), p);
         apr_pool_tag(conn->scpool, "proxy_conn_scpool");
     }
-#if APR_HAS_THREADS
+
     if (worker->hmax && worker->cp->res) {
         conn->inreslist = 1;
         apr_reslist_release(worker->cp->res, (void *)conn);
     }
     else
-#endif
     {
         worker->cp->conn = conn;
     }
@@ -1842,15 +1539,12 @@ static apr_status_t connection_construct
     conn->pool   = ctx;
     conn->scpool = scpool;
     conn->worker = worker;
-#if APR_HAS_THREADS
     conn->inreslist = 1;
-#endif
     *resource = conn;
 
     return APR_SUCCESS;
 }
 
-#if APR_HAS_THREADS /* only needed when threads are used */
 /* reslist destructor */
 static apr_status_t connection_destructor(void *resource, void *params,
                                           apr_pool_t *pool)
@@ -1864,102 +1558,179 @@ static apr_status_t connection_destructo
 
     return APR_SUCCESS;
 }
-#endif
 
 /*
- * ap_proxy_initialize_worker_share() concerns itself
- * with initializing those parts of worker which
- * are, or could be, shared. Basically worker->s
+ * WORKER related...
  */
-PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
-                                                     proxy_worker *worker,
-                                                     server_rec *s)
-{
-    proxy_worker_shared *score = NULL;
-
-    if (PROXY_WORKER_IS_INITIALIZED(worker)) {
-        /* The worker share is already initialized */
-        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
-                     "proxy: worker %s already initialized",
-                     worker->name);
-        return;
-    }
-    if (!worker->s) {
-        /* Get scoreboard slot */
-        if (ap_scoreboard_image) {
-            score = (proxy_worker_shared *) ap_get_scoreboard_lb(worker->id);
-            if (!score) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
-                      "proxy: ap_get_scoreboard_lb(%d) failed in child %" APR_PID_T_FMT " for worker %s",
-                      worker->id, getpid(), worker->name);
-            }
-            else {
-                 ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
-                      "proxy: grabbed scoreboard slot %d in child %" APR_PID_T_FMT " for worker %s",
-                      worker->id, getpid(), worker->name);
-            }
-        }
-        if (!score) {
-            score = (proxy_worker_shared *) apr_pcalloc(conf->pool, sizeof(proxy_worker_shared));
-            ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
-                  "proxy: initialized plain memory in child %" APR_PID_T_FMT " for worker %s",
-                  getpid(), worker->name);
-        }
-        worker->s = score;
-        /*
-         * recheck to see if we've already been here. Possible
-         * if proxy is using scoreboard to hold shared stats
-         */
-        if (PROXY_WORKER_IS_INITIALIZED(worker)) {
-            /* The worker share is already initialized */
-            ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
-                         "proxy: worker %s already initialized",
-                         worker->name);
-            return;
-        }
+
+PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
+                                                  proxy_server_conf *conf,
+                                                  proxy_balancer *balancer,
+                                                  const char *url)
+{
+    proxy_worker *worker;
+    proxy_worker *max_worker = NULL;
+    int max_match = 0;
+    int url_length;
+    int min_match;
+    int worker_name_length;
+    const char *c;
+    char *url_copy;
+    int i, end;
+    
+    c = ap_strchr_c(url, ':');
+    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+        return NULL;
     }
-    if (worker->route) {
-        strcpy(worker->s->route, worker->route);
+    
+    url_copy = apr_pstrdup(p, url);
+    url_length = strlen(url);
+    
+    /*
+     * We need to find the start of the path, and therefore
+     * the length of the scheme://hostname/ part, so we can
+     * force-lowercase everything up to the start of the path.
+     */
+    c = ap_strchr_c(c+3, '/');
+    if (c) {
+        char *pathstart;
+        pathstart = url_copy + (c - url);
+        *pathstart = '\0';
+        ap_str_tolower(url_copy);
+        min_match = strlen(url_copy);
+        *pathstart = '/';
     }
     else {
-        *worker->s->route = '\0';
+        ap_str_tolower(url_copy);
+        min_match = strlen(url_copy);
     }
-    if (worker->redirect) {
-        strcpy(worker->s->redirect, worker->redirect);
+    
+    if (balancer) {
+        worker = (proxy_worker *)balancer->workers->elts;
+        end = balancer->workers->nelts;
+    } else {
+        worker = (proxy_worker *)conf->workers->elts;
+        end = conf->workers->nelts;
     }
-    else {
-        *worker->s->redirect = '\0';
+    
+    /*
+     * Do a "longest match" on the worker name to find the worker that
+     * best fits the URL, but keep in mind that we must match at least
+     * min_match characters so that scheme://hostname[:port] matches
+     * between worker and url.
+     */
+    for (i = 0; i < end; i++) {
+        if ( ((worker_name_length = strlen(worker->name)) <= url_length)
+            && (worker_name_length >= min_match)
+            && (worker_name_length > max_match)
+            && (strncmp(url_copy, worker->name, worker_name_length) == 0) ) {
+            max_worker = worker;
+            max_match = worker_name_length;
+        }
+        worker++;
     }
+    return max_worker;
+}
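
For illustration, here is a minimal standalone sketch of the longest-match rule
described in the comment above, using hypothetical worker names in place of the
real proxy_worker array; it only demonstrates how min_match and the prefix
comparison interact and is not part of the patch:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical worker names; in httpd these come from worker->name. */
    static const char *workers[] = {
        "http://backend.example.com/",
        "http://backend.example.com/app/",
    };

    /* Pick the worker whose name is the longest prefix of url, requiring
     * at least min_match characters (the scheme://hostname[:port] part)
     * to match, mirroring the loop in ap_proxy_get_worker() above. */
    static const char *pick_worker(const char *url, size_t min_match)
    {
        const char *best = NULL;
        size_t best_len = 0, i;

        for (i = 0; i < sizeof(workers) / sizeof(workers[0]); i++) {
            size_t len = strlen(workers[i]);
            if (len <= strlen(url) && len >= min_match && len > best_len
                && strncmp(url, workers[i], len) == 0) {
                best = workers[i];
                best_len = len;
            }
        }
        return best;
    }

    int main(void)
    {
        const char *url = "http://backend.example.com/app/index.html";
        /* min_match is the length of the lowercased scheme://hostname part */
        printf("%s\n", pick_worker(url, strlen("http://backend.example.com")));
        return 0;
    }

With both workers eligible, the second (longer) name wins, which is the
intended "most specific worker" behaviour.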
 
-    worker->s->status |= (worker->status | PROXY_WORKER_INITIALIZED);
-    worker->s->apr_hash = worker->apr_hash;
-    worker->s->our_hash = worker->our_hash;
+/*
+ * To create a worker from scratch, we first define the
+ * specifics of the worker; this is all local data.
+ * We then allocate space for whatever data needs to be
+ * shared. This allows for dynamic addition during
+ * config and at runtime.
+ */
+PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+                                                   proxy_worker **worker,
+                                                   proxy_balancer *balancer,
+                                                   proxy_server_conf *conf,
+                                                   const char *url)
+{
+    int rv;
+    apr_uri_t uri;
+    proxy_worker_shared *wstatus;
+    
+    rv = apr_uri_parse(p, url, &uri);
+    
+    if (rv != APR_SUCCESS) {
+        return "Unable to parse URL";
+    }
+    if (!uri.hostname || !uri.scheme) {
+        return "URL must be absolute!";
+    }
+    
+    ap_str_tolower(uri.hostname);
+    ap_str_tolower(uri.scheme);
+    /*
+     * Workers can be associated with balancers or stand on
+     * their own; i.e. the generic reverse proxy or a worker
+     * in a simple ProxyPass statement, e.g.:
+     *
+     *      ProxyPass / http://www.example.com
+     *
+     * in which case the worker goes in the conf slot.
+     */
+    if (balancer)
+        *worker = apr_array_push(balancer->workers);
+    else
+        *worker = apr_array_push(conf->workers);
+    memset(*worker, 0, sizeof(proxy_worker));
+    /* Right here we just want to tuck away the worker info.
+     * If called during config, we don't have shm set up yet,
+     * so just note the info for later. */
+    wstatus = malloc(sizeof(proxy_worker_shared));  /* freed in ap_proxy_create_worker */
+    memset(wstatus, 0, sizeof(proxy_worker_shared));
+    
+    (*worker)->cp = NULL;
+    (*worker)->mutex = NULL;
+    (*worker)->balancer = balancer;
+    
+    PROXY_STRNCPY(wstatus->name, apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD));
+    PROXY_STRNCPY(wstatus->scheme, uri.scheme);
+    PROXY_STRNCPY(wstatus->hostname, uri.hostname);
+    wstatus->port = uri.port;
+    wstatus->flush_packets = flush_off;
+    wstatus->flush_wait = PROXY_FLUSH_WAIT;
+    wstatus->smax = -1;
+    wstatus->hash = (*worker)->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
+    
+    (*worker)->s = wstatus;
+    
+    return NULL;
+}
 
+/*
+ * Create an already-defined worker: copy its definition into the
+ * shared memory slot and free the temporary storage
+ */
+PROXY_DECLARE(void) ap_proxy_create_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+{
+    memcpy(shm, worker->s, sizeof(proxy_worker_shared));
+    free(worker->s); /* was malloced in ap_proxy_define_worker */
+    worker->s = shm;
+    worker->s->index = i;
 }
 
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
 {
     apr_status_t rv;
-
-#if APR_HAS_THREADS
     int mpm_threads;
-#endif
 
-    if (worker->status & PROXY_WORKER_INITIALIZED) {
+    if (worker->s->status & PROXY_WORKER_INITIALIZED) {
         /* The worker is already initialized */
         return APR_SUCCESS;
     }
 
     /* Set default parameters */
-    if (!worker->retry_set) {
-        worker->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
+    if (!worker->s->retry_set) {
+        worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
     }
     /* By default address is reusable unless DisableReuse is set */
-    if (worker->disablereuse) {
-        worker->is_address_reusable = 0;
+    if (worker->s->disablereuse) {
+        worker->s->is_address_reusable = 0;
     }
     else {
-        worker->is_address_reusable = 1;
+        worker->s->is_address_reusable = 1;
     }
 
     if (worker->cp == NULL)
@@ -1970,7 +1741,6 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
         return APR_EGENERAL;
     } 
 
-#if APR_HAS_THREADS
     if (worker->mutex == NULL) {
         rv = apr_thread_mutex_create(&(worker->mutex), APR_THREAD_MUTEX_DEFAULT, p);
         if (rv != APR_SUCCESS) {
@@ -1983,20 +1753,20 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
     ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
     if (mpm_threads > 1) {
         /* Set hard max to no more then mpm_threads */
-        if (worker->hmax == 0 || worker->hmax > mpm_threads) {
-            worker->hmax = mpm_threads;
+        if (worker->s->hmax == 0 || worker->s->hmax > mpm_threads) {
+            worker->s->hmax = mpm_threads;
         }
-        if (worker->smax == -1 || worker->smax > worker->hmax) {
-            worker->smax = worker->hmax;
+        if (worker->s->smax == -1 || worker->s->smax > worker->s->hmax) {
+            worker->s->smax = worker->s->hmax;
         }
         /* Set min to be lower then smax */
-        if (worker->min > worker->smax) {
-            worker->min = worker->smax;
+        if (worker->s->min > worker->s->smax) {
+            worker->s->min = worker->s->smax;
         }
     }
     else {
         /* This will supress the apr_reslist creation */
-        worker->min = worker->smax = worker->hmax = 0;
+        worker->s->min = worker->s->smax = worker->s->hmax = 0;
     }
     if (worker->hmax) {
         rv = apr_reslist_create(&(worker->cp->res),
@@ -2010,19 +1780,18 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
                                   apr_pool_cleanup_null);
 
         ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
-            "proxy: initialized worker %d in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
-             worker->id, getpid(), worker->hostname, worker->min,
-             worker->hmax, worker->smax);
+            "proxy: initialized worker in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
+             getpid(), worker->s->hostname, worker->s->min,
+             worker->s->hmax, worker->s->smax);
 
 #if (APR_MAJOR_VERSION > 0)
         /* Set the acquire timeout */
-        if (rv == APR_SUCCESS && worker->acquire_set) {
-            apr_reslist_timeout_set(worker->cp->res, worker->acquire);
+        if (rv == APR_SUCCESS && worker->s->acquire_set) {
+            apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
         }
 #endif
     }
     else
-#endif
     {
         void *conn;
 
@@ -2030,11 +1799,11 @@ PROXY_DECLARE(apr_status_t) ap_proxy_ini
         worker->cp->conn = conn;
 
         ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
-             "proxy: initialized single connection worker %d in child %" APR_PID_T_FMT " for (%s)",
-             worker->id, getpid(), worker->hostname);
+             "proxy: initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
+             getpid(), worker->s->hostname);
     }
     if (rv == APR_SUCCESS) {
-        worker->status |= (PROXY_WORKER_INITIALIZED);
+        worker->s->status |= (PROXY_WORKER_INITIALIZED);
     }
     return rv;
 }
@@ -2064,6 +1833,170 @@ PROXY_DECLARE(int) ap_proxy_retry_worker
     }
 }
 
+PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
+                                        proxy_balancer **balancer,
+                                        request_rec *r,
+                                        proxy_server_conf *conf, char **url)
+{
+    int access_status;
+    
+    access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
+    if (access_status == DECLINED && *balancer == NULL) {
+        *worker = ap_proxy_get_worker(r->pool, conf, NULL, *url);
+        if (*worker) {
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                          "proxy: %s: found worker %s for %s",
+                          (*worker)->scheme, (*worker)->name, *url);
+            
+            *balancer = NULL;
+            access_status = OK;
+        }
+        else if (r->proxyreq == PROXYREQ_PROXY) {
+            if (conf->forward) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                              "proxy: *: found forward proxy worker for %s",
+                              *url);
+                *balancer = NULL;
+                *worker = conf->forward;
+                access_status = OK;
+                /*
+                 * The forward worker does not keep connections alive, so
+                 * ensure that mod_proxy_http does the correct thing
+                 * regarding the Connection header in the request.
+                 */
+                apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
+            }
+        }
+        else if (r->proxyreq == PROXYREQ_REVERSE) {
+            if (conf->reverse) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                              "proxy: *: found reverse proxy worker for %s",
+                              *url);
+                *balancer = NULL;
+                *worker = conf->reverse;
+                access_status = OK;
+                /*
+                 * The reverse worker does not keep connections alive, so
+                 * ensure that mod_proxy_http does the correct thing
+                 * regarding the Connection header in the request.
+                 */
+                apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
+            }
+        }
+    }
+    else if (access_status == DECLINED && *balancer != NULL) {
+        /* All the workers are busy */
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+                      "proxy: all workers are busy.  Unable to serve %s",
+                      *url);
+        access_status = HTTP_SERVICE_UNAVAILABLE;
+    }
+    return access_status;
+}
+
+PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
+                                         proxy_balancer *balancer,
+                                         request_rec *r,
+                                         proxy_server_conf *conf)
+{
+    int access_status = OK;
+    if (balancer) {
+        access_status = proxy_run_post_request(worker, balancer, r, conf);
+        if (access_status == DECLINED) {
+            access_status = OK; /* no post_request handler available */
+            /* TODO: recycle direct worker */
+        }
+    }
+    
+    return access_status;
+}
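
For context, a minimal sketch of how a caller is expected to bracket its work
with these two entry points (in practice this is mod_proxy's own request
handler). The fragment assumes the trunk headers as modified by this commit,
run_scheme_handler() is a hypothetical placeholder for the scheme handler
dispatch, and error handling is elided:

    #include "httpd.h"
    #include "mod_proxy.h"

    static int handle_proxy_request(request_rec *r, proxy_server_conf *conf,
                                    char *url)
    {
        proxy_worker *worker = NULL;
        proxy_balancer *balancer = NULL;
        int status;

        /* Map the URL onto a worker (and possibly a balancer). */
        status = ap_proxy_pre_request(&worker, &balancer, r, conf, &url);
        if (status != OK)
            return status;

        /* Hand the request to the scheme handler for this worker. */
        status = run_scheme_handler(r, worker, url);   /* hypothetical */

        /* Give the balancer a chance to update its bookkeeping. */
        ap_proxy_post_request(worker, balancer, r, conf);
        return status;
    }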
+
+/* DEPRECATED */
+PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
+                                               const char *proxy_function,
+                                               apr_sockaddr_t *backend_addr,
+                                               const char *backend_name,
+                                               proxy_server_conf *conf,
+                                               request_rec *r)
+{
+    apr_status_t rv;
+    int connected = 0;
+    int loglevel;
+    
+    while (backend_addr && !connected) {
+        if ((rv = apr_socket_create(newsock, backend_addr->family,
+                                    SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+            ap_log_rerror(APLOG_MARK, loglevel, rv, r,
+                          "proxy: %s: error creating fam %d socket for target %s",
+                          proxy_function,
+                          backend_addr->family,
+                          backend_name);
+            /*
+             * this could be an IPv6 address from the DNS but the
+             * local machine won't give us an IPv6 socket; hopefully the
+             * DNS returned an additional address to try
+             */
+            backend_addr = backend_addr->next;
+            continue;
+        }
+        
+        if (conf->recv_buffer_size > 0 &&
+            (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
+                                     conf->recv_buffer_size))) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+                          "apr_socket_opt_set(SO_RCVBUF): Failed to set "
+                          "ProxyReceiveBufferSize, using default");
+        }
+        
+        rv = apr_socket_opt_set(*newsock, APR_TCP_NODELAY, 1);
+        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+                          "apr_socket_opt_set(APR_TCP_NODELAY): "
+                          "Failed to set");
+        }
+        
+        /* Set a timeout on the socket */
+        if (conf->timeout_set) {
+            apr_socket_timeout_set(*newsock, conf->timeout);
+        }
+        else {
+            apr_socket_timeout_set(*newsock, r->server->timeout);
+        }
+        
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+                      "proxy: %s: fam %d socket created to connect to %s",
+                      proxy_function, backend_addr->family, backend_name);
+        
+        if (conf->source_address) {
+            rv = apr_socket_bind(*newsock, conf->source_address);
+            if (rv != APR_SUCCESS) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+                              "proxy: %s: failed to bind socket to local address",
+                              proxy_function);
+            }
+        }
+        
+        /* make the connection out of the socket */
+        rv = apr_socket_connect(*newsock, backend_addr);
+        
+        /* if an error occurred, loop round and try again */
+        if (rv != APR_SUCCESS) {
+            apr_socket_close(*newsock);
+            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+            ap_log_rerror(APLOG_MARK, loglevel, rv, r,
+                          "proxy: %s: attempt to connect to %pI (%s) failed",
+                          proxy_function,
+                          backend_addr,
+                          backend_name);
+            backend_addr = backend_addr->next;
+            continue;
+        }
+        connected = 1;
+    }
+    return connected ? 0 : 1;
+}
+
 PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
                                                proxy_conn_rec **conn,
                                                proxy_worker *worker,
@@ -2082,12 +2015,11 @@ PROXY_DECLARE(int) ap_proxy_acquire_conn
             return HTTP_SERVICE_UNAVAILABLE;
         }
     }
-#if APR_HAS_THREADS
+
     if (worker->hmax && worker->cp->res) {
         rv = apr_reslist_acquire(worker->cp->res, (void **)conn);
     }
     else
-#endif
     {
         /* create the new connection if the previous was destroyed */
         if (!worker->cp->conn) {
@@ -2112,9 +2044,7 @@ PROXY_DECLARE(int) ap_proxy_acquire_conn
 
     (*conn)->worker = worker;
     (*conn)->close  = 0;
-#if APR_HAS_THREADS
     (*conn)->inreslist = 0;
-#endif
 
     return OK;
 }

Modified: httpd/httpd/trunk/server/scoreboard.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/server/scoreboard.c?rev=1058622&r1=1058621&r2=1058622&view=diff
==============================================================================
--- httpd/httpd/trunk/server/scoreboard.c (original)
+++ httpd/httpd/trunk/server/scoreboard.c Thu Jan 13 15:58:50 2011
@@ -97,10 +97,6 @@ AP_IMPLEMENT_HOOK_RUN_ALL(int,pre_mpm,
                           (apr_pool_t *p, ap_scoreboard_e sb_type),
                           (p, sb_type),OK,DECLINED)
 
-static APR_OPTIONAL_FN_TYPE(ap_proxy_lb_workers)
-                                *pfn_proxy_lb_workers;
-static APR_OPTIONAL_FN_TYPE(ap_proxy_lb_worker_size)
-                                *pfn_proxy_lb_worker_size;
 static APR_OPTIONAL_FN_TYPE(ap_logio_get_last_bytes)
                                 *pfn_ap_logio_get_last_bytes;
 
@@ -109,7 +105,7 @@ struct ap_sb_handle_t {
     int thread_num;
 };
 
-static int server_limit, thread_limit, lb_limit, lb_size;
+static int server_limit, thread_limit;
 static apr_size_t scoreboard_size;
 
 /*
@@ -135,25 +131,9 @@ AP_DECLARE(int) ap_calc_scoreboard_size(
     ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit);
     ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &server_limit);
 
-    if (!pfn_proxy_lb_workers)
-        pfn_proxy_lb_workers = APR_RETRIEVE_OPTIONAL_FN(ap_proxy_lb_workers);
-    if (pfn_proxy_lb_workers)
-        lb_limit = pfn_proxy_lb_workers();
-    else
-        lb_limit = 0;
-
-    if (!pfn_proxy_lb_worker_size)
-        pfn_proxy_lb_worker_size = APR_RETRIEVE_OPTIONAL_FN(ap_proxy_lb_worker_size);
-    if (pfn_proxy_lb_worker_size)
-        lb_size = pfn_proxy_lb_worker_size();
-    else
-        lb_size = sizeof(lb_score);
-
     scoreboard_size = sizeof(global_score);
     scoreboard_size += sizeof(process_score) * server_limit;
     scoreboard_size += sizeof(worker_score) * server_limit * thread_limit;
-    if (lb_limit && lb_size)
-        scoreboard_size += lb_size * lb_limit;
 
     pfn_ap_logio_get_last_bytes = APR_RETRIEVE_OPTIONAL_FN(ap_logio_get_last_bytes);
 
@@ -179,14 +159,9 @@ void ap_init_scoreboard(void *shared_sco
         ap_scoreboard_image->servers[i] = (worker_score *)more_storage;
         more_storage += thread_limit * sizeof(worker_score);
     }
-    if (lb_limit && lb_size) {
-        ap_scoreboard_image->balancers = (void *)more_storage;
-        more_storage += lb_limit * lb_size;
-    }
     ap_assert(more_storage == (char*)shared_score + scoreboard_size);
     ap_scoreboard_image->global->server_limit = server_limit;
     ap_scoreboard_image->global->thread_limit = thread_limit;
-    ap_scoreboard_image->global->lb_limit     = lb_limit;
 }
 
 /**
@@ -330,11 +305,6 @@ int ap_create_scoreboard(apr_pool_t *p, 
             memset(ap_scoreboard_image->servers[i], 0,
                    sizeof(worker_score) * thread_limit);
         }
-        /* Clean up the lb workers data */
-        if (lb_limit && lb_size) {
-            memset(ap_scoreboard_image->balancers, 0,
-                   lb_size * lb_limit);
-        }
         return OK;
     }
 
@@ -622,12 +592,3 @@ AP_DECLARE(global_score *) ap_get_scoreb
 {
     return ap_scoreboard_image->global;
 }
-
-AP_DECLARE(lb_score *) ap_get_scoreboard_lb(int lb_num)
-{
-    if ( (lb_num < 0) || (lb_limit < lb_num) || (lb_size==0) ) {
-        return(NULL); /* Out of range */
-    }
-    return (lb_score *) ( ((char *) ap_scoreboard_image->balancers) +
-                          (lb_num*lb_size) );
-}