You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@tomcat.apache.org by rj...@apache.org on 2008/01/13 19:30:56 UTC
svn commit: r611618 - in /tomcat/connectors/trunk/jk/native/common:
jk_lb_worker.c jk_shm.h jk_status.c
Author: rjung
Date: Sun Jan 13 10:30:55 2008
New Revision: 611618
URL: http://svn.apache.org/viewvc?rev=611618&view=rev
Log:
Now use the more generic ajp shm status info
and remove it from lb sub worker:
Do it for:
- busy
- max_busy
- readed
- transferred
- elected
- reply_timeouts
- client_errors
Modified:
tomcat/connectors/trunk/jk/native/common/jk_lb_worker.c
tomcat/connectors/trunk/jk/native/common/jk_shm.h
tomcat/connectors/trunk/jk/native/common/jk_status.c
Modified: tomcat/connectors/trunk/jk/native/common/jk_lb_worker.c
URL: http://svn.apache.org/viewvc/tomcat/connectors/trunk/jk/native/common/jk_lb_worker.c?rev=611618&r1=611617&r2=611618&view=diff
==============================================================================
--- tomcat/connectors/trunk/jk/native/common/jk_lb_worker.c (original)
+++ tomcat/connectors/trunk/jk/native/common/jk_lb_worker.c Sun Jan 13 10:30:55 2008
@@ -486,12 +486,14 @@
int non_error = 0;
int elapsed;
lb_sub_worker_t *w = NULL;
+ ajp_worker_t *aw = NULL;
JK_TRACE_ENTER(l);
if (p->sequence != p->s->h.sequence)
jk_lb_pull(p, l);
for (i = 0; i < p->num_of_workers; i++) {
w = &p->lb_workers[i];
+ aw = (ajp_worker_t *)w->worker->worker_private;
if (w->s->state == JK_LB_STATE_ERROR) {
elapsed = (int)difftime(now, w->s->error_time);
if (elapsed <= p->recover_wait_time) {
@@ -507,7 +509,7 @@
w->name);
if (p->lbmethod != JK_LB_METHOD_BUSYNESS)
w->s->lb_value = curmax;
- w->s->reply_timeouts = 0;
+ aw->s->reply_timeouts = 0;
w->s->state = JK_LB_STATE_RECOVER;
non_error++;
}
@@ -515,10 +517,10 @@
else {
non_error++;
if (w->s->state == JK_LB_STATE_OK &&
- w->s->elected == w->s->elected_snapshot)
+ aw->s->used == w->s->elected_snapshot)
w->s->state = JK_LB_STATE_IDLE;
}
- w->s->elected_snapshot = w->s->elected;
+ w->s->elected_snapshot = aw->s->used;
}
JK_TRACE_EXIT(l);
@@ -563,11 +565,13 @@
JK_TRACE_ENTER(l);
if (p->lbmethod != JK_LB_METHOD_BUSYNESS) {
for (i = 0; i < p->num_of_workers; i++) {
- p->lb_workers[i].s->lb_value >>= exponent;
- if (p->lb_workers[i].s->lb_value > curmax) {
- curmax = p->lb_workers[i].s->lb_value;
+ lb_sub_worker_t *w = &p->lb_workers[i];
+ ajp_worker_t *aw = (ajp_worker_t *)w->worker->worker_private;
+ w->s->lb_value >>= exponent;
+ if (w->s->lb_value > curmax) {
+ curmax = w->s->lb_value;
}
- p->lb_workers[i].s->reply_timeouts >>= exponent;
+ aw->s->reply_timeouts >>= exponent;
}
}
JK_TRACE_EXIT(l);
@@ -887,6 +891,7 @@
lb_sub_worker_t *w,
jk_logger_t *l)
{
+ ajp_worker_t *aw = (ajp_worker_t *)w->worker->worker_private;
const char **log_values = jk_pool_alloc(s->pool, sizeof(char *) * JK_LB_NOTES_COUNT);
char *buf = jk_pool_alloc(s->pool, sizeof(char *) * JK_LB_NOTES_COUNT * JK_LB_UINT64_STR_SZ);
if (log_values && buf) {
@@ -896,15 +901,15 @@
/* JK_NOTE_LB_FIRST/LAST_VALUE */
log_values[1] = buf;
buf += JK_LB_UINT64_STR_SZ;
- snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, w->s->elected);
+ snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, aw->s->used);
/* JK_NOTE_LB_FIRST/LAST_ACCESSED */
log_values[2] = buf;
buf += JK_LB_UINT64_STR_SZ;
- snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, w->s->readed);
+ snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, aw->s->readed);
/* JK_NOTE_LB_FIRST/LAST_READ */
log_values[3] = buf;
buf += JK_LB_UINT64_STR_SZ;
- snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, w->s->transferred);
+ snprintf(buf, JK_LB_UINT64_STR_SZ, "%" JK_UINT64_T_FMT, aw->s->transferred);
/* JK_NOTE_LB_FIRST/LAST_TRANSFERRED */
log_values[4] = buf;
buf += JK_LB_UINT64_STR_SZ;
@@ -912,7 +917,7 @@
/* JK_NOTE_LB_FIRST/LAST_ERRORS */
log_values[5] = buf;
buf += JK_LB_UINT64_STR_SZ;
- snprintf(buf, JK_LB_UINT64_STR_SZ, "%d", w->s->busy);
+ snprintf(buf, JK_LB_UINT64_STR_SZ, "%d", aw->s->busy);
/* JK_NOTE_LB_FIRST/LAST_BUSY */
log_values[6] = buf;
/* JK_NOTE_LB_FIRST/LAST_ACTIVATION */
@@ -1000,6 +1005,7 @@
if (rec) {
int r;
int is_service_error = JK_HTTP_OK;
+ ajp_worker_t *aw = (ajp_worker_t *)rec->worker->worker_private;
jk_endpoint_t *end = NULL;
int retry = 0;
int retry_wait = JK_LB_MIN_RETRY_WAIT;
@@ -1067,14 +1073,10 @@
if (p->worker->lblock == JK_LB_LOCK_PESSIMISTIC)
jk_shm_lock();
- rec->s->elected++;
/* Increment the number of workers serving request */
p->worker->s->busy++;
if (p->worker->s->busy > p->worker->s->max_busy)
p->worker->s->max_busy = p->worker->s->busy;
- rec->s->busy++;
- if (rec->s->busy > rec->s->max_busy)
- rec->s->max_busy = rec->s->busy;
if ( (p->worker->lbmethod == JK_LB_METHOD_REQUESTS) ||
(p->worker->lbmethod == JK_LB_METHOD_BUSYNESS) ||
(p->worker->lbmethod == JK_LB_METHOD_SESSIONS &&
@@ -1094,8 +1096,6 @@
jk_shm_lock();
/* Update partial reads and writes if any */
- rec->s->readed += rd;
- rec->s->transferred += wr;
if (p->worker->lbmethod == JK_LB_METHOD_TRAFFIC) {
rec->s->lb_value += (rd+wr)*rec->lb_mult;
}
@@ -1129,8 +1129,6 @@
* Check if the busy was reset to zero by graceful
* restart of the server.
*/
- if (rec->s->busy)
- rec->s->busy--;
if (p->worker->s->busy)
p->worker->s->busy--;
if (service_stat == JK_TRUE) {
@@ -1144,7 +1142,6 @@
* Client error !!!
* Since this is bad request do not fail over.
*/
- rec->s->client_errors++;
rec->s->state = JK_LB_STATE_OK;
rec->s->error_time = 0;
rc = JK_CLIENT_ERROR;
@@ -1182,8 +1179,7 @@
rc = JK_FALSE;
}
else if (service_stat == JK_REPLY_TIMEOUT) {
- rec->s->reply_timeouts++;
- if (rec->s->reply_timeouts > (unsigned)p->worker->max_reply_timeouts) {
+ if (aw->s->reply_timeouts > (unsigned)p->worker->max_reply_timeouts) {
/*
* Service failed - too many reply timeouts
* Take this node out of service.
Modified: tomcat/connectors/trunk/jk/native/common/jk_shm.h
URL: http://svn.apache.org/viewvc/tomcat/connectors/trunk/jk/native/common/jk_shm.h?rev=611618&r1=611617&r2=611618&view=diff
==============================================================================
--- tomcat/connectors/trunk/jk/native/common/jk_shm.h (original)
+++ tomcat/connectors/trunk/jk/native/common/jk_shm.h Sun Jan 13 10:30:55 2008
@@ -110,10 +110,6 @@
{
jk_shm_worker_header_t h;
- /* Number of currently busy channels */
- volatile int busy;
- /* Maximum number of busy channels */
- volatile int max_busy;
/* route */
char route[JK_SHM_STR_SIZ+1];
/* worker domain */
@@ -134,20 +130,10 @@
volatile jk_uint64_t lb_value;
/* Statistical data */
volatile time_t error_time;
- /* Number of bytes read from remote */
- volatile jk_uint64_t readed;
- /* Number of bytes transferred to remote */
- volatile jk_uint64_t transferred;
- /* Number of times the worker was elected */
- volatile jk_uint64_t elected;
/* Number of times the worker was elected - snapshot during maintenance */
volatile jk_uint64_t elected_snapshot;
/* Number of non 200 responses */
volatile jk_uint32_t errors;
- /* Decayed number of reply_timeout errors */
- volatile jk_uint32_t reply_timeouts;
- /* Number of client errors */
- volatile jk_uint32_t client_errors;
};
typedef struct jk_shm_lb_sub_worker jk_shm_lb_sub_worker_t;
Modified: tomcat/connectors/trunk/jk/native/common/jk_status.c
URL: http://svn.apache.org/viewvc/tomcat/connectors/trunk/jk/native/common/jk_status.c?rev=611618&r1=611617&r2=611618&view=diff
==============================================================================
--- tomcat/connectors/trunk/jk/native/common/jk_status.c (original)
+++ tomcat/connectors/trunk/jk/native/common/jk_status.c Sun Jan 13 10:30:55 2008
@@ -1645,7 +1645,7 @@
for (j = 0; j < lb->num_of_workers; j++) {
lb_sub_worker_t *wr = &(lb->lb_workers[j]);
- ajp_worker_t *a = (ajp_worker_t *)wr->worker->worker_private;
+ ajp_worker_t *aw = (ajp_worker_t *)wr->worker->worker_private;
int rs_min = 0;
int rs_max = 0;
if (wr->s->state == JK_LB_STATE_ERROR) {
@@ -1680,22 +1680,22 @@
jk_printf(s, JK_STATUS_SHOW_MEMBER_ROW,
wr->name,
status_worker_type(wr->worker->type),
- a->host, a->port,
- jk_dump_hinfo(&a->worker_inet_addr, buf),
+ aw->host, aw->port,
+ jk_dump_hinfo(&aw->worker_inet_addr, buf),
jk_lb_get_activation(wr, l),
jk_lb_get_state(wr, l),
wr->distance,
wr->lb_factor,
wr->lb_mult,
wr->s->lb_value,
- wr->s->elected,
+ aw->s->used,
wr->s->errors,
- wr->s->client_errors,
- wr->s->reply_timeouts,
- status_strfsize(wr->s->transferred, buf_wr),
- status_strfsize(wr->s->readed, buf_rd),
- wr->s->busy,
- wr->s->max_busy,
+ aw->s->client_errors,
+ aw->s->reply_timeouts,
+ status_strfsize(aw->s->transferred, buf_wr),
+ status_strfsize(aw->s->readed, buf_rd),
+ aw->s->busy,
+ aw->s->max_busy,
wr->route,
wr->redirect ? (*wr->redirect ? wr->redirect : " ") : " ",
wr->domain ? (*wr->domain ? wr->domain : " ") : " ",
@@ -1708,9 +1708,9 @@
jk_print_xml_start_elt(s, w, 6, 0, "member");
jk_print_xml_att_string(s, 8, "name", wr->name);
jk_print_xml_att_string(s, 8, "type", status_worker_type(wr->worker->type));
- jk_print_xml_att_string(s, 8, "host", a->host);
- jk_print_xml_att_int(s, 8, "port", a->port);
- jk_print_xml_att_string(s, 8, "address", jk_dump_hinfo(&a->worker_inet_addr, buf));
+ jk_print_xml_att_string(s, 8, "host", aw->host);
+ jk_print_xml_att_int(s, 8, "port", aw->port);
+ jk_print_xml_att_string(s, 8, "address", jk_dump_hinfo(&aw->worker_inet_addr, buf));
jk_print_xml_att_string(s, 8, "activation", jk_lb_get_activation(wr, l));
jk_print_xml_att_int(s, 8, "lbfactor", wr->lb_factor);
jk_print_xml_att_string(s, 8, "route", wr->route);
@@ -1720,14 +1720,14 @@
jk_print_xml_att_string(s, 8, "state", jk_lb_get_state(wr, l));
jk_print_xml_att_uint64(s, 8, "lbmult", wr->lb_mult);
jk_print_xml_att_uint64(s, 8, "lbvalue", wr->s->lb_value);
- jk_print_xml_att_uint64(s, 8, "elected", wr->s->elected);
+ jk_print_xml_att_uint64(s, 8, "elected", aw->s->used);
jk_print_xml_att_uint32(s, 8, "errors", wr->s->errors);
- jk_print_xml_att_uint32(s, 8, "client_errors", wr->s->client_errors);
- jk_print_xml_att_uint32(s, 8, "reply_timeouts", wr->s->reply_timeouts);
- jk_print_xml_att_uint64(s, 8, "transferred", wr->s->transferred);
- jk_print_xml_att_uint64(s, 8, "read", wr->s->readed);
- jk_print_xml_att_int(s, 8, "busy", wr->s->busy);
- jk_print_xml_att_int(s, 8, "max_busy", wr->s->max_busy);
+ jk_print_xml_att_uint32(s, 8, "client_errors", aw->s->client_errors);
+ jk_print_xml_att_uint32(s, 8, "reply_timeouts", aw->s->reply_timeouts);
+ jk_print_xml_att_uint64(s, 8, "transferred", aw->s->transferred);
+ jk_print_xml_att_uint64(s, 8, "read", aw->s->readed);
+ jk_print_xml_att_int(s, 8, "busy", aw->s->busy);
+ jk_print_xml_att_int(s, 8, "max_busy", aw->s->max_busy);
jk_print_xml_att_int(s, 8, "time_to_recover_min", rs_min);
jk_print_xml_att_int(s, 8, "time_to_recover_max", rs_max);
/* Terminate the tag */
@@ -1739,9 +1739,9 @@
jk_puts(s, "Member:");
jk_printf(s, " name=%s", wr->name);
jk_printf(s, " type=%s", status_worker_type(wr->worker->type));
- jk_printf(s, " host=%s", a->host);
- jk_printf(s, " port=%d", a->port);
- jk_printf(s, " address=%s", jk_dump_hinfo(&a->worker_inet_addr, buf));
+ jk_printf(s, " host=%s", aw->host);
+ jk_printf(s, " port=%d", aw->port);
+ jk_printf(s, " address=%s", jk_dump_hinfo(&aw->worker_inet_addr, buf));
jk_printf(s, " activation=%s", jk_lb_get_activation(wr, l));
jk_printf(s, " lbfactor=%d", wr->lb_factor);
jk_printf(s, " route=\"%s\"", wr->route ? wr->route : "");
@@ -1751,14 +1751,14 @@
jk_printf(s, " state=%s", jk_lb_get_state(wr, l));
jk_printf(s, " lbmult=%" JK_UINT64_T_FMT, wr->lb_mult);
jk_printf(s, " lbvalue=%" JK_UINT64_T_FMT, wr->s->lb_value);
- jk_printf(s, " elected=%" JK_UINT64_T_FMT, wr->s->elected);
+ jk_printf(s, " elected=%" JK_UINT64_T_FMT, aw->s->used);
jk_printf(s, " errors=%" JK_UINT32_T_FMT, wr->s->errors);
- jk_printf(s, " client_errors=%" JK_UINT32_T_FMT, wr->s->client_errors);
- jk_printf(s, " reply_timeouts=%" JK_UINT32_T_FMT, wr->s->reply_timeouts);
- jk_printf(s, " transferred=%" JK_UINT64_T_FMT, wr->s->transferred);
- jk_printf(s, " read=%" JK_UINT64_T_FMT, wr->s->readed);
- jk_printf(s, " busy=%d", wr->s->busy);
- jk_printf(s, " max_busy=%d", wr->s->max_busy);
+ jk_printf(s, " client_errors=%" JK_UINT32_T_FMT, aw->s->client_errors);
+ jk_printf(s, " reply_timeouts=%" JK_UINT32_T_FMT, aw->s->reply_timeouts);
+ jk_printf(s, " transferred=%" JK_UINT64_T_FMT, aw->s->transferred);
+ jk_printf(s, " read=%" JK_UINT64_T_FMT, aw->s->readed);
+ jk_printf(s, " busy=%d", aw->s->busy);
+ jk_printf(s, " max_busy=%d", aw->s->max_busy);
jk_printf(s, " time_to_recover_min=%d", rs_min);
jk_printf(s, " time_to_recover_max=%d", rs_max);
jk_puts(s, "\n");
@@ -1768,9 +1768,9 @@
jk_print_prop_att_string(s, w, name, "balance_workers", wr->name);
jk_print_prop_att_string(s, w, wr->name, "type", status_worker_type(wr->worker->type));
- jk_print_prop_att_string(s, w, wr->name, "host", a->host);
- jk_print_prop_att_int(s, w, wr->name, "port", a->port);
- jk_print_prop_att_string(s, w, wr->name, "address", jk_dump_hinfo(&a->worker_inet_addr, buf));
+ jk_print_prop_att_string(s, w, wr->name, "host", aw->host);
+ jk_print_prop_att_int(s, w, wr->name, "port", aw->port);
+ jk_print_prop_att_string(s, w, wr->name, "address", jk_dump_hinfo(&aw->worker_inet_addr, buf));
jk_print_prop_att_string(s, w, wr->name, "activation", jk_lb_get_activation(wr, l));
jk_print_prop_att_int(s, w, wr->name, "lbfactor", wr->lb_factor);
jk_print_prop_att_string(s, w, wr->name, "route", wr->route);
@@ -1780,14 +1780,14 @@
jk_print_prop_att_string(s, w, wr->name, "state", jk_lb_get_state(wr, l));
jk_print_prop_att_uint64(s, w, wr->name, "lbmult", wr->lb_mult);
jk_print_prop_att_uint64(s, w, wr->name, "lbvalue", wr->s->lb_value);
- jk_print_prop_att_uint64(s, w, wr->name, "elected", wr->s->elected);
+ jk_print_prop_att_uint64(s, w, wr->name, "elected", aw->s->used);
jk_print_prop_att_uint32(s, w, wr->name, "errors", wr->s->errors);
- jk_print_prop_att_uint32(s, w, wr->name, "client_errors", wr->s->client_errors);
- jk_print_prop_att_uint32(s, w, wr->name, "reply_timeouts", wr->s->reply_timeouts);
- jk_print_prop_att_uint64(s, w, wr->name, "transferred", wr->s->transferred);
- jk_print_prop_att_uint64(s, w, wr->name, "read", wr->s->readed);
- jk_print_prop_att_int(s, w, wr->name, "busy", wr->s->busy);
- jk_print_prop_att_int(s, w, wr->name, "max_busy", wr->s->max_busy);
+ jk_print_prop_att_uint32(s, w, wr->name, "client_errors", aw->s->client_errors);
+ jk_print_prop_att_uint32(s, w, wr->name, "reply_timeouts", aw->s->reply_timeouts);
+ jk_print_prop_att_uint64(s, w, wr->name, "transferred", aw->s->transferred);
+ jk_print_prop_att_uint64(s, w, wr->name, "read", aw->s->readed);
+ jk_print_prop_att_int(s, w, wr->name, "busy", aw->s->busy);
+ jk_print_prop_att_int(s, w, wr->name, "max_busy", aw->s->max_busy);
jk_print_prop_att_int(s, w, wr->name, "time_to_recover_min", rs_min);
jk_print_prop_att_int(s, w, wr->name, "time_to_recover_max", rs_max);
@@ -1892,15 +1892,15 @@
}
else if (mime == JK_STATUS_MIME_XML) {
- jk_print_xml_start_elt(s, w, 0, 0, "ajp");
- jk_print_xml_att_string(s, 2, "name", name);
- jk_print_xml_att_string(s, 2, "type", status_worker_type(aw->worker.type));
- jk_print_xml_att_string(s, 2, "host", aw->host);
- jk_print_xml_att_int(s, 2, "port", aw->port);
- jk_print_xml_att_string(s, 2, "address", jk_dump_hinfo(&aw->worker_inet_addr, buf));
- jk_print_xml_att_int(s, 2, "map_count", map_count);
+ jk_print_xml_start_elt(s, w, 2, 0, "ajp");
+ jk_print_xml_att_string(s, 4, "name", name);
+ jk_print_xml_att_string(s, 4, "type", status_worker_type(aw->worker.type));
+ jk_print_xml_att_string(s, 4, "host", aw->host);
+ jk_print_xml_att_int(s, 4, "port", aw->port);
+ jk_print_xml_att_string(s, 4, "address", jk_dump_hinfo(&aw->worker_inet_addr, buf));
+ jk_print_xml_att_int(s, 4, "map_count", map_count);
/* Terminate the tag */
- jk_print_xml_stop_elt(s, 0, 0);
+ jk_print_xml_stop_elt(s, 1, 0);
}
else if (mime == JK_STATUS_MIME_TXT) {
@@ -1928,10 +1928,6 @@
if (name)
display_maps(s, p, name, l);
- if (mime == JK_STATUS_MIME_XML) {
- jk_print_xml_close_elt(s, w, 0, "ajp");
- }
-
JK_TRACE_EXIT(l);
}
@@ -3048,6 +3044,7 @@
jk_worker_t *jw = NULL;
lb_worker_t *lb = NULL;
lb_sub_worker_t *wr = NULL;
+ ajp_worker_t *aw = NULL;
JK_TRACE_ENTER(l);
fetch_worker_and_sub_worker(p, "resetting", &worker, &sub_worker, l);
@@ -3065,16 +3062,17 @@
lb->s->max_busy = 0;
for (i = 0; i < lb->num_of_workers; i++) {
wr = &(lb->lb_workers[i]);
- wr->s->client_errors = 0;
- wr->s->reply_timeouts = 0;
- wr->s->elected = 0;
+ aw = (ajp_worker_t *)wr->worker->worker_private;
+ aw->s->client_errors = 0;
+ aw->s->reply_timeouts = 0;
+ aw->s->used = 0;
wr->s->elected_snapshot = 0;
wr->s->error_time = 0;
wr->s->errors = 0;
wr->s->lb_value = 0;
- wr->s->max_busy = 0;
- wr->s->readed = 0;
- wr->s->transferred = 0;
+ aw->s->max_busy = 0;
+ aw->s->readed = 0;
+ aw->s->transferred = 0;
wr->s->state = JK_LB_STATE_IDLE;
}
JK_TRACE_EXIT(l);
@@ -3085,16 +3083,17 @@
JK_TRACE_EXIT(l);
return JK_FALSE;
}
- wr->s->client_errors = 0;
- wr->s->reply_timeouts = 0;
- wr->s->elected = 0;
+ aw = (ajp_worker_t *)wr->worker->worker_private;
+ aw->s->client_errors = 0;
+ aw->s->reply_timeouts = 0;
+ aw->s->used = 0;
wr->s->elected_snapshot = 0;
wr->s->error_time = 0;
wr->s->errors = 0;
wr->s->lb_value = 0;
- wr->s->max_busy = 0;
- wr->s->readed = 0;
- wr->s->transferred = 0;
+ aw->s->max_busy = 0;
+ aw->s->readed = 0;
+ aw->s->transferred = 0;
wr->s->state = JK_LB_STATE_IDLE;
JK_TRACE_EXIT(l);
return JK_TRUE;
@@ -3127,6 +3126,7 @@
if (wr->s->state == JK_LB_STATE_ERROR) {
lb_worker_t *lb = NULL;
+ ajp_worker_t *aw = (ajp_worker_t *)wr->worker->worker_private;
/* We need an lb to correct the lb_value */
if (check_valid_lb(s, p, jw, worker, &lb, 0, l) == JK_FALSE) {
@@ -3146,7 +3146,7 @@
wr->s->lb_value = curmax;
}
- wr->s->reply_timeouts = 0;
+ aw->s->reply_timeouts = 0;
wr->s->state = JK_LB_STATE_RECOVER;
jk_log(l, JK_LOG_INFO,
"Status worker '%s' marked worker '%s' sub worker '%s' for recovery",
---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscribe@tomcat.apache.org
For additional commands, e-mail: dev-help@tomcat.apache.org