r5673 - in trunk/varnish-cache: bin/varnishd include

phk at varnish-cache.org phk at varnish-cache.org
Mon Jan 3 10:40:10 CET 2011


Author: phk
Date: 2011-01-03 10:40:09 +0100 (Mon, 03 Jan 2011)
New Revision: 5673

Modified:
   trunk/varnish-cache/bin/varnishd/cache_pool.c
   trunk/varnish-cache/bin/varnishd/heritage.h
   trunk/varnish-cache/bin/varnishd/mgt_pool.c
   trunk/varnish-cache/include/vsc_fields.h
Log:
Another rename to reduce confusion:

When there is no idle worker thread available, an incoming request will
get queued until a thread becomes available.

We called this "overflow" because it overflowed the worker thread pool.

However, "overflow" gives the impression that the request is somehow
lost, which it is not.

"queued" gives a much more precise idea of what goes on.



Modified: trunk/varnish-cache/bin/varnishd/cache_pool.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_pool.c	2011-01-03 09:17:47 UTC (rev 5672)
+++ trunk/varnish-cache/bin/varnishd/cache_pool.c	2011-01-03 09:40:09 UTC (rev 5673)
@@ -70,17 +70,17 @@
 #define WQ_MAGIC		0x606658fa
 	struct lock		mtx;
 	struct workerhead	idle;
-	VTAILQ_HEAD(, workreq)	overflow;
+	VTAILQ_HEAD(, workreq)	queue;
 	unsigned		nthr;
-	unsigned		nqueue;
 	unsigned		lqueue;
+	unsigned		last_lqueue;
 	uintmax_t		ndrop;
-	uintmax_t		noverflow;
+	uintmax_t		nqueue;
 };
 
 static struct wq		**wq;
 static unsigned			nwq;
-static unsigned			ovfl_max;
+static unsigned			queue_max;
 static unsigned			nthr_max;
 
 static pthread_cond_t		herder_cond;
@@ -156,11 +156,11 @@
 	while (1) {
 		CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
 
-		/* Process overflow requests, if any */
-		w->wrq = VTAILQ_FIRST(&qp->overflow);
+		/* Process queued requests, if any */
+		w->wrq = VTAILQ_FIRST(&qp->queue);
 		if (w->wrq != NULL) {
-			VTAILQ_REMOVE(&qp->overflow, w->wrq, list);
-			qp->nqueue--;
+			VTAILQ_REMOVE(&qp->queue, w->wrq, list);
+			qp->lqueue--;
 		} else {
 			if (isnan(w->lastused))
 				w->lastused = TIM_real();
@@ -271,16 +271,16 @@
 		return (0);
 	}
 
-	/* If we have too much in the overflow already, refuse. */
-	if (qp->nqueue > ovfl_max) {
+	/* If we have too much in the queue already, refuse. */
+	if (qp->lqueue > queue_max) {
 		qp->ndrop++;
 		Lck_Unlock(&qp->mtx);
 		return (-1);
 	}
 
-	VTAILQ_INSERT_TAIL(&qp->overflow, wrq, list);
-	qp->noverflow++;
+	VTAILQ_INSERT_TAIL(&qp->queue, wrq, list);
 	qp->nqueue++;
+	qp->lqueue++;
 	Lck_Unlock(&qp->mtx);
 	AZ(pthread_cond_signal(&herder_cond));
 	return (0);
@@ -356,7 +356,7 @@
 		XXXAN(wq[u]);
 		wq[u]->magic = WQ_MAGIC;
 		Lck_New(&wq[u]->mtx, lck_wq);
-		VTAILQ_INIT(&wq[u]->overflow);
+		VTAILQ_INIT(&wq[u]->queue);
 		VTAILQ_INIT(&wq[u]->idle);
 	}
 	(void)owq;	/* XXX: avoid race, leak it. */
@@ -374,9 +374,9 @@
 
 	Lck_Lock(&qp->mtx);
 	vs->n_wrk += qp->nthr;
-	vs->n_wrk_queue += qp->nqueue;
+	vs->n_wrk_lqueue += qp->lqueue;
 	vs->n_wrk_drop += qp->ndrop;
-	vs->n_wrk_overflow += qp->noverflow;
+	vs->n_wrk_queued += qp->nqueue;
 
 	if (qp->nthr > params->wthread_min) {
 		w = VTAILQ_LAST(&qp->idle, workerhead);
@@ -442,21 +442,21 @@
 			u = params->wthread_min;
 		nthr_max = u;
 
-		ovfl_max = (nthr_max * params->overflow_max) / 100;
+		queue_max = (nthr_max * params->queue_max) / 100;
 
 		vs->n_wrk = 0;
-		vs->n_wrk_queue = 0;
+		vs->n_wrk_lqueue = 0;
 		vs->n_wrk_drop = 0;
-		vs->n_wrk_overflow = 0;
+		vs->n_wrk_queued = 0;
 
 		t_idle = TIM_real() - params->wthread_timeout;
 		for (u = 0; u < nwq; u++)
 			wrk_decimate_flock(wq[u], t_idle, vs);
 
 		VSC_main->n_wrk= vs->n_wrk;
-		VSC_main->n_wrk_queue = vs->n_wrk_queue;
+		VSC_main->n_wrk_lqueue = vs->n_wrk_lqueue;
 		VSC_main->n_wrk_drop = vs->n_wrk_drop;
-		VSC_main->n_wrk_overflow = vs->n_wrk_overflow;
+		VSC_main->n_wrk_queued = vs->n_wrk_queued;
 
 		TIM_sleep(params->wthread_purge_delay * 1e-3);
 	}
@@ -477,8 +477,8 @@
 	 * one more thread.
 	 */
 	if (qp->nthr < params->wthread_min ||	/* Not enough threads yet */
-	    (qp->nqueue > params->wthread_add_threshold && /* more needed */
-	    qp->nqueue > qp->lqueue)) {	/* not getting better since last */
+	    (qp->lqueue > params->wthread_add_threshold && /* more needed */
+	    qp->lqueue > qp->last_lqueue)) {	/* not getting better since last */
 		if (qp->nthr >= nthr_max) {
 			VSC_main->n_wrk_max++;
 		} else if (pthread_create(&tp, tp_attr, wrk_thread, qp)) {
@@ -492,11 +492,11 @@
 			TIM_sleep(params->wthread_add_delay * 1e-3);
 		}
 	}
-	qp->lqueue = qp->nqueue;
+	qp->last_lqueue = qp->lqueue;
 }
 
 /*--------------------------------------------------------------------
- * This thread wakes up whenever a pool overflows.
+ * This thread wakes up whenever a pool queues.
  *
  * The trick here is to not be too aggressive about creating threads.
  * We do this by only examining one pool at a time, and by sleeping

Modified: trunk/varnish-cache/bin/varnishd/heritage.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/heritage.h	2011-01-03 09:17:47 UTC (rev 5672)
+++ trunk/varnish-cache/bin/varnishd/heritage.h	2011-01-03 09:40:09 UTC (rev 5673)
@@ -91,7 +91,7 @@
 	unsigned		wthread_stats_rate;
 	unsigned		wthread_stacksize;
 
-	unsigned		overflow_max;
+	unsigned		queue_max;
 
 	/* Memory allocation hints */
 	unsigned		sess_workspace;

Modified: trunk/varnish-cache/bin/varnishd/mgt_pool.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/mgt_pool.c	2011-01-03 09:17:47 UTC (rev 5672)
+++ trunk/varnish-cache/bin/varnishd/mgt_pool.c	2011-01-03 09:40:09 UTC (rev 5673)
@@ -205,8 +205,8 @@
 		"its accumulated stats into the global counters.\n",
 		EXPERIMENTAL,
 		"10", "requests" },
-	{ "overflow_max", tweak_uint, &master.overflow_max, 0, UINT_MAX,
-		"Percentage permitted overflow queue length.\n"
+	{ "queue_max", tweak_uint, &master.queue_max, 0, UINT_MAX,
+		"Percentage permitted queue length.\n"
 		"\n"
 		"This sets the ratio of queued requests to worker threads, "
 		"above which sessions will be dropped instead of queued.\n",

Modified: trunk/varnish-cache/include/vsc_fields.h
===================================================================
--- trunk/varnish-cache/include/vsc_fields.h	2011-01-03 09:17:47 UTC (rev 5672)
+++ trunk/varnish-cache/include/vsc_fields.h	2011-01-03 09:40:09 UTC (rev 5673)
@@ -83,8 +83,8 @@
 VSC_F(n_wrk_failed,	uint64_t, 0, 'a',
 					"N worker threads not created")
 VSC_F(n_wrk_max,		uint64_t, 0, 'a', "N worker threads limited")
-VSC_F(n_wrk_queue,		uint64_t, 0, 'a', "N queued work requests")
-VSC_F(n_wrk_overflow,	uint64_t, 0, 'a', "N overflowed work requests")
+VSC_F(n_wrk_lqueue,		uint64_t, 0, 'a', "work request queue length")
+VSC_F(n_wrk_queued,		uint64_t, 0, 'a', "N queued work requests")
 VSC_F(n_wrk_drop,		uint64_t, 0, 'a', "N dropped work requests")
 VSC_F(n_backend,		uint64_t, 0, 'i', "N backends")
 




More information about the varnish-commit mailing list