[master] 0197fd4 Add two more parameters: one to control the gzip level (it was zero until now; nobody seems to have noticed?) and one to control where we put the temporary allocations for gzip/gunzip.

Poul-Henning Kamp phk at varnish-cache.org
Wed Jan 26 11:48:54 CET 2011


commit 0197fd48c22da43382e2617c5832cfdfc14affba
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Wed Jan 26 10:48:04 2011 +0000

    Add two more parameters: one to control the gzip level (it was zero
    until now; nobody seems to have noticed?) and one to control where
    we put the temporary allocations for gzip/gunzip.
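
For readers who want to see the mechanics outside the diff: gzip_level is simply the level argument handed to deflateInit2(), and gzip_tmp_space decides whether zlib's temporary state comes from malloc (the default) or from a workspace via the zalloc/zfree/opaque hooks.  The stand-alone C sketch below is illustrative only, not code from this commit; the arena type, its size and the sample input are invented for the example, with a simple bump allocator standing in for the Varnish workspace.

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define ARENA_SIZE	(256 * 1024)	/* plenty for deflate at low memLevel */

struct arena {
	unsigned char	buf[ARENA_SIZE];
	size_t		used;
};

/* zlib allocation hook: carve zlib's temporary state out of the arena. */
static voidpf
arena_alloc(voidpf opaque, uInt items, uInt size)
{
	struct arena *a = opaque;
	voidpf p;

	if (a->used + (size_t)items * size > sizeof a->buf)
		return (Z_NULL);	/* deflateInit2() then fails with Z_MEM_ERROR */
	p = a->buf + a->used;
	a->used += (size_t)items * size;
	return (p);
}

/* Individual frees are no-ops; the arena is reset wholesale when the
 * compression job is done, much like WS_Reset() in the diff below. */
static void
arena_free(voidpf opaque, voidpf address)
{

	(void)opaque;
	(void)address;
}

int
main(void)
{
	char msg[] = "hello, hello, hello, hello, hello";
	unsigned char out[1024];
	static struct arena a;
	z_stream vz;
	int gzip_level = 6;		/* cf. the new gzip_level parameter */

	memset(&vz, 0, sizeof vz);
	vz.zalloc = arena_alloc;	/* cf. gzip_tmp_space != 0 */
	vz.zfree = arena_free;
	vz.opaque = &a;

	/* gzip framing (16 added to the window bits), low memLevel,
	 * default strategy -- same shape as the call in cache_gzip.c. */
	assert(deflateInit2(&vz, gzip_level, Z_DEFLATED, 15 + 16, 1,
	    Z_DEFAULT_STRATEGY) == Z_OK);

	vz.next_in = (Bytef *)msg;
	vz.avail_in = (uInt)strlen(msg);
	vz.next_out = out;
	vz.avail_out = sizeof out;
	assert(deflate(&vz, Z_FINISH) == Z_STREAM_END);
	printf("%zu bytes in, %lu bytes out, %zu bytes of arena used\n",
	    strlen(msg), vz.total_out, a.used);

	assert(deflateEnd(&vz) == Z_OK);
	a.used = 0;			/* the WS_Reset() equivalent */
	return (0);
}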

diff --git a/bin/varnishd/cache.h b/bin/varnishd/cache.h
index 2a5ff93..ead98c1 100644
--- a/bin/varnishd/cache.h
+++ b/bin/varnishd/cache.h
@@ -640,8 +640,8 @@ void Fetch_Init(void);
 struct vgz;
 
 enum vgz_flag { VGZ_NORMAL, VGZ_ALIGN, VGZ_RESET, VGZ_FINISH };
-struct vgz *VGZ_NewUngzip(const struct sess *sp, struct ws *tmp);
-struct vgz *VGZ_NewGzip(const struct sess *sp, struct ws *tmp);
+struct vgz *VGZ_NewUngzip(struct sess *sp);
+struct vgz *VGZ_NewGzip(struct sess *sp);
 void VGZ_Ibuf(struct vgz *, const void *, ssize_t len);
 int VGZ_IbufEmpty(const struct vgz *vg);
 void VGZ_Obuf(struct vgz *, const void *, ssize_t len);
diff --git a/bin/varnishd/cache_esi_deliver.c b/bin/varnishd/cache_esi_deliver.c
index c687dd7..589feeb 100644
--- a/bin/varnishd/cache_esi_deliver.c
+++ b/bin/varnishd/cache_esi_deliver.c
@@ -274,7 +274,7 @@ ESI_Deliver(struct sess *sp)
 	}
 
 	if (isgzip && !sp->wrk->gzip_resp) {
-		vgz = VGZ_NewUngzip(sp, sp->wrk->ws);
+		vgz = VGZ_NewUngzip(sp);
 		obufl = 0;
 	}
 
diff --git a/bin/varnishd/cache_esi_fetch.c b/bin/varnishd/cache_esi_fetch.c
index 499f6b6..840be7b 100644
--- a/bin/varnishd/cache_esi_fetch.c
+++ b/bin/varnishd/cache_esi_fetch.c
@@ -303,24 +303,24 @@ vfp_esi_begin(struct sess *sp, size_t estimate)
 	/* XXX: snapshot WS's ? We'll need the space */
 
 	if (sp->wrk->is_gzip && sp->wrk->do_gunzip) {
-		sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
+		sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
 		VEP_Init(sp, NULL);
 	} else if (sp->wrk->is_gunzip && sp->wrk->do_gzip) {
 		vef = (void*)WS_Alloc(sp->ws, sizeof *vef);
 		AN(vef);
 		memset(vef, 0, sizeof *vef);
 		vef->magic = VEF_MAGIC;
-		vef->vgz = VGZ_NewGzip(sp, sp->ws);
+		vef->vgz = VGZ_NewGzip(sp);
 		AZ(sp->wrk->vef_priv);
 		sp->wrk->vef_priv = vef;
 		VEP_Init(sp, vfp_vep_callback);
 	} else if (sp->wrk->is_gzip) {
-		sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
+		sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
 		vef = (void*)WS_Alloc(sp->ws, sizeof *vef);
 		AN(vef);
 		memset(vef, 0, sizeof *vef);
 		vef->magic = VEF_MAGIC;
-		vef->vgz = VGZ_NewGzip(sp, sp->ws);
+		vef->vgz = VGZ_NewGzip(sp);
 		AZ(sp->wrk->vef_priv);
 		sp->wrk->vef_priv = vef;
 		VEP_Init(sp, vfp_vep_callback);
diff --git a/bin/varnishd/cache_gzip.c b/bin/varnishd/cache_gzip.c
index 4b62d24..4bdd37c 100644
--- a/bin/varnishd/cache_gzip.c
+++ b/bin/varnishd/cache_gzip.c
@@ -113,34 +113,48 @@ vgz_free(voidpf opaque, voidpf address)
  */
 
 static struct vgz *
-vgz_alloc_vgz(struct ws *ws)
+vgz_alloc_vgz(struct sess *sp)
 {
-	char *s;
 	struct vgz *vg;
+	struct ws *ws = sp->wrk->ws;
 
 	WS_Assert(ws);
-	s = WS_Snapshot(ws);
 	vg = (void*)WS_Alloc(ws, sizeof *vg);
 	AN(vg);
 	memset(vg, 0, sizeof *vg);
 	vg->magic = VGZ_MAGIC;
-	vg->tmp = ws;
-	vg->tmp_snapshot = s;
-
-	vg->vz.zalloc = vgz_alloc;
-	vg->vz.zfree = vgz_free;
-	vg->vz.opaque = vg;
 
+	switch (params->gzip_tmp_space) {
+	case 0:
+		/* malloc, the default */
+		break;
+	case 1:
+		vg->tmp = sp->ws;
+		vg->tmp_snapshot = WS_Snapshot(vg->tmp);
+		vg->vz.zalloc = vgz_alloc;
+		vg->vz.zfree = vgz_free;
+		vg->vz.opaque = vg;
+		break;
+	case 2:
+		vg->tmp = sp->wrk->ws;
+		vg->tmp_snapshot = WS_Snapshot(vg->tmp);
+		vg->vz.zalloc = vgz_alloc;
+		vg->vz.zfree = vgz_free;
+		vg->vz.opaque = vg;
+		break;
+	default:
+		assert(0 == __LINE__);
+	} 
 	return (vg);
 }
 
 struct vgz *
-VGZ_NewUngzip(const struct sess *sp, struct ws *tmp)
+VGZ_NewUngzip(struct sess *sp)
 {
 	struct vgz *vg;
 
 	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
-	vg = vgz_alloc_vgz(tmp);
+	vg = vgz_alloc_vgz(sp);
 
 	/*
 	 * Max memory usage according to zonf.h:
@@ -148,23 +162,18 @@ VGZ_NewUngzip(const struct sess *sp, struct ws *tmp)
 	 * Since we don't control windowBits, we have to assume
 	 * it is 15, so 34-35KB or so.
 	 */
-#if 1
-	vg->vz.zalloc = NULL;
-	vg->vz.zfree = NULL;
-	vg->vz.opaque = NULL;
-#endif
 	assert(Z_OK == inflateInit2(&vg->vz, 31));
 	return (vg);
 }
 
 struct vgz *
-VGZ_NewGzip(const struct sess *sp, struct ws *tmp)
+VGZ_NewGzip(struct sess *sp)
 {
 	struct vgz *vg;
 	int i;
 
 	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
-	vg = vgz_alloc_vgz(tmp);
+	vg = vgz_alloc_vgz(sp);
 
 	/*
 	 * From zconf.h:
@@ -181,13 +190,8 @@ VGZ_NewGzip(const struct sess *sp, struct ws *tmp)
 	 * XXX: It may be more efficent to malloc them, rather than have
 	 * XXX: too many worker threads grow the stacks.
 	 */
-#if 1
-	vg->vz.zalloc = NULL;
-	vg->vz.zfree = NULL;
-	vg->vz.opaque = NULL;
-#endif
 	i = deflateInit2(&vg->vz,
-	    0,				/* Level */
+	    params->gzip_level,		/* Level */
 	    Z_DEFLATED,			/* Method */
 	    16 + 8,			/* Window bits (16=gzip + 15) */
 	    1,				/* memLevel */
@@ -345,7 +349,8 @@ VGZ_Destroy(struct vgz **vg)
 {
 
 	CHECK_OBJ_NOTNULL(*vg, VGZ_MAGIC);
-	WS_Reset((*vg)->tmp, (*vg)->tmp_snapshot);
+	if ((*vg)->tmp != NULL) 
+		WS_Reset((*vg)->tmp, (*vg)->tmp_snapshot);
 	*vg = NULL;
 }
 
@@ -359,7 +364,7 @@ static void __match_proto__()
 vfp_gunzip_begin(struct sess *sp, size_t estimate)
 {
 	(void)estimate;
-	sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
+	sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
 }
 
 static int __match_proto__()
@@ -428,7 +433,7 @@ vfp_gzip_begin(struct sess *sp, size_t estimate)
 {
 	(void)estimate;
 
-	sp->wrk->vgz_rx = VGZ_NewGzip(sp, sp->ws);
+	sp->wrk->vgz_rx = VGZ_NewGzip(sp);
 }
 
 static int __match_proto__()
@@ -504,7 +509,7 @@ static void __match_proto__()
 vfp_testgzip_begin(struct sess *sp, size_t estimate)
 {
 	(void)estimate;
-	sp->wrk->vgz_rx = VGZ_NewUngzip(sp, sp->ws);
+	sp->wrk->vgz_rx = VGZ_NewUngzip(sp);
 }
 
 static int __match_proto__()
diff --git a/bin/varnishd/cache_response.c b/bin/varnishd/cache_response.c
index 7065e44..7d44eff 100644
--- a/bin/varnishd/cache_response.c
+++ b/bin/varnishd/cache_response.c
@@ -253,7 +253,7 @@ res_WriteGunzipObj(struct sess *sp)
 
 	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
 
-	vg = VGZ_NewUngzip(sp, sp->wrk->ws);
+	vg = VGZ_NewUngzip(sp);
 
 	VTAILQ_FOREACH(st, &sp->obj->store, list) {
 		CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
diff --git a/bin/varnishd/heritage.h b/bin/varnishd/heritage.h
index 8eab742..9c5a328 100644
--- a/bin/varnishd/heritage.h
+++ b/bin/varnishd/heritage.h
@@ -201,6 +201,8 @@ struct params {
 
 	unsigned		http_gzip_support;
 	unsigned		gzip_stack_buffer;
+	unsigned		gzip_tmp_space;
+	unsigned		gzip_level;
 
 	double			critbit_cooloff;
 };
diff --git a/bin/varnishd/mgt_param.c b/bin/varnishd/mgt_param.c
index e2151ad..f46d3ca 100644
--- a/bin/varnishd/mgt_param.c
+++ b/bin/varnishd/mgt_param.c
@@ -817,9 +817,29 @@ static const struct parspec input_parspec[] = {
 		"Enable support for HTTP GZIP compression.\n",
 		EXPERIMENTAL,
 		"on", "bool" },
+	{ "gzip_tmp_space", tweak_uint, &master.gzip_tmp_space, 0, 2,
+		"Where temporary space for gzip/gunzip is allocated.\n"
+		"  0 - malloc\n"
+		"  1 - session workspace\n"
+		"  2 - thread workspace\n"
+		"If you have much gzip/gunzip activity, it may be an"
+		" advantage to use workspace for these allocations to reduce"
+		" malloc activity.  Be aware that gzip needs 256+KB and gunzip"
+		" needs 32+KB of workspace (64+KB if ESI processing).",
+		EXPERIMENTAL,
+		"0", "" },
+	{ "gzip_level", tweak_uint, &master.gzip_level, 0, 9,
+		"Gzip compression level: 0=debug, 1=fast, 9=best",
+		0,
+		"6", ""},
 	{ "gzip_stack_buffer", tweak_uint, &master.gzip_stack_buffer,
 	        2048, UINT_MAX,
-		"Size of stack buffer used for gzip processing.\n",
+		"Size of stack buffer used for gzip processing.\n"
+		"The stack buffers are used for in-transit data,"
+		" for instance gunzip'ed data being sent to a client."
+		" Making this space too small results in more overhead,"
+		" writes to sockets etc, making it too big is probably"
+		" just a waste of memory.",
 		EXPERIMENTAL,
 		"32768", "Bytes" },
 	{ "critbit_cooloff", tweak_timeout_double,
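
Once they exist, the two knobs behave like any other runtime parameter, so they can be inspected and changed from the management CLI as well as from varnishtest (as the test changes below do).  A hypothetical session, assuming a running instance whose CLI is reachable through varnishadm (add -n/-T as appropriate for your setup):

	varnishadm param.show gzip_level
	varnishadm param.set gzip_level 9	# best compression, more CPU per object
	varnishadm param.set gzip_tmp_space 2	# zlib temp space from the thread workspace
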
diff --git a/bin/varnishtest/tests/e00020.vtc b/bin/varnishtest/tests/e00020.vtc
index 3fe9239..b654cf1 100644
--- a/bin/varnishtest/tests/e00020.vtc
+++ b/bin/varnishtest/tests/e00020.vtc
@@ -24,6 +24,7 @@ varnish v1 -vcl+backend {
 
 varnish v1 -cliok "param.set esi_syntax 4"
 varnish v1 -cliok "param.set http_gzip_support true"
+varnish v1 -cliok "param.set gzip_tmp_space 2"
 
 client c1 {
 	txreq 
diff --git a/bin/varnishtest/tests/e00022.vtc b/bin/varnishtest/tests/e00022.vtc
index 5634797..871b2c0 100644
--- a/bin/varnishtest/tests/e00022.vtc
+++ b/bin/varnishtest/tests/e00022.vtc
@@ -27,6 +27,7 @@ varnish v1 -vcl+backend {
 
 varnish v1 -cliok "param.set esi_syntax 0xc"
 varnish v1 -cliok "param.set http_gzip_support true"
+varnish v1 -cliok "param.set gzip_tmp_space 1"
 
 client c1 {
 	txreq  -hdr "Accept-Encoding: gzip"


