r4958 - trunk/varnish-cache/bin/varnishd

phk at varnish-cache.org
Wed Jun 16 11:51:06 CEST 2010


Author: phk
Date: 2010-06-16 11:51:06 +0200 (Wed, 16 Jun 2010)
New Revision: 4958

Added:
   trunk/varnish-cache/bin/varnishd/vsm.c
Modified:
   trunk/varnish-cache/bin/varnishd/Makefile.am
   trunk/varnish-cache/bin/varnishd/cache_shmlog.c
   trunk/varnish-cache/bin/varnishd/common.h
   trunk/varnish-cache/bin/varnishd/mgt_shmem.c
Log:
VSM allocations and frees need to happen from both the manager and the
child process, so start abstracting this stuff into a common file (vsm.c).


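For context, the lookup pattern this abstraction enables on the child side is
sketched below; it mirrors the VSL_Init() hunk in cache_shmlog.c further down.
VSL_CLASS and the VSM_PTR()/VSM_NEXT() accessors are assumed to come from
vsm.h, which is not part of this diff.

	struct vsm_chunk *vsc;

	/* Walk the shared memory chunks and pick out the log segment */
	VSM_ITER(vsc)
		if (!strcmp(vsc->class, VSL_CLASS))
			break;
	AN(vsc);			/* the log chunk must exist */
	vsl_start = VSM_PTR(vsc);	/* start of the chunk's payload */
	vsl_end = VSM_NEXT(vsc);	/* one past the end of the chunk */
	vsl_ptr = vsl_start + 1;	/* records follow the sequence word */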

Modified: trunk/varnish-cache/bin/varnishd/Makefile.am
===================================================================
--- trunk/varnish-cache/bin/varnishd/Makefile.am	2010-06-16 08:44:00 UTC (rev 4957)
+++ trunk/varnish-cache/bin/varnishd/Makefile.am	2010-06-16 09:51:06 UTC (rev 4958)
@@ -57,7 +57,8 @@
 	storage_synth.c \
 	storage_umem.c \
 	stevedore_utils.c \
-	varnishd.c
+	varnishd.c \
+	vsm.c
 
 noinst_HEADERS = \
 	acct_fields.h \

Modified: trunk/varnish-cache/bin/varnishd/cache_shmlog.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_shmlog.c	2010-06-16 08:44:00 UTC (rev 4957)
+++ trunk/varnish-cache/bin/varnishd/cache_shmlog.c	2010-06-16 09:51:06 UTC (rev 4958)
@@ -42,6 +42,10 @@
 
 static pthread_mutex_t vsl_mtx;
 
+static uint32_t			*vsl_start;
+static uint32_t			*vsl_end;
+static uint32_t			*vsl_ptr;
+
 static inline uint32_t
 vsl_w0(uint32_t type, uint32_t length)
 {
@@ -69,13 +73,17 @@
 vsl_wrap(void)
 {
 
-	vsl_log_start[1] = VSL_ENDMARKER;
+	assert(vsl_ptr >= vsl_start + 1);
+	assert(vsl_ptr < vsl_end);
+	vsl_start[1] = VSL_ENDMARKER;
 	do
-		vsl_log_start[0]++;
-	while (vsl_log_start[0] == 0);
+		vsl_start[0]++;
+	while (vsl_start[0] == 0);
 	VWMB();
-	*vsl_log_nxt = VSL_WRAPMARKER;
-	vsl_log_nxt = vsl_log_start + 1;
+	if (vsl_ptr != vsl_start + 1) {
+		*vsl_ptr = VSL_WRAPMARKER;
+		vsl_ptr = vsl_start + 1;
+	}
 	VSL_stats->shm_cycles++;
 }
 
@@ -92,24 +100,24 @@
 		AZ(pthread_mutex_lock(&vsl_mtx));
 		VSL_stats->shm_cont++;
 	}
-	assert(vsl_log_nxt < vsl_log_end);
-	assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
+	assert(vsl_ptr < vsl_end);
+	assert(((uintptr_t)vsl_ptr & 0x3) == 0);
 
 	VSL_stats->shm_writes++;
 	VSL_stats->shm_flushes += flushes;
 	VSL_stats->shm_records += records;
 
 	/* Wrap if necessary */
-	if (VSL_END(vsl_log_nxt, len) >= vsl_log_end)
+	if (VSL_END(vsl_ptr, len) >= vsl_end)
 		vsl_wrap();
 
-	p = vsl_log_nxt;
-	vsl_log_nxt = VSL_END(vsl_log_nxt, len);
+	p = vsl_ptr;
+	vsl_ptr = VSL_END(vsl_ptr, len);
 
-	*vsl_log_nxt = VSL_ENDMARKER;
+	*vsl_ptr = VSL_ENDMARKER;
 
-	assert(vsl_log_nxt < vsl_log_end);
-	assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
+	assert(vsl_ptr < vsl_end);
+	assert(((uintptr_t)vsl_ptr & 0x3) == 0);
 	AZ(pthread_mutex_unlock(&vsl_mtx));
 
 	return (p);
@@ -264,8 +272,18 @@
 void
 VSL_Init(void)
 {
+	struct vsm_chunk *vsc;
 
 	AZ(pthread_mutex_init(&vsl_mtx, NULL));
+
+	VSM_ITER(vsc)
+		if (!strcmp(vsc->class, VSL_CLASS))
+			break;
+	AN(vsc);
+	vsl_start = VSM_PTR(vsc);
+	vsl_end = VSM_NEXT(vsc);
+	vsl_ptr = vsl_start + 1;
+
 	vsl_wrap();
 	loghead->starttime = (intmax_t)TIM_real();
 	loghead->panicstr[0] = '\0';

Modified: trunk/varnish-cache/bin/varnishd/common.h
===================================================================
--- trunk/varnish-cache/bin/varnishd/common.h	2010-06-16 08:44:00 UTC (rev 4957)
+++ trunk/varnish-cache/bin/varnishd/common.h	2010-06-16 09:51:06 UTC (rev 4958)
@@ -42,9 +42,6 @@
 void *mgt_SHM_Alloc(unsigned size, const char *class, const char *type, const char *ident);
 extern struct vsc_main *VSL_stats;
 extern struct vsm_head *loghead;
-extern uint32_t			*vsl_log_start;
-extern uint32_t			*vsl_log_end;
-extern uint32_t			*vsl_log_nxt;
 
 /* varnishd.c */
 struct vsb;
@@ -72,16 +69,11 @@
 
 #define NEEDLESS_RETURN(foo)	return (foo)
 
-/**********************************************************************
- * Guess what:  There is no POSIX standard for memory barriers.
- * XXX: Please try to find the minimal #ifdef to use here, rely on OS
- * supplied facilities if at all possible, to avoid descending into the
- * full cpu/compiler explosion.
- */
+/* vsm.c */
+extern struct vsm_head		*vsm_head;
+extern void			*vsm_end;
 
-#ifdef __FreeBSD__
-#include <machine/atomic.h>
-#define MEMORY_BARRIER()       mb()
-#else
-#define MEMORY_BARRIER()       close(-1)
-#endif
+struct vsm_chunk *vsm_iter_0(void);
+void vsm_iter_n(struct vsm_chunk **pp);
+
+#define VSM_ITER(vd) for ((vd) = vsm_iter_0(); (vd) != NULL; vsm_iter_n(&vd))

Modified: trunk/varnish-cache/bin/varnishd/mgt_shmem.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/mgt_shmem.c	2010-06-16 08:44:00 UTC (rev 4957)
+++ trunk/varnish-cache/bin/varnishd/mgt_shmem.c	2010-06-16 09:51:06 UTC (rev 4958)
@@ -115,9 +115,6 @@
 
 struct vsc_main	*VSL_stats;
 struct vsm_head	*loghead;
-uint32_t		*vsl_log_start;
-uint32_t		*vsl_log_end;
-uint32_t		*vsl_log_nxt;
 
 static int vsl_fd = -1;
 
@@ -260,6 +257,7 @@
 	const char *q;
 	uintmax_t size, s1, s2, ps;
 	char **av, **ap;
+	uint32_t *vsl_log_start;
 
 	if (l_arg == NULL)
 		l_arg = "";
@@ -341,6 +339,9 @@
 	bprintf(loghead->head.class, "%s", "Free");
 	VWMB();
 
+	vsm_head = loghead;
+	vsm_end = (uint8_t*)loghead + size;
+
 	VSL_stats = mgt_SHM_Alloc(sizeof *VSL_stats,
 	    VSC_CLASS, VSC_TYPE_MAIN, "");
 	AN(VSL_stats);
@@ -352,9 +353,7 @@
 
 	vsl_log_start = mgt_SHM_Alloc(s1, VSL_CLASS, "", "");
 	AN(vsl_log_start);
-	vsl_log_end = (void*)((uint8_t *)vsl_log_start + s1);
-	vsl_log_nxt = vsl_log_start + 1;
-	*vsl_log_nxt = VSL_ENDMARKER;
+	vsl_log_start[1] = VSL_ENDMARKER;
 	VWMB();
 
 	do

Copied: trunk/varnish-cache/bin/varnishd/vsm.c (from rev 4957, trunk/varnish-cache/bin/varnishd/mgt_shmem.c)
===================================================================
--- trunk/varnish-cache/bin/varnishd/vsm.c	                        (rev 0)
+++ trunk/varnish-cache/bin/varnishd/vsm.c	2010-06-16 09:51:06 UTC (rev 4958)
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2010 Redpill Linpro AS
+ * All rights reserved.
+ *
+ * Author: Poul-Henning Kamp <phk at phk.freebsd.dk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * VSM stuff common to manager and child.
+ *
+ */
+
+#include "config.h"
+
+#include "svnid.h"
+SVNID("$Id$")
+
+#include <unistd.h>
+
+#include "miniobj.h"
+#include "libvarnish.h"
+#include "common.h"
+#include "vsm.h"
+
+struct vsm_head		*vsm_head;
+void			*vsm_end;
+
+/*--------------------------------------------------------------------*/
+
+struct vsm_chunk *
+vsm_iter_0(void)
+{
+
+	CHECK_OBJ_NOTNULL(vsm_head, VSM_HEAD_MAGIC);
+	CHECK_OBJ_NOTNULL(&vsm_head->head, VSM_CHUNK_MAGIC);
+	return (&vsm_head->head);
+}
+
+void
+vsm_iter_n(struct vsm_chunk **pp)
+{
+
+	CHECK_OBJ_NOTNULL(vsm_head, VSM_HEAD_MAGIC);
+	CHECK_OBJ_NOTNULL(*pp, VSM_CHUNK_MAGIC);
+	*pp = VSM_NEXT(*pp);
+	if ((void*)(*pp) >= vsm_end) {
+		*pp = NULL;
+		return;
+	}
+	CHECK_OBJ_NOTNULL(*pp, VSM_CHUNK_MAGIC);
+}

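As a usage note, the VSM_ITER() macro added to common.h above is just sugar
over these two iterator functions; the explicit equivalent is sketched below
(the loop body is a hypothetical placeholder):

	struct vsm_chunk *vsc;

	/* Equivalent to VSM_ITER(vsc) { ... } */
	for (vsc = vsm_iter_0(); vsc != NULL; vsm_iter_n(&vsc)) {
		/* vsc points at one chunk header in the shared memory segment */
	}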


