source: bin/varnishd/cache_expire.c @ e22de5

Revision e22de5, 7.8 KB checked in by Dag Erling Smørgrav <des@…>, 7 years ago (diff)

Use CHECK_OBJ_ORNULL().

git-svn-id:  http://www.varnish-cache.org/svn/trunk/varnish-cache@2050 d4fa192b-c00b-0410-8231-f00ffab90ce4

  • Property mode set to 100644
Line 
1/*-
2 * Copyright (c) 2006 Verdens Gang AS
3 * Copyright (c) 2006-2007 Linpro AS
4 * All rights reserved.
5 *
6 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $Id$
30 *
31 * Expiry of cached objects and execution of prefetcher
32 *
33 * XXX: Objects can linger on deathrow as long as a slow client
34 * XXX: tickles data away from it.  With many slow clients this could
35 * XXX: possibly make deathrow very long and make the hangman waste
36 * XXX: time.  The solution is to have another queue for such "pending
37 * XXX: cases" and have HSH_Deref() move them to deathrow when they
38 * XXX: are ready.
39 */
40
41#include <unistd.h>
42#include <stdio.h>
43#include <string.h>
44
45#include "shmlog.h"
46#include "binary_heap.h"
47#include "cache.h"
48#include "heritage.h"
49
static pthread_t exp_thread;	/* handle of most recently created expiry thread; see EXP_Init() */
static struct binheap *exp_heap;	/* objects ordered by ttl; earliest expiry at the root */
static MTX exp_mtx;		/* protects exp_heap, exp_deathrow and exp_lru */
static unsigned expearly = 30;	/* seconds before ttl at which exp_prefetch() consults VCL */
/* Objects VCL decided to discard; exp_hangman() reaps them when ttl passes. */
static VTAILQ_HEAD(,object) exp_deathrow = VTAILQ_HEAD_INITIALIZER(exp_deathrow);
/* Least-recently-used list; EXP_NukeOne() picks victims from the head. */
static VTAILQ_HEAD(,object) exp_lru = VTAILQ_HEAD_INITIALIZER(exp_lru);

/*
 * This is a magic marker for the objects currently on the SIOP [look it up]
 * so that other users of the object will not stumble trying to change the
 * ttl or lru position.
 */
static const unsigned lru_target = (unsigned)(-3);
63
64/*--------------------------------------------------------------------*/
65
66void
67EXP_Insert(struct object *o)
68{
69
70        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
71        assert(o->heap_idx == 0);
72        LOCK(&exp_mtx);
73        binheap_insert(exp_heap, o);
74        VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow);
75        UNLOCK(&exp_mtx);
76}
77
78void
79EXP_Touch(struct object *o, double now)
80{
81
82        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
83        if (o->lru_stamp + params->lru_timeout < now) {
84                LOCK(&exp_mtx); /* XXX: should be ..._TRY */
85                if (o->heap_idx != lru_target && o->heap_idx != 0) {
86                        VTAILQ_REMOVE(&exp_lru, o, deathrow);
87                        VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow);
88                        o->lru_stamp = now;
89                }
90                UNLOCK(&exp_mtx);
91        }
92}
93
94void
95EXP_TTLchange(struct object *o)
96{
97
98        LOCK(&exp_mtx);
99        if (o->heap_idx != lru_target) {
100                assert(o->heap_idx != 0);
101                binheap_delete(exp_heap, o->heap_idx);
102                binheap_insert(exp_heap, o);
103        }
104        UNLOCK(&exp_mtx);
105}
106
107/*--------------------------------------------------------------------
108 * This thread monitors deathrow and kills objects when they time out.
109 */
110
/*
 * Thread body: scan deathrow once per second and HSH_Deref() objects
 * whose ttl has passed and which nobody else holds a reference to.
 */
static void *
exp_hangman(void *arg)
{
	struct object *o;
	double t;

	(void)arg;

	t = TIM_real();
	while (1) {
		LOCK(&exp_mtx);
		VTAILQ_FOREACH(o, &exp_deathrow, deathrow) {
			CHECK_OBJ(o, OBJECT_MAGIC);
			/*
			 * Deathrow appears to be kept roughly in expiry
			 * order: once we meet an object that is not yet
			 * due, stop scanning (o = NULL signals "nothing
			 * to kill this round").
			 */
			if (o->ttl >= t) {
				o = NULL;
				break;
			}
			/* Busy objects are still being fetched; skip them. */
			if (o->busy) {
				VSL(SLT_Debug, 0,
				    "Grim Reaper: Busy object xid %u", o->xid);
				continue;
			}
			/* refcnt == 1 means only our list holds it: kill it. */
			if (o->refcnt == 1)
				break;
		}
		/* NB: o may also be non-NULL but still referenced here if the
		 * loop ran off the end; the refcnt==1 check above guards the
		 * common case -- TODO confirm the end-of-list case. */
		if (o == NULL) {
			UNLOCK(&exp_mtx);
			AZ(sleep(1));	/* AZ: sleep must not be interrupted */
			t = TIM_real();
			continue;
		}
		VTAILQ_REMOVE(&exp_deathrow, o, deathrow);
		VSL_stats->n_deathrow--;
		VSL_stats->n_expired++;
		UNLOCK(&exp_mtx);
		/* Log outside the lock; o is ours now (last reference). */
		VSL(SLT_ExpKill, 0, "%u %d", o->xid, (int)(o->ttl - t));
		HSH_Deref(o);
	}
}
150
151/*--------------------------------------------------------------------
152 * This thread monitors the root of the binary heap and whenever an
153 * object gets close enough, VCL is asked to decide if it should be
154 * discarded or prefetched.
155 * If discarded, the object is put on deathrow where exp_hangman() will
156 * do what needs to be done.
157 * XXX: If prefetched pass to the pool for pickup.
158 */
159
/*
 * Thread body: watch the root of the timer heap; when the soonest
 * object comes within `expearly` seconds of its ttl, run the VCL
 * timeout method on it and, on "discard", move it from the LRU list
 * to deathrow for exp_hangman() to reap.
 */
static void *
exp_prefetch(void *arg)
{
	struct worker ww;
	struct object *o;
	double t;
	struct sess *sp;
	struct object *o2;

	(void)arg;

	/* A private session + fake worker so we can call VCL methods. */
	sp = SES_New(NULL, 0);
	XXXAN(sp);
	sp->wrk = &ww;
	ww.magic = WORKER_MAGIC;
	ww.wlp = ww.wlog;
	ww.wle = ww.wlog + sizeof ww.wlog;

	AZ(sleep(10));		/* XXX: Takes time for VCL to arrive */
	VCL_Get(&sp->vcl);
	t = TIM_real();
	while (1) {
		LOCK(&exp_mtx);
		o = binheap_root(exp_heap);
		CHECK_OBJ_ORNULL(o, OBJECT_MAGIC);
		/* Nothing due within the lookahead window: nap and retry. */
		if (o == NULL || o->ttl > t + expearly) {
			UNLOCK(&exp_mtx);
			AZ(sleep(1));
			VCL_Refresh(&sp->vcl);
			t = TIM_real();
			continue;
		}
		/* Pull the object off the heap while we consult VCL. */
		binheap_delete(exp_heap, o->heap_idx);
		assert(o->heap_idx == 0);

		/* Sanity check */
		o2 = binheap_root(exp_heap);
		if (o2 != NULL)
			assert(o2->ttl >= o->ttl);

		UNLOCK(&exp_mtx);
		WSL(&ww, SLT_ExpPick, 0, "%u", o->xid);

		/* Let VCL decide the object's fate in our session context. */
		sp->obj = o;
		VCL_timeout_method(sp);

		if (sp->handling == VCL_RET_DISCARD) {
			LOCK(&exp_mtx);
			VTAILQ_REMOVE(&exp_lru, o, deathrow);
			VTAILQ_INSERT_TAIL(&exp_deathrow, o, deathrow);
			VSL_stats->n_deathrow++;
			UNLOCK(&exp_mtx);
			continue;
		}
		/*
		 * NOTE(review): reached only when handling != DISCARD, so
		 * this assert always fires -- presumably a deliberate trap
		 * because discard is the only implemented outcome (prefetch
		 * is an XXX above).  Confirm before "fixing".
		 */
		assert(sp->handling == VCL_RET_DISCARD);
	}
}
217
218/*--------------------------------------------------------------------*/
219
220static int
221object_cmp(void *priv, void *a, void *b)
222{
223        struct object *aa, *bb;
224
225        (void)priv;
226
227        aa = a;
228        bb = b;
229        return (aa->ttl < bb->ttl);
230}
231
232static void
233object_update(void *priv, void *p, unsigned u)
234{
235        struct object *o = p;
236
237        (void)priv;
238        o->heap_idx = u;
239}
240
241/*--------------------------------------------------------------------
242 * Attempt to make space by nuking, with VCLs permission, the oldest
243 * object on the LRU list which isn't in use.
244 * Returns: 1: did, 0: didn't, -1: can't
245 */
246
int
EXP_NukeOne(struct sess *sp)
{
	struct object *o, *o2;

	/* Find the first currently unused object on the LRU */
	LOCK(&exp_mtx);
	VTAILQ_FOREACH(o, &exp_lru, deathrow)
		if (o->refcnt == 1)
			break;
	if (o != NULL) {
		/*
		 * Take it off the binheap while we chew.  This effectively
		 * means that we own the EXP refcnt on this object.
		 */
		VTAILQ_REMOVE(&exp_lru, o, deathrow);
		binheap_delete(exp_heap, o->heap_idx);
		assert(o->heap_idx == 0);
		/* Mark it so EXP_Touch()/EXP_TTLchange() leave it alone. */
		o->heap_idx = lru_target;
		VSL_stats->n_lru_nuked++;	/* May be premature */
	}
	UNLOCK(&exp_mtx);

	/* No unused object found: caller cannot free space this way. */
	if (o == NULL)
		return (-1);

	/*
	 * Ask VCL in the context of the requestors session, in order to
	 * allow client QoS considerations to inform the decision.
	 * Temporarily substitute the object we want to nuke for the sessions
	 * own object.
	 */
	o2 = sp->obj;
	sp->obj = o;
	VCL_discard_method(sp);
	sp->obj = o2;

	if (sp->handling == VCL_RET_DISCARD) {
		VSL(SLT_ExpKill, 0, "%u LRU", o->xid);
		/* Drop the reference we took ownership of above. */
		HSH_Deref(o);
		return (1);
	}

	/* VCL may only answer discard or keep here. */
	assert(sp->handling == VCL_RET_KEEP);

	/* Insert in binheap and lru again */
	LOCK(&exp_mtx);
	VSL_stats->n_lru_nuked--;		/* It was premature */
	VSL_stats->n_lru_saved++;
	o->heap_idx = 0;	/* clear lru_target so binheap_insert may set it */
	o->lru_stamp = sp->wrk->used;
	binheap_insert(exp_heap, o);
	VTAILQ_INSERT_TAIL(&exp_lru, o, deathrow);
	UNLOCK(&exp_mtx);
	return (0);
}
303
304/*--------------------------------------------------------------------*/
305
/*
 * Initialize the expiry subsystem: mutex, timer heap, and the two
 * worker threads (prefetcher and hangman).
 */
void
EXP_Init(void)
{

	MTX_INIT(&exp_mtx);
	exp_heap = binheap_new(NULL, object_cmp, object_update);
	XXXAN(exp_heap);
	/*
	 * NOTE(review): both creates store into the same exp_thread, so the
	 * first thread's handle is overwritten -- harmless as long as the
	 * handle is never joined/cancelled, but confirm that is intended.
	 */
	AZ(pthread_create(&exp_thread, NULL, exp_prefetch, NULL));
	AZ(pthread_create(&exp_thread, NULL, exp_hangman, NULL));
}
Note: See TracBrowser for help on using the repository browser.