/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)mp_sync.c	10.31 (Sleepycat) 12/11/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "mp.h"
#include "common_ext.h"

static int __bhcmp __P((const void *, const void *));
static int __memp_fsync __P((DB_MPOOLFILE *));

/*
 * memp_sync --
 *	Mpool sync function.
 */
int
memp_sync(dbmp, lsnp)
	DB_MPOOL *dbmp;
	DB_LSN *lsnp;
{
	BH *bhp, **bharray;
	DB_ENV *dbenv;
	MPOOL *mp;
	MPOOLFILE *mfp;
	int ar_cnt, nalloc, next, maxpin, ret, wrote;

	MP_PANIC_CHECK(dbmp);

	dbenv = dbmp->dbenv;
	mp = dbmp->mp;

	if (dbenv->lg_info == NULL) {
		__db_err(dbenv, "memp_sync: requires logging");
		return (EINVAL);
	}

	/*
	 * We try to write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so we only hold it locked while we create the list.
	 * Get a good-sized block of memory to hold buffer pointers; we don't
	 * want to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * If the application is asking about a previous call to memp_sync(),
	 * and we haven't found any buffers that the application holding the
	 * pin couldn't write, return yes or no based on the current count.
	 * Note, if the application is asking about an LSN *smaller* than one
	 * we've already handled or are currently handling, then we return a
	 * result based on the count for the larger LSN.
	 */
	if (!F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
		if (mp->lsn_cnt == 0) {
			*lsnp = mp->lsn;
			ret = 0;
		} else
			ret = DB_INCOMPLETE;
		goto done;
	}

	/* Otherwise, it's a new checkpoint. */
	F_CLR(mp, MP_LSN_RETRY);

	/*
	 * Save the LSN.  We know that it's a new LSN or larger than the one
	 * for which we were already doing a checkpoint.  (BTW, I don't expect
	 * to see multiple LSNs from the same or multiple processes, but You
	 * Just Never Know.  Responding as if they all called with the largest
	 * of the LSNs specified makes everything work.)
	 *
	 * We don't currently use the LSN we save.  We could potentially save
	 * the last-written LSN in each buffer header and use it to determine
	 * what buffers need to be written.  The problem with this is that it's
	 * sizeof(LSN) more bytes of buffer header.  We currently write all the
	 * dirty buffers instead.
	 *
	 * Walk the list of shared memory segments, clearing the count of
	 * buffers waiting to be written.
	 */
	mp->lsn = *lsnp;
	mp->lsn_cnt = 0;
	for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
		mfp->lsn_cnt = 0;

	/*
	 * Walk the list of buffers and mark all dirty buffers to be written
	 * and all pinned buffers to be potentially written (we can't know if
	 * we'll need to write them until the holding process returns them to
	 * the cache).  We do this in one pass while holding the region locked
	 * so that processes can't make new buffers dirty, causing us to never
	 * finish.  Since the application may have restarted the sync, clear
	 * any BH_WRITE flags that appear to be left over from previous calls.
	 *
	 * We don't want to pin down the entire buffer cache, otherwise we'll
	 * starve threads needing new pages.  Don't pin down more than 80% of
	 * the cache.
	 *
	 * Keep a count of the total number of buffers we need to write in
	 * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_cnt.
	 */
	ar_cnt = 0;
	maxpin = ((mp->stat.st_page_dirty + mp->stat.st_page_clean) * 8) / 10;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
		if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
			F_SET(bhp, BH_WRITE);

			++mp->lsn_cnt;

			mfp = R_ADDR(dbmp, bhp->mf_offset);
			++mfp->lsn_cnt;

			/*
			 * If the buffer isn't in use, we should be able to
			 * write it immediately, so increment the reference
			 * count to lock it and its contents down, and then
			 * save a reference to it.
			 *
			 * If we've run out of space to store buffer
			 * references, we're screwed.  We don't want to
			 * realloc the array while holding a region lock, so
			 * we set the flag to force the checkpoint to be done
			 * again, from scratch, later.
			 *
			 * If we've pinned down too much of the cache, stop and
			 * set a flag to force the checkpoint to be tried again
			 * later.
			 */
			if (bhp->ref == 0) {
				++bhp->ref;
				bharray[ar_cnt] = bhp;
				if (++ar_cnt >= nalloc || ar_cnt >= maxpin) {
					F_SET(mp, MP_LSN_RETRY);
					break;
				}
			}
		} else
			if (F_ISSET(bhp, BH_WRITE))
				F_CLR(bhp, BH_WRITE);

	/* If there are no buffers we can write immediately, we're done. */
	if (ar_cnt == 0) {
		ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
		goto done;
	}

	UNLOCKREGION(dbmp);

	/* Sort the buffers we're going to write. */
	qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer and assume that it will be
		 * written when it's returned to the cache.
		 */
		if (bharray[next]->ref > 1) {
			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		mfp = R_ADDR(dbmp, bharray[next]->mf_offset);
		ret = __memp_bhwrite(dbmp, mfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0 || !wrote) {
			/*
			 * Any process syncing the shared memory buffer pool
			 * had better be able to write to any underlying file.
			 * Be understanding, but firm, on this point.
			 */
			if (ret == 0) {
				__db_err(dbenv, "%s: unable to flush page: %lu",
				    __memp_fns(dbmp, mfp),
				    (u_long)bharray[next]->pgno);
				ret = EPERM;
			}

			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}
	}
	ret = mp->lsn_cnt != 0 ||
	    F_ISSET(mp, MP_LSN_RETRY) ? DB_INCOMPLETE : 0;

done:
	if (0) {
err:		/*
		 * On error, clear:
		 *	MPOOL->lsn_cnt (the total sync count)
		 *	MPOOLFILE->lsn_cnt (the per-file sync count)
		 *	BH_WRITE flag (the scheduled-for-writing flag)
		 */
		mp->lsn_cnt = 0;
		for (mfp = SH_TAILQ_FIRST(&dbmp->mp->mpfq, __mpoolfile);
		    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
			mfp->lsn_cnt = 0;
		for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
		    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
			F_CLR(bhp, BH_WRITE);
	}
	UNLOCKREGION(dbmp);
	__os_free(bharray, nalloc * sizeof(BH *));
	return (ret);
}
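
/*
 * Usage sketch (hypothetical caller, not part of this file): memp_sync()
 * returns DB_INCOMPLETE while pinned buffers remain, so a checkpointing
 * thread would typically retry until the pool is clean.  The one-second
 * back-off via __os_sleep() is an assumption about caller policy, not
 * code from this distribution:
 *
 *	DB_LSN lsn;
 *	int ret;
 *
 *	(Obtain lsn, e.g., the LSN of the checkpoint log record.)
 *
 *	while ((ret = memp_sync(dbmp, &lsn)) == DB_INCOMPLETE)
 *		(void)__os_sleep(1, 0);
 *	if (ret != 0)
 *		(handle EINVAL -- no logging -- or a write error);
 */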

/*
 * memp_fsync --
 *	Mpool file sync function.
 */
int
memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	DB_MPOOL *dbmp;
	int is_tmp;

	dbmp = dbmfp->dbmp;

	MP_PANIC_CHECK(dbmp);

	/*
	 * If this handle doesn't have a file descriptor that's open for
	 * writing, or if the file is a temporary, there's no reason to
	 * proceed further.
	 */
	if (F_ISSET(dbmfp, MP_READONLY))
		return (0);

	LOCKREGION(dbmp);
	is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
	UNLOCKREGION(dbmp);
	if (is_tmp)
		return (0);

	return (__memp_fsync(dbmfp));
}
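
/*
 * Usage sketch (hypothetical caller): a thread that has dirtied pages
 * through this file handle can flush just that file, rather than the
 * whole pool, before depending on the data being on disk.  The flags
 * below are standard mpool flags; the surrounding error handling is an
 * assumption:
 *
 *	if ((ret = memp_fget(dbmfp, &pgno, DB_MPOOL_CREATE, &addr)) != 0)
 *		return (ret);
 *	(modify the page at addr)
 *	if ((ret = memp_fput(dbmfp, addr, DB_MPOOL_DIRTY)) != 0)
 *		return (ret);
 *	ret = memp_fsync(dbmfp);
 *	(ret may be DB_INCOMPLETE if buffers remained pinned)
 */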

/*
 * __mp_xxx_fd --
 *	Return a file descriptor for DB 1.85 compatibility locking.
 *
 * PUBLIC: int __mp_xxx_fd __P((DB_MPOOLFILE *, int *));
 */
int
__mp_xxx_fd(dbmfp, fdp)
	DB_MPOOLFILE *dbmfp;
	int *fdp;
{
	int ret;

	/*
	 * This is a truly spectacular layering violation, intended ONLY to
	 * support compatibility for the DB 1.85 DB->fd call.
	 *
	 * Sync the database file to disk, creating the file as necessary.
	 *
	 * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
	 * The MP_READONLY test isn't interesting because we will either
	 * already have a file descriptor (we opened the database file for
	 * reading) or we aren't readonly (we created the database, which
	 * requires write privileges).  The MP_TEMP test isn't interesting
	 * because we want to write to the backing file regardless, so that
	 * we get a file descriptor to return.
	 */
	ret = dbmfp->fd == -1 ? __memp_fsync(dbmfp) : 0;

	return ((*fdp = dbmfp->fd) == -1 ? ENOENT : ret);
}

/*
 * __memp_fsync --
 *	Mpool file internal sync function.
 */
static int
__memp_fsync(dbmfp)
	DB_MPOOLFILE *dbmfp;
{
	BH *bhp, **bharray;
	DB_MPOOL *dbmp;
	MPOOL *mp;
	size_t mf_offset;
	int ar_cnt, incomplete, nalloc, next, ret, wrote;

	ret = 0;
	dbmp = dbmfp->dbmp;
	mp = dbmp->mp;
	mf_offset = R_OFFSET(dbmp, dbmfp->mfp);

	/*
	 * We try to write the buffers in page order: it should reduce seeks
	 * by the underlying filesystem and possibly reduce the actual number
	 * of writes.  We don't want to hold the region lock while we write
	 * the buffers, so we only hold it locked while we create the list.
	 * Get a good-sized block of memory to hold buffer pointers; we don't
	 * want to run out.
	 */
	LOCKREGION(dbmp);
	nalloc = mp->stat.st_page_dirty + mp->stat.st_page_dirty / 2 + 10;
	UNLOCKREGION(dbmp);

	if ((ret = __os_malloc(nalloc * sizeof(BH *), NULL, &bharray)) != 0)
		return (ret);

	LOCKREGION(dbmp);

	/*
	 * Walk the LRU list of buffer headers, and get a list of buffers to
	 * write for this MPOOLFILE.
	 */
	ar_cnt = incomplete = 0;
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (!F_ISSET(bhp, BH_DIRTY) || bhp->mf_offset != mf_offset)
			continue;
		if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
			incomplete = 1;
			continue;
		}

		++bhp->ref;
		bharray[ar_cnt] = bhp;

		/*
		 * If we've run out of space to store buffer references, we're
		 * screwed, as we don't want to realloc the array while
		 * holding a region lock.  Set the incomplete flag -- the only
		 * way we can get here is if the file is active in the buffer
		 * cache, which is the same thing as finding pinned buffers.
		 */
		if (++ar_cnt >= nalloc) {
			incomplete = 1;
			break;
		}
	}

	UNLOCKREGION(dbmp);

	/* Sort the buffers we're going to write. */
	if (ar_cnt != 0)
		qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);

	LOCKREGION(dbmp);

	/* Walk the array, writing buffers. */
	for (next = 0; next < ar_cnt; ++next) {
		/*
		 * It's possible for a thread to have gotten the buffer since
		 * we listed it for writing.  If the reference count is still
		 * 1, we're the only ones using the buffer, go ahead and write.
		 * If it's >1, then skip the buffer.
		 */
		if (bharray[next]->ref > 1) {
			incomplete = 1;

			--bharray[next]->ref;
			continue;
		}

		/* Write the buffer. */
		ret = __memp_pgwrite(dbmfp, bharray[next], NULL, &wrote);

		/* Release the buffer. */
		--bharray[next]->ref;

		/* If there's an error, release the rest of the buffers. */
		if (ret != 0) {
			while (++next < ar_cnt)
				--bharray[next]->ref;
			goto err;
		}

		/*
		 * If we didn't write the buffer for some reason, don't return
		 * success.
		 */
		if (!wrote)
			incomplete = 1;
	}

err:	UNLOCKREGION(dbmp);

	__os_free(bharray, nalloc * sizeof(BH *));

	/*
	 * Sync the underlying file as the last thing we do, so that the OS
	 * has maximal opportunity to flush buffers before we request it.
	 *
	 * XXX:
	 * Don't lock the region around the sync; fsync(2) has no atomicity
	 * issues.
	 */
	if (ret == 0)
		return (incomplete ? DB_INCOMPLETE : __os_fsync(dbmfp->fd));
	return (ret);
}

/*
 * memp_trickle --
 *	Keep a specified percentage of the buffers clean.
 */
int
memp_trickle(dbmp, pct, nwrotep)
	DB_MPOOL *dbmp;
	int pct, *nwrotep;
{
	BH *bhp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	db_pgno_t pgno;
	u_long total;
	int ret, wrote;

	MP_PANIC_CHECK(dbmp);

	mp = dbmp->mp;
	if (nwrotep != NULL)
		*nwrotep = 0;

	if (pct < 1 || pct > 100)
		return (EINVAL);

	LOCKREGION(dbmp);

	/*
	 * If there are sufficient clean buffers, or no buffers or no dirty
	 * buffers, we're done.
	 *
	 * XXX
	 * Using st_page_clean and st_page_dirty is our only choice at the
	 * moment, but it's not as correct as we might like in the presence
	 * of pools with more than one buffer size, as a free 512-byte buffer
	 * isn't the same as a free 8K buffer.
	 */
loop:	total = mp->stat.st_page_clean + mp->stat.st_page_dirty;
	if (total == 0 || mp->stat.st_page_dirty == 0 ||
	    (mp->stat.st_page_clean * 100) / total >= (u_long)pct) {
		UNLOCKREGION(dbmp);
		return (0);
	}

	/* Loop until we write a buffer. */
	for (bhp = SH_TAILQ_FIRST(&mp->bhq, __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
		if (bhp->ref != 0 ||
		    !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
			continue;

		mfp = R_ADDR(dbmp, bhp->mf_offset);

		/*
		 * We can't write to temporary files -- see the comment in
		 * mp_bh.c:__memp_bhwrite().
		 */
		if (F_ISSET(mfp, MP_TEMP))
			continue;

		pgno = bhp->pgno;
		if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
			goto err;

		/*
		 * Any process syncing the shared memory buffer pool had better
		 * be able to write to any underlying file.  Be understanding,
		 * but firm, on this point.
		 */
		if (!wrote) {
			__db_err(dbmp->dbenv, "%s: unable to flush page: %lu",
			    __memp_fns(dbmp, mfp), (u_long)pgno);
			ret = EPERM;
			goto err;
		}

		++mp->stat.st_page_trickle;
		if (nwrotep != NULL)
			++*nwrotep;
		goto loop;
	}

	/* No more buffers to write. */
	ret = 0;

err:	UNLOCKREGION(dbmp);
	return (ret);
}
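
/*
 * Usage sketch (hypothetical caller): a background "trickle" thread keeps
 * some fraction of the cache clean so that threads allocating new pages
 * rarely have to write a dirty buffer themselves.  The 20% target and the
 * one-second back-off are assumptions, not values taken from this code:
 *
 *	int nwrote, ret;
 *
 *	for (;;) {
 *		if ((ret = memp_trickle(dbmp, 20, &nwrote)) != 0)
 *			break;
 *		if (nwrote == 0)
 *			(void)__os_sleep(1, 0);
 *	}
 */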

static int
__bhcmp(p1, p2)
	const void *p1, *p2;
{
	BH *bhp1, *bhp2;

	bhp1 = *(BH * const *)p1;
	bhp2 = *(BH * const *)p2;

	/* Sort by file (shared memory pool offset). */
	if (bhp1->mf_offset < bhp2->mf_offset)
		return (-1);
	if (bhp1->mf_offset > bhp2->mf_offset)
		return (1);

	/*
	 * !!!
	 * Defend against badly written quicksort code calling the comparison
	 * function with two identical pointers (e.g., WATCOM C++ (Power++)).
	 */
	if (bhp1->pgno < bhp2->pgno)
		return (-1);
	if (bhp1->pgno > bhp2->pgno)
		return (1);
	return (0);
}