xref: /illumos-gate/usr/src/cmd/sendmail/db/mp/mp_fget.c (revision 7c478bd9)
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */
#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)mp_fget.c	10.53 (Sleepycat) 11/16/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <string.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "mp.h"
#include "common_ext.h"

/*
 * memp_fget --
 *	Get a page from the file.
 */
int
memp_fget(dbmfp, pgnoaddr, flags, addrp)
	DB_MPOOLFILE *dbmfp;
	db_pgno_t *pgnoaddr;
	u_int32_t flags;
	void *addrp;
{
	BH *bhp;
	DB_MPOOL *dbmp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	size_t bucket, mf_offset;
	u_int32_t st_hsearch;
	int b_incr, first, ret;

	dbmp = dbmfp->dbmp;
	mp = dbmp->mp;
	mfp = dbmfp->mfp;

	MP_PANIC_CHECK(dbmp);

	/*
	 * Validate arguments.
	 *
	 * !!!
	 * Don't test for DB_MPOOL_CREATE and DB_MPOOL_NEW flags for readonly
	 * files here, and create non-existent pages in readonly files if the
	 * flags are set, later.  The reason is that the hash access method
	 * wants to get empty pages that don't really exist in readonly files.
	 * The only alternative is for hash to write the last "bucket" all the
	 * time, which we don't want to do because one of our big goals in life
	 * is to keep database files small.  It's sleazy as hell, but we catch
	 * any attempt to actually write the file in memp_fput().
	 */
#define	OKFLAGS	(DB_MPOOL_CREATE | DB_MPOOL_LAST | DB_MPOOL_NEW)
	if (flags != 0) {
		if ((ret =
		    __db_fchk(dbmp->dbenv, "memp_fget", flags, OKFLAGS)) != 0)
			return (ret);

		switch (flags) {
		case DB_MPOOL_CREATE:
		case DB_MPOOL_LAST:
		case DB_MPOOL_NEW:
		case 0:
			break;
		default:
			return (__db_ferr(dbmp->dbenv, "memp_fget", 1));
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * XXX
	 * We want to switch threads as often as possible.  Yield every time
	 * we get a new page to ensure contention.
	 */
	if (DB_GLOBAL(db_pageyield))
		__os_yield(1);
#endif

	/* Initialize remaining local variables. */
	mf_offset = R_OFFSET(dbmp, mfp);
	bhp = NULL;
	st_hsearch = 0;
	b_incr = ret = 0;

	/* Determine the hash bucket where this page will live. */
	bucket = BUCKET(mp, mf_offset, *pgnoaddr);

	LOCKREGION(dbmp);

	/*
	 * Check for the last or last + 1 page requests.
	 *
	 * Examine and update the file's last_pgno value.  We don't care if
	 * the last_pgno value immediately changes due to another thread --
	 * at this instant in time, the value is correct.  We do increment the
	 * current last_pgno value if the thread is asking for a new page,
	 * however, to ensure that two threads creating pages don't get the
	 * same one.
	 */
	if (LF_ISSET(DB_MPOOL_LAST | DB_MPOOL_NEW)) {
		if (LF_ISSET(DB_MPOOL_NEW))
			++mfp->last_pgno;
		*pgnoaddr = mfp->last_pgno;
		bucket = BUCKET(mp, mf_offset, mfp->last_pgno);

		if (LF_ISSET(DB_MPOOL_NEW))
			goto alloc;
	}

	/*
	 * If mmap'ing the file and the page is not past the end of the file,
	 * just return a pointer.
	 *
	 * The page may be past the end of the file, so check the page number
	 * argument against the original length of the file.  If we previously
	 * returned pages past the original end of the file, last_pgno will
	 * have been updated to match the "new" end of the file, and checking
	 * against it would return pointers past the end of the mmap'd region.
	 *
	 * If another process has opened the file for writing since we mmap'd
	 * it, we will start playing the game by their rules, i.e. everything
	 * goes through the cache.  All pages previously returned will be safe,
	 * as long as the correct locking protocol was observed.
	 *
	 * XXX
	 * We don't discard the map because we don't know when all of the
	 * pages will have been discarded from the process' address space.
	 * It would be possible to do so by reference counting the open
	 * pages from the mmap, but it's unclear to me that it's worth it.
	 */
	if (dbmfp->addr != NULL && F_ISSET(mfp, MP_CAN_MMAP))
		if (*pgnoaddr > mfp->orig_last_pgno) {
			/*
			 * !!!
			 * See the comment above about non-existent pages and
			 * the hash access method.
			 */
			if (!LF_ISSET(DB_MPOOL_CREATE)) {
				__db_err(dbmp->dbenv,
				    "%s: page %lu doesn't exist",
				    __memp_fn(dbmfp), (u_long)*pgnoaddr);
				ret = EINVAL;
				goto err;
			}
		} else {
			*(void **)addrp =
			    R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
			++mp->stat.st_map;
			++mfp->stat.st_map;
			goto done;
		}

	/* Search the hash chain for the page. */
	for (bhp = SH_TAILQ_FIRST(&dbmp->htab[bucket], __bh);
	    bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
		++st_hsearch;
		if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
			continue;

		/* Check for reference count overflow. */
		if (bhp->ref == UINT16_T_MAX) {
			__db_err(dbmp->dbenv,
			    "%s: page %lu: reference count overflow",
			    __memp_fn(dbmfp), (u_long)bhp->pgno);
			ret = EINVAL;
			goto err;
		}

		/*
		 * Increment the reference count.  We may discard the region
		 * lock as we evaluate and/or read the buffer, so we need to
		 * ensure that it doesn't move and that its contents remain
		 * unchanged.
		 */
		++bhp->ref;
		b_incr = 1;

		/*
		 * Any buffer we find might be trouble.
		 *
		 * BH_LOCKED --
		 * I/O is in progress.  Because we've incremented the buffer
		 * reference count, we know the buffer can't move.  Unlock
		 * the region lock, wait for the I/O to complete, and reacquire
		 * the region.
		 */
		for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
			UNLOCKREGION(dbmp);

			/*
			 * Explicitly yield the processor if it's not the first
			 * pass through this loop -- if we don't, we might end
			 * up running to the end of our CPU quantum as we will
			 * simply be swapping between the two locks.
			 */
			if (!first)
				__os_yield(1);

			LOCKBUFFER(dbmp, bhp);
			/* Wait for I/O to finish... */
			UNLOCKBUFFER(dbmp, bhp);
			LOCKREGION(dbmp);
		}

		/*
		 * BH_TRASH --
		 * The contents of the buffer are garbage.  Shouldn't happen,
		 * and this read is likely to fail, but might as well try.
		 */
		if (F_ISSET(bhp, BH_TRASH))
			goto reread;

		/*
		 * BH_CALLPGIN --
		 * The buffer was converted so it could be written, and the
		 * contents need to be converted again.
		 */
		if (F_ISSET(bhp, BH_CALLPGIN)) {
			if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
				goto err;
			F_CLR(bhp, BH_CALLPGIN);
		}

		++mp->stat.st_cache_hit;
		++mfp->stat.st_cache_hit;
		*(void **)addrp = bhp->buf;
		goto done;
	}

alloc:	/* Allocate new buffer header and data space. */
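	/*
	 * The header and the page it describes live in a single allocation:
	 * BH ends in a one-byte buf[] placeholder, so subtract that byte and
	 * add the page size to get the size of the combined chunk.
	 */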
	if ((ret = __memp_alloc(dbmp, sizeof(BH) -
	    sizeof(u_int8_t) + mfp->stat.st_pagesize, NULL, &bhp)) != 0)
		goto err;

#ifdef DIAGNOSTIC
	if ((ALIGNTYPE)bhp->buf & (sizeof(size_t) - 1)) {
		__db_err(dbmp->dbenv,
		    "Internal error: BH data NOT size_t aligned.");
		ret = EINVAL;
		goto err;
	}
#endif
	/* Initialize the BH fields. */
	memset(bhp, 0, sizeof(BH));
	LOCKINIT(dbmp, &bhp->mutex);
	bhp->ref = 1;
	bhp->pgno = *pgnoaddr;
	bhp->mf_offset = mf_offset;

	/*
	 * Prepend the bucket header to the head of the appropriate MPOOL
	 * bucket hash list.  Append the bucket header to the tail of the
	 * MPOOL LRU chain.
	 */
	SH_TAILQ_INSERT_HEAD(&dbmp->htab[bucket], bhp, hq, __bh);
	SH_TAILQ_INSERT_TAIL(&mp->bhq, bhp, q);

	/*
	 * If we created the page, zero it out and continue.
	 *
	 * !!!
	 * Note: DB_MPOOL_NEW specifically doesn't call the pgin function.
	 * If DB_MPOOL_CREATE is used, then the application's pgin function
	 * has to be able to handle pages of 0's -- if it uses DB_MPOOL_NEW,
	 * it can detect all of its page creates, and not bother.
	 *
	 * Otherwise, read the page into memory, optionally creating it if
	 * DB_MPOOL_CREATE is set.
	 */
	if (LF_ISSET(DB_MPOOL_NEW)) {
		if (mfp->clear_len == 0)
			memset(bhp->buf, 0, mfp->stat.st_pagesize);
		else {
			memset(bhp->buf, 0, mfp->clear_len);
#ifdef DIAGNOSTIC
			memset(bhp->buf + mfp->clear_len, 0xdb,
			    mfp->stat.st_pagesize - mfp->clear_len);
#endif
		}

		++mp->stat.st_page_create;
		++mfp->stat.st_page_create;
	} else {
		/*
		 * It's possible for the read function to fail, which means
		 * that we fail as well.  Note, the __memp_pgread() function
		 * discards the region lock, so the buffer must be pinned
		 * down so that it cannot move and its contents are unchanged.
		 */
reread:		if ((ret = __memp_pgread(dbmfp,
		    bhp, LF_ISSET(DB_MPOOL_CREATE))) != 0) {
			/*
			 * !!!
			 * Discard the buffer unless another thread is waiting
			 * on our I/O to complete.  Regardless, the header has
			 * the BH_TRASH flag set.
			 */
			if (bhp->ref == 1)
				__memp_bhfree(dbmp, mfp, bhp, 1);
			goto err;
		}

		++mp->stat.st_cache_miss;
		++mfp->stat.st_cache_miss;
	}

	/*
	 * If we're returning a page after our current notion of the last-page,
	 * update our information.  Note, there's no way to un-instantiate this
	 * page, it's going to exist whether it's returned to us dirty or not.
	 */
	if (bhp->pgno > mfp->last_pgno)
		mfp->last_pgno = bhp->pgno;

	++mp->stat.st_page_clean;
	*(void **)addrp = bhp->buf;

done:	/* Update the chain search statistics. */
	if (st_hsearch) {
		++mp->stat.st_hash_searches;
		if (st_hsearch > mp->stat.st_hash_longest)
			mp->stat.st_hash_longest = st_hsearch;
		mp->stat.st_hash_examined += st_hsearch;
	}

	++dbmfp->pinref;

	UNLOCKREGION(dbmp);

	return (0);

err:	/* Discard our reference. */
	if (b_incr)
		--bhp->ref;
	UNLOCKREGION(dbmp);

	*(void **)addrp = NULL;
	return (ret);
}
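
/*
 * A minimal usage sketch, assuming the standard mpool interface: a caller
 * pins a page with memp_fget() and later releases the pin with memp_fput().
 * The helper name and the MEMP_FGET_EXAMPLE guard below are hypothetical,
 * not part of the mpool API.
 */
#ifdef MEMP_FGET_EXAMPLE
static int
example_touch_page(dbmfp, pgno)
	DB_MPOOLFILE *dbmfp;
	db_pgno_t pgno;
{
	u_int8_t *pagep;
	int ret;

	/* Pin the page, creating it if it does not yet exist. */
	if ((ret = memp_fget(dbmfp, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
		return (ret);

	pagep[0] = 1;			/* Modify the pinned page. */

	/* Drop the pin; DB_MPOOL_DIRTY schedules the page to be written. */
	return (memp_fput(dbmfp, pagep, DB_MPOOL_DIRTY));
}
#endif /* MEMP_FGET_EXAMPLE */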