/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994-2000 by Sun Microsystems, Inc.
 * All rights reserved.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>		/* for bzero */
#include <sys/machlock.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/debug.h>

#include "tnf_buf.h"

/*
 * Defines
 */

#define	TNFW_B_ALLOC_LO		0x1
#define	TNFW_B_MAXALLOCTRY	32

#define	TNF_MAXALLOC		(TNF_BLOCK_SIZE - sizeof (tnf_block_header_t))
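
/*
 * Buffer layout, as used by the code below: the file header and
 * directory occupy the first TNF_DIRECTORY_SIZE bytes of the buffer;
 * tag (non-reclaimable) blocks are carved from there up to
 * TNFW_B_DATA_BLOCK_BEGIN; ordinary data blocks fill the remainder.
 */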

/*
 * Globals
 */

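/*
 * Tracing state: we start with no buffer and stopped.  Block
 * allocation proceeds only while the state is exactly TNFW_B_RUNNING,
 * and the state is set to TNFW_B_BROKEN when no free block can be
 * found.
 */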
TNFW_B_STATE tnfw_b_state = TNFW_B_NOBUFFER | TNFW_B_STOPPED;

/*
 * Locals
 */

static int	spinlock_spl;

/*
 * Declarations
 */

static tnf_block_header_t *tnfw_b_alloc_block(tnf_buf_file_header_t *,
    enum tnf_alloc_mode);

/*
 * (Private) Allocate a new block.  Return NULL on failure and mark
 * tracing as broken.  'istag' is non-zero if the block is to be
 * non-reclaimable.  All blocks are returned A-locked.
 */

static tnf_block_header_t *
tnfw_b_alloc_block(tnf_buf_file_header_t *fh, enum tnf_alloc_mode istag)
{
	tnf_block_header_t	*block;
	ulong_t			bcount;
	ulong_t			tmp_bn, bn, new_bn;
	ulong_t			tmp_gen, gen, new_gen;
	ulong_t			next;
	int			i;
	lock_t			*lp;
	ushort_t		spl;

	if (tnfw_b_state != TNFW_B_RUNNING)
		return (NULL);

	lp = &fh->lock;

	/*
	 * Check the reserved area first for tag block allocations.
	 * Tag allocations are rare, so that code is moved out of line.
	 */
	if (istag)
		goto try_reserved;

try_loop:
	/*
	 * Search for a block, using hint as starting point.
	 */

	bcount = fh->com.block_count;	/* total block count */

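	/*
	 * The hint is a (generation, block) pair kept in two slots
	 * indexed by the low bit of the generation.  update_hint below
	 * fills a slot before publishing the generation that selects
	 * it, so a reader that loads the generation first gets a sane
	 * pairing; the hint is only a starting point anyway.
	 */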
	gen = fh->next_alloc.gen;
	bn = fh->next_alloc.block[gen & TNFW_B_ALLOC_LO];

	for (i = 0; i < TNFW_B_MAXALLOCTRY; i++) {

		/*
		 * Calculate next (not this) block to look for.
		 * Needed for updating the hint.
		 */
		if ((new_bn = bn + 1) >= bcount) {
			new_bn = TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT;
			new_gen = gen + 1;
		} else
			new_gen = gen;

		/*
		 * Try to reserve candidate block
		 */
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)
			((char *)fh + (bn << TNF_BLOCK_SHIFT));

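		/*
		 * The block is ours only if we win its A-lock, it was
		 * last used in an older generation, and we also win
		 * its B-lock; if the generation or B-lock test fails,
		 * drop the A-lock and retry with a fresher hint.
		 */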
		if (lock_try(&block->A_lock)) {
			if (block->generation < gen &&
			    lock_try(&block->B_lock))
				goto update_hint;
			else
				lock_clear(&block->A_lock);
		}

		/* Reload hint values */
		gen = fh->next_alloc.gen;
		bn = fh->next_alloc.block[gen & TNFW_B_ALLOC_LO];

		/* adjust if we know a little better than the hint */
		if ((new_bn > bn && new_gen == gen) || new_gen > gen) {
			gen = new_gen;
			bn = new_bn;
		}
	}

	goto loop_fail;

update_hint:
	/*
	 * Re-read the hint and update it only if we'll be increasing it.
	 */
	lock_set_spl(lp, spinlock_spl, &spl);
	tmp_gen = fh->next_alloc.gen;
	tmp_bn = fh->next_alloc.block[tmp_gen & TNFW_B_ALLOC_LO];

	if ((new_gen == tmp_gen && new_bn > tmp_bn) || new_gen > tmp_gen) {
		/*
		 * Order is important here!  It is the write to
		 * next_alloc.gen that atomically records the new
		 * value.
		 */
		fh->next_alloc.block[new_gen & TNFW_B_ALLOC_LO] = new_bn;
		fh->next_alloc.gen = new_gen;
	}
	lock_clear_splx(lp, spl);

got_block:
	/*
	 * Initialize and return the block
	 */
	/* ASSERT(block->tag == TNF_BLOCK_HEADER_TAG); */
	block->bytes_valid = sizeof (tnf_block_header_t);
	block->next_block = NULL;
	/* LINTED assignment of 64-bit integer to 32-bit integer */
	block->generation = istag ? TNF_TAG_GENERATION_NUM : gen;
	/* ASSERT(LOCK_HELD(&block->A_lock)); */
	lock_clear(&block->B_lock);
	return (block);

try_reserved:
	/*
	 * Look for a free tag block in reserved area
	 */
	next = fh->next_tag_alloc;
	while (next < (TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT)) {
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)
			((char *)fh + (next << TNF_BLOCK_SHIFT));
		next++;
		/*
		 * See if block is unclaimed.
		 * Don't bother clearing the A-lock if the
		 * block was claimed and released, since it
		 * will never be reallocated anyway.
		 */
		if (lock_try(&block->A_lock) &&
		    block->generation == 0) {
			lock_set_spl(lp, spinlock_spl, &spl);
			if (next > fh->next_tag_alloc)
				fh->next_tag_alloc = next;
			lock_clear_splx(lp, spl);
			goto got_block;
		}
	}
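	/* Reserved area exhausted: fall back to the regular block pool. */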
	goto try_loop;

loop_fail:
	/*
	 * Only get here if we failed the for loop
	 */
	ASSERT(i == TNFW_B_MAXALLOCTRY);
	tnfw_b_state = TNFW_B_BROKEN;
#ifdef DEBUG
	prom_printf("kernel probes: alloc_block failed\n");
#endif
	return (NULL);
}

/*
 * Allocate size bytes from the trace buffer.  Return NULL on failure,
 * and mark tracing as broken.  We're guaranteed that the buffer will
 * not be deallocated while we're in this routine.
 * Requests are rounded up to a multiple of 8 bytes; allocations are
 * word-aligned.
 */
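
/*
 * Usage sketch (hypothetical caller: my_record_t and the wcb pointer
 * are supplied by the writer layer; istag == 0 requests an ordinary,
 * reclaimable block):
 *
 *	my_record_t *p;
 *
 *	p = tnfw_b_alloc(wcb, sizeof (my_record_t), 0);
 *	if (p == NULL)
 *		return;			(tracing is now broken)
 */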

void *
tnfw_b_alloc(TNFW_B_WCB *wcb, size_t size, enum tnf_alloc_mode istag)
{
	TNFW_B_POS		*pos;
	ushort_t		offset;
	void			*destp;
	tnf_block_header_t	*block, *new_block;

	pos = &wcb->tnfw_w_pos;	/* common case */
	if (istag)
		pos = &wcb->tnfw_w_tag_pos;
	block = pos->tnfw_w_block;
	offset = pos->tnfw_w_write_off;
	/* Round size up to a multiple of 8. */
	size = (size + 7) & ~7;
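	/*
	 * e.g. a 13-byte request becomes 16 bytes, so write offsets
	 * always advance in multiples of 8.
	 */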

	if (block == NULL || offset + size > TNF_BLOCK_SIZE) {

		/* Get a new block */
		/* LINTED pointer cast may result in improper alignment */
		new_block = tnfw_b_alloc_block(TNF_FILE_HEADER(), istag);
		if (new_block == NULL)
			/* tracing has been marked as broken at this point */
			return (NULL);

		/* ASSERT(size <= TNF_MAXALLOC); */

		/*
		 * If the old block is clean (i.e., we're in a new
		 * transaction), just release it.  Else, pad it out
		 * and attach it to the list of uncommitted blocks.
		 */
		if (block != NULL) {
			if (block->bytes_valid == offset &&
			    !pos->tnfw_w_dirty) {
				/* block is clean: release it */
				lock_clear(&block->A_lock);
			} else {
				/* block is dirty */
				ulong_t *p, *q;

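				/*
				 * Pad from the write offset to the
				 * end of the block with TNF_NULL so
				 * readers never see stale words in
				 * the padded area.
				 */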
				/* LINTED pointer cast */
				p = (ulong_t *)((char *)block + offset);
				/* LINTED pointer cast */
				q = (ulong_t *)((char *)block + TNF_BLOCK_SIZE);
				while (p < q)
					*p++ = TNF_NULL;

				/* append block to release list */
				new_block->next_block = block;

				/* we have at least one dirty block */
				pos->tnfw_w_dirty = 1;
			}
		}

		/* make new_block the current block */
		pos->tnfw_w_block = block = new_block;
		/* write_off is updated below */
		offset = sizeof (tnf_block_header_t);
		/* ASSERT(new_block->bytes_valid == offset); */
	}

	destp = (char *)block + offset;
	/* update write_off */
	pos->tnfw_w_write_off = offset + size;
	/*
	 * Unconditionally write a 0 into the last word allocated,
	 * in case we left an alignment gap.  (Assume that doing an
	 * unconditional write is cheaper than testing and branching
	 * around the write half the time.)
	 */
	/* LINTED pointer cast may result in improper alignment */
	*((int *)((char *)destp + size - sizeof (int))) = 0;
	return (destp);
}

/*
 * Allocate a directory entry.
 */
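/*
 * Cells are tnf_ref32_t slots handed out bump-pointer style under the
 * file header lock; NULL is returned once next_fw_alloc reaches
 * TNFW_B_FW_ZONE.
 */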

/*ARGSUSED0*/
void *
tnfw_b_fw_alloc(TNFW_B_WCB *wcb)
{
	tnf_buf_file_header_t	*fh;
	lock_t			*lp;
	ushort_t		spl;
	caddr_t			cell;
	ulong_t			next;

	/* LINTED pointer cast may result in improper alignment */
	fh = TNF_FILE_HEADER();
	lp = &fh->lock;

	lock_set_spl(lp, spinlock_spl, &spl);
	next = fh->next_fw_alloc;
	if (next < TNFW_B_FW_ZONE) {
		cell = (caddr_t)fh + next;
		fh->next_fw_alloc = next + sizeof (tnf_ref32_t);
	} else
		cell = NULL;
	lock_clear_splx(lp, spl);

	return (cell);
}

/*
 * Initialize a buffer.
 */

void
tnfw_b_init_buffer(caddr_t buf, size_t size)
{
	int	gen_shift;
	int	i;
	ulong_t	b;
	ulong_t	blocks;
	tnf_block_header_t *block;
	tnf_buf_file_header_t *fh;

	/* Compute platform-specific spinlock_spl */
	spinlock_spl = __ipltospl(LOCK_LEVEL + 1);

	/* LINTED pointer cast may result in improper alignment */
	fh = (tnf_buf_file_header_t *)buf;

	/* LINTED logical expression always true: op "||" */
	ASSERT(TNF_DIRECTORY_SIZE > TNF_BLOCK_SIZE);

	/*
	 * This assertion is needed because we cannot change
	 * sys/tnf_com.h this late in the release cycle, but we need the
	 * interface in sys/machlock.h for locking operations.
	 */
	/* LINTED logical expression always true: op "||" */
	ASSERT(sizeof (tnf_byte_lock_t) == sizeof (lock_t));

	/* Calculate number of blocks */
	blocks = size >> TNF_BLOCK_SHIFT;

	/* Calculate generation shift */
	gen_shift = 0;
	b = 1;
	while (b < blocks) {
		b <<= 1;
		++gen_shift;
	}
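	/*
	 * gen_shift is now the smallest n such that (1 << n) >= blocks;
	 * e.g. blocks == 128 gives gen_shift == 7, making file_log_size
	 * below 7 + TNF_BLOCK_SHIFT.
	 */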
	ASSERT(gen_shift < 32);

	/* fill in file header */
	/* magic number comes last */
	/* LINTED constant truncated by assignment */
	fh->com.tag = TNF_FILE_HEADER_TAG;
	fh->com.file_version = TNF_FILE_VERSION;
	fh->com.file_header_size = sizeof (tnf_file_header_t);
	fh->com.file_log_size = gen_shift + TNF_BLOCK_SHIFT;
	fh->com.block_header_size = sizeof (tnf_block_header_t);
	fh->com.block_size = TNF_BLOCK_SIZE;
	fh->com.directory_size = TNF_DIRECTORY_SIZE;
	/* LINTED assignment of 64-bit integer to 32-bit integer */
	fh->com.block_count = blocks;
	/* com.blocks_valid is unused */
	fh->next_alloc.gen = 1;
	fh->next_alloc.block[0] = 0;
	fh->next_alloc.block[1] = TNFW_B_DATA_BLOCK_BEGIN >> TNF_BLOCK_SHIFT;
	fh->next_tag_alloc = TNF_DIRECTORY_SIZE >> TNF_BLOCK_SHIFT;
	fh->next_fw_alloc = TNF_DIRENT_LAST + 4;
	LOCK_INIT_CLEAR(&fh->lock);

	(void) bzero(buf + sizeof (*fh), TNF_DIRECTORY_SIZE - sizeof (*fh));
	i = TNF_DIRECTORY_SIZE >> TNF_BLOCK_SHIFT;
	for (; i < blocks; ++i) {
		/* LINTED pointer cast may result in improper alignment */
		block = (tnf_block_header_t *)(buf + (i << TNF_BLOCK_SHIFT));
		block->tag = (tnf_ref32_t)TNF_BLOCK_HEADER_TAG;
		block->generation = 0;
		block->bytes_valid = sizeof (tnf_block_header_t);
		LOCK_INIT_CLEAR(&block->A_lock);
		LOCK_INIT_CLEAR(&block->B_lock);
	}

	/* snap in magic number last: marks the buffer as fully initialized */
	fh->magic = TNF_MAGIC;
}
410