/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * PX Interrupt Block implementation
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/systm.h>		/* panicstr */
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* intr_dist_add */
#include <sys/ddi_impldefs.h>
#include <sys/cpuvar.h>
#include <sys/time.h>
#include "px_obj.h"

/*LINTLIBRARY*/

static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight);
static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p,
    uint32_t cpu_id);
static uint_t px_ib_intr_reset(void *arg);
static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
    char *path_name, int instance);

extern uint64_t xc_tick_jump_limit;

int
px_ib_attach(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	px_ib_t		*ib_p;
	sysino_t	sysino;
	px_fault_t	*fault_p = &px_p->px_fault;

	DBG(DBG_IB, dip, "px_ib_attach\n");

	if (px_lib_intr_devino_to_sysino(px_p->px_dip,
	    px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate interrupt block state structure and link it to
	 * the px state structure.
	 */
	ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP);
	px_p->px_ib_p = ib_p;
	ib_p->ib_px_p = px_p;
	ib_p->ib_ino_lst = (px_ino_t *)NULL;

	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);

	bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);

	intr_dist_add_weighted(px_ib_intr_redist, ib_p);

	/*
	 * Initialize PEC fault data structure
	 */
	fault_p->px_fh_dip = dip;
	fault_p->px_fh_sysino = sysino;
	fault_p->px_err_func = px_err_dmc_pec_intr;
	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC];

	return (DDI_SUCCESS);
}

void
px_ib_detach(px_t *px_p)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	dev_info_t	*dip = px_p->px_dip;

	DBG(DBG_IB, dip, "px_ib_detach\n");

	bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
	intr_dist_rem_weighted(px_ib_intr_redist, ib_p);

	mutex_destroy(&ib_p->ib_ino_lst_mutex);
	mutex_destroy(&ib_p->ib_intr_lock);

	px_ib_free_ino_all(ib_p);

	px_p->px_ib_p = NULL;
	kmem_free(ib_p, sizeof (px_ib_t));
}

void
px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	sysino_t	sysino;

	/*
	 * Determine the cpu for the interrupt
	 */
	mutex_enter(&ib_p->ib_intr_lock);

	DBG(DBG_IB, px_p->px_dip,
	    "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);

	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino,
	    &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, px_p->px_dip, "px_ib_intr_enable: "
		    "px_lib_intr_devino_to_sysino() failed\n");

		mutex_exit(&ib_p->ib_intr_lock);
		return;
	}

	PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id);
	px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE);

	mutex_exit(&ib_p->ib_intr_lock);
}

/*ARGSUSED*/
void
px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait)
{
	sysino_t	sysino;

	mutex_enter(&ib_p->ib_intr_lock);

	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino);

	/* Disable the interrupt */
	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino,
	    &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: "
		    "px_lib_intr_devino_to_sysino() failed\n");

		mutex_exit(&ib_p->ib_intr_lock);
		return;
	}

	PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino);

	mutex_exit(&ib_p->ib_intr_lock);
}

int
px_ib_intr_pend(dev_info_t *dip, sysino_t sysino)
{
	int		ret = DDI_SUCCESS;
	hrtime_t	start_time, prev, curr, interval, jump;
	hrtime_t	intr_timeout;
	intr_state_t	intr_state;

	/* Disable the interrupt */
	PX_INTR_DISABLE(dip, sysino);

	intr_timeout = px_intrpend_timeout;
	jump = TICK_TO_NSEC(xc_tick_jump_limit);

	/* Busy wait on pending interrupt */
	for (curr = start_time = gethrtime(); !panicstr &&
	    ((ret = px_lib_intr_getstate(dip, sysino,
	    &intr_state)) == DDI_SUCCESS) &&
	    (intr_state == INTR_DELIVERED_STATE); /* */) {
		/*
		 * If we have a really large jump in hrtime, it is most
		 * probably because we entered the debugger (or OBP,
		 * in general). So, we adjust the timeout accordingly
		 * to prevent declaring an interrupt timeout. The
		 * master-interrupt mechanism in OBP should deliver
		 * the interrupts properly.
		 */
		prev = curr;
		curr = gethrtime();
		interval = curr - prev;
		if (interval > jump)
			intr_timeout += interval;
		if (curr - start_time > intr_timeout) {
			ret = DDI_FAILURE;
			break;
		}
	}
	return (ret);
}
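
/*
 * Worked example of the jump compensation above (illustrative values
 * only): with an intr_timeout of, say, 10ms, suppose the thread stalls
 * in the debugger for 5s between two gethrtime() calls.  Then interval
 * (5s) exceeds jump, so intr_timeout grows to ~5.01s and the elapsed
 * time check (curr - start_time > intr_timeout) still fails; only time
 * genuinely spent busy-waiting counts toward the timeout.
 */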

void
px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
    boolean_t wait_flag)
{
	uint32_t	old_cpu_id;
	sysino_t	sysino;
	intr_valid_state_t	enabled = 0;

	DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino);

	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_lib_intr_devino_to_sysino() failed, ino 0x%x\n", ino);
		return;
	}

	/* Skip enabling disabled interrupts */
	if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_lib_intr_getvalid() failed, sysino 0x%x\n", sysino);
		return;
	}
	if (!enabled)
		return;

	/* Done if redistributed onto the same cpuid */
	if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_lib_intr_gettarget() failed\n");
		return;
	}
	if (cpu_id == old_cpu_id)
		return;

	/* Wait on pending interrupts */
	if (wait_flag != 0 && px_ib_intr_pend(dip, sysino) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) "
		    "from cpu id 0x%x to 0x%x timeout",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    sysino, ino, old_cpu_id, cpu_id);

		DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);
	}

	PX_INTR_ENABLE(dip, sysino, cpu_id);
}

static void
px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id)
{
	extern kmutex_t pxintr_ks_template_lock;
	hrtime_t ticks;

	/*
	 * Because we are updating two fields in px_ih_t we must hold
	 * pxintr_ks_template_lock to prevent someone from reading the
	 * kstats after we set ih_ticks to 0 and before we increment
	 * ih_nsec to compensate.
	 *
	 * We must also protect against the interrupt arriving and
	 * incrementing ih_ticks between the time we read it and when
	 * we reset it to 0. To do this we use atomic_swap.
	 */

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	mutex_enter(&pxintr_ks_template_lock);
	ticks = atomic_swap_64(&ih_p->ih_ticks, 0);
	ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id);
	mutex_exit(&pxintr_ks_template_lock);
}
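
/*
 * For context, a minimal sketch of the kstat-reader ordering that
 * pxintr_ks_template_lock guards against (hypothetical; the actual
 * kstat update code lives outside this file):
 *
 *	mutex_enter(&pxintr_ks_template_lock);
 *	nsec = ih_p->ih_nsec + tick2ns(ih_p->ih_ticks, cpu_id);
 *	mutex_exit(&pxintr_ks_template_lock);
 *
 * Without the lock, a reader could observe ih_ticks already zeroed but
 * ih_nsec not yet incremented, under-reporting the accumulated time.
 */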


/*
 * Redistribute interrupts of the specified weight. The first call has a
 * weight of weight_max, which can be used to trigger initialization for
 * redistribution. The inos with weight [weight_max, inf.) should be
 * processed on the "weight == weight_max" call.  This first call is
 * followed by calls of decreasing weights; inos of that weight should be
 * processed on each call.  The final call specifies a weight of zero,
 * which can be used to trigger processing of stragglers.
 */
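/*
 * For example (illustrative weights only), with inos of weight 30, 20
 * and 5 and weight_max == 20, the distribution framework might make the
 * following sequence of calls:
 *
 *	px_ib_intr_redist(ib_p, 20, 20);	weight 30 and 20 inos
 *	px_ib_intr_redist(ib_p, 20, 5);		weight 5 ino
 *	px_ib_intr_redist(ib_p, 20, 0);		stragglers, internal intrs
 */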
static void
px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight)
{
	px_ib_t		*ib_p = (px_ib_t *)arg;
	px_t		*px_p = ib_p->ib_px_p;
	dev_info_t	*dip = px_p->px_dip;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_lst;
	int32_t		dweight = 0;
	int		i;

	/* Redistribute internal interrupts */
	if (weight == 0) {
		mutex_enter(&ib_p->ib_intr_lock);
		px_ib_intr_dist_en(dip, intr_dist_cpuid(),
		    px_p->px_inos[PX_INTR_PEC], B_FALSE);
		mutex_exit(&ib_p->ib_intr_lock);

		px_hp_intr_redist(px_p);
	}

	/* Redistribute device interrupts */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	px_msiq_redist(px_p);

	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
		/*
		 * Recompute the sum of the interrupt weights of the
		 * devices sharing this ino on the first call, which is
		 * marked by (weight == weight_max).
		 */
		if (weight == weight_max) {
			ino_p->ino_intr_weight = 0;

			for (ipil_p = ino_p->ino_ipil_p; ipil_p;
			    ipil_p = ipil_p->ipil_next_p) {
				for (i = 0, ih_lst = ipil_p->ipil_ih_head;
				    i < ipil_p->ipil_ih_size; i++,
				    ih_lst = ih_lst->ih_next) {
					dweight = i_ddi_get_intr_weight(
					    ih_lst->ih_dip);
					if (dweight > 0)
						ino_p->ino_intr_weight +=
						    dweight;
				}
			}
		}

		/*
		 * As part of redistributing weighted interrupts across
		 * cpus, the nexus redistributes device interrupts and
		 * updates each cpu's weight. The intent is for the most
		 * lightly weighted cpu to take the next interrupt and
		 * gain weight; a device demanding attention thus gains
		 * more cpu attention by making itself heavy.
		 */
		if ((weight == ino_p->ino_intr_weight) ||
		    ((weight >= weight_max) &&
		    (ino_p->ino_intr_weight >= weight_max))) {
			uint32_t orig_cpuid = ino_p->ino_cpuid;

			if (cpu[orig_cpuid] == NULL)
				orig_cpuid = CPU->cpu_id;

			DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx "
			    "current cpuid 0x%x current default cpuid 0x%x\n",
			    ino_p->ino_sysino, ino_p->ino_cpuid,
			    ino_p->ino_default_cpuid);

			/* select target cpuid and mark ino established */
			if (ino_p->ino_default_cpuid == -1)
				ino_p->ino_cpuid = ino_p->ino_default_cpuid =
				    intr_dist_cpuid();
			else if ((ino_p->ino_cpuid !=
			    ino_p->ino_default_cpuid) &&
			    cpu[ino_p->ino_default_cpuid] &&
			    cpu_intr_on(cpu[ino_p->ino_default_cpuid]))
				ino_p->ino_cpuid = ino_p->ino_default_cpuid;
			else if (!cpu_intr_on(cpu[ino_p->ino_cpuid]))
				ino_p->ino_cpuid = intr_dist_cpuid();

			DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx "
			    "new cpuid 0x%x new default cpuid 0x%x\n",
			    ino_p->ino_sysino, ino_p->ino_cpuid,
			    ino_p->ino_default_cpuid);

			/* Add device weight to targeted cpu. */
			for (ipil_p = ino_p->ino_ipil_p; ipil_p;
			    ipil_p = ipil_p->ipil_next_p) {
				for (i = 0, ih_lst = ipil_p->ipil_ih_head;
				    i < ipil_p->ipil_ih_size; i++,
				    ih_lst = ih_lst->ih_next) {

					dweight = i_ddi_get_intr_weight(
					    ih_lst->ih_dip);
					intr_dist_cpuid_add_device_weight(
					    ino_p->ino_cpuid, ih_lst->ih_dip,
					    dweight);

					/*
					 * Different cpus may have different
					 * clock speeds. To account for this,
					 * whenever an interrupt is moved to a
					 * new CPU, we convert the accumulated
					 * ticks into nsec, based upon the
					 * clock rate of the prior CPU.
					 *
					 * It is possible that the prior CPU
					 * no longer exists. In this case, we
					 * fall back to using this CPU's
					 * clock rate.
					 *
					 * Note that the value in ih_ticks has
					 * already been corrected for any
					 * power savings mode which might have
					 * been in effect.
					 */
					px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst,
					    orig_cpuid);
				}
			}

			/* enable interrupt on new targeted cpu */
			px_ib_intr_dist_en(dip, ino_p->ino_cpuid,
			    ino_p->ino_ino, B_TRUE);
		}
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);
}

/*
 * Reset interrupts to IDLE.  This function is called during
 * panic handling after redistributing interrupts; it's needed to
 * support dumping to network devices after 'sync' from OBP.
 *
 * N.B.  This routine runs in a context where all other threads
 * are permanently suspended.
 */
static uint_t
px_ib_intr_reset(void *arg)
{
	px_ib_t		*ib_p = (px_ib_t *)arg;

	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n");

	if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS)
		return (BF_FATAL);

	return (BF_NONE);
}

/*
 * Locate the px_ino_t structure on ib_p->ib_ino_lst according to ino#;
 * returns NULL if not found.
 */
px_ino_t *
px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num)
{
	px_ino_t	*ino_p = ib_p->ib_ino_lst;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p)
		;

	return (ino_p);
}

px_ino_t *
px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num)
{
	sysino_t	sysino;
	px_ino_t	*ino_p;

	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip,
	    ino_num, &sysino) != DDI_SUCCESS)
		return (NULL);

	ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP);

	ino_p->ino_next_p = ib_p->ib_ino_lst;
	ib_p->ib_ino_lst = ino_p;

	ino_p->ino_ino = ino_num;
	ino_p->ino_sysino = sysino;
	ino_p->ino_ib_p = ib_p;
	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_lopil = 0;
	ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1;

	return (ino_p);
}

px_ino_pil_t *
px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p)
{
	px_ino_pil_t	*ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP);
	px_ino_t	*ino_p;

	if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL)
		ino_p = px_ib_alloc_ino(ib_p, ino_num);

	ASSERT(ino_p != NULL);

	ih_p->ih_next = ih_p;
	ipil_p->ipil_pil = pil;
	ipil_p->ipil_ih_head = ih_p;
	ipil_p->ipil_ih_tail = ih_p;
	ipil_p->ipil_ih_start = ih_p;
	ipil_p->ipil_ih_size = 1;
	ipil_p->ipil_ino_p = ino_p;

	ipil_p->ipil_next_p = ino_p->ino_ipil_p;
	ino_p->ino_ipil_p = ipil_p;
	ino_p->ino_ipil_size++;

	if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil))
		ino_p->ino_lopil = pil;

	return (ipil_p);
}
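
/*
 * The handler list built above is a one-element circular list: ih_next
 * points back at ih_p itself, and head, tail and start all reference
 * that element.  As an illustration of the invariant, after
 * px_ib_ino_add_intr() below links a second handler ih2 onto the same
 * ipil, the list looks like:
 *
 *	head == ih1, ih1->ih_next == ih2, ih2->ih_next == ih1, tail == ih2
 */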

void
px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	ushort_t	pil = ipil_p->ipil_pil;
	px_ino_pil_t	*prev, *next;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	if (ino_p->ino_ipil_p == ipil_p)
		ino_p->ino_ipil_p = ipil_p->ipil_next_p;
	else {
		for (prev = next = ino_p->ino_ipil_p; next != ipil_p;
		    prev = next, next = next->ipil_next_p)
			;

		if (prev)
			prev->ipil_next_p = ipil_p->ipil_next_p;
	}

	kmem_free(ipil_p, sizeof (px_ino_pil_t));

	if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) {
		for (next = ino_p->ino_ipil_p, pil = next->ipil_pil;
		    next; next = next->ipil_next_p) {

			if (pil > next->ipil_pil)
				pil = next->ipil_pil;
		}

		/*
		 * Value stored in pil should be the lowest pil.
		 */
		ino_p->ino_lopil = pil;
	}

	if (ino_p->ino_ipil_size)
		return;

	ino_p->ino_lopil = 0;

	if (ino_p->ino_msiq_p)
		return;

	if (ib_p->ib_ino_lst == ino_p)
		ib_p->ib_ino_lst = ino_p->ino_next_p;
	else {
		px_ino_t	*list = ib_p->ib_ino_lst;

		for (; list->ino_next_p != ino_p; list = list->ino_next_p)
			;
		list->ino_next_p = ino_p->ino_next_p;
	}
}

/*
 * Free all inos when we are detaching.
 */
void
px_ib_free_ino_all(px_ib_t *ib_p)
{
	px_ino_t	*ino_p = ib_p->ib_ino_lst;
	px_ino_t	*next = NULL;

	while (ino_p) {
		next = ino_p->ino_next_p;
		kmem_free(ino_p, sizeof (px_ino_t));
		ino_p = next;
	}
}

/*
 * Locate the px_ino_pil_t structure on ino_p->ino_ipil_p according to
 * pil; returns NULL if not found.
 */
px_ino_pil_t *
px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil)
{
	px_ino_pil_t	*ipil_p = ino_p->ino_ipil_p;

	for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p)
		;

	return (ipil_p);
}

int
px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_ib_t		*ib_p = ino_p->ino_ib_p;
	devino_t	ino = ino_p->ino_ino;
	sysino_t	sysino = ino_p->ino_sysino;
	dev_info_t	*dip = px_p->px_dip;
	cpuid_t		curr_cpu;
	int		ret = DDI_SUCCESS;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
	ASSERT(ib_p == px_p->px_ib_p);

	DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino);

	/* Record the current target cpu before disabling the interrupt */
	if ((ret = px_lib_intr_gettarget(dip, sysino,
	    &curr_cpu)) != DDI_SUCCESS) {
		DBG(DBG_IB, dip,
		    "px_ib_ino_add_intr px_lib_intr_gettarget() failed\n");

		return (ret);
	}

	/* Wait on pending interrupt */
	if ((ret = px_ib_intr_pend(dip, sysino)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending "
		    "sysino 0x%lx(ino 0x%x) timeout",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    sysino, ino);
	}

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(dip), ddi_get_instance(dip), ino);

		ino_p->ino_unclaimed_intrs = 0;
		ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
	}

	if (ret != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

		return (ret);
	}

	/* Link up px_ih_t */
	ih_p->ih_next = ipil_p->ipil_ih_head;
	ipil_p->ipil_ih_tail->ih_next = ih_p;
	ipil_p->ipil_ih_tail = ih_p;

	ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;
	ipil_p->ipil_ih_size++;

	/* Re-enable interrupt */
	PX_INTR_ENABLE(dip, sysino, curr_cpu);

	return (ret);
}

/*
 * Removes px_ih_t from the ino's linked list.
 * Uses the hardware mutex to lock out interrupt threads.
 * Side effect: the interrupt belonging to that ino is turned off on
 * return. If we are sharing the PX slot with other inos, the caller
 * needs to turn it back on.
 */
int
px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	devino_t	ino = ino_p->ino_ino;
	sysino_t	sysino = ino_p->ino_sysino;
	dev_info_t	*dip = px_p->px_dip;
	px_ih_t		*ih_lst = ipil_p->ipil_ih_head;
	int		i, ret = DDI_SUCCESS;

	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));

	DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
	    ino_p->ino_ino);

	/* Wait on pending interrupt */
	if ((ret = px_ib_intr_pend(dip, sysino)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
		    "sysino 0x%lx(ino 0x%x) timeout",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    sysino, ino);
	}

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ret == DDI_SUCCESS &&
	    ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) {
		cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
		    "ino 0x%x has been unblocked",
		    ddi_driver_name(dip), ddi_get_instance(dip), ino);

		ino_p->ino_unclaimed_intrs = 0;
		ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
	}

	if (ret != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

		return (ret);
	}

	if (ipil_p->ipil_ih_size == 1) {
		if (ih_lst != ih_p)
			goto not_found;

		/* No need to set head/tail as ino_p will be freed */
		goto reset;
	}

	/* Search the linked list for ih_p */
	for (i = 0; (i < ipil_p->ipil_ih_size) &&
	    (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next)
		;

	if (ih_lst->ih_next != ih_p)
		goto not_found;

	/* Remove ih_p from the linked list and maintain the head/tail */
	ih_lst->ih_next = ih_p->ih_next;

	if (ipil_p->ipil_ih_head == ih_p)
		ipil_p->ipil_ih_head = ih_p->ih_next;
	if (ipil_p->ipil_ih_tail == ih_p)
		ipil_p->ipil_ih_tail = ih_lst;

	ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;

reset:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	if (ih_p->ih_ksp != NULL)
		kstat_delete(ih_p->ih_ksp);

	kmem_free(ih_p, sizeof (px_ih_t));
	ipil_p->ipil_ih_size--;

	return (ret);

not_found:
	DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
	    "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);

	return (DDI_FAILURE);
}

px_ih_t *
px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip,
    uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ih_t	*ih_p = ipil_p->ipil_ih_head;
	int	i;

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) &&
		    (ih_p->ih_rec_type == rec_type) &&
		    (ih_p->ih_msg_code == msg_code))
			return (ih_p);
	}

	return ((px_ih_t *)NULL);
}

px_ih_t *
px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
    uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
    caddr_t int_handler_arg1, caddr_t int_handler_arg2,
    msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ih_t	*ih_p;

	ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP);
	ih_p->ih_dip = rdip;
	ih_p->ih_inum = inum;
	ih_p->ih_intr_state = PX_INTR_STATE_DISABLE;
	ih_p->ih_intr_flags = PX_INTR_IDLE;
	ih_p->ih_handler = int_handler;
	ih_p->ih_handler_arg1 = int_handler_arg1;
	ih_p->ih_handler_arg2 = int_handler_arg2;
	ih_p->ih_config_handle = NULL;
	ih_p->ih_rec_type = rec_type;
	ih_p->ih_msg_code = msg_code;
	ih_p->ih_nsec = 0;
	ih_p->ih_ticks = 0;
	ih_p->ih_ksp = NULL;

	return (ih_p);
}
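
/*
 * A minimal usage sketch for the allocator above (hypothetical caller;
 * the real interrupt-add paths live elsewhere in the px driver):
 * allocate the handler, then link it in under the ino list mutex,
 * reusing an existing ipil for this PIL if one exists.
 *
 *	ih_p = px_ib_alloc_ih(rdip, inum, handler, arg1, arg2,
 *	    rec_type, msg_code);
 *	mutex_enter(&ib_p->ib_ino_lst_mutex);
 *	if ((ino_p = px_ib_locate_ino(ib_p, ino)) != NULL &&
 *	    (ipil_p = px_ib_ino_locate_ipil(ino_p, pil)) != NULL)
 *		ret = px_ib_ino_add_intr(px_p, ipil_p, ih_p);
 *	else
 *		ipil_p = px_ib_new_ino_pil(ib_p, ino, pil, ih_p);
 *	mutex_exit(&ib_p->ib_ino_lst_mutex);
 */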

int
px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip,
    uint_t inum, devino_t ino, uint_t pil,
    uint_t new_intr_state, msiq_rec_type_t rec_type,
    msgcode_t msg_code)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_FAILURE;

	DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d "
	    "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), inum, ino, pil, new_intr_state);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) {
		if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type,
		    msg_code)) {
			ih_p->ih_intr_state = new_intr_state;
			ret = DDI_SUCCESS;
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}


/*
 * Get interrupt CPU for a given ino.
 * Return info only for inos which are already mapped to devices.
 */
/*ARGSUSED*/
int
px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p)
{
	dev_info_t	*dip = px_p->px_dip;
	sysino_t	sysino;
	int		ret;

	DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino);

	/* Convert leaf-wide intr to system-wide intr */
	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p);

	DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n",
	    *cpu_id_p);

	return (ret);
}


/*
 * Associate a new CPU with a given ino.
 * Operate only on INOs which are already mapped to devices.
 */
int
px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id)
{
	dev_info_t		*dip = px_p->px_dip;
	cpuid_t			old_cpu_id;
	sysino_t		sysino;
	int			ret = DDI_SUCCESS;
	extern const int	_ncpu;
	extern cpu_t		*cpu[];

	DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x "
	    "cpu_id %x\n", ino, cpu_id);

	mutex_enter(&cpu_lock);

	/* Convert leaf-wide intr to system-wide intr */
	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto done;
	}

	if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto done;
	}

	/*
	 * Validate the cpu and, if it is online, retarget the interrupt
	 * (cpu_lock is already held).
	 */
	if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
		DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n",
		    cpu_id);
		px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE);
		px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino);
	} else {	/* Invalid cpu */
		DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n",
		    cpu_id);
		ret = DDI_EINVAL;
	}

done:
	mutex_exit(&cpu_lock);
	return (ret);
}

hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC;	/* 120 seconds */

/*
 * Associate a new CPU with a given MSI/X.
 * Operate only on MSI/Xs which are already mapped to devices.
 */
int
px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp,
    msinum_t msi_num, cpuid_t cpu_id)
{
	px_ib_t			*ib_p = px_p->px_ib_p;
	px_msi_state_t		*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	dev_info_t		*dip = px_p->px_dip;
	dev_info_t		*rdip = hdlp->ih_dip;
	msiqid_t		msiq_id, old_msiq_id;
	pci_msi_state_t		msi_state;
	msiq_rec_type_t		msiq_rec_type;
	msi_type_t		msi_type;
	px_ino_t		*ino_p;
	px_ih_t			*ih_p, *old_ih_p;
	cpuid_t			old_cpu_id;
	hrtime_t		start_time, end_time;
	int			ret = DDI_SUCCESS;
	extern const int	_ncpu;
	extern cpu_t		*cpu[];

	DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n",
	    msi_num, cpu_id);

	mutex_enter(&cpu_lock);

	/* Check for MSI64 support */
	if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
		msiq_rec_type = MSI64_REC;
		msi_type = MSI64_TYPE;
	} else {
		msiq_rec_type = MSI32_REC;
		msi_type = MSI32_TYPE;
	}

	if ((ret = px_lib_msi_getmsiq(dip, msi_num,
	    &old_msiq_id)) != DDI_SUCCESS) {

		mutex_exit(&cpu_lock);
		return (ret);
	}

	DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n",
	    old_msiq_id);

	if ((ret = px_ib_get_intr_target(px_p,
	    px_msiqid_to_devino(px_p, old_msiq_id),
	    &old_cpu_id)) != DDI_SUCCESS) {

		mutex_exit(&cpu_lock);
		return (ret);
	}

	DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n",
	    old_cpu_id);

	if (cpu_id == old_cpu_id) {

		mutex_exit(&cpu_lock);
		return (DDI_SUCCESS);
	}

	/*
	 * Validate the cpu before retargeting the MSI/X (cpu_lock is
	 * already held).
	 */
	if (!((cpu_id < _ncpu) && (cpu[cpu_id] &&
	    cpu_is_online(cpu[cpu_id])))) {
		/* Invalid cpu */
		DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n",
		    cpu_id);

		mutex_exit(&cpu_lock);
		return (DDI_EINVAL);
	}

	DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id);

	if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
	    msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler "
		    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);

		mutex_exit(&cpu_lock);
		return (ret);
	}

	if ((ret = px_lib_msi_setmsiq(dip, msi_num,
	    msiq_id, msi_type)) != DDI_SUCCESS) {
		mutex_exit(&cpu_lock);

		(void) px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		return (ret);
	}

	if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
	    px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
	    PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) {
		mutex_exit(&cpu_lock);

		(void) px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		return (ret);
	}

	mutex_exit(&cpu_lock);

	/*
	 * Remove the old handler, but first ensure it is finished.
	 *
	 * Each handler sets its PENDING flag before it clears the MSI state.
	 * Then it clears that flag when finished.  If a re-target occurs while
	 * the MSI state is DELIVERED, then it is not yet known which of the
	 * two handlers will take the interrupt.  So the re-target operation
	 * sets a RETARGET flag on both handlers in that case.  Monitoring both
	 * flags on both handlers then determines when the old handler can
	 * be safely removed.
	 */
	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id));
	old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p,
	    hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

	ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id));
	ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri),
	    rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

	if ((ret = px_lib_msi_getstate(dip, msi_num,
	    &msi_state)) != DDI_SUCCESS) {
		(void) px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (ret);
	}

	if (msi_state == PCI_MSI_STATE_DELIVERED) {
		ih_p->ih_intr_flags |= PX_INTR_RETARGET;
		old_ih_p->ih_intr_flags |= PX_INTR_RETARGET;
	}

	start_time = gethrtime();
	while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) &&
	    (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) ||
	    (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) {

		/* Wait for one second */
		delay(drv_usectohz(1000000));

		end_time = gethrtime() - start_time;
		if (end_time > px_ib_msix_retarget_timeout) {
			cmn_err(CE_WARN, "MSIX retarget %x is not completed, "
			    "even after waiting %llx nsec\n",
			    msi_num, end_time);
			break;
		}
	}

	ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET);

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	ret = px_rem_msiq_intr(dip, rdip,
	    hdlp, msiq_rec_type, msi_num, old_msiq_id);

	return (ret);
}
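
/*
 * A sketch of the handler-side flag protocol that the retarget loop
 * above relies on (hypothetical ordering; the actual dispatch code
 * lives outside this file):
 *
 *	ih_p->ih_intr_flags |= PX_INTR_PENDING;
 *	...clear MSI state, invoke ih_p->ih_handler()...
 *	ih_p->ih_intr_flags &= ~PX_INTR_PENDING;
 *	if (ih_p->ih_intr_flags & PX_INTR_RETARGET)
 *		ih_p->ih_intr_flags &= ~PX_INTR_RETARGET;
 *
 * Once whichever handler takes the interrupt clears its RETARGET flag,
 * the first condition of the wait loop fails; once the old handler
 * also drops PENDING, it is safe to remove it.
 */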


static void
px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
    char *path_name, int instance)
{
	(void) strlcpy(dev->driver_name, driver_name, MAXMODCONFNAME);
	(void) strlcpy(dev->path, path_name, MAXPATHLEN);
	dev->dev_inst = instance;
}


/*
 * Return the devices, and the number of devices, associated with a given
 * interrupt block. The capacity of the devs array is passed in via the
 * devs_ret arg, and the number of entries filled in is returned through
 * devs_ret. The device descriptions themselves are returned in the devs
 * array. The function's return value is the number of devices that exist
 * for the given interrupt block.
 *
 * Note: this function assumes an enabled/valid INO, which is why it
 * returns the px node, marked "(Internal)", when it finds no other
 * devices (and *devs_ret > 0).
 */
uint8_t
pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num,
    uint8_t *devs_ret, pcitool_intr_dev_t *devs)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	uint32_t	num_devs = 0;
	char		pathname[MAXPATHLEN];
	int		i, j;

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = px_ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {
		for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p;
		    ipil_p = ipil_p->ipil_next_p) {
			num_devs += ipil_p->ipil_ih_size;

			for (i = 0, ih_p = ipil_p->ipil_ih_head;
			    ((i < ipil_p->ipil_ih_size) && (i < *devs_ret));
			    i++, j++, ih_p = ih_p->ih_next) {
				(void) ddi_pathname(ih_p->ih_dip, pathname);

				if (ih_p->ih_msg_code == msi_num) {
					num_devs = *devs_ret = 1;
					px_fill_in_intr_devs(&devs[0],
					    (char *)ddi_driver_name(
					    ih_p->ih_dip), pathname,
					    ddi_get_instance(ih_p->ih_dip));
					goto done;
				}

				px_fill_in_intr_devs(&devs[j],
				    (char *)ddi_driver_name(ih_p->ih_dip),
				    pathname, ddi_get_instance(ih_p->ih_dip));
			}
		}

		*devs_ret = j;
	} else if (*devs_ret > 0) {
		(void) ddi_pathname(px_p->px_dip, pathname);
		(void) strlcat(pathname, " (Internal)", MAXPATHLEN);
		px_fill_in_intr_devs(&devs[0],
		    (char *)ddi_driver_name(px_p->px_dip), pathname,
		    ddi_get_instance(px_p->px_dip));
		num_devs = *devs_ret = 1;
	}

done:
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (num_devs);
}
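
/*
 * A minimal usage sketch (hypothetical caller, e.g. pcitool ioctl
 * handling; MAX_DEVS is a made-up capacity): pass the devs array
 * capacity in via devs_ret and read the filled-in count back out.
 *
 *	uint8_t			n = MAX_DEVS;
 *	pcitool_intr_dev_t	devs[MAX_DEVS];
 *
 *	num_devs = pxtool_ib_get_ino_devs(px_p, ino, msi_num, &n, devs);
 *	for (i = 0; i < n; i++)
 *		... devs[i].driver_name, devs[i].path, devs[i].dev_inst ...
 */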


int
pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num,
    ddi_intr_handle_impl_t *hdlp)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		i;

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) {
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (DDI_FAILURE);
	}

	for (ipil_p = ino_p->ino_ipil_p; ipil_p;
	    ipil_p = ipil_p->ipil_next_p) {
		for (i = 0, ih_p = ipil_p->ipil_ih_head;
		    ((i < ipil_p->ipil_ih_size) && ih_p);
		    i++, ih_p = ih_p->ih_next) {

			if (ih_p->ih_msg_code != msi_num)
				continue;

			hdlp->ih_dip = ih_p->ih_dip;
			hdlp->ih_inum = ih_p->ih_inum;
			hdlp->ih_cb_func = ih_p->ih_handler;
			hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1;
			hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2;
			if (ih_p->ih_rec_type == MSI64_REC)
				hdlp->ih_cap = DDI_INTR_FLAG_MSI64;
			hdlp->ih_pri = ipil_p->ipil_pil;
			hdlp->ih_ver = DDI_INTR_VERSION;

			mutex_exit(&ib_p->ib_ino_lst_mutex);
			return (DDI_SUCCESS);
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (DDI_FAILURE);
}

void
px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id,
    uint32_t ino)
{
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		i;

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* Log the new CPU in the OS data structures. */
	if (ino_p = px_ib_locate_ino(ib_p, ino)) {
		ino_p->ino_cpuid = new_cpu_id;

		for (ipil_p = ino_p->ino_ipil_p; ipil_p;
		    ipil_p = ipil_p->ipil_next_p) {
			for (i = 0, ih_p = ipil_p->ipil_ih_head;
			    (i < ipil_p->ipil_ih_size);
			    i++, ih_p = ih_p->ih_next) {
				/*
				 * Account for any residual time
				 * to be logged for old cpu.
				 */
				px_ib_cpu_ticks_to_ih_nsec(ib_p,
				    ih_p, old_cpu_id);
			}
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
}