/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * CMU-CH nexus interrupt handling:
 *	PCI device interrupt handler wrapper
 *	pil lookup routine
 *	PCI device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>
#include <sys/ddi_impldefs.h>
#include <sys/pcicmu/pcicmu.h>
#include <sys/sdt.h>

uint_t pcmu_intr_wrapper(caddr_t arg);

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to
 * pcmu_intr_wrapper(), which normally idles the ino's state machine and so
 * precipitates another trip around the loop.
 * The loop can be broken by preventing the ino's state machine from being
 * idled while the interrupt line is jabbering. See the comment at the
 * beginning of pcmu_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */
/*
 * If the unclaimed interrupt count has reached the limit set by
 * pcmu_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
pcmu_spurintr(pcmu_ib_ino_info_t *ino_p)
{
	int i;
	ih_t *ih_p = ino_p->pino_ih_start;
	pcmu_t *pcmu_p = ino_p->pino_ib_p->pib_pcmu_p;
	char *err_fmt_str;

	if (ino_p->pino_unclaimed > pcmu_unclaimed_intr_max) {
		return (DDI_INTR_CLAIMED);
	}
	if (!ino_p->pino_unclaimed) {
		ino_p->pino_spurintr_begin = ddi_get_lbolt();
	}
	ino_p->pino_unclaimed++;
	if (ino_p->pino_unclaimed <= pcmu_unclaimed_intr_max) {
		goto clear;
	}
	if (drv_hztousec(ddi_get_lbolt() - ino_p->pino_spurintr_begin)
	    > pcmu_spurintr_duration) {
		ino_p->pino_unclaimed = 0;
		goto clear;
	}
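	/*
	 * Over the limit and still within the duration window: skip the
	 * clear below so the ino's state machine stays pending, blocking
	 * further interrupts on this ino, and warn that it is blocked.
	 */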
	err_fmt_str = "%s%d: ino 0x%x blocked";
	goto warn;
clear:
	/* clear the pending state */
	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pcmu_p->pcmu_dip),
	    ino_p->pino_ino);
	for (i = 0; i < ino_p->pino_ih_size; i++, ih_p = ih_p->ih_next) {
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	}
	cmn_err(CE_CONT, "!\n");
	return (DDI_INTR_CLAIMED);
}

/*
 * pcmu_intr_wrapper
 *
 * This routine is used as a wrapper around the interrupt handlers installed
 * by child device drivers.  It invokes the driver interrupt handlers and
 * examines the return codes.
 * A count of unclaimed interrupts is kept on a per-ino basis. If at least
 * one handler claims the interrupt, the counter is reset to zero and the
 * interrupt state machine is idled. If no handler claims the interrupt,
 * the counter is incremented by one and the state machine is idled.
 * If the count exceeds the limit set by pcmu_unclaimed_intr_max, the
 * interrupt state machine is not idled, which prevents any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED in all cases; unclaimed interrupts are
 * accounted for by the per-ino counter and the jabber protection above
 * rather than by returning DDI_INTR_UNCLAIMED.
 */
uint_t
pcmu_intr_wrapper(caddr_t arg)
{
	pcmu_ib_ino_info_t *ino_p = (pcmu_ib_ino_info_t *)arg;
	uint_t result = 0, r;
	ih_t *ih_p = ino_p->pino_ih_start;
	int i;
#ifdef	DEBUG
	pcmu_t *pcmu_p = ino_p->pino_ib_p->pib_pcmu_p;
#endif

	for (i = 0; i < ino_p->pino_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PCMU_INTR_STATE_DISABLE) {
			PCMU_DBG3(PCMU_DBG_INTR, pcmu_p->pcmu_dip,
			    "pcmu_intr_wrapper: %s%d interrupt %d is "
			    "disabled\n", ddi_driver_name(dip),
			    ddi_get_instance(dip), ino_p->pino_ino);
			continue;
		}

		DTRACE_PROBE4(pcmu__interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);
		DTRACE_PROBE4(pcmu__interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;
	}

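	/*
	 * DDI_INTR_CLAIMED is nonzero, so result == 0 means no handler
	 * claimed this mondo.
	 */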
	if (!result) {
		return (pcmu_spurintr(ino_p));
	}
	ino_p->pino_unclaimed = 0;
	/* clear the pending state */
	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
	return (DDI_INTR_CLAIMED);
}

int
pcmu_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
	ih_t *ih_p;
	pcmu_ib_ino_t ino;
	pcmu_ib_ino_info_t *ino_p; /* pulse interrupts have no ino */
	pcmu_ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;

	ino = PCMU_IB_MONDO_TO_INO(hdlp->ih_vector);

	PCMU_DBG3(PCMU_DBG_A_INTX, dip, "pcmu_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > pib_p->pib_max_ino) {
		PCMU_DBG1(PCMU_DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	if ((mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p, ino)) == 0)
		goto fail1;

	ino = PCMU_IB_MONDO_TO_INO(mondo);

	mutex_enter(&pib_p->pib_ino_lst_mutex);
	ih_p = pcmu_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	/* sharing ino */
	if ((ino_p = pcmu_ib_locate_ino(pib_p, ino)) != NULL) {
		uint32_t intr_index = hdlp->ih_inum;
		if (pcmu_ib_ino_locate_intr(ino_p, rdip, intr_index)) {
			PCMU_DBG1(PCMU_DBG_A_INTX, dip,
			    "dup intr #%d\n", intr_index);
			goto fail3;
		}

		/*
		 * add default weight(0) to the cpu that we are
		 * already targeting
		 */
		cpu_id = ino_p->pino_cpuid;
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);
		pcmu_ib_ino_add_intr(pcmu_p, ino_p, ih_p);
		goto ino_done;
	}

	ino_p = pcmu_ib_new_ino(pib_p, ino, ih_p);
	hdlp->ih_vector = mondo;

	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "pcmu_add_intr:  pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

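	/*
	 * Temporarily install pcmu_intr_wrapper() as the handler in the
	 * handle so that every child handler on this ino is dispatched
	 * through the nexus wrapper; the original handler is restored
	 * in the handle below.
	 */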
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pcmu_intr_wrapper, (caddr_t)ino_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS) {
		goto fail4;
	}
	/* Save the pil for this ino */
	ino_p->pino_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);

	/* select cpu for sharing and removal */
	cpu_id = pcmu_intr_dist_cpuid(pib_p, ino_p);
	ino_p->pino_cpuid = cpu_id;
	ino_p->pino_established = 1;
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);

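	/*
	 * u2u_translate_tgtid() appears to map the chosen cpu id to the
	 * target id expected by the interrupt mapping register; the
	 * translated id is then encoded into the register value below.
	 */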
	cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p,
	    cpu_id, ino_p->pino_map_reg);
	*ino_p->pino_map_reg = ib_get_map_reg(mondo, cpu_id);
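	/* read the register back to flush the mapping register write */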
	*ino_p->pino_map_reg;
ino_done:
	mutex_exit(&pib_p->pib_ino_lst_mutex);
done:
	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	pcmu_ib_delete_ino(pib_p, ino_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	mutex_exit(&pib_p->pib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}

int
pcmu_remove_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
	pcmu_ib_ino_t ino;
	pcmu_ib_mondo_t mondo;
	pcmu_ib_ino_info_t *ino_p;	/* non-pulse only */
	ih_t *ih_p;			/* non-pulse only */

	ino = PCMU_IB_MONDO_TO_INO(hdlp->ih_vector);

	PCMU_DBG3(PCMU_DBG_R_INTX, dip, "pcmu_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	/* Translate the interrupt property */
	mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p, ino);
	if (mondo == 0) {
		PCMU_DBG1(PCMU_DBG_R_INTX, dip,
		    "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = PCMU_IB_MONDO_TO_INO(mondo);

	mutex_enter(&pib_p->pib_ino_lst_mutex);
	ino_p = pcmu_ib_locate_ino(pib_p, ino);
	if (!ino_p) {
		mutex_exit(&pib_p->pib_ino_lst_mutex);
		return (DDI_SUCCESS);
	}

	ih_p = pcmu_ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum);
	if (pcmu_ib_ino_rem_intr(pcmu_p, ino_p, ih_p) != DDI_SUCCESS) {
		mutex_exit(&pib_p->pib_ino_lst_mutex);
		return (DDI_FAILURE);
	}
	intr_dist_cpuid_rem_device_weight(ino_p->pino_cpuid, rdip);
	if (ino_p->pino_ih_size == 0) {
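		/*
		 * The last handler on this ino was just removed: unregister
		 * the system interrupt and delete the ino state.
		 */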
		PCMU_IB_INO_INTR_PEND(ib_clear_intr_reg_addr(pib_p, ino));
		hdlp->ih_vector = mondo;
		i_ddi_rem_ivintr(hdlp);
		pcmu_ib_delete_ino(pib_p, ino_p);
	}

	/* re-enable interrupt only if mapping register still shared */
	if (ino_p->pino_ih_size) {
		PCMU_IB_INO_INTR_ON(ino_p->pino_map_reg);
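		/* read back to flush the write that re-enabled the ino */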
		*ino_p->pino_map_reg;
	}
	mutex_exit(&pib_p->pib_ino_lst_mutex);
	if (ino_p->pino_ih_size == 0) {
		kmem_free(ino_p, sizeof (pcmu_ib_ino_info_t));
	}
	PCMU_DBG1(PCMU_DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}

/*
 * free the pcmu_inos array allocated during pcmu_intr_setup. the actual
 * interrupts are torn down by their respective block destroy routines:
 * cb_destroy, pcmu_pbm_destroy, and ib_destroy.
 */
void
pcmu_intr_teardown(pcmu_t *pcmu_p)
{
	kmem_free(pcmu_p->pcmu_inos, pcmu_p->pcmu_inos_len);
	pcmu_p->pcmu_inos = NULL;
	pcmu_p->pcmu_inos_len = 0;
}