/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sunddi.h>
#include <sys/amd_iommu.h>
#include "amd_iommu_impl.h"

extern int servicing_interrupt(void);

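/*
 * Wait for a previously queued completion wait command to finish:
 * poll the IOMMU status register until the completion wait interrupt
 * bit is set, re-enabling the command buffer and pausing a second
 * between polls. The caller must hold the command lock.
 */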
static void
amd_iommu_wait_for_completion(amd_iommu_t *iommu)
{
	ASSERT(MUTEX_HELD(&iommu->aiomt_cmdlock));
	while (AMD_IOMMU_REG_GET64(REGADDR64(
	    iommu->aiomt_reg_status_va), AMD_IOMMU_COMWAIT_INT) != 1) {
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_CMDBUF_ENABLE, 1);
		WAIT_SEC(1);
	}
}

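/*
 * Build a completion wait command (opcode 0x01) in cmdptr[]. The
 * interrupt bit is set so completion is signalled via the status
 * register; the 'store' form of the command is not supported, so the
 * store address fields are zeroed. The F bit is taken from the
 * caller's flags. Takes no command arguments.
 */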
static int
create_compl_wait_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "create_compl_wait_cmd";

	ASSERT(cmdargsp == NULL);

	if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_S) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: 'store' completion "
		    "not supported for completion wait command",
		    f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_S, 0);
	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_I, 1);
	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_F,
	    (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT_F) != 0);
	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_LO,
	    0);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x01);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_COMPL_WAIT_STORE_ADDR_HI,
	    0);
	cmdptr[2] = 0;
	cmdptr[3] = 0;

	return (DDI_SUCCESS);
}

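/*
 * Build an invalidate device table entry command (opcode 0x02) for
 * the device ID passed in cmdargsp. No command-specific flags are
 * supported.
 */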
static int
create_inval_devtab_entry_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "create_inval_devtab_entry_cmd";
	uint16_t deviceid;

	ASSERT(cmdargsp);

	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: flags not supported "
		    "for invalidate devtab entry command",
		    f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	deviceid = cmdargsp->ca_deviceid;

	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_DEVTAB_DEVICEID,
	    deviceid);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x02);
	cmdptr[2] = 0;
	cmdptr[3] = 0;

	return (DDI_SUCCESS);
}

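/*
 * Build an invalidate IOMMU pages command (opcode 0x03) for the
 * domain ID and address passed in cmdargsp. The PDE and S bits are
 * taken from the caller's flags.
 */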
/*ARGSUSED*/
static int
create_inval_iommu_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
{
	uint32_t addr_lo;
	uint32_t addr_hi;

	ASSERT(cmdargsp);

	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO);
	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
	    AMD_IOMMU_CMD_INVAL_PAGES_ADDR_HI);

	cmdptr[0] = 0;
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_PAGES_DOMAINID,
	    cmdargsp->ca_domainid);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x03);
	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_PDE,
	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL) != 0);
	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_S,
	    (flags & AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S) != 0);
	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_PAGES_ADDR_LO,
	    addr_lo);
	cmdptr[3] = addr_hi;

	return (DDI_SUCCESS);
}

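/*
 * Build an invalidate IOTLB pages command (opcode 0x04) for the
 * device ID and address passed in cmdargsp. The S bit comes from the
 * caller's flags; maxpend is set to the driver default and the device
 * ID is reused as the queue ID.
 */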
/*ARGSUSED*/
static int
create_inval_iotlb_pages_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
{
	uint32_t addr_lo;
	uint32_t addr_hi;

	ASSERT(cmdargsp);

	addr_lo = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO);
	addr_hi = AMD_IOMMU_REG_GET64(REGADDR64(&cmdargsp->ca_addr),
	    AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_HI);

	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_DEVICEID,
	    cmdargsp->ca_deviceid);
	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_IOTLB_MAXPEND,
	    AMD_IOMMU_DEFAULT_MAXPEND);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x04);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_INVAL_IOTLB_QUEUEID,
	    cmdargsp->ca_deviceid);
	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_ADDR_LO,
	    addr_lo);
	AMD_IOMMU_REG_SET32(&cmdptr[2], AMD_IOMMU_CMD_INVAL_IOTLB_S,
	    (flags & AMD_IOMMU_CMD_FLAGS_IOTLB_INVAL_S) != 0);
	cmdptr[3] = addr_hi;

	return (DDI_SUCCESS);
}

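/*
 * Build an invalidate interrupt table command (opcode 0x05) for the
 * device ID passed in cmdargsp. No command-specific flags are
 * supported.
 */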
static int
create_inval_intr_table_cmd(amd_iommu_t *iommu, amd_iommu_cmdargs_t *cmdargsp,
    amd_iommu_cmd_flags_t flags, uint32_t *cmdptr)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "create_inval_intr_table_cmd";

	ASSERT(cmdargsp);

	if (flags != AMD_IOMMU_CMD_FLAGS_NONE) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: flags not supported "
		    "for invalidate interrupt table command",
		    f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	AMD_IOMMU_REG_SET32(&cmdptr[0], AMD_IOMMU_CMD_INVAL_INTR_DEVICEID,
	    cmdargsp->ca_deviceid);
	AMD_IOMMU_REG_SET32(&cmdptr[1], AMD_IOMMU_CMD_OPCODE, 0x05);
	cmdptr[2] = 0;
	cmdptr[3] = 0;

	return (DDI_SUCCESS);
}

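/*
 * Construct the requested command, queue it at the tail of the IOMMU
 * command buffer and update the tail pointer register. If the command
 * is a completion wait, or the caller requests one via
 * AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, wait for the hardware to signal
 * completion before returning. The command lock is acquired here
 * unless the caller indicates (via lock_held) that it already holds it.
 */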
int
amd_iommu_cmd(amd_iommu_t *iommu, amd_iommu_cmd_t cmd,
    amd_iommu_cmdargs_t *cmdargs, amd_iommu_cmd_flags_t flags, int lock_held)
{
	int error;
	int i;
	uint32_t cmdptr[4] = {0};
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	uint64_t cmdhead_off;
	uint64_t cmdtail_off;
	const char *f = "amd_iommu_cmd";

	ASSERT(lock_held == 0 || lock_held == 1);
	ASSERT(lock_held == 0 || MUTEX_HELD(&iommu->aiomt_cmdlock));

	if (!lock_held)
		mutex_enter(&iommu->aiomt_cmdlock);

	/*
	 * Prepare the command
	 */
	switch (cmd) {
	case AMD_IOMMU_CMD_COMPL_WAIT:
		if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
			cmn_err(CE_WARN, "%s: %s%d: idx=%d: No completion "
			    "wait after completion wait command",
			    f, driver, instance, iommu->aiomt_idx);
			error = DDI_FAILURE;
			goto out;
		}
		error = create_compl_wait_cmd(iommu, cmdargs, flags, cmdptr);
		break;
	case AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY:
		error = create_inval_devtab_entry_cmd(iommu, cmdargs,
		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
		break;
	case AMD_IOMMU_CMD_INVAL_IOMMU_PAGES:
		error = create_inval_iommu_pages_cmd(iommu, cmdargs,
		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
		break;
	case AMD_IOMMU_CMD_INVAL_IOTLB_PAGES:
		error = create_inval_iotlb_pages_cmd(iommu, cmdargs,
		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
		break;
	case AMD_IOMMU_CMD_INVAL_INTR_TABLE:
		error = create_inval_intr_table_cmd(iommu, cmdargs,
		    flags & ~AMD_IOMMU_CMD_FLAGS_COMPL_WAIT, cmdptr);
		break;
	default:
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: Unsupported cmd: %d",
		    f, driver, instance, iommu->aiomt_idx, cmd);
		error = DDI_FAILURE;
		goto out;
	}

	if (error != DDI_SUCCESS) {
		error = DDI_FAILURE;
		goto out;
	}

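	/*
	 * Make sure the command buffer is enabled, then copy the
	 * 16-byte command into the slot at the current software
	 * tail pointer.
	 */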
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_CMDBUF_ENABLE, 1);

	ASSERT(iommu->aiomt_cmd_tail != NULL);

	for (i = 0; i < 4; i++) {
		iommu->aiomt_cmd_tail[i] = cmdptr[i];
	}

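	/*
	 * Read the hardware head pointer; if the new tail would run
	 * into it, spin until the IOMMU has drained enough of the
	 * command buffer.
	 */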
wait_for_drain:
	cmdhead_off = AMD_IOMMU_REG_GET64(
	    REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
	    AMD_IOMMU_CMDHEADPTR);

	cmdhead_off = CMD2OFF(cmdhead_off);

	ASSERT(cmdhead_off < iommu->aiomt_cmdbuf_sz);

	/* check for overflow */
	if ((caddr_t)iommu->aiomt_cmd_tail <
	    (cmdhead_off + iommu->aiomt_cmdbuf)) {
		if ((caddr_t)iommu->aiomt_cmd_tail + 16 >=
		    (cmdhead_off + iommu->aiomt_cmdbuf)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "cmdbuffer overflow: waiting for "
			    "drain");
#endif
			goto wait_for_drain;
		}
	}

	SYNC_FORDEV(iommu->aiomt_dmahdl);

	/*
	 * Update the tail pointer in soft state
	 * and the tail pointer register
	 */
	iommu->aiomt_cmd_tail += 4;
	if ((caddr_t)iommu->aiomt_cmd_tail >= (iommu->aiomt_cmdbuf
	    + iommu->aiomt_cmdbuf_sz)) {
		/* wraparound */
		/*LINTED*/
		iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
		cmdtail_off = 0;
	} else {
		cmdtail_off = (caddr_t)iommu->aiomt_cmd_tail
		/*LINTED*/
		    - iommu->aiomt_cmdbuf;
	}

	ASSERT(cmdtail_off < iommu->aiomt_cmdbuf_sz);

	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
	    AMD_IOMMU_CMDTAILPTR, OFF2CMD(cmdtail_off));

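	/*
	 * A completion wait command is polled for directly; for any
	 * other command, a requested completion wait is issued as a
	 * follow-up command with the command lock still held.
	 */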
	if (cmd == AMD_IOMMU_CMD_COMPL_WAIT) {
		amd_iommu_wait_for_completion(iommu);
	} else if (flags & AMD_IOMMU_CMD_FLAGS_COMPL_WAIT) {
		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT,
		    NULL, 0, 1);
	}

out:
	if (!lock_held)
		mutex_exit(&iommu->aiomt_cmdlock);
	return (error);
}