xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pci_fdvma.c (revision 0ea48847)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Internal PCI Fast DVMA implementation
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/dvma.h>
#include <vm/hat.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

static struct dvma_ops fdvma_ops;

/*
 * The following routines implement the sun4u fast dvma interface
 * on this bus.
 */

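/*
 * pci_fdvma_load: map the kernel virtual range [a, a + len) into the
 * DVMA slot "index" of a previously reserved handle.  A TTE template
 * is built from the first page and installed, with each page's pfn,
 * into the IOMMU TSB; the resulting DVMA address and length are
 * returned to the caller through the cookie "cp".
 */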
/*ARGSUSED*/
static void
pci_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	dev_info_t *dip = pci_p->pci_dip;
	dvma_addr_t dvma_addr, dvma_pg;
	caddr_t baseaddr = (caddr_t)((uintptr_t)a & PAGEMASK);
	uint32_t offset;
	size_t npages, pg_index;
	pfn_t pfn;
	int i;
	uint64_t tte;

	offset = (uint32_t)(uintptr_t)a & IOMMU_PAGE_OFFSET;
	npages = IOMMU_BTOPR(len + offset);
	if (!npages)
		return;

	/* make sure we don't exceed reserved boundary */
	DEBUG3(DBG_FAST_DVMA, dip, "load index=%x: %p+%x ", index, a, len);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(pci_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
			"%s%d: kaddr_load index(%x)+pgs(%lx) exceeds limit\n",
			ddi_driver_name(dip), ddi_get_instance(dip),
			index, npages);
		return;
	}

	/* better have not already loaded something at this address */
	ASSERT(fdvma_p->kvbase[index] == NULL);
	ASSERT(fdvma_p->pagecnt[index] == 0);

	dvma_addr = mp->dmai_mapping + IOMMU_PTOB(index);
	dvma_pg = IOMMU_BTOP(dvma_addr);
	pg_index = dvma_pg - iommu_p->dvma_base_pg;

	/* construct the dma cookie to be returned */
	MAKE_DMA_COOKIE(cp, dvma_addr | offset, len);
	DEBUG2(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
		cp->dmac_address, cp->dmac_size);

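	/*
	 * Map each kernel page: look up its pfn (registering a page
	 * relocation callback first when DVMA remap is enabled, so a
	 * relocated page can be remapped later) and install its TTE
	 * into the IOMMU TSB, flushing the stale entry for that
	 * DVMA page.
	 */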
	for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) {
		if (pci_dvma_remap_enabled) {
			uint_t flags = HAC_NOSLEEP | HAC_PAGELOCK;

			(void) hat_add_callback(pci_fast_dvma_cbid, a,
			    IOMMU_PAGE_SIZE, flags, mp, &pfn,
			    &fdvma_p->cbcookie[index + i]);

			mp->dmai_flags |= DMAI_FLAGS_RELOC;
		} else {
			pfn = hat_getpfnum(kas.a_hat, a);
		}
		if (pfn == PFN_INVALID)
			goto bad_pfn;

		if (i == 0)	/* setup template, all bits except pfn value */
			tte = MAKE_TTE_TEMPLATE((iopfn_t)pfn, mp);

		/* XXX assumes iommu and mmu have the same page size */
		iommu_p->iommu_tsb_vaddr[pg_index + i] = tte | IOMMU_PTOB(pfn);
		IOMMU_PAGE_FLUSH(iommu_p, (dvma_pg + i));
	}

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	fdvma_p->kvbase[index] = baseaddr;
	fdvma_p->pagecnt[index] = npages;

	return;
bad_pfn:
	cmn_err(CE_WARN, "%s%d: kaddr_load can't get page frame for vaddr %x",
		ddi_driver_name(dip), ddi_get_instance(dip), (int)(uintptr_t)a);
}

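/*
 * pci_fdvma_unload: tear down the mapping at slot "index": unregister
 * any relocation callbacks, clear the corresponding IOMMU TSB entries,
 * and sync the range for the caller unless sync_flags is -1.
 */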
/*ARGSUSED*/
static void
pci_fdvma_unload(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];

	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping + IOMMU_PTOB(index));

	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
		"unload index=%x flags=%x %x+%x+%x\n", index, sync_flags,
		mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));

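	/*
	 * The pci_dvma_sync_before_unmap tunable selects whether the
	 * IOMMU pages are unmapped before or after the optional sync;
	 * exactly one of the two blocks below runs.
	 */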
	if (!pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
				index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	if (sync_flags != -1)
		pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h,
			IOMMU_PTOB(index), IOMMU_PTOB(npg), sync_flags);
	if (pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
				index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	fdvma_p->pagecnt[index] = 0;
}

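/*
 * pci_fdvma_sync: sync the pages currently loaded at slot "index"
 * without tearing down the mapping.
 */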
/*ARGSUSED*/
static void
pci_fdvma_sync(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];

	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
		"sync index=%x flags=%x %x+%x+%x\n", index, sync_flags,
		mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));
	pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h, IOMMU_PTOB(index),
		IOMMU_PTOB(npg), sync_flags);
}

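/*
 * pci_fdvma_reserve: implement DDI_DMA_RESERVE for this nexus.  The
 * requested pages are debited from the IOMMU's DVMA reserve pool and
 * a matching chunk of DVMA space is allocated from the vmem arena;
 * the returned handle is marked DMP_BYPASSNEXUS so that subsequent
 * load/unload/sync operations go straight to fdvma_ops.
 */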
int
pci_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, pci_t *pci_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	dvma_addr_t dvma_pg;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & IOMMU_PAGE_MASK;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	DEBUG2(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
		ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < iommu_p->iommu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Allocate DVMA space from reserve.
	 */
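	/*
	 * For DDI_DMA_RESERVE requests, dmao_size carries the number
	 * of pages requested (see dvma_reserve(9F)), not a byte count.
	 */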
	npages = dmareq->dmar_object.dmao_size;
	if ((long)atomic_add_long_nv(&iommu_p->iommu_dvma_reserve,
	    -npages) < 0) {
		atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
		return (DDI_DMA_NORESOURCES);
	}

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (pci_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = IOMMU_BTOP((ulong_t)vmem_xalloc(iommu_p->iommu_dvma_map,
		IOMMU_PTOB(npages), IOMMU_PAGE_SIZE, 0,
		counter_max, (void *)lo, (void *)(hi + 1),
		dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
		kmem_free(mp, sizeof (pci_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Create the fast dvma request structure.
	 */
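	/*
	 * The three per-slot arrays are indexed by DVMA page: kvbase
	 * records the kernel base address loaded at each slot, pagecnt
	 * the number of pages loaded there, and cbcookie the hat
	 * relocation-callback cookie registered for each page.
	 */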
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->kvbase = kmem_zalloc(npages * sizeof (caddr_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_zalloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->cbcookie = kmem_zalloc(npages * sizeof (void *), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)pci_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS |
		pci_dma_consist_check(dmareq->dmar_flags, pci_p->pci_pbm_p);
	if (!(dmareq->dmar_flags & DDI_DMA_RDWR))
		mp->dmai_rflags |= DDI_DMA_READ;
	mp->dmai_flags = DMAI_FLAGS_INUSE |
		(mp->dmai_rflags & DMP_NOSYNC ? DMAI_FLAGS_NOSYNC : 0);
	mp->dmai_minxfer = dmareq->dmar_limits->dlim_minxfer;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = IOMMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * IOMMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	DEBUG4(DBG_DMA_CTL, dip,
		"PCI_DVMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
		mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

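/*
 * pci_fdvma_release: undo pci_fdvma_reserve: unmap and free the DVMA
 * range, credit the pages back to the reserve pool, free the per-slot
 * bookkeeping, and kick any callbacks waiting for DVMA space or for
 * kernel memory.
 */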
int
pci_fdvma_release(dev_info_t *dip, pci_t *pci_p, ddi_dma_impl_t *mp)
{
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	/* validate fdvma handle */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DEBUG0(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* flush all reserved dvma addresses from iommu */
	pci_dma_sync_unmap(dip, mp->dmai_rdip, mp);

	npages = mp->dmai_ndvmapages;
	pci_vmem_free(iommu_p, mp, (void *)mp->dmai_mapping, npages);

	atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
	mp->dmai_ndvmapages = 0;

	/* see if there is anyone waiting for dvma space */
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}

	/* free data structures */
	kmem_free(fdvma_p->kvbase, npages * sizeof (caddr_t));
	kmem_free(fdvma_p->pagecnt, npages * sizeof (uint_t));
	kmem_free(fdvma_p->cbcookie, npages * sizeof (void *));
	kmem_free(fdvma_p, sizeof (fdvma_t));
	kmem_free(mp, sizeof (pci_dma_hdl_t));

	/* see if there is anyone waiting for kmem */
	if (pci_kmem_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}

/*
 * fast dvma ops structure:
 */
static struct dvma_ops fdvma_ops = {
	DVMAO_REV,
	pci_fdvma_load,
	pci_fdvma_unload,
	pci_fdvma_sync
};

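/*
 * A minimal consumer-side sketch (not part of this file) of how a leaf
 * driver would exercise this interface through the generic fast DVMA
 * entry points, dvma_reserve(9F) and friends, which vector through
 * fdvma_ops for DMP_BYPASSNEXUS handles.  The limits structure, buffer
 * and slot count below are hypothetical placeholders:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	ddi_dma_lim_t lim;		(driver-specific dlim_* settings)
 *
 *	if (dvma_reserve(dip, &lim, 8, &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	dvma_kaddr_load(h, kaddr, len, 0, &cookie);	(load slot 0)
 *	dvma_sync(h, 0, DDI_DMA_SYNC_FORDEV);
 *	... program the device with cookie.dmac_address ...
 *	dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);
 *	dvma_release(h);
 */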