1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright 2018 Joyent, Inc.
27 */
28
29#include <sys/debug.h>
30#include <sys/types.h>
31#include <sys/param.h>
32#include <sys/time.h>
33#include <sys/buf.h>
34#include <sys/errno.h>
35#include <sys/systm.h>
36#include <sys/conf.h>
37#include <sys/signal.h>
38#include <sys/file.h>
39#include <sys/uio.h>
40#include <sys/ioctl.h>
41#include <sys/map.h>
42#include <sys/proc.h>
43#include <sys/user.h>
44#include <sys/mman.h>
45#include <sys/cred.h>
46#include <sys/open.h>
47#include <sys/stat.h>
48#include <sys/utsname.h>
49#include <sys/kmem.h>
50#include <sys/cmn_err.h>
51#include <sys/vnode.h>
52#include <vm/page.h>
53#include <vm/as.h>
54#include <vm/hat.h>
55#include <vm/seg.h>
56#include <vm/seg_kmem.h>
57#include <vm/hat_i86.h>
58#include <sys/vmsystm.h>
59#include <sys/ddi.h>
60#include <sys/devops.h>
61#include <sys/sunddi.h>
62#include <sys/ddi_impldefs.h>
63#include <sys/fs/snode.h>
64#include <sys/pci.h>
65#include <sys/modctl.h>
66#include <sys/uio.h>
67#include <sys/visual_io.h>
68#include <sys/fbio.h>
69#include <sys/ddidmareq.h>
70#include <sys/tnf_probe.h>
71#include <sys/kstat.h>
72#include <sys/callb.h>
73#include <sys/promif.h>
74#include <sys/atomic.h>
75#include <sys/gfx_private.h>
76
77#ifdef __xpv
78#include <sys/hypervisor.h>
79#endif
80
81/*
82 * Create a kva mapping for a pa (start..start+size) with
83 * the specified cache attributes (mode).
84 */
/*
 * Create a kva mapping for a pa (start..start+size) with
 * the specified cache attributes (mode).
 *
 * Returns the kernel virtual address corresponding to 'start'
 * (i.e. the page-aligned mapping plus the original page offset),
 * or NULL/0 on failure or for a zero-length request.  The mapping
 * is load-locked (HAT_LOAD_LOCK) and must be torn down with
 * gfxp_unmap_kernel_space().
 */
gfxp_kva_t
gfxp_map_kernel_space(uint64_t start, size_t size, uint32_t mode)
{
	uint_t pgoffset;
	uint64_t base;
	pgcnt_t npages;
	caddr_t cvaddr;
	int hat_flags;
	uint_t hat_attr;
	pfn_t pfn;

	if (size == 0)
		return (0);

#ifdef __xpv
	/*
	 * The hypervisor doesn't allow r/w mappings to some pages, such as
	 * page tables, gdt, etc. Detect %cr3 to notify users of this interface.
	 */
	if (start == mmu_ptob(mmu_btop(getcr3_pa())))
		return (0);
#endif

	/* Translate the caller's cache mode into HAT caching attributes. */
	if (mode == GFXP_MEMORY_CACHED)
		hat_attr = HAT_STORECACHING_OK;
	else if (mode == GFXP_MEMORY_WRITECOMBINED)
		hat_attr = HAT_MERGING_OK | HAT_PLAT_NOCACHE;
	else	/* GFXP_MEMORY_UNCACHED */
		hat_attr = HAT_STRICTORDER | HAT_PLAT_NOCACHE;
	hat_flags = HAT_LOAD_LOCK;
	/* Round the request out to whole pages around 'start'. */
	pgoffset = start & PAGEOFFSET;
	base = start - pgoffset;
	npages = btopr(size + pgoffset);
	cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
	if (cvaddr == NULL)
		return (NULL);

#ifdef __xpv
	/*
	 * Under Xen, only the initial domain may map machine pages this
	 * way; translate the machine frame into a dom0 pseudo-pfn.
	 */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	pfn = xen_assign_pfn(mmu_btop(base));
#else
	pfn = btop(base);
#endif

	/* Install the locked, device-attribute mapping into the kernel hat. */
	hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,
	    PROT_READ|PROT_WRITE|hat_attr, hat_flags);
	return (cvaddr + pgoffset);
}
133
134/*
135 * Destroy the mapping created by gfxp_map_kernel_space().
136 * Physical memory is not reclaimed.
137 */
138void
139gfxp_unmap_kernel_space(gfxp_kva_t address, size_t size)
140{
141	uint_t pgoffset;
142	caddr_t base;
143	pgcnt_t npages;
144
145	if (size == 0 || address == NULL)
146		return;
147
148	pgoffset = (uintptr_t)address & PAGEOFFSET;
149	base = (caddr_t)address - pgoffset;
150	npages = btopr(size + pgoffset);
151	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
152	vmem_free(heap_arena, base, ptob(npages));
153}
154
155/*
156 * For a VA return the pfn
157 */
/*
 * For a VA return the pfn
 *
 * Translates 'addr' in address space 'as' to a physical (machine,
 * under Xen) address, stored in *pa.  Always returns 0.
 */
int
gfxp_va2pa(struct as *as, caddr_t addr, uint64_t *pa)
{
#ifdef __xpv
	/* Dom0 only: convert the pseudo-physical address to a machine one. */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	*pa = pa_to_ma(pfn_to_pa(hat_getpfnum(as->a_hat, addr)));
#else
	*pa = pfn_to_pa(hat_getpfnum(as->a_hat, addr));
#endif
	return (0);
}
169
170/*
171 * NOP now
172 */
173/* ARGSUSED */
void
gfxp_fix_mem_cache_attrs(caddr_t kva_start, size_t length, int cache_attr)
{
	/* Retained for interface compatibility; intentionally a no-op. */
}
178
/*
 * Wrapper around ddi_dma_mem_alloc(9F) that derives the IOMEM_DATA_*
 * cache attribute from the access handle's devacc_attr_dataorder,
 * overriding any cache attribute the caller passed in 'flags'.
 */
int
gfxp_ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
    ddi_device_acc_attr_t  *accattrp, uint_t flags, int (*waitfp) (caddr_t),
    caddr_t arg, caddr_t *kaddrp, size_t *real_length,
    ddi_acc_handle_t *handlep)
{
	uint_t l_flags = flags & ~IOMEM_DATA_MASK; /* clear cache attrs */
	int e;

	/*
	 * Set an appropriate attribute from devacc_attr_dataorder
	 * to keep compatibility. The cache attributes are ignored
	 * if specified.
	 */
	if (accattrp != NULL) {
		if (accattrp->devacc_attr_dataorder == DDI_STRICTORDER_ACC) {
			l_flags |= IOMEM_DATA_UNCACHED;
		} else if (accattrp->devacc_attr_dataorder ==
		    DDI_MERGING_OK_ACC) {
			l_flags |= IOMEM_DATA_UC_WR_COMBINE;
		} else {
			/* All other orderings may use normal caching. */
			l_flags |= IOMEM_DATA_CACHED;
		}
	}

	e = ddi_dma_mem_alloc(handle, length, accattrp, l_flags, waitfp,
	    arg, kaddrp, real_length, handlep);
	return (e);
}
208
209int
210gfxp_mlock_user_memory(caddr_t address, size_t length)
211{
212	struct as *as = ttoproc(curthread)->p_as;
213	int error = 0;
214
215	if (((uintptr_t)address & PAGEOFFSET) != 0 || length == 0)
216		return (set_errno(EINVAL));
217
218	if (valid_usr_range(address, length, 0, as, as->a_userlimit) !=
219	    RANGE_OKAY)
220		return (set_errno(ENOMEM));
221
222	error = as_ctl(as, address, length, MC_LOCK, 0, 0, NULL, 0);
223	if (error)
224		(void) set_errno(error);
225
226	return (error);
227}
228
229int
230gfxp_munlock_user_memory(caddr_t address, size_t length)
231{
232	struct as *as = ttoproc(curthread)->p_as;
233	int error = 0;
234
235	if (((uintptr_t)address & PAGEOFFSET) != 0 || length == 0)
236		return (set_errno(EINVAL));
237
238	if (valid_usr_range(address, length, 0, as, as->a_userlimit) !=
239	    RANGE_OKAY)
240		return (set_errno(ENOMEM));
241
242	error = as_ctl(as, address, length, MC_UNLOCK, 0, 0, NULL, 0);
243	if (error)
244		(void) set_errno(error);
245
246	return (error);
247}
248
/*
 * Convert a (pseudo-)physical address to a graphics machine address.
 * On bare metal this is the identity; under Xen (dom0 only) the
 * pseudo-physical frame is translated to its machine frame.
 */
gfx_maddr_t
gfxp_convert_addr(paddr_t paddr)
{
#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	return (pfn_to_pa(xen_assign_pfn(btop(paddr))));
#else
	return ((gfx_maddr_t)paddr);
#endif
}
259
260/*
261 * Support getting VA space separately from pages
262 */
263
264/*
265 * A little like gfxp_map_kernel_space, but
266 * just the vmem_alloc part.
267 */
268caddr_t
269gfxp_alloc_kernel_space(size_t size)
270{
271	caddr_t cvaddr;
272	pgcnt_t npages;
273
274	npages = btopr(size);
275	cvaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
276	return (cvaddr);
277}
278
279/*
280 * Like gfxp_unmap_kernel_space, but
281 * just the vmem_free part.
282 */
283void
284gfxp_free_kernel_space(caddr_t address, size_t size)
285{
286
287	uint_t pgoffset;
288	caddr_t base;
289	pgcnt_t npages;
290
291	if (size == 0 || address == NULL)
292		return;
293
294	pgoffset = (uintptr_t)address & PAGEOFFSET;
295	base = (caddr_t)address - pgoffset;
296	npages = btopr(size + pgoffset);
297	vmem_free(heap_arena, base, ptob(npages));
298}
299
300/*
301 * Like gfxp_map_kernel_space, but
302 * just the hat_devload part.
303 */
/*
 * Like gfxp_map_kernel_space, but
 * just the hat_devload part.
 *
 * Installs locked translations for pa range (start..start+size) at the
 * caller-supplied kernel VA 'cvaddr' (normally obtained from
 * gfxp_alloc_kernel_space()), using cache attributes selected by 'mode'.
 */
void
gfxp_load_kernel_space(uint64_t start, size_t size,
    uint32_t mode, caddr_t cvaddr)
{
	uint_t pgoffset;
	uint64_t base;
	pgcnt_t npages;
	int hat_flags;
	uint_t hat_attr;
	pfn_t pfn;

	if (size == 0)
		return;

#ifdef __xpv
	/*
	 * The hypervisor doesn't allow r/w mappings to some pages, such as
	 * page tables, gdt, etc. Detect %cr3 to notify users of this interface.
	 */
	if (start == mmu_ptob(mmu_btop(getcr3_pa())))
		return;
#endif

	/* Translate the caller's cache mode into HAT caching attributes. */
	if (mode == GFXP_MEMORY_CACHED)
		hat_attr = HAT_STORECACHING_OK;
	else if (mode == GFXP_MEMORY_WRITECOMBINED)
		hat_attr = HAT_MERGING_OK | HAT_PLAT_NOCACHE;
	else	/* GFXP_MEMORY_UNCACHED */
		hat_attr = HAT_STRICTORDER | HAT_PLAT_NOCACHE;
	hat_flags = HAT_LOAD_LOCK;

	/* Round the request out to whole pages around 'start'. */
	pgoffset = start & PAGEOFFSET;
	base = start - pgoffset;
	npages = btopr(size + pgoffset);

#ifdef __xpv
	/*
	 * Under Xen, only the initial domain may map machine pages this
	 * way; translate the machine frame into a dom0 pseudo-pfn.
	 */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	pfn = xen_assign_pfn(mmu_btop(base));
#else
	pfn = btop(base);
#endif

	hat_devload(kas.a_hat, cvaddr, ptob(npages), pfn,
	    PROT_READ|PROT_WRITE|hat_attr, hat_flags);
}
349
350/*
351 * Like gfxp_unmap_kernel_space, but
352 * just the had_unload part.
353 */
354void
355gfxp_unload_kernel_space(caddr_t address, size_t size)
356{
357	uint_t pgoffset;
358	caddr_t base;
359	pgcnt_t npages;
360
361	if (size == 0 || address == NULL)
362		return;
363
364	pgoffset = (uintptr_t)address & PAGEOFFSET;
365	base = (caddr_t)address - pgoffset;
366	npages = btopr(size + pgoffset);
367	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
368}
369
370/*
371 * Note that "mempool" is optional and normally disabled in drm_gem.c
372 * (see HAS_MEM_POOL).  Let's just stub these out so we can reduce
373 * changes from the upstream in the DRM driver code.
374 */
375
void
gfxp_mempool_init(void)
{
	/* Mempool support is disabled; intentionally a no-op. */
}
380
void
gfxp_mempool_destroy(void)
{
	/* Mempool support is disabled; intentionally a no-op. */
}
385
/* ARGSUSED */
int
gfxp_alloc_from_mempool(struct gfxp_pmem_cookie *cookie, caddr_t *kva,
    pfn_t *pgarray, pgcnt_t alen, int flags)
{
	/* Mempool support is disabled; always report failure. */
	return (-1);
}
393
/* ARGSUSED */
void
gfxp_free_mempool(struct gfxp_pmem_cookie *cookie, caddr_t kva, size_t len)
{
	/* Mempool support is disabled; intentionally a no-op. */
}
399