1 /*
2 * Copyright (c) 2009, Intel Corporation.
3 * All Rights Reserved.
4 */
5
6 /*
7 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
8 * Use is subject to license terms.
9 */
10 /*
11 * Portions Philip Brown phil@bolthole.com Dec 2001
12 */
13
14
/*
 * agpgart driver
 *
 * This driver is primarily targeted at providing memory support for Intel
 * AGP devices, Intel memory-less video cards, and AMD64 CPU GART devices.
 * There are four main architectures: ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
 * and ARC_AMD64AGP. The memory interfaces are the same for these
 * architectures; the difference lies in how the hardware GART table is
 * managed for each of them.
 *
 * For large memory allocations, this driver uses direct mapping to the
 * userland application interface to save kernel virtual memory.
 */
28
29 #include <sys/types.h>
30 #include <sys/pci.h>
31 #include <sys/systm.h>
32 #include <sys/conf.h>
33 #include <sys/file.h>
34 #include <sys/kstat.h>
35 #include <sys/stat.h>
36 #include <sys/modctl.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/sunldi.h>
40 #include <sys/policy.h>
41 #include <sys/ddidevmap.h>
42 #include <vm/seg_dev.h>
43 #include <sys/pmem.h>
44 #include <sys/agpgart.h>
45 #include <sys/agp/agpdefs.h>
46 #include <sys/agp/agpgart_impl.h>
47 #include <sys/agp/agpamd64gart_io.h>
48 #include <sys/agp/agpmaster_io.h>
49 #include <sys/agp/agptarget_io.h>
50
51 /* Dynamic debug support */
/*
 * Dynamic debug support.  Set agp_debug_var to 1 for verbose output
 * (PRINT1 and PRINT2), or >1 for warnings only (PRINT2).
 *
 * The original macros expanded to a bare "if" statement, which is a
 * dangling-else hazard and can silently swallow an "else" branch at any
 * call site; wrap the expansion in do { } while (0) so each macro
 * behaves as a single statement.
 */
int agp_debug_var = 0;
#define	AGPDB_PRINT1(fmt)	do {				\
	if (agp_debug_var == 1)					\
		cmn_err fmt;					\
	} while (0)
#define	AGPDB_PRINT2(fmt)	do {				\
	if (agp_debug_var >= 1)					\
		cmn_err fmt;					\
	} while (0)
55
/* Driver global softstate handle */
static void *agpgart_glob_soft_handle;

/* Highest driver instance number probed for master/target/gart devices */
#define MAX_INSTNUM 16

/*
 * The driver instance number is kept in the upper bits of the minor
 * number; the low 4 bits are reserved for per-instance minor nodes.
 */
#define AGP_DEV2INST(devt) (getminor((devt)) >> 4)
#define AGP_INST2MINOR(instance) ((instance) << 4)
/* Architecture-classification helpers for the detected arc type */
#define IS_INTEL_830(type) ((type) == ARC_IGD830)
#define IS_TRUE_AGP(type) (((type) == ARC_INTELAGP) || \
	((type) == ARC_AMD64AGP))
66
67
/*
 * Copy a native agp info struct (v) into its ILP32 counterpart (v32)
 * for 32-bit ioctl consumers.  Note that agpi_aperbase/agpi_apersize
 * are explicitly truncated to 32 bits here.
 */
#define agpinfo_default_to_32(v, v32) \
	{ \
		(v32).agpi32_version = (v).agpi_version; \
		(v32).agpi32_devid = (v).agpi_devid; \
		(v32).agpi32_mode = (v).agpi_mode; \
		(v32).agpi32_aperbase = (uint32_t)(v).agpi_aperbase; \
		(v32).agpi32_apersize = (uint32_t)(v).agpi_apersize; \
		(v32).agpi32_pgtotal = (v).agpi_pgtotal; \
		(v32).agpi32_pgsystem = (v).agpi_pgsystem; \
		(v32).agpi32_pgused = (v).agpi_pgused; \
	}
79
/*
 * DMA attributes for AGP memory allocations: page-aligned,
 * addresses constrained below 4G.
 */
static ddi_dma_attr_t agpgart_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
94
/*
 * AMD64 supports gart table above 4G. See alloc_gart_table.
 *
 * NOTE(review): dma_attr_addr_hi below still caps addresses at 4G-1,
 * identical to agpgart_dma_attr; presumably alloc_gart_table adjusts
 * this attribute for the AMD64 case -- confirm against alloc_gart_table.
 */
static ddi_dma_attr_t garttable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
112
/*
 * AGPGART table needs physically contiguous memory. To assure that
 * each access to the gart table is strongly ordered and uncachable,
 * we use DDI_STRICTORDER_ACC.
 */
static ddi_device_acc_attr_t gart_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};
123
/*
 * AGP memory is usually used as texture memory or for a framebuffer, so we
 * can set the memory attribute to write combining. Video drivers will
 * determine the frame buffer attributes, for example the memory is write
 * combining or non-cachable. However, the interface between Xorg and agpgart
 * driver to support attribute selection doesn't exist yet. So we set agp
 * memory to non-cachable by default now. This attribute might be overridden
 * by MTRR in X86.
 */
static ddi_device_acc_attr_t mem_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* Can be DDI_MERGING_OK_ACC */
};
138
/* Forward declarations for routines referenced before their definitions */
static keytable_ent_t *
agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
static void
amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
143
144
/*
 * agp_devmap_unmap()
 *
 * Devmap unmap callback. A devmap unmap may remove the whole mapping or
 * split it, leaving one or two remainder pieces; each non-NULL new
 * handle denotes a remaining piece that needs its own private context.
 * The key table entry's reference count is adjusted accordingly under
 * the per-instance mutex, and the outgoing context is freed.
 */
/*ARGSUSED*/
static void
agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
    offset_t off, size_t len, devmap_cookie_t new_handle1,
    void **new_devprivate1, devmap_cookie_t new_handle2,
    void **new_devprivate2)
{

	struct keytable_ent *mementry = NULL;
	agpgart_softstate_t *softstate;
	agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;

	ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
	ASSERT(devprivate);
	ASSERT(handle);

	/* Private context recorded at agp_devmap_map() time */
	ctxp = (agpgart_ctx_t *)devprivate;
	softstate = ctxp->actx_sc;
	ASSERT(softstate);

	if (new_handle1 != NULL) {
		/* Left-hand remainder keeps the original offset */
		newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
		newctxp1->actx_sc = softstate;
		newctxp1->actx_off = ctxp->actx_off;
		*new_devprivate1 = newctxp1;
	}

	if (new_handle2 != NULL) {
		/* Right-hand remainder starts after the unmapped range */
		newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
		newctxp2->actx_sc = softstate;
		newctxp2->actx_off = off + len;
		*new_devprivate2 = newctxp2;
	}

	mutex_enter(&softstate->asoft_instmutex);
	if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
		/* Entire mapping goes away: drop one reference */
		mementry =
		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
		ASSERT(mementry);
		mementry->kte_refcnt--;
	} else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
		/* Mapping split into two pieces: net gain of one reference */
		mementry =
		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
		ASSERT(mementry);
		mementry->kte_refcnt++;
	}
	/* One-sided splits leave the reference count unchanged */
	ASSERT(mementry == NULL || mementry->kte_refcnt >= 0);
	mutex_exit(&softstate->asoft_instmutex);
	kmem_free(ctxp, sizeof (struct agpgart_ctx));
}
195
/*
 * agp_devmap_map()
 *
 * Devmap map callback. Looks up the driver soft state for the minor's
 * instance, takes a reference on the key table entry backing the
 * mapped range, and allocates a private context recording the offset
 * and soft state for later dup/unmap callbacks.
 *
 * Returns 0 on success, ENXIO if the soft state cannot be found.
 */
/*ARGSUSED*/
static int
agp_devmap_map(devmap_cookie_t handle, dev_t dev,
    uint_t flags, offset_t offset, size_t len, void **new_devprivate)
{
	agpgart_softstate_t *softstate;
	int instance;
	struct keytable_ent *mementry;
	agpgart_ctx_t *newctxp;

	ASSERT(handle);
	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
		return (ENXIO);
	}

	ASSERT(softstate);
	/* Caller holds the per-instance mutex across this callback */
	ASSERT(mutex_owned(&softstate->asoft_instmutex));
	ASSERT(len);
	ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));

	mementry =
	    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
	ASSERT(mementry);
	mementry->kte_refcnt++;
	ASSERT(mementry->kte_refcnt >= 0);
	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
	newctxp->actx_off = offset;
	newctxp->actx_sc = softstate;
	*new_devprivate = newctxp;

	return (0);
}
231
232 /*ARGSUSED*/
agp_devmap_dup(devmap_cookie_t handle,void * devprivate,devmap_cookie_t new_handle,void ** new_devprivate)233 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
234 devmap_cookie_t new_handle, void **new_devprivate)
235 {
236 struct keytable_ent *mementry;
237 agpgart_ctx_t *newctxp, *ctxp;
238 agpgart_softstate_t *softstate;
239
240 ASSERT(devprivate);
241 ASSERT(handle && new_handle);
242
243 ctxp = (agpgart_ctx_t *)devprivate;
244 ASSERT(AGP_ALIGNED(ctxp->actx_off));
245
246 newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
247 newctxp->actx_off = ctxp->actx_off;
248 newctxp->actx_sc = ctxp->actx_sc;
249 softstate = (agpgart_softstate_t *)newctxp->actx_sc;
250
251 mutex_enter(&softstate->asoft_instmutex);
252 mementry = agp_find_bound_keyent(softstate,
253 AGP_BYTES2PAGES(newctxp->actx_off));
254 mementry->kte_refcnt++;
255 ASSERT(mementry->kte_refcnt >= 0);
256 mutex_exit(&softstate->asoft_instmutex);
257 *new_devprivate = newctxp;
258
259 return (0);
260 }
261
/*
 * Devmap callback vector for agpgart mappings; no access callback is
 * provided.
 */
struct devmap_callback_ctl agp_devmap_cb = {
	DEVMAP_OPS_REV,		/* rev */
	agp_devmap_map,		/* map */
	NULL,			/* access */
	agp_devmap_dup,		/* dup */
	agp_devmap_unmap,	/* unmap */
};
269
270 /*
271 * agp_master_regis_byname()
272 *
273 * Description:
274 * Open the AGP master device node by device path name and
275 * register the device handle for later operations.
276 * We check all possible driver instance from 0
277 * to MAX_INSTNUM because the master device could be
278 * at any instance number. Only one AGP master is supported.
279 *
280 * Arguments:
281 * master_hdlp AGP master device LDI handle pointer
282 * agpgart_l AGPGART driver LDI identifier
283 *
284 * Returns:
285 * -1 failed
286 * 0 success
287 */
288 static int
agp_master_regis_byname(ldi_handle_t * master_hdlp,ldi_ident_t agpgart_li)289 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
290 {
291 int i;
292 char buf[MAXPATHLEN];
293
294 ASSERT(master_hdlp);
295 ASSERT(agpgart_li);
296
297 /*
298 * Search all possible instance numbers for the agp master device.
299 * Only one master device is supported now, so the search ends
300 * when one master device is found.
301 */
302 for (i = 0; i < MAX_INSTNUM; i++) {
303 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
304 if ((ldi_open_by_name(buf, 0, kcred,
305 master_hdlp, agpgart_li)))
306 continue;
307 AGPDB_PRINT1((CE_NOTE,
308 "master device found: instance number=%d", i));
309 break;
310
311 }
312
313 /* AGP master device not found */
314 if (i == MAX_INSTNUM)
315 return (-1);
316
317 return (0);
318 }
319
320 /*
321 * agp_target_regis_byname()
322 *
323 * Description:
324 * This function opens agp bridge device node by
325 * device path name and registers the device handle
326 * for later operations.
327 * We check driver instance from 0 to MAX_INSTNUM
328 * because the master device could be at any instance
329 * number. Only one agp target is supported.
330 *
331 *
332 * Arguments:
333 * target_hdlp AGP target device LDI handle pointer
334 * agpgart_l AGPGART driver LDI identifier
335 *
336 * Returns:
337 * -1 failed
338 * 0 success
339 */
340 static int
agp_target_regis_byname(ldi_handle_t * target_hdlp,ldi_ident_t agpgart_li)341 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
342 {
343 int i;
344 char buf[MAXPATHLEN];
345
346 ASSERT(target_hdlp);
347 ASSERT(agpgart_li);
348
349 for (i = 0; i < MAX_INSTNUM; i++) {
350 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
351 if ((ldi_open_by_name(buf, 0, kcred,
352 target_hdlp, agpgart_li)))
353 continue;
354
355 AGPDB_PRINT1((CE_NOTE,
356 "bridge device found: instance number=%d", i));
357 break;
358
359 }
360
361 /* AGP bridge device not found */
362 if (i == MAX_INSTNUM) {
363 AGPDB_PRINT2((CE_WARN, "bridge device not found"));
364 return (-1);
365 }
366
367 return (0);
368 }
369
370 /*
371 * amd64_gart_regis_byname()
372 *
373 * Description:
374 * Open all amd64 gart device nodes by deice path name and
375 * register the device handles for later operations. Each cpu
376 * has its own amd64 gart device.
377 *
378 * Arguments:
379 * cpu_garts cpu garts device list header
380 * agpgart_l AGPGART driver LDI identifier
381 *
382 * Returns:
383 * -1 failed
384 * 0 success
385 */
386 static int
amd64_gart_regis_byname(amd64_garts_dev_t * cpu_garts,ldi_ident_t agpgart_li)387 amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
388 {
389 amd64_gart_dev_list_t *gart_list;
390 int i;
391 char buf[MAXPATHLEN];
392 ldi_handle_t gart_hdl;
393 int ret;
394
395 ASSERT(cpu_garts);
396 ASSERT(agpgart_li);
397
398 /*
399 * Search all possible instance numbers for the gart devices.
400 * There can be multiple on-cpu gart devices for Opteron server.
401 */
402 for (i = 0; i < MAX_INSTNUM; i++) {
403 (void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
404 ret = ldi_open_by_name(buf, 0, kcred,
405 &gart_hdl, agpgart_li);
406
407 if (ret == ENODEV)
408 continue;
409 else if (ret != 0) { /* There was an error opening the device */
410 amd64_gart_unregister(cpu_garts);
411 return (ret);
412 }
413
414 AGPDB_PRINT1((CE_NOTE,
415 "amd64 gart device found: instance number=%d", i));
416
417 gart_list = (amd64_gart_dev_list_t *)
418 kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);
419
420 /* Add new item to the head of the gart device list */
421 gart_list->gart_devhdl = gart_hdl;
422 gart_list->next = cpu_garts->gart_dev_list_head;
423 cpu_garts->gart_dev_list_head = gart_list;
424 cpu_garts->gart_device_num++;
425 }
426
427 if (cpu_garts->gart_device_num == 0)
428 return (ENODEV);
429 return (0);
430 }
431
432 /*
433 * Unregister agp master device handle
434 */
435 static void
agp_master_unregister(ldi_handle_t * master_hdlp)436 agp_master_unregister(ldi_handle_t *master_hdlp)
437 {
438 ASSERT(master_hdlp);
439
440 if (master_hdlp) {
441 (void) ldi_close(*master_hdlp, 0, kcred);
442 *master_hdlp = NULL;
443 }
444 }
445
446 /*
447 * Unregister agp bridge device handle
448 */
449 static void
agp_target_unregister(ldi_handle_t * target_hdlp)450 agp_target_unregister(ldi_handle_t *target_hdlp)
451 {
452 if (target_hdlp) {
453 (void) ldi_close(*target_hdlp, 0, kcred);
454 *target_hdlp = NULL;
455 }
456 }
457
458 /*
459 * Unregister all amd64 gart device handles
460 */
461 static void
amd64_gart_unregister(amd64_garts_dev_t * cpu_garts)462 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
463 {
464 amd64_gart_dev_list_t *gart_list;
465 amd64_gart_dev_list_t *next;
466
467 ASSERT(cpu_garts);
468
469 for (gart_list = cpu_garts->gart_dev_list_head;
470 gart_list; gart_list = next) {
471
472 ASSERT(gart_list->gart_devhdl);
473 (void) ldi_close(gart_list->gart_devhdl, 0, kcred);
474 next = gart_list->next;
475 /* Free allocated memory */
476 kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
477 }
478 cpu_garts->gart_dev_list_head = NULL;
479 cpu_garts->gart_device_num = 0;
480 }
481
482 /*
483 * lyr_detect_master_type()
484 *
485 * Description:
486 * This function gets agp master type by querying agp master device.
487 *
488 * Arguments:
489 * master_hdlp agp master device ldi handle pointer
490 *
491 * Returns:
492 * -1 unsupported device
493 * DEVICE_IS_I810 i810 series
494 * DEVICE_IS_I810 i830 series
495 * DEVICE_IS_AGP true agp master
496 */
497 static int
lyr_detect_master_type(ldi_handle_t * master_hdlp)498 lyr_detect_master_type(ldi_handle_t *master_hdlp)
499 {
500 int vtype;
501 int err;
502
503 ASSERT(master_hdlp);
504
505 /* ldi_ioctl(agpmaster) */
506 err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
507 (intptr_t)&vtype, FKIOCTL, kcred, 0);
508 if (err) /* Unsupported graphics device */
509 return (-1);
510 return (vtype);
511 }
512
513 /*
514 * devtect_target_type()
515 *
516 * Description:
517 * This function gets the host bridge chipset type by querying the agp
518 * target device.
519 *
520 * Arguments:
521 * target_hdlp agp target device LDI handle pointer
522 *
523 * Returns:
524 * CHIP_IS_INTEL Intel agp chipsets
525 * CHIP_IS_AMD AMD agp chipset
526 * -1 unsupported chipset
527 */
528 static int
lyr_detect_target_type(ldi_handle_t * target_hdlp)529 lyr_detect_target_type(ldi_handle_t *target_hdlp)
530 {
531 int btype;
532 int err;
533
534 ASSERT(target_hdlp);
535
536 err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
537 FKIOCTL, kcred, 0);
538 if (err) /* Unsupported bridge device */
539 return (-1);
540 return (btype);
541 }
542
/*
 * lyr_init()
 *
 * Description:
 *	This function detects the graphics system architecture and
 *	registers all relative device handles in a global structure
 *	"agp_regdev". Then it stores the system arc type in driver
 *	soft state.
 *
 * Arguments:
 *	agp_regdev	AGP devices registration struct pointer
 *	agpgart_li	AGPGART driver LDI identifier
 *
 * Returns:
 *	0	System arc supported and agp devices registration succeeded.
 *	-1	System arc not supported or device registration failed.
 */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/* No amd64 gart devices: must be an Intel-style system */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is an Intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810: /* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830: /* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP: /* AGP graphics, keep detecting */
			break;
		default: /* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL: /* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD: /* AMD chipset without cpu garts: error */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default: /* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection
	 */
	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE, "lyr_init: no AGP master in amd64"));
		goto err1;
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge"));
		goto err2;
	}

	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	/* AMD64 CPU garts registered (ret == 0)? Undo them too. */
	if (ret == 0) {
		amd64_gart_unregister(garts_dev);
	}
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}
679
680 void
lyr_end(agp_registered_dev_t * agp_regdev)681 lyr_end(agp_registered_dev_t *agp_regdev)
682 {
683 ASSERT(agp_regdev);
684
685 switch (agp_regdev->agprd_arctype) {
686 case ARC_IGD810:
687 case ARC_IGD830:
688 case ARC_INTELAGP:
689 agp_master_unregister(&agp_regdev->agprd_masterhdl);
690 agp_target_unregister(&agp_regdev->agprd_targethdl);
691
692 return;
693 case ARC_AMD64AGP:
694 agp_master_unregister(&agp_regdev->agprd_masterhdl);
695 agp_target_unregister(&agp_regdev->agprd_targethdl);
696 amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
697
698 return;
699 default:
700 ASSERT(0);
701 return;
702 }
703 }
704
/*
 * lyr_get_info()
 *
 * Description:
 *	Collect device and aperture information from the registered
 *	master and target devices into "info", according to the
 *	detected architecture type.
 *
 * Arguments:
 *	info		kernel info struct to be filled in
 *	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 *	0	success
 *	-1	ioctl failure, unknown arc, or mis-programmed aperture
 */
int
lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	igd_info_t value1;
	i_agp_info_t value2;
	size_t prealloc_size;
	int err;

	ASSERT(info);
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
		/* Master supplies devid/aperture; target the prealloc size */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = (uint32_t)value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_presize = prealloc_size;

		break;

	case ARC_IGD830:
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = (uint32_t)value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);

		/*
		 * Assume all units are kilobytes unless explicitly
		 * stated below:
		 * preallocated GTT memory = preallocated memory - GTT size
		 *	- scratch page size
		 *
		 * scratch page size = 4
		 * GTT size (KB) = aperture size (MB)
		 * this algorithm came from Xorg source code
		 */
		if (prealloc_size > (info->agpki_apersize + 4))
			prealloc_size =
			    prealloc_size - info->agpki_apersize - 4;
		else {
			AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
			    "pre-allocated memory too small, setting to zero"));
			prealloc_size = 0;
		}
		info->agpki_presize = prealloc_size;
		AGPDB_PRINT2((CE_NOTE,
		    "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
		    prealloc_size, info->agpki_apersize));
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* AGP devices: query both the master and the target */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value2.iagp_devid;
		info->agpki_mver = value2.iagp_ver;
		info->agpki_mstatus = value2.iagp_mode;
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_tdevid = value2.iagp_devid;
		info->agpki_tver = value2.iagp_ver;
		info->agpki_tstatus = value2.iagp_mode;
		info->agpki_aperbase = value2.iagp_aperbase;
		info->agpki_apersize = (uint32_t)value2.iagp_apersize;
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: function doesn't work for unknown arc"));
		return (-1);
	}
	/* Sanity-check that firmware programmed the aperture registers */
	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
	    (info->agpki_apersize == 0) ||
	    (info->agpki_aperbase == 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: aperture is not programmed correctly!"));
		return (-1);
	}

	return (0);
}
813
814 /*
815 * lyr_i8xx_add_to_gtt()
816 *
817 * Description:
818 * This function sets up the integrated video device gtt table
819 * via an ioclt to the AGP master driver.
820 *
821 * Arguments:
822 * pg_offset The start entry to be setup
823 * keyent Keytable entity pointer
824 * agp_regdev AGP devices registration struct pointer
825 *
826 * Returns:
827 * 0 success
828 * -1 invalid operations
829 */
830 int
lyr_i8xx_add_to_gtt(uint32_t pg_offset,keytable_ent_t * keyent,agp_registered_dev_t * agp_regdev)831 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
832 agp_registered_dev_t *agp_regdev)
833 {
834 int err = 0;
835 int rval;
836 ldi_handle_t hdl;
837 igd_gtt_seg_t gttseg;
838 uint32_t *addrp, i;
839 uint32_t npages;
840
841 ASSERT(keyent);
842 ASSERT(agp_regdev);
843 gttseg.igs_pgstart = pg_offset;
844 npages = keyent->kte_pages;
845 gttseg.igs_npage = npages;
846 gttseg.igs_type = keyent->kte_type;
847 gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
848 (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
849
850 addrp = gttseg.igs_phyaddr;
851 for (i = 0; i < npages; i++, addrp++) {
852 *addrp =
853 (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
854 }
855
856 hdl = agp_regdev->agprd_masterhdl;
857 if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)>tseg, FKIOCTL,
858 kcred, &rval)) {
859 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
860 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
861 gttseg.igs_pgstart));
862 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
863 gttseg.igs_npage));
864 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
865 gttseg.igs_type));
866 err = -1;
867 }
868 kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
869 return (err);
870 }
871
872 /*
873 * lyr_i8xx_remove_from_gtt()
874 *
875 * Description:
876 * This function clears the integrated video device gtt table via
877 * an ioctl to the agp master device.
878 *
879 * Arguments:
880 * pg_offset The starting entry to be cleared
881 * npage The number of entries to be cleared
882 * agp_regdev AGP devices struct pointer
883 *
884 * Returns:
885 * 0 success
886 * -1 invalid operations
887 */
888 int
lyr_i8xx_remove_from_gtt(uint32_t pg_offset,uint32_t npage,agp_registered_dev_t * agp_regdev)889 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
890 agp_registered_dev_t *agp_regdev)
891 {
892 int rval;
893 ldi_handle_t hdl;
894 igd_gtt_seg_t gttseg;
895
896 gttseg.igs_pgstart = pg_offset;
897 gttseg.igs_npage = npage;
898
899 hdl = agp_regdev->agprd_masterhdl;
900 if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)>tseg, FKIOCTL,
901 kcred, &rval))
902 return (-1);
903
904 return (0);
905 }
906
907 /*
908 * lyr_set_gart_addr()
909 *
910 * Description:
911 * This function puts the gart table physical address in the
912 * gart base register.
913 * Please refer to gart and gtt table base register format for
914 * gart base register format in agpdefs.h.
915 *
916 * Arguments:
917 * phy_base The base physical address of gart table
918 * agp_regdev AGP devices registration struct pointer
919 *
920 * Returns:
921 * 0 success
922 * -1 failed
923 *
924 */
925
926 int
lyr_set_gart_addr(uint64_t phy_base,agp_registered_dev_t * agp_regdev)927 lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
928 {
929 amd64_gart_dev_list_t *gart_list;
930 ldi_handle_t hdl;
931 int err = 0;
932
933 ASSERT(agp_regdev);
934 switch (agp_regdev->agprd_arctype) {
935 case ARC_IGD810:
936 {
937 uint32_t base;
938
939 ASSERT((phy_base & I810_POINTER_MASK) == 0);
940 base = (uint32_t)phy_base;
941
942 hdl = agp_regdev->agprd_masterhdl;
943 err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
944 (intptr_t)&base, FKIOCTL, kcred, 0);
945 break;
946 }
947 case ARC_INTELAGP:
948 {
949 uint32_t addr;
950 addr = (uint32_t)phy_base;
951
952 ASSERT((phy_base & GTT_POINTER_MASK) == 0);
953 hdl = agp_regdev->agprd_targethdl;
954 err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
955 (intptr_t)&addr, FKIOCTL, kcred, 0);
956 break;
957 }
958 case ARC_AMD64AGP:
959 {
960 uint32_t addr;
961
962 ASSERT((phy_base & AMD64_POINTER_MASK) == 0);
963 addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
964 & AMD64_GARTBASE_MASK);
965
966 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
967 gart_list;
968 gart_list = gart_list->next) {
969 hdl = gart_list->gart_devhdl;
970 if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
971 (intptr_t)&addr, FKIOCTL, kcred, 0)) {
972 err = -1;
973 break;
974 }
975 }
976 break;
977 }
978 default:
979 err = -1;
980 }
981
982 if (err)
983 return (-1);
984
985 return (0);
986 }
987
988 int
lyr_set_agp_cmd(uint32_t cmd,agp_registered_dev_t * agp_regdev)989 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
990 {
991 ldi_handle_t hdl;
992 uint32_t command;
993
994 ASSERT(agp_regdev);
995 command = cmd;
996 hdl = agp_regdev->agprd_targethdl;
997 if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
998 (intptr_t)&command, FKIOCTL, kcred, 0))
999 return (-1);
1000 hdl = agp_regdev->agprd_masterhdl;
1001 if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
1002 (intptr_t)&command, FKIOCTL, kcred, 0))
1003 return (-1);
1004
1005 return (0);
1006 }
1007
1008 int
lyr_config_devices(agp_registered_dev_t * agp_regdev)1009 lyr_config_devices(agp_registered_dev_t *agp_regdev)
1010 {
1011 amd64_gart_dev_list_t *gart_list;
1012 ldi_handle_t hdl;
1013 int rc = 0;
1014
1015 ASSERT(agp_regdev);
1016 switch (agp_regdev->agprd_arctype) {
1017 case ARC_IGD830:
1018 case ARC_IGD810:
1019 break;
1020 case ARC_INTELAGP:
1021 {
1022 hdl = agp_regdev->agprd_targethdl;
1023 rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
1024 0, FKIOCTL, kcred, 0);
1025 break;
1026 }
1027 case ARC_AMD64AGP:
1028 {
1029 /*
1030 * BIOS always shadow registers such like Aperture Base
1031 * register, Aperture Size Register from the AGP bridge
1032 * to the AMD64 CPU host bridge. If future BIOSes are broken
1033 * in this regard, we may need to shadow these registers
1034 * in driver.
1035 */
1036
1037 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1038 gart_list;
1039 gart_list = gart_list->next) {
1040 hdl = gart_list->gart_devhdl;
1041 if (ldi_ioctl(hdl, AMD64_CONFIGURE,
1042 0, FKIOCTL, kcred, 0)) {
1043 rc = -1;
1044 break;
1045 }
1046 }
1047 break;
1048 }
1049 default:
1050 rc = -1;
1051 }
1052
1053 if (rc)
1054 return (-1);
1055
1056 return (0);
1057 }
1058
1059 int
lyr_unconfig_devices(agp_registered_dev_t * agp_regdev)1060 lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
1061 {
1062 amd64_gart_dev_list_t *gart_list;
1063 ldi_handle_t hdl;
1064 int rc = 0;
1065
1066 ASSERT(agp_regdev);
1067 switch (agp_regdev->agprd_arctype) {
1068 case ARC_IGD830:
1069 case ARC_IGD810:
1070 {
1071 hdl = agp_regdev->agprd_masterhdl;
1072 rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
1073 break;
1074 }
1075 case ARC_INTELAGP:
1076 {
1077 hdl = agp_regdev->agprd_targethdl;
1078 rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
1079 0, FKIOCTL, kcred, 0);
1080 break;
1081 }
1082 case ARC_AMD64AGP:
1083 {
1084 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1085 gart_list; gart_list = gart_list->next) {
1086 hdl = gart_list->gart_devhdl;
1087 if (ldi_ioctl(hdl, AMD64_UNCONFIG,
1088 0, FKIOCTL, kcred, 0)) {
1089 rc = -1;
1090 break;
1091 }
1092 }
1093 break;
1094 }
1095 default:
1096 rc = -1;
1097 }
1098
1099 if (rc)
1100 return (-1);
1101
1102 return (0);
1103 }
1104
1105 /*
1106 * lyr_flush_gart_cache()
1107 *
1108 * Description:
1109 * This function flushes the GART translation look-aside buffer. All
1110 * GART translation caches will be flushed after this operation.
1111 *
1112 * Arguments:
1113 * agp_regdev AGP devices struct pointer
1114 */
1115 void
lyr_flush_gart_cache(agp_registered_dev_t * agp_regdev)1116 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1117 {
1118 amd64_gart_dev_list_t *gart_list;
1119 ldi_handle_t hdl;
1120
1121 ASSERT(agp_regdev);
1122 if (agp_regdev->agprd_arctype == ARC_AMD64AGP) {
1123 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1124 gart_list; gart_list = gart_list->next) {
1125 hdl = gart_list->gart_devhdl;
1126 (void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1127 0, FKIOCTL, kcred, 0);
1128 }
1129 } else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1130 hdl = agp_regdev->agprd_targethdl;
1131 (void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1132 FKIOCTL, kcred, 0);
1133 }
1134 }
1135
1136 /*
1137 * get_max_pages()
1138 *
1139 * Description:
1140 * This function compute the total pages allowed for agp aperture
1141 * based on the ammount of physical pages.
1142 * The algorithm is: compare the aperture size with 1/4 of total
1143 * physical pages, and use the smaller one to for the max available
1144 * pages. But the minimum video memory should be 192M.
1145 *
1146 * Arguments:
1147 * aper_size system agp aperture size (in MB)
1148 *
1149 * Returns:
1150 * The max possible number of agp memory pages available to users
1151 */
1152 static uint32_t
get_max_pages(uint32_t aper_size)1153 get_max_pages(uint32_t aper_size)
1154 {
1155 uint32_t i, j, size;
1156
1157 ASSERT(aper_size <= MAXAPERMEGAS);
1158
1159 i = AGP_MB2PAGES(aper_size);
1160 j = (physmem >> 2);
1161
1162 size = ((i < j) ? i : j);
1163
1164 if (size < AGP_MB2PAGES(MINAPERMEGAS))
1165 size = AGP_MB2PAGES(MINAPERMEGAS);
1166 return (size);
1167 }
1168
1169 /*
1170 * agp_fill_empty_keyent()
1171 *
1172 * Description:
1173 * This function finds a empty key table slot and
1174 * fills it with a new entity.
1175 *
1176 * Arguments:
1177 * softsate driver soft state pointer
1178 * entryp new entity data pointer
1179 *
1180 * Returns:
1181 * NULL no key table slot available
1182 * entryp the new entity slot pointer
1183 */
1184 static keytable_ent_t *
agp_fill_empty_keyent(agpgart_softstate_t * softstate,keytable_ent_t * entryp)1185 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1186 {
1187 int key;
1188 keytable_ent_t *newentryp;
1189
1190 ASSERT(softstate);
1191 ASSERT(entryp);
1192 ASSERT(entryp->kte_memhdl);
1193 ASSERT(entryp->kte_pfnarray);
1194 ASSERT(mutex_owned(&softstate->asoft_instmutex));
1195
1196 for (key = 0; key < AGP_MAXKEYS; key++) {
1197 newentryp = &softstate->asoft_table[key];
1198 if (newentryp->kte_memhdl == NULL) {
1199 break;
1200 }
1201 }
1202
1203 if (key >= AGP_MAXKEYS) {
1204 AGPDB_PRINT2((CE_WARN,
1205 "agp_fill_empty_keyent: key table exhausted"));
1206 return (NULL);
1207 }
1208
1209 ASSERT(newentryp->kte_pfnarray == NULL);
1210 bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1211 newentryp->kte_key = key;
1212
1213 return (newentryp);
1214 }
1215
1216 /*
1217 * agp_find_bound_keyent()
1218 *
1219 * Description:
1220 * This function finds the key table entity by agp aperture page offset.
1221 * Every keytable entity will have an agp aperture range after the binding
1222 * operation.
1223 *
1224 * Arguments:
1225 * softsate driver soft state pointer
1226 * pg_offset agp aperture page offset
1227 *
1228 * Returns:
1229 * NULL no such keytable entity
1230 * pointer key table entity pointer found
1231 */
1232 static keytable_ent_t *
agp_find_bound_keyent(agpgart_softstate_t * softstate,uint32_t pg_offset)1233 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1234 {
1235 int keycount;
1236 keytable_ent_t *entryp;
1237
1238 ASSERT(softstate);
1239 ASSERT(mutex_owned(&softstate->asoft_instmutex));
1240
1241 for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1242 entryp = &softstate->asoft_table[keycount];
1243 if (entryp->kte_bound == 0) {
1244 continue;
1245 }
1246
1247 if (pg_offset < entryp->kte_pgoff)
1248 continue;
1249 if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1250 continue;
1251
1252 ASSERT(entryp->kte_memhdl);
1253 ASSERT(entryp->kte_pfnarray);
1254
1255 return (entryp);
1256 }
1257
1258 return (NULL);
1259 }
1260
1261 /*
1262 * agp_check_off()
1263 *
1264 * Description:
1265 * This function checks whether an AGP aperture range to be bound
1266 * overlaps with AGP offset already bound.
1267 *
1268 * Arguments:
1269 * entryp key table start entry pointer
1270 * pg_start AGP range start page offset
1271 * pg_num pages number to be bound
1272 *
1273 * Returns:
1274 * 0 Does not overlap
1275 * -1 Overlaps
1276 */
1277
1278 static int
agp_check_off(keytable_ent_t * entryp,uint32_t pg_start,uint32_t pg_num)1279 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1280 {
1281 int key;
1282 uint64_t pg_end;
1283 uint64_t kpg_end;
1284
1285 ASSERT(entryp);
1286
1287 pg_end = pg_start + pg_num;
1288 for (key = 0; key < AGP_MAXKEYS; key++) {
1289 if (!entryp[key].kte_bound)
1290 continue;
1291
1292 kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1293 if (!((pg_end <= entryp[key].kte_pgoff) ||
1294 (pg_start >= kpg_end)))
1295 break;
1296 }
1297
1298 if (key == AGP_MAXKEYS)
1299 return (0);
1300 else
1301 return (-1);
1302 }
1303
1304 static int
is_controlling_proc(agpgart_softstate_t * st)1305 is_controlling_proc(agpgart_softstate_t *st)
1306 {
1307 ASSERT(st);
1308
1309 if (!st->asoft_acquired) {
1310 AGPDB_PRINT2((CE_WARN,
1311 "ioctl_agpgart_setup: gart not acquired"));
1312 return (-1);
1313 }
1314 if (st->asoft_curpid != ddi_get_pid()) {
1315 AGPDB_PRINT2((CE_WARN,
1316 "ioctl_agpgart_release: not controlling process"));
1317 return (-1);
1318 }
1319
1320 return (0);
1321 }
1322
/* Drop GART ownership: forget the controlling pid and clear the flag. */
static void
release_control(agpgart_softstate_t *st)
{
	st->asoft_acquired = 0;
	st->asoft_curpid = 0;
}
1328
/* Take GART ownership: record the caller as the controlling process. */
static void
acquire_control(agpgart_softstate_t *st)
{
	st->asoft_acquired = 1;
	st->asoft_curpid = ddi_get_pid();
}
1334
1335 /*
1336 * agp_remove_from_gart()
1337 *
1338 * Description:
1339 * This function fills the gart table entries by a given page
1340 * frame number array and setup the agp aperture page to physical
1341 * memory page translation.
1342 * Arguments:
1343 * pg_offset Starting aperture page to be bound
1344 * entries the number of pages to be bound
1345 * acc_hdl GART table dma memory acc handle
1346 * tablep GART table kernel virtual address
1347 */
1348 static void
agp_remove_from_gart(uint32_t pg_offset,uint32_t entries,ddi_dma_handle_t dma_hdl,uint32_t * tablep)1349 agp_remove_from_gart(
1350 uint32_t pg_offset,
1351 uint32_t entries,
1352 ddi_dma_handle_t dma_hdl,
1353 uint32_t *tablep)
1354 {
1355 uint32_t items = 0;
1356 uint32_t *entryp;
1357
1358 entryp = tablep + pg_offset;
1359 while (items < entries) {
1360 *(entryp + items) = 0;
1361 items++;
1362 }
1363 (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1364 entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1365 }
1366
1367 /*
1368 * agp_unbind_key()
1369 *
1370 * Description:
1371 * This function unbinds AGP memory from the gart table. It will clear
1372 * all the gart entries related to this agp memory.
1373 *
1374 * Arguments:
1375 * softstate driver soft state pointer
1376 * entryp key table entity pointer
1377 *
1378 * Returns:
1379 * EINVAL invalid key table entity pointer
1380 * 0 success
1381 *
1382 */
1383 static int
agp_unbind_key(agpgart_softstate_t * softstate,keytable_ent_t * entryp)1384 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1385 {
1386 int retval = 0;
1387
1388 ASSERT(entryp);
1389 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
1390
1391 if (!entryp->kte_bound) {
1392 AGPDB_PRINT2((CE_WARN,
1393 "agp_unbind_key: key = 0x%x, not bound",
1394 entryp->kte_key));
1395 return (EINVAL);
1396 }
1397 if (entryp->kte_refcnt) {
1398 AGPDB_PRINT2((CE_WARN,
1399 "agp_unbind_key: memory is exported to users"));
1400 return (EINVAL);
1401 }
1402
1403 ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
1404 AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
1405 ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
1406
1407 switch (softstate->asoft_devreg.agprd_arctype) {
1408 case ARC_IGD810:
1409 case ARC_IGD830:
1410 retval = lyr_i8xx_remove_from_gtt(
1411 entryp->kte_pgoff, entryp->kte_pages,
1412 &softstate->asoft_devreg);
1413 if (retval) {
1414 AGPDB_PRINT2((CE_WARN,
1415 "agp_unbind_key: Key = 0x%x, clear table error",
1416 entryp->kte_key));
1417 return (EIO);
1418 }
1419 break;
1420 case ARC_INTELAGP:
1421 case ARC_AMD64AGP:
1422 agp_remove_from_gart(entryp->kte_pgoff,
1423 entryp->kte_pages,
1424 softstate->gart_dma_handle,
1425 (uint32_t *)softstate->gart_vbase);
1426 /* Flush GTLB table */
1427 lyr_flush_gart_cache(&softstate->asoft_devreg);
1428
1429 break;
1430 default:
1431 /* Never happens, but avoid gcc switch warning. */
1432 return (EIO);
1433 }
1434
1435 entryp->kte_bound = 0;
1436
1437 return (0);
1438 }
1439
1440 /*
1441 * agp_dealloc_kmem()
1442 *
1443 * Description:
1444 * This function deallocates dma memory resources for userland
1445 * applications.
1446 *
1447 * Arguments:
1448 * entryp keytable entity pointer
1449 */
1450 static void
agp_dealloc_kmem(keytable_ent_t * entryp)1451 agp_dealloc_kmem(keytable_ent_t *entryp)
1452 {
1453 kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1454 entryp->kte_pfnarray = NULL;
1455
1456 (void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1457 KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1458 ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1459 KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1460 KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1461 KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1462
1463 ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1464 KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1465
1466 kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1467 entryp->kte_memhdl = NULL;
1468 }
1469
1470 /*
1471 * agp_dealloc_mem()
1472 *
1473 * Description:
1474 * This function deallocates physical memory resources allocated for
1475 * userland applications.
1476 *
1477 * Arguments:
1478 * st driver soft state pointer
1479 * entryp key table entity pointer
1480 *
1481 * Returns:
1482 * -1 not a valid memory type or the memory is mapped by
1483 * user area applications
1484 * 0 success
1485 */
1486 static int
agp_dealloc_mem(agpgart_softstate_t * st,keytable_ent_t * entryp)1487 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t *entryp)
1488 {
1489
1490 ASSERT(entryp);
1491 ASSERT(st);
1492 ASSERT(entryp->kte_memhdl);
1493 ASSERT(mutex_owned(&st->asoft_instmutex));
1494
1495 /* auto unbind here */
1496 if (entryp->kte_bound && !entryp->kte_refcnt) {
1497 AGPDB_PRINT2((CE_WARN,
1498 "agp_dealloc_mem: key=0x%x, auto unbind",
1499 entryp->kte_key));
1500
1501 /*
1502 * agp_dealloc_mem may be called indirectly by agp_detach.
1503 * In the agp_detach function, agpgart_close is already
1504 * called which will free the gart table. agp_unbind_key
1505 * will panic if no valid gart table exists. So test if
1506 * gart table exsits here.
1507 */
1508 if (st->asoft_opened)
1509 (void) agp_unbind_key(st, entryp);
1510 }
1511 if (entryp->kte_refcnt) {
1512 AGPDB_PRINT2((CE_WARN,
1513 "agp_dealloc_mem: memory is exported to users"));
1514 return (-1);
1515 }
1516
1517 switch (entryp->kte_type) {
1518 case AGP_NORMAL:
1519 case AGP_PHYSICAL:
1520 agp_dealloc_kmem(entryp);
1521 break;
1522 default:
1523 return (-1);
1524 }
1525
1526 return (0);
1527 }
1528
1529 /*
1530 * agp_del_allkeys()
1531 *
1532 * Description:
1533 * This function calls agp_dealloc_mem to release all the agp memory
1534 * resource allocated.
1535 *
1536 * Arguments:
1537 * softsate driver soft state pointer
1538 * Returns:
1539 * -1 can not free all agp memory
1540 * 0 success
1541 *
1542 */
1543 static int
agp_del_allkeys(agpgart_softstate_t * softstate)1544 agp_del_allkeys(agpgart_softstate_t *softstate)
1545 {
1546 int key;
1547 int ret = 0;
1548
1549 ASSERT(softstate);
1550 for (key = 0; key < AGP_MAXKEYS; key++) {
1551 if (softstate->asoft_table[key].kte_memhdl != NULL) {
1552 /*
1553 * Check if we can free agp memory now.
1554 * If agp memory is exported to user
1555 * applications, agp_dealloc_mem will fail.
1556 */
1557 if (agp_dealloc_mem(softstate,
1558 &softstate->asoft_table[key]))
1559 ret = -1;
1560 }
1561 }
1562
1563 return (ret);
1564 }
1565
1566 /*
1567 * pfn2gartentry()
1568 *
1569 * Description:
1570 * This function converts a physical address to GART entry.
1571 * For AMD64, hardware only support addresses below 40bits,
1572 * about 1024G physical address, so the largest pfn
1573 * number is below 28 bits. Please refer to GART and GTT entry
1574 * format table in agpdefs.h for entry format. Intel IGD only
1575 * only supports GTT entry below 1G. Intel AGP only supports
1576 * GART entry below 4G.
1577 *
1578 * Arguments:
1579 * arc_type system agp arc type
1580 * pfn page frame number
1581 * itemv the entry item to be returned
1582 * Returns:
1583 * -1 not a invalid page frame
1584 * 0 conversion success
1585 */
static int
pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
{
	uint64_t paddr;

	/* Full physical address corresponding to the page frame. */
	paddr = (uint64_t)pfn << AGP_PAGE_SHIFT;
	AGPDB_PRINT1((CE_NOTE, "checking pfn number %lu for type %d",
	    pfn, arc_type));

	switch (arc_type) {
	case ARC_INTELAGP:
	{
		/* Only support 32-bit hardware address */
		if ((paddr & AGP_INTEL_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "INTEL AGP Hardware only support 32 bits"));
			return (-1);
		}
		/* Entry is the page address with the valid bit set. */
		*itemv = (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;

		break;
	}
	case ARC_AMD64AGP:
	{
		uint32_t value1, value2;
		/* Physaddr should not exceed 40-bit */
		if ((paddr & AMD64_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "AMD64 GART hardware only supoort 40 bits"));
			return (-1);
		}
		/*
		 * AMD64 entry format: pfn bits 20 and up land in entry
		 * bits 4-11, the low 20 pfn bits in entry bits 12-31
		 * (see the entry format table in agpdefs.h).
		 */
		value1 = (uint32_t)pfn >> 20;
		value1 <<= 4;
		value2 = (uint32_t)pfn << 12;

		*itemv = value1 | value2 | AMD64_ENTRY_VALID;
		break;
	}
	case ARC_IGD810:
		/*
		 * For the IGD types no entry value is produced: *itemv is
		 * left unwritten and the return value only reports whether
		 * the pfn is addressable (agp_check_pfns relies on this).
		 */
		if ((paddr & I810_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "Intel i810 only support 30 bits"));
			return (-1);
		}
		break;

	case ARC_IGD830:
		/* Same as i810: validation only, *itemv is not written. */
		if ((paddr & GTT_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "Intel IGD only support 32 bits"));
			return (-1);
		}
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "pfn2gartentry: arc type = %d, not support", arc_type));
		return (-1);
	}
	return (0);
}
1646
1647 /*
1648 * Check allocated physical pages validity, only called in DEBUG
1649 * mode.
1650 */
1651 static int
agp_check_pfns(agp_arc_type_t arc_type,pfn_t * pfnarray,int items)1652 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1653 {
1654 int count;
1655 uint32_t ret;
1656
1657 for (count = 0; count < items; count++) {
1658 if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1659 break;
1660 }
1661 if (count < items)
1662 return (-1);
1663 else
1664 return (0);
1665 }
1666
1667 /*
1668 * kmem_getpfns()
1669 *
1670 * Description:
1671 * This function gets page frame numbers from dma handle.
1672 *
1673 * Arguments:
1674 * dma_handle dma hanle allocated by ddi_dma_alloc_handle
1675 * dma_cookip dma cookie pointer
1676 * cookies_num cookies number
1677 * pfnarray array to store page frames
1678 *
1679 * Returns:
1680 * 0 success
1681 */
1682 static int
kmem_getpfns(ddi_dma_handle_t dma_handle,ddi_dma_cookie_t * dma_cookiep,int cookies_num,pfn_t * pfnarray)1683 kmem_getpfns(
1684 ddi_dma_handle_t dma_handle,
1685 ddi_dma_cookie_t *dma_cookiep,
1686 int cookies_num,
1687 pfn_t *pfnarray)
1688 {
1689 int num_cookies;
1690 int index = 0;
1691
1692 num_cookies = cookies_num;
1693
1694 while (num_cookies > 0) {
1695 uint64_t ck_startaddr, ck_length, ck_end;
1696 ck_startaddr = dma_cookiep->dmac_address;
1697 ck_length = dma_cookiep->dmac_size;
1698
1699 ck_end = ck_startaddr + ck_length;
1700 while (ck_startaddr < ck_end) {
1701 pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1702 ck_startaddr += AGP_PAGE_SIZE;
1703 index++;
1704 }
1705
1706 num_cookies--;
1707 if (num_cookies > 0) {
1708 ddi_dma_nextcookie(dma_handle, dma_cookiep);
1709 }
1710 }
1711
1712 return (0);
1713 }
1714
1715 static int
copyinfo(agpgart_softstate_t * softstate,agp_info_t * info)1716 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1717 {
1718 switch (softstate->asoft_devreg.agprd_arctype) {
1719 case ARC_IGD810:
1720 case ARC_IGD830:
1721 info->agpi_version.agpv_major = 0;
1722 info->agpi_version.agpv_minor = 0;
1723 info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1724 info->agpi_mode = 0;
1725 break;
1726 case ARC_INTELAGP:
1727 case ARC_AMD64AGP:
1728 info->agpi_version = softstate->asoft_info.agpki_tver;
1729 info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1730 info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1731 break;
1732 default:
1733 AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
1734 return (-1);
1735 }
1736 /*
1737 * 64bit->32bit conversion possible
1738 */
1739 info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1740 info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1741 info->agpi_pgtotal = softstate->asoft_pgtotal;
1742 info->agpi_pgsystem = info->agpi_pgtotal;
1743 info->agpi_pgused = softstate->asoft_pgused;
1744
1745 return (0);
1746 }
1747
1748 static uint32_t
agp_v2_setup(uint32_t tstatus,uint32_t mstatus,uint32_t mode)1749 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1750 {
1751 uint32_t cmd;
1752 int rq, sba, over4g, fw, rate;
1753
1754 /*
1755 * tstatus: target device status
1756 * mstatus: master device status
1757 * mode: the agp mode to be sent
1758 */
1759
1760 /*
1761 * RQ - Request Queue size
1762 * set RQ to the min of mode and tstatus
1763 * if mode set a RQ larger than hardware can support,
1764 * use the max RQ which hardware can support.
1765 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1766 * Corelogic will enqueue agp transaction
1767 */
1768 rq = mode & AGPSTAT_RQ_MASK;
1769 if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1770 rq = tstatus & AGPSTAT_RQ_MASK;
1771
1772 /*
1773 * SBA - Sideband Addressing
1774 *
1775 * Sideband Addressing provides an additional bus to pass requests
1776 * (address and command) to the target from the master.
1777 *
1778 * set SBA if all three support it
1779 */
1780 sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1781 & (mode & AGPSTAT_SBA);
1782
1783 /* set OVER4G if all three support it */
1784 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1785 & (mode & AGPSTAT_OVER4G);
1786
1787 /*
1788 * FW - fast write
1789 *
1790 * acceleration of memory write transactions from the corelogic to the
1791 * A.G.P. master device acting like a PCI target.
1792 *
1793 * set FW if all three support it
1794 */
1795 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1796 & (mode & AGPSTAT_FW);
1797
1798 /*
1799 * figure out the max rate
1800 * AGP v2 support: 4X, 2X, 1X speed
1801 * status bit meaning
1802 * ---------------------------------------------
1803 * 7:3 others
1804 * 3 0 stand for V2 support
1805 * 0:2 001:1X, 010:2X, 100:4X
1806 * ----------------------------------------------
1807 */
1808 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1809 & (mode & AGPSTAT_RATE_MASK);
1810 if (rate & AGP2_RATE_4X)
1811 rate = AGP2_RATE_4X;
1812 else if (rate & AGP2_RATE_2X)
1813 rate = AGP2_RATE_2X;
1814 else
1815 rate = AGP2_RATE_1X;
1816
1817 cmd = rq | sba | over4g | fw | rate;
1818 /* enable agp mode */
1819 cmd |= AGPCMD_AGPEN;
1820
1821 return (cmd);
1822 }
1823
1824 static uint32_t
agp_v3_setup(uint32_t tstatus,uint32_t mstatus,uint32_t mode)1825 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1826 {
1827 uint32_t cmd = 0;
1828 uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1829
1830 /*
1831 * tstatus: target device status
1832 * mstatus: master device status
1833 * mode: the agp mode to be set
1834 */
1835
1836 /*
1837 * RQ - Request Queue size
1838 * Set RQ to the min of mode and tstatus
1839 * If mode set a RQ larger than hardware can support,
1840 * use the max RQ which hardware can support.
1841 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1842 * Corelogic will enqueue agp transaction;
1843 */
1844 rq = mode & AGPSTAT_RQ_MASK;
1845 if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1846 rq = tstatus & AGPSTAT_RQ_MASK;
1847
1848 /*
1849 * ARQSZ - Asynchronous Request Queue size
1850 * Set the value equal to tstatus.
1851 * Don't allow the mode register to override values
1852 */
1853 arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1854
1855 /*
1856 * CAL - Calibration cycle
1857 * Set to the min of tstatus and mstatus
1858 * Don't allow override by mode register
1859 */
1860 cal = tstatus & AGPSTAT_CAL_MASK;
1861 if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1862 cal = mstatus & AGPSTAT_CAL_MASK;
1863
1864 /*
1865 * SBA - Sideband Addressing
1866 *
1867 * Sideband Addressing provides an additional bus to pass requests
1868 * (address and command) to the target from the master.
1869 *
1870 * SBA in agp v3.0 must be set
1871 */
1872 sba = AGPCMD_SBAEN;
1873
1874 /* GART64B is not set since no hardware supports it now */
1875
1876 /* Set OVER4G if all three support it */
1877 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1878 & (mode & AGPSTAT_OVER4G);
1879
1880 /*
1881 * FW - fast write
1882 *
1883 * Acceleration of memory write transactions from the corelogic to the
1884 * A.G.P. master device acting like a PCI target.
1885 *
1886 * Always set FW in AGP 3.0
1887 */
1888 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1889 & (mode & AGPSTAT_FW);
1890
1891 /*
1892 * Figure out the max rate
1893 *
1894 * AGP v3 support: 8X, 4X speed
1895 *
1896 * status bit meaning
1897 * ---------------------------------------------
1898 * 7:3 others
1899 * 3 1 stand for V3 support
1900 * 0:2 001:4X, 010:8X, 011:4X,8X
1901 * ----------------------------------------------
1902 */
1903 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1904 & (mode & AGPSTAT_RATE_MASK);
1905 if (rate & AGP3_RATE_8X)
1906 rate = AGP3_RATE_8X;
1907 else
1908 rate = AGP3_RATE_4X;
1909
1910 cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1911 /* Enable AGP mode */
1912 cmd |= AGPCMD_AGPEN;
1913
1914 return (cmd);
1915 }
1916
1917 static int
agp_setup(agpgart_softstate_t * softstate,uint32_t mode)1918 agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
1919 {
1920 uint32_t tstatus, mstatus;
1921 uint32_t agp_mode;
1922
1923 tstatus = softstate->asoft_info.agpki_tstatus;
1924 mstatus = softstate->asoft_info.agpki_mstatus;
1925
1926 /*
1927 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0
1928 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
1929 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in
1930 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register,
1931 * we can get which mode it is working at. The working mode of
1932 * AGP master and AGP target must be consistent. That is, both
1933 * of them must work on AGP 3.0 mode or AGP 2.0 mode.
1934 */
1935 if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
1936 (tstatus & AGPSTAT_MODE3)) {
1937 /* Master device should be 3.0 mode, too */
1938 if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
1939 ((mstatus & AGPSTAT_MODE3) == 0))
1940 return (EIO);
1941
1942 agp_mode = agp_v3_setup(tstatus, mstatus, mode);
1943 /* Write to the AGPCMD register of target and master devices */
1944 if (lyr_set_agp_cmd(agp_mode,
1945 &softstate->asoft_devreg))
1946 return (EIO);
1947
1948 softstate->asoft_mode = agp_mode;
1949
1950 return (0);
1951 }
1952
1953 /*
1954 * If agp taget device doesn't work in AGP 3.0 mode,
1955 * it must work in AGP 2.0 mode. And make sure
1956 * master device work in AGP 2.0 mode too
1957 */
1958 if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
1959 (mstatus & AGPSTAT_MODE3))
1960 return (EIO);
1961
1962 agp_mode = agp_v2_setup(tstatus, mstatus, mode);
1963 if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
1964 return (EIO);
1965 softstate->asoft_mode = agp_mode;
1966
1967 return (0);
1968 }
1969
1970 /*
1971 * agp_alloc_kmem()
1972 *
1973 * Description:
1974 * This function allocates physical memory for userland applications
1975 * by ddi interfaces. This function can also be called to allocate
1976 * small phsyical contiguous pages, usually tens of kilobytes.
1977 *
1978 * Arguments:
1979 * softsate driver soft state pointer
1980 * length memory size
1981 *
1982 * Returns:
1983 * entryp new keytable entity pointer
1984 * NULL no keytable slot available or no physical
1985 * memory available
1986 */
static keytable_ent_t *
agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length, int type)
{
	keytable_ent_t	keyentry;
	keytable_ent_t	*entryp;
	int		ret;

	ASSERT(AGP_ALIGNED(length));

	/* Build the prospective entry locally; commit it only on success. */
	bzero(&keyentry, sizeof (keytable_ent_t));

	keyentry.kte_pages = AGP_BYTES2PAGES(length);
	keyentry.kte_type = type;

	/*
	 * Set dma_attr_sgllen to assure contiguous physical pages:
	 * AGP_PHYSICAL requires a single cookie, AGP_NORMAL may be
	 * scattered, up to one cookie per page.
	 * NOTE(review): this mutates the shared agpgart_dma_attr global —
	 * presumably serialized by the instance mutex; confirm callers.
	 */
	if (type == AGP_PHYSICAL)
		agpgart_dma_attr.dma_attr_sgllen = 1;
	else
		agpgart_dma_attr.dma_attr_sgllen = (int)keyentry.kte_pages;

	/* 4k size pages */
	keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(softstate->asoft_dip,
	    &agpgart_dma_attr,
	    DDI_DMA_SLEEP, NULL,
	    &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
		goto err4;
	}

	if ((ret = ddi_dma_mem_alloc(
	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
	    length,
	    &gart_dev_acc_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL,
	    &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
	    &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
	    &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_kmem: ddi_dma_mem_alloc error"));

		goto err3;
	}

	ret = ddi_dma_addr_bind_handle(
	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
	    NULL,
	    KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
	    length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
	    &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);

	/*
	 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more
	 * than one cookie, we check this in the if statement.
	 */

	if ((ret != DDI_DMA_MAPPED) ||
	    ((agpgart_dma_attr.dma_attr_sgllen == 1) &&
	    (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1))) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_kmem: can not alloc physical memory properly"));
		goto err2;
	}

	/* Record one pfn per aperture page for later GART binding. */
	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
	    keyentry.kte_pages, KM_SLEEP);

	if (kmem_getpfns(
	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
	    KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
	    keyentry.kte_pfnarray)) {
		AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
		goto err1;
	}

	/*
	 * The pfn range is validated twice on purpose: the ASSERT catches
	 * it loudly in DEBUG kernels, the if handles it in production.
	 */
	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
	    keyentry.kte_pfnarray, keyentry.kte_pages));
	if (agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
	    keyentry.kte_pfnarray, keyentry.kte_pages))
		goto err1;
	/* Commit the completed entry to a free key table slot. */
	entryp = agp_fill_empty_keyent(softstate, &keyentry);
	if (!entryp) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_alloc_kmem: agp_fill_empty_keyent error"));

		goto err1;
	}
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));

	return (entryp);

	/* Error unwinding mirrors the allocation order, newest first. */
err1:
	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
	keyentry.kte_pfnarray = NULL;
	(void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
	KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
err2:
	ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
	KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
	KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
	KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
err3:
	ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
	KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
err4:
	kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
	keyentry.kte_memhdl = NULL;
	return (NULL);

}
2107
2108 /*
2109 * agp_alloc_mem()
2110 *
2111 * Description:
2112 * This function allocate physical memory for userland applications,
2113 * in order to save kernel virtual space, we use the direct mapping
2114 * memory interface if it is available.
2115 *
2116 * Arguments:
2117 * st driver soft state pointer
2118 * length memory size
2119 * type AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2120 * memory type for intel i810 IGD
2121 *
2122 * Returns:
2123 * NULL Invalid memory type or can not allocate memory
2124 * Keytable entry pointer returned by agp_alloc_kmem
2125 */
2126 static keytable_ent_t *
agp_alloc_mem(agpgart_softstate_t * st,size_t length,int type)2127 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2128 {
2129
2130 /*
2131 * AGP_PHYSICAL type require contiguous physical pages exported
2132 * to X drivers, like i810 HW cursor, ARGB cursor. the number of
2133 * pages needed is usuallysmall and contiguous, 4K, 16K. So we
2134 * use DDI interface to allocated such memory. And X use xsvc
2135 * drivers to map this memory into its own address space.
2136 */
2137 ASSERT(st);
2138
2139 switch (type) {
2140 case AGP_NORMAL:
2141 case AGP_PHYSICAL:
2142 return (agp_alloc_kmem(st, length, type));
2143 default:
2144 return (NULL);
2145 }
2146 }
2147
2148 /*
2149 * free_gart_table()
2150 *
2151 * Description:
2152 * This function frees the gart table memory allocated by driver.
2153 * Must disable gart table before calling this function.
2154 *
2155 * Arguments:
2156 * softstate driver soft state pointer
2157 *
2158 */
static void
free_gart_table(agpgart_softstate_t *st)
{

	/* No gart table was ever allocated for this instance. */
	if (st->gart_dma_handle == NULL)
		return;

	/* Teardown must mirror alloc_gart_table in reverse order. */
	(void) ddi_dma_unbind_handle(st->gart_dma_handle);
	ddi_dma_mem_free(&st->gart_dma_acc_handle);
	st->gart_dma_acc_handle = NULL;
	ddi_dma_free_handle(&st->gart_dma_handle);
	st->gart_dma_handle = NULL;
	st->gart_vbase = 0;
	st->gart_size = 0;
}
2174
2175 /*
2176 * alloc_gart_table()
2177 *
2178 * Description:
2179 * This function allocates one physical continuous gart table.
2180 * INTEL integrated video device except i810 have their special
2181 * video bios; No need to allocate gart table for them.
2182 *
2183 * Arguments:
2184 * st driver soft state pointer
2185 *
2186 * Returns:
2187 * 0 success
2188 * -1 can not allocate gart tabl
2189 */
static int
alloc_gart_table(agpgart_softstate_t *st)
{
	int			num_pages;
	size_t			table_size;
	int			ret = DDI_SUCCESS;
	ddi_dma_cookie_t	cookie;
	uint32_t		num_cookies;

	/* One 32-bit GART entry per aperture page. */
	num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);

	/*
	 * Only 40-bit maximum physical memory is supported by today's
	 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
	 * No one supports 64-bit gart entries now, so the size of gart
	 * entries defaults to 32-bit though AGP3.0 specifies the possibility
	 * of 64-bit gart entries.
	 */

	table_size = num_pages * (sizeof (uint32_t));

	/*
	 * Only AMD64 can put gart table above 4G, 40 bits at maximum
	 */
	if (st->asoft_devreg.agprd_arctype == ARC_AMD64AGP)
		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
	else
		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
	/* Allocate physical continuous page frame for gart table */
	if (ret = ddi_dma_alloc_handle(st->asoft_dip,
	    &garttable_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL, &st->gart_dma_handle)) {
		AGPDB_PRINT2((CE_WARN,
		    "alloc_gart_table: ddi_dma_alloc_handle failed"));
		goto err3;
	}

	if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
	    table_size,
	    &gart_dev_acc_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL,
	    &st->gart_vbase,
	    &st->gart_size,
	    &st->gart_dma_acc_handle)) {
		AGPDB_PRINT2((CE_WARN,
		    "alloc_gart_table: ddi_dma_mem_alloc failed"));
		goto err2;

	}

	ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
	    NULL, st->gart_vbase,
	    table_size,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL,
	    &cookie, &num_cookies);

	/*
	 * NOTE(review): cookie is consumed before the bind result is
	 * checked; on failure gart_pbase holds garbage until the error
	 * paths below reset it to 0 — confirm nothing reads it meanwhile.
	 */
	st->gart_pbase = cookie.dmac_address;

	if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
		/* A bind that produced multiple cookies must be unbound. */
		if (num_cookies > 1)
			(void) ddi_dma_unbind_handle(st->gart_dma_handle);
		AGPDB_PRINT2((CE_WARN,
		    "alloc_gart_table: alloc contiguous phys memory failed"));
		goto err1;
	}

	return (0);
	/* Error unwinding mirrors the allocation order, newest first. */
err1:
	ddi_dma_mem_free(&st->gart_dma_acc_handle);
	st->gart_dma_acc_handle = NULL;
err2:
	ddi_dma_free_handle(&st->gart_dma_handle);
	st->gart_dma_handle = NULL;
err3:
	st->gart_pbase = 0;
	st->gart_size = 0;
	st->gart_vbase = 0;

	return (-1);
}
2273
2274 /*
2275 * agp_add_to_gart()
2276 *
2277 * Description:
2278 * This function fills the gart table entries by a given page frame number
2279 * array and set up the agp aperture page to physical memory page
2280 * translation.
2281 * Arguments:
2282 * type valid sytem arc types ARC_AMD64AGP, ARC_INTELAGP,
2283 * ARC_AMD64AGP
2284 * pfnarray allocated physical page frame number array
2285 * pg_offset agp aperture start page to be bound
2286 * entries the number of pages to be bound
2287 * dma_hdl gart table dma memory handle
2288 * tablep gart table kernel virtual address
2289 * Returns:
2290 * -1 failed
2291 * 0 success
2292 */
2293 static int
agp_add_to_gart(agp_arc_type_t type,pfn_t * pfnarray,uint32_t pg_offset,uint32_t entries,ddi_dma_handle_t dma_hdl,uint32_t * tablep)2294 agp_add_to_gart(
2295 agp_arc_type_t type,
2296 pfn_t *pfnarray,
2297 uint32_t pg_offset,
2298 uint32_t entries,
2299 ddi_dma_handle_t dma_hdl,
2300 uint32_t *tablep)
2301 {
2302 int items = 0;
2303 uint32_t *entryp;
2304 uint32_t itemv;
2305
2306 entryp = tablep + pg_offset;
2307 while (items < entries) {
2308 if (pfn2gartentry(type, pfnarray[items], &itemv))
2309 break;
2310 *(entryp + items) = itemv;
2311 items++;
2312 }
2313 if (items < entries)
2314 return (-1);
2315
2316 (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2317 entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2318
2319 return (0);
2320 }
2321
2322 /*
2323 * agp_bind_key()
2324 *
2325 * Description:
2326 * This function will call low level gart table access functions to
2327 * set up gart table translation. Also it will do some sanity
2328 * checking on key table entry.
2329 *
2330 * Arguments:
2331 * softstate driver soft state pointer
2332 * keyent key table entity pointer to be bound
2333 * pg_offset aperture start page to be bound
2334 * Returns:
2335 * EINVAL not a valid operation
2336 */
static int
agp_bind_key(agpgart_softstate_t *softstate,
    keytable_ent_t *keyent, uint32_t pg_offset)
{
	uint64_t pg_end;
	int ret = 0;

	ASSERT(keyent);
	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
	ASSERT(mutex_owned(&softstate->asoft_instmutex));

	/* The binding must fit entirely inside the aperture. */
	pg_end = pg_offset + keyent->kte_pages;

	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: key=0x%x,exceed aper range",
		    keyent->kte_key));

		return (EINVAL);
	}

	/* Reject ranges that overlap an already-bound key. */
	if (agp_check_off(softstate->asoft_table,
	    pg_offset, keyent->kte_pages)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
		    pg_offset, keyent->kte_pages));
		return (EINVAL);
	}

	ASSERT(keyent->kte_pfnarray != NULL);

	switch (softstate->asoft_devreg.agprd_arctype) {
	case ARC_IGD810:
	case ARC_IGD830:
		/* Intel IGD: program the GTT via the layered master driver. */
		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
		    &softstate->asoft_devreg);
		if (ret)
			return (EIO);
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* True AGP: write the driver-owned gart table directly. */
		ret = agp_add_to_gart(
		    softstate->asoft_devreg.agprd_arctype,
		    keyent->kte_pfnarray,
		    pg_offset,
		    keyent->kte_pages,
		    softstate->gart_dma_handle,
		    (uint32_t *)softstate->gart_vbase);
		if (ret)
			return (EINVAL);
		/* Flush GTLB table */
		lyr_flush_gart_cache(&softstate->asoft_devreg);
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: arc type = 0x%x unsupported",
		    softstate->asoft_devreg.agprd_arctype));
		return (EINVAL);
	}
	return (0);
}
2398
/*
 * agpgart_attach()
 *
 * Attach entry point: allocate per-instance soft state, obtain an LDI
 * identifier for the layered drivers, install kstats, create the
 * /dev/agpgart minor node and allocate the key table that tracks
 * memory handed out to userland.  On any failure, previously acquired
 * resources are unwound through the err4..err1 labels in reverse order.
 */
static int
agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *softstate;

	/* Only plain attach is supported (no DDI_RESUME handling). */
	if (cmd != DDI_ATTACH) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: only attach op supported"));
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
	    != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: soft state zalloc failed"));
		goto err1;

	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
	softstate->asoft_dip = dip;
	/*
	 * Allocate LDI identifier for agpgart driver
	 * Agpgart driver is the kernel consumer
	 */
	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: LDI indentifier allcation failed"));
		goto err2;
	}

	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
	/* Install agp kstat */
	if (agp_init_kstats(softstate)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
		goto err3;
	}
	/*
	 * devfs will create /dev/agpgart
	 * and /devices/agpgart:agpgart
	 */

	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
	    AGP_INST2MINOR(instance),
	    DDI_NT_AGP_PSEUDO, 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: Can not create minor node"));
		goto err4;
	}

	/* Key table: one slot per possible userland allocation key. */
	softstate->asoft_table = kmem_zalloc(
	    AGP_MAXKEYS * (sizeof (keytable_ent_t)),
	    KM_SLEEP);

	return (DDI_SUCCESS);
err4:
	agp_fini_kstats(softstate);
err3:
	ldi_ident_release(softstate->asoft_li);
err2:
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
err1:
	return (DDI_FAILURE);
}
2465
2466 static int
agpgart_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)2467 agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2468 {
2469 int instance;
2470 agpgart_softstate_t *st;
2471
2472 instance = ddi_get_instance(dip);
2473
2474 st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2475
2476 if (cmd != DDI_DETACH)
2477 return (DDI_FAILURE);
2478
2479 /*
2480 * Caller should free all the memory allocated explicitly.
2481 * We release the memory allocated by caller which is not
2482 * properly freed. mutex_enter here make sure assertion on
2483 * softstate mutex success in agp_dealloc_mem.
2484 */
2485 mutex_enter(&st->asoft_instmutex);
2486 if (agp_del_allkeys(st)) {
2487 AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
2488 AGPDB_PRINT2((CE_WARN,
2489 "you might free agp memory exported to your applications"));
2490
2491 mutex_exit(&st->asoft_instmutex);
2492 return (DDI_FAILURE);
2493 }
2494 mutex_exit(&st->asoft_instmutex);
2495 if (st->asoft_table) {
2496 kmem_free(st->asoft_table,
2497 AGP_MAXKEYS * (sizeof (keytable_ent_t)));
2498 st->asoft_table = 0;
2499 }
2500
2501 ddi_remove_minor_node(dip, AGPGART_DEVNODE);
2502 agp_fini_kstats(st);
2503 ldi_ident_release(st->asoft_li);
2504 mutex_destroy(&st->asoft_instmutex);
2505 ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2506
2507 return (DDI_SUCCESS);
2508 }
2509
2510 /*ARGSUSED*/
2511 static int
agpgart_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** resultp)2512 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2513 void **resultp)
2514 {
2515 agpgart_softstate_t *st;
2516 int instance, rval = DDI_FAILURE;
2517 dev_t dev;
2518
2519 switch (cmd) {
2520 case DDI_INFO_DEVT2DEVINFO:
2521 dev = (dev_t)arg;
2522 instance = AGP_DEV2INST(dev);
2523 st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2524 if (st != NULL) {
2525 mutex_enter(&st->asoft_instmutex);
2526 *resultp = st->asoft_dip;
2527 mutex_exit(&st->asoft_instmutex);
2528 rval = DDI_SUCCESS;
2529 } else
2530 *resultp = NULL;
2531
2532 break;
2533 case DDI_INFO_DEVT2INSTANCE:
2534 dev = (dev_t)arg;
2535 instance = AGP_DEV2INST(dev);
2536 *resultp = (void *)(uintptr_t)instance;
2537 rval = DDI_SUCCESS;
2538
2539 break;
2540 default:
2541 break;
2542 }
2543
2544 return (rval);
2545 }
2546
2547 /*
2548 * agpgart_open()
2549 *
2550 * Description:
2551 * This function is the driver open entry point. If it is the
2552 * first time the agpgart driver is opened, the driver will
2553 * open other agp related layered drivers and set up the agpgart
2554 * table properly.
2555 *
2556 * Arguments:
2557 * dev device number pointer
2558 * openflags open flags
2559 * otyp OTYP_BLK, OTYP_CHR
2560 * credp user's credential's struct pointer
2561 *
2562 * Returns:
2563 * ENXIO operation error
2564 * EAGAIN resoure temporarily unvailable
2565 * 0 success
2566 */
/*ARGSUSED*/
static int
agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
{
	int instance = AGP_DEV2INST(*dev);
	agpgart_softstate_t *softstate;
	int rc = 0;
	uint32_t devid;

	/* Privilege check: only sufficiently privileged callers may open. */
	if (secpolicy_gart_access(credp)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: permission denied"));
		return (EPERM);
	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
		return (ENXIO);
	}

	mutex_enter(&softstate->asoft_instmutex);

	/* Subsequent opens only bump the open reference count. */
	if (softstate->asoft_opened) {
		softstate->asoft_opened++;
		mutex_exit(&softstate->asoft_instmutex);
		return (0);
	}

	/*
	 * The driver is opened first time, so we initialize layered
	 * driver interface and softstate member here.
	 */
	softstate->asoft_pgused = 0;
	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
		mutex_exit(&softstate->asoft_instmutex);
		return (EAGAIN);
	}

	/* Call into layered driver */
	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}

	/*
	 * BIOS already set up gtt table for ARC_IGD830
	 */
	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
		softstate->asoft_opened++;

		softstate->asoft_pgtotal =
		    get_max_pages(softstate->asoft_info.agpki_apersize);

		if (lyr_config_devices(&softstate->asoft_devreg)) {
			AGPDB_PRINT2((CE_WARN,
			    "agpgart_open: lyr_config_devices error"));
			lyr_end(&softstate->asoft_devreg);
			mutex_exit(&softstate->asoft_instmutex);

			return (EIO);
		}
		/* These Intel chipsets need an explicit flush setup ioctl. */
		devid = softstate->asoft_info.agpki_mdevid;
		if (IS_INTEL_915(devid) ||
		    IS_INTEL_965(devid) ||
		    IS_INTEL_X33(devid) ||
		    IS_INTEL_G4X(devid)) {
			rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
			    INTEL_CHIPSET_FLUSH_SETUP, 0, FKIOCTL, kcred, 0);
		}
		if (rc) {
			AGPDB_PRINT2((CE_WARN,
			    "agpgart_open: Intel chipset flush setup error"));
			lyr_end(&softstate->asoft_devreg);
			mutex_exit(&softstate->asoft_instmutex);
			return (EIO);
		}
		mutex_exit(&softstate->asoft_instmutex);
		return (0);
	}

	rc = alloc_gart_table(softstate);

	/*
	 * Allocate physically contiguous pages for AGP arc or
	 * i810 arc. If failed, divide aper_size by 2 to
	 * reduce gart table size until 4 megabytes. This
	 * is just a workaround for systems with very few
	 * physically contiguous memory.
	 */
	if (rc) {
		while ((softstate->asoft_info.agpki_apersize >= 4) &&
		    (alloc_gart_table(softstate))) {
			softstate->asoft_info.agpki_apersize >>= 1;
		}
		if (softstate->asoft_info.agpki_apersize >= 4)
			rc = 0;
	}

	if (rc != 0) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: alloc gart table failed"));
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EAGAIN);
	}

	softstate->asoft_pgtotal =
	    get_max_pages(softstate->asoft_info.agpki_apersize);
	/*
	 * BIOS doesn't initialize GTT for i810,
	 * So i810 GTT must be created by driver.
	 *
	 * Set up gart table and enable it.
	 */
	if (lyr_set_gart_addr(softstate->gart_pbase,
	    &softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: set gart table addr failed"));
		free_gart_table(softstate);
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}
	if (lyr_config_devices(&softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: lyr_config_devices failed"));
		free_gart_table(softstate);
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}

	softstate->asoft_opened++;
	mutex_exit(&softstate->asoft_instmutex);

	return (0);
}
2706
2707 /*
2708 * agpgart_close()
2709 *
2710 * Description:
2711 * agpgart_close will release resources allocated in the first open
2712 * and close other open layered drivers. Also it frees the memory
2713 * allocated by ioctls.
2714 *
2715 * Arguments:
2716 * dev device number
2717 * flag file status flag
2718 * otyp OTYP_BLK, OTYP_CHR
2719 * credp user's credential's struct pointer
2720 *
2721 * Returns:
2722 * ENXIO not an error, to support "deferred attach"
2723 * 0 success
2724 */
2725 /*ARGSUSED*/
2726 static int
agpgart_close(dev_t dev,int flag,int otyp,cred_t * credp)2727 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2728 {
2729 int instance = AGP_DEV2INST(dev);
2730 agpgart_softstate_t *softstate;
2731 int rc = 0;
2732 uint32_t devid;
2733
2734 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2735 if (softstate == NULL) {
2736 AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2737 return (ENXIO);
2738 }
2739
2740 mutex_enter(&softstate->asoft_instmutex);
2741 ASSERT(softstate->asoft_opened);
2742
2743
2744 /*
2745 * If the last process close this device is not the controlling
2746 * process, also release the control over agpgart driver here if the
2747 * the controlling process fails to release the control before it
2748 * close the driver.
2749 */
2750 if (softstate->asoft_acquired == 1) {
2751 AGPDB_PRINT2((CE_WARN,
2752 "agpgart_close: auto release control over driver"));
2753 release_control(softstate);
2754 }
2755
2756 devid = softstate->asoft_info.agpki_mdevid;
2757 if (IS_INTEL_915(devid) ||
2758 IS_INTEL_965(devid) ||
2759 IS_INTEL_X33(devid) ||
2760 IS_INTEL_G4X(devid)) {
2761 rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
2762 INTEL_CHIPSET_FLUSH_FREE, 0, FKIOCTL, kcred, 0);
2763 }
2764 if (rc) {
2765 AGPDB_PRINT2((CE_WARN,
2766 "agpgart_open: Intel chipset flush free error"));
2767 }
2768
2769 if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2770 AGPDB_PRINT2((CE_WARN,
2771 "agpgart_close: lyr_unconfig_device error"));
2772 mutex_exit(&softstate->asoft_instmutex);
2773 return (EIO);
2774 }
2775 softstate->asoft_agpen = 0;
2776
2777 if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2778 free_gart_table(softstate);
2779 }
2780
2781 lyr_end(&softstate->asoft_devreg);
2782
2783 /*
2784 * This statement must be positioned before agp_del_allkeys
2785 * agp_dealloc_mem indirectly called by agp_del_allkeys
2786 * will test this variable.
2787 */
2788 softstate->asoft_opened = 0;
2789
2790 /*
2791 * Free the memory allocated by user applications which
2792 * was never deallocated.
2793 */
2794 (void) agp_del_allkeys(softstate);
2795
2796 mutex_exit(&softstate->asoft_instmutex);
2797
2798 return (0);
2799 }
2800
/*
 * ioctl_agpgart_info()
 *
 * AGPIOC_INFO handler: gather driver/aperture information via copyinfo()
 * and copy it out to the caller, converting to the 32-bit layout when a
 * 32-bit application runs on a 64-bit kernel.
 */
static int
ioctl_agpgart_info(agpgart_softstate_t *softstate, void *arg, int flags)
{
	agp_info_t infostruct;
#ifdef _MULTI_DATAMODEL
	agp_info32_t infostruct32;
#endif

	bzero(&infostruct, sizeof (agp_info_t));

#ifdef _MULTI_DATAMODEL
	bzero(&infostruct32, sizeof (agp_info32_t));
	/* 32-bit caller: convert the native struct before copyout. */
	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
		if (copyinfo(softstate, &infostruct))
			return (EINVAL);

		agpinfo_default_to_32(infostruct, infostruct32);
		if (ddi_copyout(&infostruct32, arg,
		    sizeof (agp_info32_t), flags) != 0)
			return (EFAULT);

		return (0);
	}
#endif /* _MULTI_DATAMODEL */
	if (copyinfo(softstate, &infostruct))
		return (EINVAL);

	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
		return (EFAULT);
	}

	return (0);
}
2834
2835 static int
ioctl_agpgart_acquire(agpgart_softstate_t * st)2836 ioctl_agpgart_acquire(agpgart_softstate_t *st)
2837 {
2838 if (st->asoft_acquired) {
2839 AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
2840 return (EBUSY);
2841 }
2842 acquire_control(st);
2843 return (0);
2844 }
2845
2846 static int
ioctl_agpgart_release(agpgart_softstate_t * st)2847 ioctl_agpgart_release(agpgart_softstate_t *st)
2848 {
2849 if (is_controlling_proc(st) < 0) {
2850 AGPDB_PRINT2((CE_WARN,
2851 "ioctl_agpgart_release: not a controlling process"));
2852 return (EPERM);
2853 }
2854 release_control(st);
2855 return (0);
2856 }
2857
2858 static int
ioctl_agpgart_setup(agpgart_softstate_t * st,void * arg,int flags)2859 ioctl_agpgart_setup(agpgart_softstate_t *st, void *arg, int flags)
2860 {
2861 agp_setup_t data;
2862 int rc = 0;
2863
2864 if (is_controlling_proc(st) < 0) {
2865 AGPDB_PRINT2((CE_WARN,
2866 "ioctl_agpgart_setup: not a controlling process"));
2867 return (EPERM);
2868 }
2869
2870 if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2871 AGPDB_PRINT2((CE_WARN,
2872 "ioctl_agpgart_setup: no true agp bridge"));
2873 return (EINVAL);
2874 }
2875
2876 if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2877 return (EFAULT);
2878
2879 if (rc = agp_setup(st, data.agps_mode))
2880 return (rc);
2881 /* Store agp mode status for kstat */
2882 st->asoft_agpen = 1;
2883 return (0);
2884 }
2885
2886 static int
ioctl_agpgart_alloc(agpgart_softstate_t * st,void * arg,int flags)2887 ioctl_agpgart_alloc(agpgart_softstate_t *st, void *arg, int flags)
2888 {
2889 agp_allocate_t alloc_info;
2890 keytable_ent_t *entryp;
2891 size_t length;
2892 uint64_t pg_num;
2893
2894 if (is_controlling_proc(st) < 0) {
2895 AGPDB_PRINT2((CE_WARN,
2896 "ioctl_agpgart_alloc: not a controlling process"));
2897 return (EPERM);
2898 }
2899
2900 if (ddi_copyin(arg, &alloc_info,
2901 sizeof (agp_allocate_t), flags) != 0) {
2902 return (EFAULT);
2903 }
2904 pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
2905 if (pg_num > st->asoft_pgtotal) {
2906 AGPDB_PRINT2((CE_WARN,
2907 "ioctl_agpgart_alloc: exceeding the memory pages limit"));
2908 AGPDB_PRINT2((CE_WARN,
2909 "ioctl_agpgart_alloc: request %x pages failed",
2910 alloc_info.agpa_pgcount));
2911 AGPDB_PRINT2((CE_WARN,
2912 "ioctl_agpgart_alloc: pages used %x total is %x",
2913 st->asoft_pgused, st->asoft_pgtotal));
2914
2915 return (EINVAL);
2916 }
2917
2918 length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
2919 entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
2920 if (!entryp) {
2921 AGPDB_PRINT2((CE_WARN,
2922 "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
2923 length));
2924 return (ENOMEM);
2925 }
2926 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2927 alloc_info.agpa_key = entryp->kte_key;
2928 if (alloc_info.agpa_type == AGP_PHYSICAL) {
2929 alloc_info.agpa_physical =
2930 (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
2931 }
2932 /* Update the memory pagse used */
2933 st->asoft_pgused += alloc_info.agpa_pgcount;
2934
2935 if (ddi_copyout(&alloc_info, arg,
2936 sizeof (agp_allocate_t), flags) != 0) {
2937
2938 return (EFAULT);
2939 }
2940
2941 return (0);
2942 }
2943
2944 static int
ioctl_agpgart_dealloc(agpgart_softstate_t * st,intptr_t arg)2945 ioctl_agpgart_dealloc(agpgart_softstate_t *st, intptr_t arg)
2946 {
2947 int key;
2948 keytable_ent_t *keyent;
2949
2950 if (is_controlling_proc(st) < 0) {
2951 AGPDB_PRINT2((CE_WARN,
2952 "ioctl_agpgart_dealloc: not a controlling process"));
2953 return (EPERM);
2954 }
2955 key = (int)arg;
2956 if ((key >= AGP_MAXKEYS) || key < 0) {
2957 return (EINVAL);
2958 }
2959 keyent = &st->asoft_table[key];
2960 if (!keyent->kte_memhdl) {
2961 return (EINVAL);
2962 }
2963
2964 if (agp_dealloc_mem(st, keyent))
2965 return (EINVAL);
2966
2967 /* Update the memory pages used */
2968 st->asoft_pgused -= keyent->kte_pages;
2969 bzero(keyent, sizeof (keytable_ent_t));
2970
2971 return (0);
2972 }
2973
2974 static int
ioctl_agpgart_bind(agpgart_softstate_t * st,void * arg,int flags)2975 ioctl_agpgart_bind(agpgart_softstate_t *st, void *arg, int flags)
2976 {
2977 agp_bind_t bind_info;
2978 keytable_ent_t *keyent;
2979 int key;
2980 uint32_t pg_offset;
2981 int retval = 0;
2982
2983 if (is_controlling_proc(st) < 0) {
2984 AGPDB_PRINT2((CE_WARN,
2985 "ioctl_agpgart_bind: not a controlling process"));
2986 return (EPERM);
2987 }
2988
2989 if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
2990 return (EFAULT);
2991 }
2992
2993 key = bind_info.agpb_key;
2994 if ((key >= AGP_MAXKEYS) || key < 0) {
2995 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
2996 return (EINVAL);
2997 }
2998
2999 if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
3000 if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
3001 st->asoft_info.agpki_presize) {
3002 AGPDB_PRINT2((CE_WARN,
3003 "ioctl_agpgart_bind: bind to prealloc area "
3004 "pgstart = %dKB < presize = %ldKB",
3005 AGP_PAGES2KB(bind_info.agpb_pgstart),
3006 st->asoft_info.agpki_presize));
3007 return (EINVAL);
3008 }
3009 }
3010
3011 pg_offset = bind_info.agpb_pgstart;
3012 keyent = &st->asoft_table[key];
3013 if (!keyent->kte_memhdl) {
3014 AGPDB_PRINT2((CE_WARN,
3015 "ioctl_agpgart_bind: Key = 0x%x can't get keyenty",
3016 key));
3017 return (EINVAL);
3018 }
3019
3020 if (keyent->kte_bound != 0) {
3021 AGPDB_PRINT2((CE_WARN,
3022 "ioctl_agpgart_bind: Key = 0x%x already bound",
3023 key));
3024 return (EINVAL);
3025 }
3026 retval = agp_bind_key(st, keyent, pg_offset);
3027
3028 if (retval == 0) {
3029 keyent->kte_pgoff = pg_offset;
3030 keyent->kte_bound = 1;
3031 }
3032
3033 return (retval);
3034 }
3035
3036 static int
ioctl_agpgart_unbind(agpgart_softstate_t * st,void * arg,int flags)3037 ioctl_agpgart_unbind(agpgart_softstate_t *st, void *arg, int flags)
3038 {
3039 int key, retval = 0;
3040 agp_unbind_t unbindinfo;
3041 keytable_ent_t *keyent;
3042
3043 if (is_controlling_proc(st) < 0) {
3044 AGPDB_PRINT2((CE_WARN,
3045 "ioctl_agpgart_bind: not a controlling process"));
3046 return (EPERM);
3047 }
3048
3049 if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3050 return (EFAULT);
3051 }
3052 key = unbindinfo.agpu_key;
3053 if ((key >= AGP_MAXKEYS) || key < 0) {
3054 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3055 return (EINVAL);
3056 }
3057 keyent = &st->asoft_table[key];
3058 if (!keyent->kte_bound) {
3059 return (EINVAL);
3060 }
3061
3062 if ((retval = agp_unbind_key(st, keyent)) != 0)
3063 return (retval);
3064
3065 return (0);
3066 }
3067
3068 static int
ioctl_agpgart_flush_chipset(agpgart_softstate_t * st)3069 ioctl_agpgart_flush_chipset(agpgart_softstate_t *st)
3070 {
3071 ldi_handle_t hdl;
3072 uint32_t devid;
3073 int rc = 0;
3074 devid = st->asoft_info.agpki_mdevid;
3075 hdl = st->asoft_devreg.agprd_targethdl;
3076 if (IS_INTEL_915(devid) ||
3077 IS_INTEL_965(devid) ||
3078 IS_INTEL_X33(devid) ||
3079 IS_INTEL_G4X(devid)) {
3080 rc = ldi_ioctl(hdl, INTEL_CHIPSET_FLUSH, 0, FKIOCTL, kcred, 0);
3081 }
3082 return (rc);
3083 }
3084
3085 static int
ioctl_agpgart_pages_bind(agpgart_softstate_t * st,void * arg,int flags)3086 ioctl_agpgart_pages_bind(agpgart_softstate_t *st, void *arg, int flags)
3087 {
3088 agp_bind_pages_t bind_info;
3089 igd_gtt_seg_t gttseg;
3090 size_t array_size;
3091 ulong_t paddr;
3092 uint32_t igs_type;
3093 ldi_handle_t hdl;
3094 int i, rval;
3095 int rc = 0;
3096
3097 if (ddi_copyin(arg, &bind_info, sizeof (bind_info), flags) != 0) {
3098 return (EFAULT);
3099 }
3100
3101 /*
3102 * Convert agp_type to something the igs layer understands.
3103 */
3104 switch (bind_info.agpb_type) {
3105 case AGP_USER_MEMORY:
3106 case AGP_USER_CACHED_MEMORY: /* Todo */
3107 igs_type = AGP_NORMAL;
3108 break;
3109 default:
3110 return (EINVAL);
3111 }
3112
3113 if (bind_info.agpb_pgcount == 0)
3114 return (0);
3115
3116 gttseg.igs_pgstart = bind_info.agpb_pgstart;
3117 gttseg.igs_npage = bind_info.agpb_pgcount;
3118
3119 array_size = bind_info.agpb_pgcount * sizeof (uint32_t);
3120 gttseg.igs_phyaddr = kmem_zalloc(array_size, KM_SLEEP);
3121 if (bind_info.agpb_pages != NULL) {
3122 for (i = 0; i < bind_info.agpb_pgcount; i++) {
3123 paddr = bind_info.agpb_pages[i] << GTT_PAGE_SHIFT;
3124 gttseg.igs_phyaddr[i] = (uint32_t)paddr;
3125 }
3126 }
3127
3128 gttseg.igs_type = igs_type;
3129 gttseg.igs_flags = 0; /* not used */
3130 gttseg.igs_scratch = 0; /* not used */
3131
3132 /* See i8xx_add_to_gtt */
3133 hdl = st->asoft_devreg.agprd_masterhdl;
3134 if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)>tseg, FKIOCTL,
3135 kcred, &rval)) {
3136 rc = -1;
3137 }
3138
3139 kmem_free(gttseg.igs_phyaddr, array_size);
3140
3141 return (rc);
3142 }
3143
3144 static int
ioctl_agpgart_pages_unbind(agpgart_softstate_t * st,void * arg,int flags)3145 ioctl_agpgart_pages_unbind(agpgart_softstate_t *st, void *arg, int flags)
3146 {
3147 agp_unbind_pages_t unbind_info;
3148 igd_gtt_seg_t gttseg;
3149 ldi_handle_t hdl;
3150 int rval;
3151
3152 if (ddi_copyin(arg, &unbind_info, sizeof (unbind_info), flags) != 0) {
3153 return (EFAULT);
3154 }
3155
3156 gttseg.igs_pgstart = unbind_info.agpu_pgstart;
3157 gttseg.igs_npage = unbind_info.agpu_pgcount;
3158 gttseg.igs_phyaddr = NULL; /* not used */
3159 gttseg.igs_type = AGP_NORMAL;
3160 gttseg.igs_flags = unbind_info.agpu_flags;
3161 gttseg.igs_scratch = (uint32_t) unbind_info.agpu_scratch;
3162
3163 /* See i8xx_remove_from_gtt */
3164 hdl = st->asoft_devreg.agprd_masterhdl;
3165 if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)>tseg, FKIOCTL,
3166 kcred, &rval))
3167 return (-1);
3168
3169 return (0);
3170 }
3171
3172 /*
3173 * Read or write GTT registers. In the DRM code, see:
3174 * i915_gem_gtt.c:intel_rw_gtt
3175 *
3176 * Used to save/restore GTT state.
3177 */
3178 static int
ioctl_agpgart_rw_gtt(agpgart_softstate_t * st,void * arg,int flags)3179 ioctl_agpgart_rw_gtt(agpgart_softstate_t *st, void *arg, int flags)
3180 {
3181 agp_rw_gtt_t rw_info;
3182 igd_gtt_seg_t gttseg;
3183 ldi_handle_t hdl;
3184 int rval;
3185
3186 if (ddi_copyin(arg, &rw_info, sizeof (rw_info), flags) != 0) {
3187 return (EFAULT);
3188 }
3189
3190 gttseg.igs_pgstart = rw_info.agprw_pgstart;
3191 gttseg.igs_npage = rw_info.agprw_pgcount;
3192 gttseg.igs_phyaddr = rw_info.agprw_addr;
3193 gttseg.igs_type = 0; /* not used */
3194 gttseg.igs_flags = rw_info.agprw_flags;
3195 gttseg.igs_scratch = 0; /* not used */
3196
3197 /* See: i8xx_read_gtt, i8xx_write_gtt */
3198 hdl = st->asoft_devreg.agprd_masterhdl;
3199 if (ldi_ioctl(hdl, I8XX_RW_GTT, (intptr_t)>tseg, FKIOCTL,
3200 kcred, &rval)) {
3201 AGPDB_PRINT2((CE_WARN, "agpgart_rw_gtt error"));
3202 return (-1);
3203 }
3204
3205 return (0);
3206 }
3207
/*
 * agpgart_ioctl()
 *
 * Driver ioctl entry point: look up the per-instance soft state and
 * dispatch each AGPIOC_* command to its handler, holding the instance
 * mutex for the duration of the call.
 */
/*ARGSUSED*/
static int
agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
    cred_t *credp, int *rvalp)
{
	int instance;
	int retval = 0;
	void *arg = (void*)intarg;

	agpgart_softstate_t *softstate;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
		return (ENXIO);
	}

	/* All ioctls are serialized on the per-instance mutex. */
	mutex_enter(&softstate->asoft_instmutex);

	switch (cmd) {
	case AGPIOC_INFO:
		retval = ioctl_agpgart_info(softstate, arg, flags);
		break;
	case AGPIOC_ACQUIRE:
		retval = ioctl_agpgart_acquire(softstate);
		break;
	case AGPIOC_RELEASE:
		retval = ioctl_agpgart_release(softstate);
		break;
	case AGPIOC_SETUP:
		retval = ioctl_agpgart_setup(softstate, arg, flags);
		break;
	case AGPIOC_ALLOCATE:
		retval = ioctl_agpgart_alloc(softstate, arg, flags);
		break;
	case AGPIOC_DEALLOCATE:
		retval = ioctl_agpgart_dealloc(softstate, intarg);
		break;
	case AGPIOC_BIND:
		retval = ioctl_agpgart_bind(softstate, arg, flags);
		break;
	case AGPIOC_UNBIND:
		retval = ioctl_agpgart_unbind(softstate, arg, flags);
		break;
	case AGPIOC_FLUSHCHIPSET:
		retval = ioctl_agpgart_flush_chipset(softstate);
		break;
	case AGPIOC_PAGES_BIND:
		retval = ioctl_agpgart_pages_bind(softstate, arg, flags);
		break;
	case AGPIOC_PAGES_UNBIND:
		retval = ioctl_agpgart_pages_unbind(softstate, arg, flags);
		break;
	case AGPIOC_RW_GTT:
		retval = ioctl_agpgart_rw_gtt(softstate, arg, flags);
		break;

	default:
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
		retval = ENXIO;
		break;
	}

	mutex_exit(&softstate->asoft_instmutex);
	return (retval);
}
3275
3276 static int
agpgart_segmap(dev_t dev,off_t off,struct as * asp,caddr_t * addrp,off_t len,unsigned int prot,unsigned int maxprot,unsigned int flags,cred_t * credp)3277 agpgart_segmap(dev_t dev, off_t off, struct as *asp,
3278 caddr_t *addrp, off_t len, unsigned int prot,
3279 unsigned int maxprot, unsigned int flags, cred_t *credp)
3280 {
3281
3282 struct agpgart_softstate *softstate;
3283 int instance;
3284 int rc = 0;
3285
3286 instance = AGP_DEV2INST(dev);
3287 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3288 if (softstate == NULL) {
3289 AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
3290 return (ENXIO);
3291 }
3292 if (!AGP_ALIGNED(len))
3293 return (EINVAL);
3294
3295 mutex_enter(&softstate->asoft_instmutex);
3296
3297 rc = devmap_setup(dev, (offset_t)off, asp, addrp,
3298 (size_t)len, prot, maxprot, flags, credp);
3299
3300 mutex_exit(&softstate->asoft_instmutex);
3301 return (rc);
3302 }
3303
/*
 * agpgart_devmap()
 *
 * Devmap entry point: translate an aperture offset into the bound key
 * table entry covering it and map that entry's physical memory into the
 * caller's address space with devmap_pmem_setup().  The requested length
 * is clipped to the end of the matched allocation.
 */
/*ARGSUSED*/
static int
agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
    size_t *mappedlen, uint_t model)
{
	struct agpgart_softstate *softstate;
	int instance, status;
	struct keytable_ent *mementry;
	offset_t local_offset;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
		return (ENXIO);
	}

	/* The offset must lie within the aperture. */
	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
		return (EINVAL);
	}

	/*
	 * Can not find any memory now, so fail.
	 */

	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));

	if (mementry == NULL) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: can not find the proper keyent"));
		return (EINVAL);
	}

	/* Offset of the request within the matched allocation. */
	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);

	/* Clip the mapping length to the end of the allocation. */
	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
	}

	switch (mementry->kte_type) {
	case AGP_NORMAL:
		if (PMEMP(mementry->kte_memhdl)->pmem_cookie) {
			status = devmap_pmem_setup(cookie,
			    softstate->asoft_dip,
			    &agp_devmap_cb,
			    PMEMP(mementry->kte_memhdl)->pmem_cookie,
			    local_offset,
			    len, PROT_ALL,
			    (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
			    &mem_dev_acc_attr);
		} else {
			AGPDB_PRINT2((CE_WARN,
			    "agpgart_devmap: not a valid memory type"));
			return (EINVAL);

		}

		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: not a valid memory type"));
		return (EINVAL);
	}


	if (status == 0) {
		*mappedlen = len;
	} else {
		*mappedlen = 0;
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: devmap interface failed"));
		return (EINVAL);
	}

	return (0);
}
3382
/*
 * Character device entry points, cb_ops(9S).  The driver is accessed
 * through open/close/ioctl and memory mapping (devmap/segmap — hence
 * D_DEVMAP); it is MT-safe (D_MP).  Unsupported entry points are the
 * stock nodev stub, which fails with ENXIO.
 */
static struct cb_ops agpgart_cb_ops = {
	agpgart_open,		/* open() */
	agpgart_close,		/* close() */
	nodev,			/* strategy() */
	nodev,			/* print routine */
	nodev,			/* no dump routine */
	nodev,			/* read() */
	nodev,			/* write() */
	agpgart_ioctl,		/* agpgart_ioctl */
	agpgart_devmap,		/* devmap routine */
	nodev,			/* no longer use mmap routine */
	agpgart_segmap,		/* system segmap routine */
	nochpoll,		/* no chpoll routine */
	ddi_prop_op,		/* system prop operations */
	0,			/* not a STREAMS driver */
	D_DEVMAP | D_MP,	/* safe for multi-thread/multi-processor */
	CB_REV,			/* cb_ops structure revision */
	nodev,			/* cb_aread() */
	nodev,			/* cb_awrite() */
};
3403
3404 static struct dev_ops agpgart_ops = {
3405 DEVO_REV, /* devo_rev */
3406 0, /* devo_refcnt */
3407 agpgart_getinfo, /* devo_getinfo */
3408 nulldev, /* devo_identify */
3409 nulldev, /* devo_probe */
3410 agpgart_attach, /* devo_attach */
3411 agpgart_detach, /* devo_detach */
3412 nodev, /* devo_reset */
3413 &agpgart_cb_ops, /* devo_cb_ops */
3414 (struct bus_ops *)0, /* devo_bus_ops */
3415 NULL, /* devo_power */
3416 ddi_quiesce_not_needed, /* devo_quiesce */
3417 };
3418
/*
 * Module linkage information, modldrv(9S): this module is a device
 * driver using the generic driver module operations.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* type: device driver */
	"AGP driver",		/* name shown by modinfo(1M) */
	&agpgart_ops,		/* driver dev_ops vector */
};
3424
/*
 * Module linkage, modlinkage(9S): exactly one linkage structure
 * (modldrv above), NULL-terminated.
 */
static struct modlinkage modlinkage = {
	MODREV_1,		/* loadable-module interface revision */
	{&modldrv, NULL, NULL, NULL}
};
3429
/*
 * The driver soft state anchor (agpgart_glob_soft_handle) is already
 * defined once near the top of this file; the redundant duplicate
 * tentative definition that appeared here has been removed.
 */
3431
3432 int
_init(void)3433 _init(void)
3434 {
3435 int ret = DDI_SUCCESS;
3436
3437 ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3438 sizeof (agpgart_softstate_t),
3439 AGPGART_MAX_INSTANCES);
3440
3441 if (ret != 0) {
3442 AGPDB_PRINT2((CE_WARN,
3443 "_init: soft state init error code=0x%x", ret));
3444 return (ret);
3445 }
3446
3447 if ((ret = mod_install(&modlinkage)) != 0) {
3448 AGPDB_PRINT2((CE_WARN,
3449 "_init: mod install error code=0x%x", ret));
3450 ddi_soft_state_fini(&agpgart_glob_soft_handle);
3451 return (ret);
3452 }
3453
3454 return (DDI_SUCCESS);
3455 }
3456
3457 int
_info(struct modinfo * modinfop)3458 _info(struct modinfo *modinfop)
3459 {
3460 return (mod_info(&modlinkage, modinfop));
3461 }
3462
3463 int
_fini(void)3464 _fini(void)
3465 {
3466 int ret;
3467
3468 if ((ret = mod_remove(&modlinkage)) == 0) {
3469 ddi_soft_state_fini(&agpgart_glob_soft_handle);
3470 }
3471
3472 return (ret);
3473 }
3474