1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/note.h> 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/buf.h> 32 #include <sys/uio.h> 33 #include <sys/cred.h> 34 #include <sys/poll.h> 35 #include <sys/mman.h> 36 #include <sys/kmem.h> 37 #include <sys/model.h> 38 #include <sys/file.h> 39 #include <sys/proc.h> 40 #include <sys/open.h> 41 #include <sys/user.h> 42 #include <sys/t_lock.h> 43 #include <sys/vm.h> 44 #include <sys/stat.h> 45 #include <vm/hat.h> 46 #include <vm/seg.h> 47 #include <vm/seg_vn.h> 48 #include <vm/seg_dev.h> 49 #include <vm/as.h> 50 #include <sys/cmn_err.h> 51 #include <sys/cpuvar.h> 52 #include <sys/debug.h> 53 #include <sys/autoconf.h> 54 #include <sys/sunddi.h> 55 #include <sys/esunddi.h> 56 #include <sys/sunndi.h> 57 #include <sys/kstat.h> 58 #include <sys/conf.h> 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 60 #include <sys/ndi_impldefs.h> /* include prototypes */ 61 #include <sys/ddi_timer.h> 62 #include <sys/hwconf.h> 63 #include 
<sys/pathname.h> 64 #include <sys/modctl.h> 65 #include <sys/epm.h> 66 #include <sys/devctl.h> 67 #include <sys/callb.h> 68 #include <sys/cladm.h> 69 #include <sys/sysevent.h> 70 #include <sys/dacf_impl.h> 71 #include <sys/ddidevmap.h> 72 #include <sys/bootconf.h> 73 #include <sys/disp.h> 74 #include <sys/atomic.h> 75 #include <sys/promif.h> 76 #include <sys/instance.h> 77 #include <sys/sysevent/eventdefs.h> 78 #include <sys/task.h> 79 #include <sys/project.h> 80 #include <sys/taskq.h> 81 #include <sys/devpolicy.h> 82 #include <sys/ctype.h> 83 #include <net/if.h> 84 #include <sys/rctl.h> 85 #include <sys/zone.h> 86 #include <sys/clock_impl.h> 87 #include <sys/ddi.h> 88 89 extern pri_t minclsyspri; 90 91 extern rctl_hndl_t rc_project_locked_mem; 92 extern rctl_hndl_t rc_zone_locked_mem; 93 94 #ifdef DEBUG 95 static int sunddi_debug = 0; 96 #endif /* DEBUG */ 97 98 /* ddi_umem_unlock miscellaneous */ 99 100 static void i_ddi_umem_unlock_thread_start(void); 101 102 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */ 103 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */ 104 static kthread_t *ddi_umem_unlock_thread; 105 /* 106 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list. 107 */ 108 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL; 109 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL; 110 111 /* 112 * DDI(Sun) Function and flag definitions: 113 */ 114 115 #if defined(__x86) 116 /* 117 * Used to indicate which entries were chosen from a range. 118 */ 119 char *chosen_reg = "chosen-reg"; 120 #endif 121 122 /* 123 * Function used to ring system console bell 124 */ 125 void (*ddi_console_bell_func)(clock_t duration); 126 127 /* 128 * Creating register mappings and handling interrupts: 129 */ 130 131 /* 132 * Generic ddi_map: Call parent to fulfill request... 
133 */ 134 135 int 136 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset, 137 off_t len, caddr_t *addrp) 138 { 139 dev_info_t *pdip; 140 141 ASSERT(dp); 142 pdip = (dev_info_t *)DEVI(dp)->devi_parent; 143 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip, 144 dp, mp, offset, len, addrp)); 145 } 146 147 /* 148 * ddi_apply_range: (Called by nexi only.) 149 * Apply ranges in parent node dp, to child regspec rp... 150 */ 151 152 int 153 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp) 154 { 155 return (i_ddi_apply_range(dp, rdip, rp)); 156 } 157 158 int 159 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset, 160 off_t len) 161 { 162 ddi_map_req_t mr; 163 #if defined(__x86) 164 struct { 165 int bus; 166 int addr; 167 int size; 168 } reg, *reglist; 169 uint_t length; 170 int rc; 171 172 /* 173 * get the 'registers' or the 'reg' property. 174 * We look up the reg property as an array of 175 * int's. 176 */ 177 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 178 DDI_PROP_DONTPASS, "registers", (int **)®list, &length); 179 if (rc != DDI_PROP_SUCCESS) 180 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 181 DDI_PROP_DONTPASS, "reg", (int **)®list, &length); 182 if (rc == DDI_PROP_SUCCESS) { 183 /* 184 * point to the required entry. 185 */ 186 reg = reglist[rnumber]; 187 reg.addr += offset; 188 if (len != 0) 189 reg.size = len; 190 /* 191 * make a new property containing ONLY the required tuple. 192 */ 193 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip, 194 chosen_reg, (int *)®, (sizeof (reg)/sizeof (int))) 195 != DDI_PROP_SUCCESS) { 196 cmn_err(CE_WARN, "%s%d: cannot create '%s' " 197 "property", DEVI(dip)->devi_name, 198 DEVI(dip)->devi_instance, chosen_reg); 199 } 200 /* 201 * free the memory allocated by 202 * ddi_prop_lookup_int_array (). 
203 */ 204 ddi_prop_free((void *)reglist); 205 } 206 #endif 207 mr.map_op = DDI_MO_MAP_LOCKED; 208 mr.map_type = DDI_MT_RNUMBER; 209 mr.map_obj.rnumber = rnumber; 210 mr.map_prot = PROT_READ | PROT_WRITE; 211 mr.map_flags = DDI_MF_KERNEL_MAPPING; 212 mr.map_handlep = NULL; 213 mr.map_vers = DDI_MAP_VERSION; 214 215 /* 216 * Call my parent to map in my regs. 217 */ 218 219 return (ddi_map(dip, &mr, offset, len, kaddrp)); 220 } 221 222 void 223 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset, 224 off_t len) 225 { 226 ddi_map_req_t mr; 227 228 mr.map_op = DDI_MO_UNMAP; 229 mr.map_type = DDI_MT_RNUMBER; 230 mr.map_flags = DDI_MF_KERNEL_MAPPING; 231 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */ 232 mr.map_obj.rnumber = rnumber; 233 mr.map_handlep = NULL; 234 mr.map_vers = DDI_MAP_VERSION; 235 236 /* 237 * Call my parent to unmap my regs. 238 */ 239 240 (void) ddi_map(dip, &mr, offset, len, kaddrp); 241 *kaddrp = (caddr_t)0; 242 #if defined(__x86) 243 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg); 244 #endif 245 } 246 247 int 248 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 249 off_t offset, off_t len, caddr_t *vaddrp) 250 { 251 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp)); 252 } 253 254 /* 255 * nullbusmap: The/DDI default bus_map entry point for nexi 256 * not conforming to the reg/range paradigm (i.e. scsi, etc.) 257 * with no HAT/MMU layer to be programmed at this level. 258 * 259 * If the call is to map by rnumber, return an error, 260 * otherwise pass anything else up the tree to my parent. 261 */ 262 int 263 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 264 off_t offset, off_t len, caddr_t *vaddrp) 265 { 266 _NOTE(ARGUNUSED(rdip)) 267 if (mp->map_type == DDI_MT_RNUMBER) 268 return (DDI_ME_UNSUPPORTED); 269 270 return (ddi_map(dip, mp, offset, len, vaddrp)); 271 } 272 273 /* 274 * ddi_rnumber_to_regspec: Not for use by leaf drivers. 
275 * Only for use by nexi using the reg/range paradigm. 276 */ 277 struct regspec * 278 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 279 { 280 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 281 } 282 283 284 /* 285 * Note that we allow the dip to be nil because we may be called 286 * prior even to the instantiation of the devinfo tree itself - all 287 * regular leaf and nexus drivers should always use a non-nil dip! 288 * 289 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 290 * simply get a synchronous fault as soon as we touch a missing address. 291 * 292 * Poke is rather more carefully handled because we might poke to a write 293 * buffer, "succeed", then only find some time later that we got an 294 * asynchronous fault that indicated that the address we were writing to 295 * was not really backed by hardware. 296 */ 297 298 static int 299 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 300 void *addr, void *value_p) 301 { 302 union { 303 uint64_t u64; 304 uint32_t u32; 305 uint16_t u16; 306 uint8_t u8; 307 } peekpoke_value; 308 309 peekpoke_ctlops_t peekpoke_args; 310 uint64_t dummy_result; 311 int rval; 312 313 /* Note: size is assumed to be correct; it is not checked. 
*/ 314 peekpoke_args.size = size; 315 peekpoke_args.dev_addr = (uintptr_t)addr; 316 peekpoke_args.handle = NULL; 317 peekpoke_args.repcount = 1; 318 peekpoke_args.flags = 0; 319 320 if (cmd == DDI_CTLOPS_POKE) { 321 switch (size) { 322 case sizeof (uint8_t): 323 peekpoke_value.u8 = *(uint8_t *)value_p; 324 break; 325 case sizeof (uint16_t): 326 peekpoke_value.u16 = *(uint16_t *)value_p; 327 break; 328 case sizeof (uint32_t): 329 peekpoke_value.u32 = *(uint32_t *)value_p; 330 break; 331 case sizeof (uint64_t): 332 peekpoke_value.u64 = *(uint64_t *)value_p; 333 break; 334 } 335 } 336 337 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 338 339 if (devi != NULL) 340 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 341 &dummy_result); 342 else 343 rval = peekpoke_mem(cmd, &peekpoke_args); 344 345 /* 346 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 347 */ 348 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 349 switch (size) { 350 case sizeof (uint8_t): 351 *(uint8_t *)value_p = peekpoke_value.u8; 352 break; 353 case sizeof (uint16_t): 354 *(uint16_t *)value_p = peekpoke_value.u16; 355 break; 356 case sizeof (uint32_t): 357 *(uint32_t *)value_p = peekpoke_value.u32; 358 break; 359 case sizeof (uint64_t): 360 *(uint64_t *)value_p = peekpoke_value.u64; 361 break; 362 } 363 } 364 365 return (rval); 366 } 367 368 /* 369 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 370 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
371 */ 372 int 373 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p) 374 { 375 switch (size) { 376 case sizeof (uint8_t): 377 case sizeof (uint16_t): 378 case sizeof (uint32_t): 379 case sizeof (uint64_t): 380 break; 381 default: 382 return (DDI_FAILURE); 383 } 384 385 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p)); 386 } 387 388 int 389 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p) 390 { 391 switch (size) { 392 case sizeof (uint8_t): 393 case sizeof (uint16_t): 394 case sizeof (uint32_t): 395 case sizeof (uint64_t): 396 break; 397 default: 398 return (DDI_FAILURE); 399 } 400 401 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p)); 402 } 403 404 int 405 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p) 406 { 407 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 408 val_p)); 409 } 410 411 int 412 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p) 413 { 414 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 415 val_p)); 416 } 417 418 int 419 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p) 420 { 421 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 422 val_p)); 423 } 424 425 int 426 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p) 427 { 428 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 429 val_p)); 430 } 431 432 433 /* 434 * We need to separate the old interfaces from the new ones and leave them 435 * in here for a while. Previous versions of the OS defined the new interfaces 436 * to the old interfaces. This way we can fix things up so that we can 437 * eventually remove these interfaces. 438 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10 439 * or earlier will actually have a reference to ddi_peekc in the binary. 
440 */ 441 #ifdef _ILP32 442 int 443 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p) 444 { 445 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 446 val_p)); 447 } 448 449 int 450 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p) 451 { 452 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 453 val_p)); 454 } 455 456 int 457 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p) 458 { 459 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 460 val_p)); 461 } 462 463 int 464 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p) 465 { 466 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 467 val_p)); 468 } 469 #endif /* _ILP32 */ 470 471 int 472 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val) 473 { 474 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 475 } 476 477 int 478 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val) 479 { 480 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 481 } 482 483 int 484 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val) 485 { 486 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 487 } 488 489 int 490 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val) 491 { 492 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 493 } 494 495 /* 496 * We need to separate the old interfaces from the new ones and leave them 497 * in here for a while. Previous versions of the OS defined the new interfaces 498 * to the old interfaces. This way we can fix things up so that we can 499 * eventually remove these interfaces. 500 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10 501 * or earlier will actually have a reference to ddi_pokec in the binary. 
502 */ 503 #ifdef _ILP32 504 int 505 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val) 506 { 507 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 508 } 509 510 int 511 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val) 512 { 513 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 514 } 515 516 int 517 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val) 518 { 519 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 520 } 521 522 int 523 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val) 524 { 525 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 526 } 527 #endif /* _ILP32 */ 528 529 /* 530 * ddi_peekpokeio() is used primarily by the mem drivers for moving 531 * data to and from uio structures via peek and poke. Note that we 532 * use "internal" routines ddi_peek and ddi_poke to make this go 533 * slightly faster, avoiding the call overhead .. 534 */ 535 int 536 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw, 537 caddr_t addr, size_t len, uint_t xfersize) 538 { 539 int64_t ibuffer; 540 int8_t w8; 541 size_t sz; 542 int o; 543 544 if (xfersize > sizeof (long)) 545 xfersize = sizeof (long); 546 547 while (len != 0) { 548 if ((len | (uintptr_t)addr) & 1) { 549 sz = sizeof (int8_t); 550 if (rw == UIO_WRITE) { 551 if ((o = uwritec(uio)) == -1) 552 return (DDI_FAILURE); 553 if (ddi_poke8(devi, (int8_t *)addr, 554 (int8_t)o) != DDI_SUCCESS) 555 return (DDI_FAILURE); 556 } else { 557 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz, 558 (int8_t *)addr, &w8) != DDI_SUCCESS) 559 return (DDI_FAILURE); 560 if (ureadc(w8, uio)) 561 return (DDI_FAILURE); 562 } 563 } else { 564 switch (xfersize) { 565 case sizeof (int64_t): 566 if (((len | (uintptr_t)addr) & 567 (sizeof (int64_t) - 1)) == 0) { 568 sz = xfersize; 569 break; 570 } 571 /*FALLTHROUGH*/ 572 case sizeof (int32_t): 573 if (((len | (uintptr_t)addr) & 574 (sizeof (int32_t) - 1)) == 0) { 575 sz = xfersize; 576 
break; 577 } 578 /*FALLTHROUGH*/ 579 default: 580 /* 581 * This still assumes that we might have an 582 * I/O bus out there that permits 16-bit 583 * transfers (and that it would be upset by 584 * 32-bit transfers from such locations). 585 */ 586 sz = sizeof (int16_t); 587 break; 588 } 589 590 if (rw == UIO_READ) { 591 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz, 592 addr, &ibuffer) != DDI_SUCCESS) 593 return (DDI_FAILURE); 594 } 595 596 if (uiomove(&ibuffer, sz, rw, uio)) 597 return (DDI_FAILURE); 598 599 if (rw == UIO_WRITE) { 600 if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz, 601 addr, &ibuffer) != DDI_SUCCESS) 602 return (DDI_FAILURE); 603 } 604 } 605 addr += sz; 606 len -= sz; 607 } 608 return (DDI_SUCCESS); 609 } 610 611 /* 612 * These routines are used by drivers that do layered ioctls 613 * On sparc, they're implemented in assembler to avoid spilling 614 * register windows in the common (copyin) case .. 615 */ 616 #if !defined(__sparc) 617 int 618 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags) 619 { 620 if (flags & FKIOCTL) 621 return (kcopy(buf, kernbuf, size) ? -1 : 0); 622 return (copyin(buf, kernbuf, size)); 623 } 624 625 int 626 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags) 627 { 628 if (flags & FKIOCTL) 629 return (kcopy(buf, kernbuf, size) ? -1 : 0); 630 return (copyout(buf, kernbuf, size)); 631 } 632 #endif /* !__sparc */ 633 634 /* 635 * Conversions in nexus pagesize units. We don't duplicate the 636 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI 637 * routines anyway. 
638 */ 639 unsigned long 640 ddi_btop(dev_info_t *dip, unsigned long bytes) 641 { 642 unsigned long pages; 643 644 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages); 645 return (pages); 646 } 647 648 unsigned long 649 ddi_btopr(dev_info_t *dip, unsigned long bytes) 650 { 651 unsigned long pages; 652 653 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages); 654 return (pages); 655 } 656 657 unsigned long 658 ddi_ptob(dev_info_t *dip, unsigned long pages) 659 { 660 unsigned long bytes; 661 662 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes); 663 return (bytes); 664 } 665 666 unsigned int 667 ddi_enter_critical(void) 668 { 669 return ((uint_t)spl7()); 670 } 671 672 void 673 ddi_exit_critical(unsigned int spl) 674 { 675 splx((int)spl); 676 } 677 678 /* 679 * Nexus ctlops punter 680 */ 681 682 #if !defined(__sparc) 683 /* 684 * Request bus_ctl parent to handle a bus_ctl request 685 * 686 * (The sparc version is in sparc_ddi.s) 687 */ 688 int 689 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v) 690 { 691 int (*fp)(); 692 693 if (!d || !r) 694 return (DDI_FAILURE); 695 696 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL) 697 return (DDI_FAILURE); 698 699 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl; 700 return ((*fp)(d, r, op, a, v)); 701 } 702 703 #endif 704 705 /* 706 * DMA/DVMA setup 707 */ 708 709 #if defined(__sparc) 710 static ddi_dma_lim_t standard_limits = { 711 (uint_t)0, /* addr_t dlim_addr_lo */ 712 (uint_t)-1, /* addr_t dlim_addr_hi */ 713 (uint_t)-1, /* uint_t dlim_cntr_max */ 714 (uint_t)1, /* uint_t dlim_burstsizes */ 715 (uint_t)1, /* uint_t dlim_minxfer */ 716 0 /* uint_t dlim_dmaspeed */ 717 }; 718 #elif defined(__x86) 719 static ddi_dma_lim_t standard_limits = { 720 (uint_t)0, /* addr_t dlim_addr_lo */ 721 (uint_t)0xffffff, /* addr_t dlim_addr_hi */ 722 (uint_t)0, /* uint_t dlim_cntr_max */ 723 (uint_t)0x00000001, /* uint_t dlim_burstsizes */ 724 (uint_t)DMA_UNIT_8, /* uint_t 
dlim_minxfer */ 725 (uint_t)0, /* uint_t dlim_dmaspeed */ 726 (uint_t)0x86<<24+0, /* uint_t dlim_version */ 727 (uint_t)0xffff, /* uint_t dlim_adreg_max */ 728 (uint_t)0xffff, /* uint_t dlim_ctreg_max */ 729 (uint_t)512, /* uint_t dlim_granular */ 730 (int)1, /* int dlim_sgllen */ 731 (uint_t)0xffffffff /* uint_t dlim_reqsizes */ 732 }; 733 734 #endif 735 736 int 737 ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp, 738 ddi_dma_handle_t *handlep) 739 { 740 int (*funcp)() = ddi_dma_map; 741 struct bus_ops *bop; 742 #if defined(__sparc) 743 auto ddi_dma_lim_t dma_lim; 744 745 if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) { 746 dma_lim = standard_limits; 747 } else { 748 dma_lim = *dmareqp->dmar_limits; 749 } 750 dmareqp->dmar_limits = &dma_lim; 751 #endif 752 #if defined(__x86) 753 if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) 754 return (DDI_FAILURE); 755 #endif 756 757 /* 758 * Handle the case that the requester is both a leaf 759 * and a nexus driver simultaneously by calling the 760 * requester's bus_dma_map function directly instead 761 * of ddi_dma_map. 
762 */ 763 bop = DEVI(dip)->devi_ops->devo_bus_ops; 764 if (bop && bop->bus_dma_map) 765 funcp = bop->bus_dma_map; 766 return ((*funcp)(dip, dip, dmareqp, handlep)); 767 } 768 769 int 770 ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len, 771 uint_t flags, int (*waitfp)(), caddr_t arg, 772 ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep) 773 { 774 int (*funcp)() = ddi_dma_map; 775 ddi_dma_lim_t dma_lim; 776 struct ddi_dma_req dmareq; 777 struct bus_ops *bop; 778 779 if (len == 0) { 780 return (DDI_DMA_NOMAPPING); 781 } 782 if (limits == (ddi_dma_lim_t *)0) { 783 dma_lim = standard_limits; 784 } else { 785 dma_lim = *limits; 786 } 787 dmareq.dmar_limits = &dma_lim; 788 dmareq.dmar_flags = flags; 789 dmareq.dmar_fp = waitfp; 790 dmareq.dmar_arg = arg; 791 dmareq.dmar_object.dmao_size = len; 792 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 793 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 794 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 795 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 796 797 /* 798 * Handle the case that the requester is both a leaf 799 * and a nexus driver simultaneously by calling the 800 * requester's bus_dma_map function directly instead 801 * of ddi_dma_map. 
802 */ 803 bop = DEVI(dip)->devi_ops->devo_bus_ops; 804 if (bop && bop->bus_dma_map) 805 funcp = bop->bus_dma_map; 806 807 return ((*funcp)(dip, dip, &dmareq, handlep)); 808 } 809 810 int 811 ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags, 812 int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits, 813 ddi_dma_handle_t *handlep) 814 { 815 int (*funcp)() = ddi_dma_map; 816 ddi_dma_lim_t dma_lim; 817 struct ddi_dma_req dmareq; 818 struct bus_ops *bop; 819 820 if (limits == (ddi_dma_lim_t *)0) { 821 dma_lim = standard_limits; 822 } else { 823 dma_lim = *limits; 824 } 825 dmareq.dmar_limits = &dma_lim; 826 dmareq.dmar_flags = flags; 827 dmareq.dmar_fp = waitfp; 828 dmareq.dmar_arg = arg; 829 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 830 831 if (bp->b_flags & B_PAGEIO) { 832 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 833 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 834 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 835 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 836 } else { 837 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 838 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 839 if (bp->b_flags & B_SHADOW) { 840 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 841 bp->b_shadow; 842 } else { 843 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 844 } 845 846 /* 847 * If the buffer has no proc pointer, or the proc 848 * struct has the kernel address space, or the buffer has 849 * been marked B_REMAPPED (meaning that it is now 850 * mapped into the kernel's address space), then 851 * the address space is kas (kernel address space). 
852 */ 853 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 854 (bp->b_flags & B_REMAPPED)) { 855 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 856 } else { 857 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 858 bp->b_proc->p_as; 859 } 860 } 861 862 /* 863 * Handle the case that the requester is both a leaf 864 * and a nexus driver simultaneously by calling the 865 * requester's bus_dma_map function directly instead 866 * of ddi_dma_map. 867 */ 868 bop = DEVI(dip)->devi_ops->devo_bus_ops; 869 if (bop && bop->bus_dma_map) 870 funcp = bop->bus_dma_map; 871 872 return ((*funcp)(dip, dip, &dmareq, handlep)); 873 } 874 875 #if !defined(__sparc) 876 /* 877 * Request bus_dma_ctl parent to fiddle with a dma request. 878 * 879 * (The sparc version is in sparc_subr.s) 880 */ 881 int 882 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 883 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 884 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags) 885 { 886 int (*fp)(); 887 888 if (dip != ddi_root_node()) 889 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl; 890 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl; 891 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags)); 892 } 893 #endif 894 895 /* 896 * For all DMA control functions, call the DMA control 897 * routine and return status. 898 * 899 * Just plain assume that the parent is to be called. 900 * If a nexus driver or a thread outside the framework 901 * of a nexus driver or a leaf driver calls these functions, 902 * it is up to them to deal with the fact that the parent's 903 * bus_dma_ctl function will be the first one called. 
904 */ 905 906 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip 907 908 int 909 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp) 910 { 911 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0)); 912 } 913 914 int 915 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c) 916 { 917 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0)); 918 } 919 920 int 921 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o) 922 { 923 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF, 924 (off_t *)c, 0, (caddr_t *)o, 0)); 925 } 926 927 int 928 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c) 929 { 930 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o, 931 l, (caddr_t *)c, 0)); 932 } 933 934 int 935 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l) 936 { 937 if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0) 938 return (DDI_FAILURE); 939 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0)); 940 } 941 942 int 943 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win, 944 ddi_dma_win_t *nwin) 945 { 946 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0, 947 (caddr_t *)nwin, 0)); 948 } 949 950 int 951 ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg) 952 { 953 ddi_dma_handle_t h = (ddi_dma_handle_t)win; 954 955 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win, 956 (size_t *)&seg, (caddr_t *)nseg, 0)); 957 } 958 959 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc) 960 /* 961 * This routine is Obsolete and should be removed from ALL architectures 962 * in a future release of Solaris. 963 * 964 * It is deliberately NOT ported to amd64; please fix the code that 965 * depends on this routine to use ddi_dma_nextcookie(9F). 
966 * 967 * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix 968 * is a side effect to some other cleanup), we're still not going to support 969 * this interface on x64. 970 */ 971 int 972 ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l, 973 ddi_dma_cookie_t *cookiep) 974 { 975 ddi_dma_handle_t h = (ddi_dma_handle_t)seg; 976 977 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l, 978 (caddr_t *)cookiep, 0)); 979 } 980 #endif /* (__i386 && !__amd64) || __sparc */ 981 982 #if !defined(__sparc) 983 984 /* 985 * The SPARC versions of these routines are done in assembler to 986 * save register windows, so they're in sparc_subr.s. 987 */ 988 989 int 990 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip, 991 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep) 992 { 993 int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *, 994 ddi_dma_handle_t *); 995 996 if (dip != ddi_root_node()) 997 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map; 998 999 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_map; 1000 return ((*funcp)(dip, rdip, dmareqp, handlep)); 1001 } 1002 1003 int 1004 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 1005 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 1006 { 1007 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *, 1008 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *); 1009 1010 if (dip != ddi_root_node()) 1011 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 1012 1013 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl; 1014 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep)); 1015 } 1016 1017 int 1018 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep) 1019 { 1020 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 1021 1022 if (dip != ddi_root_node()) 1023 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 1024 1025 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl; 1026 return 
	((*funcp)(dip, rdip, handlep));
}

/*
 * ddi_dma_bindhdl: bind a DMA handle to a memory object.  Dispatches to
 * the bus nexus bus_dma_bindhdl(9E) routine cached on this node's devinfo.
 */
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	/*
	 * devi_bus_dma_bindhdl caches the ancestor nexus that implements
	 * the operation, so the tree is not walked on every call.
	 */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
}

/*
 * ddi_dma_unbindhdl: unbind a DMA handle from its memory object via the
 * responsible bus nexus.
 */
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(dip, rdip, handle));
}


/*
 * ddi_dma_flush: flush/sync a range of a DMA object via the responsible
 * bus nexus bus_dma_flush(9E) routine.
 */
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
}

/*
 * ddi_dma_win: move to a new DMA window on a partially bound object.
 */
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}

/*
 * ddi_dma_sync(9F): synchronize a DMA object for CPU or device access.
 * Dispatches to the nexus bus_dma_flush routine unless the platform
 * marked the handle as requiring no sync.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}

/*
 * ddi_dma_unbind_handle(9F): unbind a handle using the function pointer
 * cached at bind time on the requester's devinfo node.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/* NOTE: the cached unbind function is taken from rdip, not dip */
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}

#endif /* !__sparc */

/*
 * ddi_dma_free: legacy interface; releases DMA resources via the
 * DDI_DMA_FREE miscellaneous control operation.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}

/*
 * ddi_iopb_alloc: legacy allocation of an I/O parameter block; applies
 * the standard DMA limits when the caller supplies none.
 */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	if (!limp) {
		defalt = standard_limits;
		limp = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
}

/*
 * ddi_iopb_free: release memory obtained via ddi_iopb_alloc.
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}

/*
 * ddi_mem_alloc: legacy allocation of DMA-able memory; applies the
 * standard DMA limits when the caller supplies none.
 */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
    uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	if (!limits) {
		defalt = standard_limits;
		limits = &defalt;
	}
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
}

/*
 * ddi_mem_free: release memory obtained via ddi_mem_alloc.
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}

/*
 * DMA attributes, alignment, burst sizes, and transfer minimums
 */

/*
 * ddi_dma_get_attr: copy out the DMA attributes stored in the handle.
 */
int
ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (attrp == NULL)
		return (DDI_FAILURE);
	*attrp = dimp->dmai_attr;
	return (DDI_SUCCESS);
}

/*
 * ddi_dma_burstsizes(9F): return the allowed burst sizes for the handle,
 * or 0 for a null handle.
 */
int
ddi_dma_burstsizes(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp)
		return (0);
	else
		return (dimp->dmai_burstsizes);
}

/*
 * ddi_dma_devalign(9F): report required device alignment and minimum
 * effective transfer size for the handle.  For SBus 64-bit handles the
 * 64-bit burst sizes live in the upper 16 bits of dmai_burstsizes.
 */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}

/*
 * ddi_iomin(9F): determine the minimum I/O transfer alignment by asking
 * the parent via DDI_CTLOPS_IOMIN.  Returns 0 if the initial or final
 * value is not a power of two.
 */
int
ddi_iomin(dev_info_t *a, int i, int stream)
{
	int r;

	/*
	 * Make sure that the initial value is sane
	 */
	if (i & (i - 1))
		return (0);
	if (i == 0)
		i = (stream) ? 4 : 1;

	r = ddi_ctlops(a, a,
	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
	if (r != DDI_SUCCESS || (i & (i - 1)))
		return (0);
	return (i);
}

/*
 * Given two DMA attribute structures, apply the attributes
 * of one to the other, following the rules of attributes
 * and the wishes of the caller.
 *
 * The rules of DMA attribute structures are that you cannot
 * make things *less* restrictive as you apply one set
 * of attributes to another.
 *
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}

/*
 * mmap/segmap interface:
 */

/*
 * ddi_segmap:		setup the default segment driver. Calls the drivers
 *			XXmmap routine to validate the range to be mapped.
 *			Return ENXIO of the range is not valid. Create
 *			a seg_dev segment that contains all of the
 *			necessary information and will reference the
 *			default segment driver routines. It returns zero
 *			on success or non-zero on failure.
 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}

/*
 * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
 *			drivers. Allows each successive parent to resolve
 *			address translations and add its mappings to the
 *			mapping list supplied in the page structure. It
 *			returns zero on success	or non-zero on failure.
 */

int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}

/*
 * ddi_device_mapping_check:	Called from ddi_segmap_setup.
 *	Invokes platform specific DDI to determine whether attributes specified
 *	in attr(9s) are valid for the region of memory that will be made
 *	available for direct access to user process via the mmap(2) system
 *	call.  Returns 0 if the region is mappable, -1 otherwise.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}


/*
 * Property functions:	 See also, ddipropdefs.h.
 *
 * These functions are the framework for the property functions,
 * i.e. they support software defined properties.  All implementation
 * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties are handled in the implementation specific
 * functions (defined in ddi_implfuncs.h).
 */

/*
 * nopropop:	Shouldn't be called, right?
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}

#ifdef	DDI_PROP_DEBUG
int ddi_prop_debug_flag = 0;

/*
 * ddi_prop_debug: toggle property subsystem debug output; returns the
 * previous setting.
 */
int
ddi_prop_debug(int enable)
{
	int prev = ddi_prop_debug_flag;

	if ((enable != 0) || (prev != 0))
		printf("ddi_prop_debug: debugging %s\n",
		    enable ? "enabled" : "disabled");
	ddi_prop_debug_flag = enable;
	return (prev);
}

#endif	/* DDI_PROP_DEBUG */

/*
 * Search a property list for a match, if found return pointer
 * to matching prop struct, else return NULL.
 */

ddi_prop_t *
i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
{
	ddi_prop_t	*propp;

	/*
	 * find the property in child's devinfo:
	 * Search order defined by this search function is first matching
	 * property with input dev == DDI_DEV_T_ANY matching any dev or
	 * dev == propp->prop_dev, name == propp->name, and the correct
	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
	 * value made it this far then it implies a DDI_DEV_T_ANY search.
	 */
	if (dev == DDI_DEV_T_NONE)
		dev = DDI_DEV_T_ANY;

	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		return (propp);
	}

	return ((ddi_prop_t *)0);
}

/*
 * Search for property within devnames structures
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * Rootnex-global and LDI "any dev" lookups match regardless
		 * of the property's dev_t; otherwise dev must match exactly.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}

static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";

/*
 * ddi_prop_search_global:
 *	Search the global property list within devnames
 *	for the named property.  Return the encoded value.
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp = i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_search_common:	Lookup and return the encoded value
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)  {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}


/*
 * ddi_prop_op: The basic property operator for drivers.
 *
 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
 *
 *	prop_op			valuep
 *	------			------
 *
 *	PROP_LEN		<unused>
 *
 *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
 *
 *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
 *				address of allocated buffer, if successful)
 */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* caller must not pass type bits; they are established below */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties. But if we've made it here, then the driver hasn't
	 * overridden any properties. We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
	    mod_flags, name, valuep, (uint_t *)lengthp);
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}

/*
 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
 * maintain size in number of blksize blocks.  Provides a dynamic property
 * implementation for size oriented properties based on nblocks64 and blksize
 * values passed in by the driver.
Fallback to ddi_prop_op if the nblocks64
 * is too large.  This interface should not be used with a nblocks64 that
 * represents the driver's idea of how to represent unknown, if nblocks is
 * unknown use ddi_prop_op.
 */
int
ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp,
    uint64_t nblocks64, uint_t blksize)
{
	uint64_t size64;
	int	blkshift;

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/*
	 * There is no point in supporting nblocks64 values that don't have
	 * an accurate uint64_t byte count representation.
	 */
	if (nblocks64 >= (UINT64_MAX >> blkshift))
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	size64 = nblocks64 << blkshift;
	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, size64, blksize));
}

/*
 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}

/*
 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes.  Provides a dynamic property implementation for
 * size oriented properties based on size64 value and blksize passed in by the
 * driver.  Fallback to ddi_prop_op if the size64 is too large.  This interface
 * should not be used with a size64 that represents the driver's idea of how
 * to represent unknown, if size is unknown use ddi_prop_op.
 *
 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
 * integers. While the most likely interface to request them ([bc]devi_size)
 * is declared int (signed) there is no enforcement of this, which means we
 * can't enforce limitations here without risking regression.
 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes).  When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",	DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",	DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}

/*
 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}

/*
 * Variable length props...
 */

/*
 * ddi_getlongprop:	Get variable length property len+val into a buffer
 *		allocated by property provider via kmem_alloc. Requester
 *		is responsible for freeing returned property via kmem_free.
 *
 *	Arguments:
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	Possible flag modifiers are:
 *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
 *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
 *	name:	Input:	name of property.
 *	valuep:	Output:	Addr of callers buffer pointer.
 *	lengthp:Output:	*lengthp will contain prop length on exit.
 *
 *	Possible Returns:
 *
 *		DDI_PROP_SUCCESS:	Prop found and returned.
 *		DDI_PROP_NOT_FOUND:	Prop not found
 *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
 *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
 */

int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}

/*
 *
 * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
 *				buffer. (no memory allocation by provider).
 *
 *	dev_t:	Input:	dev_t of property.
 *	dip:	Input:	dev_info_t pointer of child.
 *	flags:	Input:	DDI_PROP_DONTPASS or NULL
 *	name:	Input:	name of property
 *	valuep:	Input:	ptr to callers buffer.
 *	lengthp:I/O:	ptr to length of callers buffer on entry,
 *			actual length of property on exit.
 *
 *	Possible returns:
 *
 *		DDI_PROP_SUCCESS	Prop found and returned
 *		DDI_PROP_NOT_FOUND	Prop not found
 *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2047 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small, 2048 * no value returned, but actual prop 2049 * length returned in *lengthp 2050 * 2051 */ 2052 2053 int 2054 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags, 2055 char *name, caddr_t valuep, int *lengthp) 2056 { 2057 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 2058 flags, name, valuep, lengthp)); 2059 } 2060 2061 /* 2062 * Integer/boolean sized props. 2063 * 2064 * Call is value only... returns found boolean or int sized prop value or 2065 * defvalue if prop not found or is wrong length or is explicitly undefined. 2066 * Only flag is DDI_PROP_DONTPASS... 2067 * 2068 * By convention, this interface returns boolean (0) sized properties 2069 * as value (int)1. 2070 * 2071 * This never returns an error, if property not found or specifically 2072 * undefined, the input `defvalue' is returned. 2073 */ 2074 2075 int 2076 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue) 2077 { 2078 int propvalue = defvalue; 2079 int proplength = sizeof (int); 2080 int error; 2081 2082 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 2083 flags, name, (caddr_t)&propvalue, &proplength); 2084 2085 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 2086 propvalue = 1; 2087 2088 return (propvalue); 2089 } 2090 2091 /* 2092 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS 2093 * if returns DDI_PROP_SUCCESS, length returned in *lengthp. 2094 */ 2095 2096 int 2097 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp) 2098 { 2099 return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp)); 2100 } 2101 2102 /* 2103 * Allocate a struct prop_driver_data, along with 'size' bytes 2104 * for decoded property data. This structure is freed by 2105 * calling ddi_prop_free(9F). 
2106 */ 2107 static void * 2108 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *)) 2109 { 2110 struct prop_driver_data *pdd; 2111 2112 /* 2113 * Allocate a structure with enough memory to store the decoded data. 2114 */ 2115 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP); 2116 pdd->pdd_size = (sizeof (struct prop_driver_data) + size); 2117 pdd->pdd_prop_free = prop_free; 2118 2119 /* 2120 * Return a pointer to the location to put the decoded data. 2121 */ 2122 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data))); 2123 } 2124 2125 /* 2126 * Allocated the memory needed to store the encoded data in the property 2127 * handle. 2128 */ 2129 static int 2130 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size) 2131 { 2132 /* 2133 * If size is zero, then set data to NULL and size to 0. This 2134 * is a boolean property. 2135 */ 2136 if (size == 0) { 2137 ph->ph_size = 0; 2138 ph->ph_data = NULL; 2139 ph->ph_cur_pos = NULL; 2140 ph->ph_save_pos = NULL; 2141 } else { 2142 if (ph->ph_flags == DDI_PROP_DONTSLEEP) { 2143 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP); 2144 if (ph->ph_data == NULL) 2145 return (DDI_PROP_NO_MEMORY); 2146 } else 2147 ph->ph_data = kmem_zalloc(size, KM_SLEEP); 2148 ph->ph_size = size; 2149 ph->ph_cur_pos = ph->ph_data; 2150 ph->ph_save_pos = ph->ph_data; 2151 } 2152 return (DDI_PROP_SUCCESS); 2153 } 2154 2155 /* 2156 * Free the space allocated by the lookup routines. Each lookup routine 2157 * returns a pointer to the decoded data to the driver. The driver then 2158 * passes this pointer back to us. This data actually lives in a struct 2159 * prop_driver_data. We use negative indexing to find the beginning of 2160 * the structure and then free the entire structure using the size and 2161 * the free routine stored in the structure. 
 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure: the header immediately precedes the data
	 * pointer handed back to the driver.
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine to free it
	 */
	(*pdd->pdd_prop_free)(pdd);
}

/*
 * Free the data associated with an array of ints,
 * allocated with ddi_prop_decode_alloc().
 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Free a single string property or a single string contained within
 * the argv style return value of an array of strings.
 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);

}

/*
 * Free an array of strings.
 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Free the data associated with an array of bytes.
 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}

/*
 * Reset the current location pointer in the property handle to the
 * beginning of the data.
 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}

/*
 * Save the location that the current location pointer is pointing to.
 * (NOTE(review): the original comments for save_pos/restore_pos were
 * swapped; corrected here to match the code.)
 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	ph->ph_save_pos = ph->ph_cur_pos;
}

/*
 * Restore the current location pointer in the property handle to the
 * previously saved position.
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
    ph->ph_cur_pos = ph->ph_save_pos;
}

/*
 * Property encode/decode functions
 */

/*
 * Decode a single integer property
 */
static int
ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
{
    int i;
    int tmp;

    /*
     * If there is nothing to decode return an error
     */
    if (ph->ph_size == 0)
        return (DDI_PROP_END_OF_DATA);

    /*
     * Decode the property as a single integer and return it
     * in data if we were able to decode it.
     */
    i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
    if (i < DDI_PROP_RESULT_OK) {
        switch (i) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_DECODE);
        }
    }

    *(int *)data = tmp;
    *nelements = 1;
    return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single 64 bit integer property
 */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
    int i;
    int64_t tmp;

    /*
     * If there is nothing to decode return an error
     */
    if (ph->ph_size == 0)
        return (DDI_PROP_END_OF_DATA);

    /*
     * Decode the property as a single integer and return it
     * in data if we were able to decode it.
     */
    i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
    if (i < DDI_PROP_RESULT_OK) {
        switch (i) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_DECODE);
        }
    }

    *(int64_t *)data = tmp;
    *nelements = 1;
    return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of integers property.
 * Two-pass: first skip through the encoded data to count elements,
 * then rewind and decode each one into a freshly allocated array.
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
    int i;
    int cnt = 0;
    int *tmp;
    int *intp;
    int n;

    /*
     * Figure out how many array elements there are by going through the
     * data without decoding it first and counting.
     */
    for (;;) {
        i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
        if (i < 0)
            break;
        cnt++;
    }

    /*
     * If there are no elements return an error
     */
    if (cnt == 0)
        return (DDI_PROP_END_OF_DATA);

    /*
     * If we cannot skip through the data, we cannot decode it
     */
    if (i == DDI_PROP_RESULT_ERROR)
        return (DDI_PROP_CANNOT_DECODE);

    /*
     * Reset the data pointer to the beginning of the encoded data
     */
    ddi_prop_reset_pos(ph);

    /*
     * Allocate memory to store the decoded value in.
     */
    intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
        ddi_prop_free_ints);

    /*
     * Decode each element and place it in the space we just allocated
     */
    tmp = intp;
    for (n = 0; n < cnt; n++, tmp++) {
        i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
        if (i < DDI_PROP_RESULT_OK) {
            /*
             * Free the space we just allocated
             * and return an error.
             */
            ddi_prop_free(intp);
            switch (i) {
            case DDI_PROP_RESULT_EOF:
                return (DDI_PROP_END_OF_DATA);

            case DDI_PROP_RESULT_ERROR:
                return (DDI_PROP_CANNOT_DECODE);
            }
        }
    }

    *nelements = cnt;
    *(int **)data = intp;

    return (DDI_PROP_SUCCESS);
}

/*
 * Decode a 64 bit integer array property
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
    int i;
    int n;
    int cnt = 0;
    int64_t *tmp;
    int64_t *intp;

    /*
     * Count the number of array elements by going
     * through the data without decoding it.
     */
    for (;;) {
        i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
        if (i < 0)
            break;
        cnt++;
    }

    /*
     * If there are no elements return an error
     */
    if (cnt == 0)
        return (DDI_PROP_END_OF_DATA);

    /*
     * If we cannot skip through the data, we cannot decode it
     */
    if (i == DDI_PROP_RESULT_ERROR)
        return (DDI_PROP_CANNOT_DECODE);

    /*
     * Reset the data pointer to the beginning of the encoded data
     */
    ddi_prop_reset_pos(ph);

    /*
     * Allocate memory to store the decoded value.
     * (ddi_prop_free_ints is a generic size-based free, so it is
     * safe for 64-bit arrays as well.)
     */
    intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
        ddi_prop_free_ints);

    /*
     * Decode each element and place it in the space allocated
     */
    tmp = intp;
    for (n = 0; n < cnt; n++, tmp++) {
        i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
        if (i < DDI_PROP_RESULT_OK) {
            /*
             * Free the space we just allocated
             * and return an error.
             */
            ddi_prop_free(intp);
            switch (i) {
            case DDI_PROP_RESULT_EOF:
                return (DDI_PROP_END_OF_DATA);

            case DDI_PROP_RESULT_ERROR:
                return (DDI_PROP_CANNOT_DECODE);
            }
        }
    }

    *nelements = cnt;
    *(int64_t **)data = intp;

    return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (Can be one element)
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
    int i;
    int *tmp;
    int cnt;
    int size;

    /*
     * If there is no data, we cannot do anything
     */
    if (nelements == 0)
        return (DDI_PROP_CANNOT_ENCODE);

    /*
     * Get the size of an encoded int.
     */
    size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

    if (size < DDI_PROP_RESULT_OK) {
        switch (size) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_ENCODE);
        }
    }

    /*
     * Allocate space in the handle to store the encoded int.
     */
    if (ddi_prop_encode_alloc(ph, size * nelements) !=
        DDI_PROP_SUCCESS)
        return (DDI_PROP_NO_MEMORY);

    /*
     * Encode the array of ints.
     */
    tmp = (int *)data;
    for (cnt = 0; cnt < nelements; cnt++, tmp++) {
        i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
        if (i < DDI_PROP_RESULT_OK) {
            switch (i) {
            case DDI_PROP_RESULT_EOF:
                return (DDI_PROP_END_OF_DATA);

            case DDI_PROP_RESULT_ERROR:
                return (DDI_PROP_CANNOT_ENCODE);
            }
        }
    }

    return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
    int i;
    int cnt;
    int size;
    int64_t *tmp;

    /*
     * If there is no data, we cannot do anything
     */
    if (nelements == 0)
        return (DDI_PROP_CANNOT_ENCODE);

    /*
     * Get the size of an encoded 64 bit int.
     */
    size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

    if (size < DDI_PROP_RESULT_OK) {
        switch (size) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_ENCODE);
        }
    }

    /*
     * Allocate space in the handle to store the encoded int.
     */
    if (ddi_prop_encode_alloc(ph, size * nelements) !=
        DDI_PROP_SUCCESS)
        return (DDI_PROP_NO_MEMORY);

    /*
     * Encode the array of ints.
     */
    tmp = (int64_t *)data;
    for (cnt = 0; cnt < nelements; cnt++, tmp++) {
        i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
        if (i < DDI_PROP_RESULT_OK) {
            switch (i) {
            case DDI_PROP_RESULT_EOF:
                return (DDI_PROP_END_OF_DATA);

            case DDI_PROP_RESULT_ERROR:
                return (DDI_PROP_CANNOT_ENCODE);
            }
        }
    }

    return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
    char *tmp;
    char *str;
    int i;
    int size;

    /*
     * If there is nothing to decode return an error
     */
    if (ph->ph_size == 0)
        return (DDI_PROP_END_OF_DATA);

    /*
     * Get the decoded size of the encoded string.
     */
    size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
    if (size < DDI_PROP_RESULT_OK) {
        switch (size) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_DECODE);
        }
    }

    /*
     * Allocate memory to store the decoded value in.
     */
    str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

    ddi_prop_reset_pos(ph);

    /*
     * Decode the str and place it in the space we just allocated
     */
    tmp = str;
    i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
    if (i < DDI_PROP_RESULT_OK) {
        /*
         * Free the space we just allocated
         * and return an error.
         */
        ddi_prop_free(str);
        switch (i) {
        case DDI_PROP_RESULT_EOF:
            return (DDI_PROP_END_OF_DATA);

        case DDI_PROP_RESULT_ERROR:
            return (DDI_PROP_CANNOT_DECODE);
        }
    }

    *(char **)data = str;
    *nelements = 1;

    return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
2655 */ 2656 int 2657 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements) 2658 { 2659 int cnt = 0; 2660 char **strs; 2661 char **tmp; 2662 char *ptr; 2663 int i; 2664 int n; 2665 int size; 2666 size_t nbytes; 2667 2668 /* 2669 * Figure out how many array elements there are by going through the 2670 * data without decoding it first and counting. 2671 */ 2672 for (;;) { 2673 i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL); 2674 if (i < 0) 2675 break; 2676 cnt++; 2677 } 2678 2679 /* 2680 * If there are no elements return an error 2681 */ 2682 if (cnt == 0) 2683 return (DDI_PROP_END_OF_DATA); 2684 2685 /* 2686 * If we cannot skip through the data, we cannot decode it 2687 */ 2688 if (i == DDI_PROP_RESULT_ERROR) 2689 return (DDI_PROP_CANNOT_DECODE); 2690 2691 /* 2692 * Reset the data pointer to the beginning of the encoded data 2693 */ 2694 ddi_prop_reset_pos(ph); 2695 2696 /* 2697 * Figure out how much memory we need for the sum total 2698 */ 2699 nbytes = (cnt + 1) * sizeof (char *); 2700 2701 for (n = 0; n < cnt; n++) { 2702 /* 2703 * Get the decoded size of the current encoded string. 2704 */ 2705 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL); 2706 if (size < DDI_PROP_RESULT_OK) { 2707 switch (size) { 2708 case DDI_PROP_RESULT_EOF: 2709 return (DDI_PROP_END_OF_DATA); 2710 2711 case DDI_PROP_RESULT_ERROR: 2712 return (DDI_PROP_CANNOT_DECODE); 2713 } 2714 } 2715 2716 nbytes += size; 2717 } 2718 2719 /* 2720 * Allocate memory in which to store the decoded strings. 2721 */ 2722 strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings); 2723 2724 /* 2725 * Set up pointers for each string by figuring out yet 2726 * again how long each string is. 2727 */ 2728 ddi_prop_reset_pos(ph); 2729 ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *)); 2730 for (tmp = strs, n = 0; n < cnt; n++, tmp++) { 2731 /* 2732 * Get the decoded size of the current encoded string. 
2733 */ 2734 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL); 2735 if (size < DDI_PROP_RESULT_OK) { 2736 ddi_prop_free(strs); 2737 switch (size) { 2738 case DDI_PROP_RESULT_EOF: 2739 return (DDI_PROP_END_OF_DATA); 2740 2741 case DDI_PROP_RESULT_ERROR: 2742 return (DDI_PROP_CANNOT_DECODE); 2743 } 2744 } 2745 2746 *tmp = ptr; 2747 ptr += size; 2748 } 2749 2750 /* 2751 * String array is terminated by a NULL 2752 */ 2753 *tmp = NULL; 2754 2755 /* 2756 * Finally, we can decode each string 2757 */ 2758 ddi_prop_reset_pos(ph); 2759 for (tmp = strs, n = 0; n < cnt; n++, tmp++) { 2760 i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp); 2761 if (i < DDI_PROP_RESULT_OK) { 2762 /* 2763 * Free the space we just allocated 2764 * and return an error 2765 */ 2766 ddi_prop_free(strs); 2767 switch (i) { 2768 case DDI_PROP_RESULT_EOF: 2769 return (DDI_PROP_END_OF_DATA); 2770 2771 case DDI_PROP_RESULT_ERROR: 2772 return (DDI_PROP_CANNOT_DECODE); 2773 } 2774 } 2775 } 2776 2777 *(char ***)data = strs; 2778 *nelements = cnt; 2779 2780 return (DDI_PROP_SUCCESS); 2781 } 2782 2783 /* 2784 * Encode a string. 2785 */ 2786 int 2787 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements) 2788 { 2789 char **tmp; 2790 int size; 2791 int i; 2792 2793 /* 2794 * If there is no data, we cannot do anything 2795 */ 2796 if (nelements == 0) 2797 return (DDI_PROP_CANNOT_ENCODE); 2798 2799 /* 2800 * Get the size of the encoded string. 2801 */ 2802 tmp = (char **)data; 2803 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp); 2804 if (size < DDI_PROP_RESULT_OK) { 2805 switch (size) { 2806 case DDI_PROP_RESULT_EOF: 2807 return (DDI_PROP_END_OF_DATA); 2808 2809 case DDI_PROP_RESULT_ERROR: 2810 return (DDI_PROP_CANNOT_ENCODE); 2811 } 2812 } 2813 2814 /* 2815 * Allocate space in the handle to store the encoded string. 
2816 */ 2817 if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS) 2818 return (DDI_PROP_NO_MEMORY); 2819 2820 ddi_prop_reset_pos(ph); 2821 2822 /* 2823 * Encode the string. 2824 */ 2825 tmp = (char **)data; 2826 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp); 2827 if (i < DDI_PROP_RESULT_OK) { 2828 switch (i) { 2829 case DDI_PROP_RESULT_EOF: 2830 return (DDI_PROP_END_OF_DATA); 2831 2832 case DDI_PROP_RESULT_ERROR: 2833 return (DDI_PROP_CANNOT_ENCODE); 2834 } 2835 } 2836 2837 return (DDI_PROP_SUCCESS); 2838 } 2839 2840 2841 /* 2842 * Encode an array of strings. 2843 */ 2844 int 2845 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements) 2846 { 2847 int cnt = 0; 2848 char **tmp; 2849 int size; 2850 uint_t total_size; 2851 int i; 2852 2853 /* 2854 * If there is no data, we cannot do anything 2855 */ 2856 if (nelements == 0) 2857 return (DDI_PROP_CANNOT_ENCODE); 2858 2859 /* 2860 * Get the total size required to encode all the strings. 2861 */ 2862 total_size = 0; 2863 tmp = (char **)data; 2864 for (cnt = 0; cnt < nelements; cnt++, tmp++) { 2865 size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp); 2866 if (size < DDI_PROP_RESULT_OK) { 2867 switch (size) { 2868 case DDI_PROP_RESULT_EOF: 2869 return (DDI_PROP_END_OF_DATA); 2870 2871 case DDI_PROP_RESULT_ERROR: 2872 return (DDI_PROP_CANNOT_ENCODE); 2873 } 2874 } 2875 total_size += (uint_t)size; 2876 } 2877 2878 /* 2879 * Allocate space in the handle to store the encoded strings. 2880 */ 2881 if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS) 2882 return (DDI_PROP_NO_MEMORY); 2883 2884 ddi_prop_reset_pos(ph); 2885 2886 /* 2887 * Encode the array of strings. 
2888 */ 2889 tmp = (char **)data; 2890 for (cnt = 0; cnt < nelements; cnt++, tmp++) { 2891 i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp); 2892 if (i < DDI_PROP_RESULT_OK) { 2893 switch (i) { 2894 case DDI_PROP_RESULT_EOF: 2895 return (DDI_PROP_END_OF_DATA); 2896 2897 case DDI_PROP_RESULT_ERROR: 2898 return (DDI_PROP_CANNOT_ENCODE); 2899 } 2900 } 2901 } 2902 2903 return (DDI_PROP_SUCCESS); 2904 } 2905 2906 2907 /* 2908 * Decode an array of bytes. 2909 */ 2910 static int 2911 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements) 2912 { 2913 uchar_t *tmp; 2914 int nbytes; 2915 int i; 2916 2917 /* 2918 * If there are no elements return an error 2919 */ 2920 if (ph->ph_size == 0) 2921 return (DDI_PROP_END_OF_DATA); 2922 2923 /* 2924 * Get the size of the encoded array of bytes. 2925 */ 2926 nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE, 2927 data, ph->ph_size); 2928 if (nbytes < DDI_PROP_RESULT_OK) { 2929 switch (nbytes) { 2930 case DDI_PROP_RESULT_EOF: 2931 return (DDI_PROP_END_OF_DATA); 2932 2933 case DDI_PROP_RESULT_ERROR: 2934 return (DDI_PROP_CANNOT_DECODE); 2935 } 2936 } 2937 2938 /* 2939 * Allocated memory to store the decoded value in. 2940 */ 2941 tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes); 2942 2943 /* 2944 * Decode each element and place it in the space we just allocated 2945 */ 2946 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes); 2947 if (i < DDI_PROP_RESULT_OK) { 2948 /* 2949 * Free the space we just allocated 2950 * and return an error 2951 */ 2952 ddi_prop_free(tmp); 2953 switch (i) { 2954 case DDI_PROP_RESULT_EOF: 2955 return (DDI_PROP_END_OF_DATA); 2956 2957 case DDI_PROP_RESULT_ERROR: 2958 return (DDI_PROP_CANNOT_DECODE); 2959 } 2960 } 2961 2962 *(uchar_t **)data = tmp; 2963 *nelements = nbytes; 2964 2965 return (DDI_PROP_SUCCESS); 2966 } 2967 2968 /* 2969 * Encode an array of bytes. 
2970 */ 2971 int 2972 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2973 { 2974 int size; 2975 int i; 2976 2977 /* 2978 * If there are no elements, then this is a boolean property, 2979 * so just create a property handle with no data and return. 2980 */ 2981 if (nelements == 0) { 2982 (void) ddi_prop_encode_alloc(ph, 0); 2983 return (DDI_PROP_SUCCESS); 2984 } 2985 2986 /* 2987 * Get the size of the encoded array of bytes. 2988 */ 2989 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2990 nelements); 2991 if (size < DDI_PROP_RESULT_OK) { 2992 switch (size) { 2993 case DDI_PROP_RESULT_EOF: 2994 return (DDI_PROP_END_OF_DATA); 2995 2996 case DDI_PROP_RESULT_ERROR: 2997 return (DDI_PROP_CANNOT_DECODE); 2998 } 2999 } 3000 3001 /* 3002 * Allocate space in the handle to store the encoded bytes. 3003 */ 3004 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 3005 return (DDI_PROP_NO_MEMORY); 3006 3007 /* 3008 * Encode the array of bytes. 3009 */ 3010 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 3011 nelements); 3012 if (i < DDI_PROP_RESULT_OK) { 3013 switch (i) { 3014 case DDI_PROP_RESULT_EOF: 3015 return (DDI_PROP_END_OF_DATA); 3016 3017 case DDI_PROP_RESULT_ERROR: 3018 return (DDI_PROP_CANNOT_ENCODE); 3019 } 3020 } 3021 3022 return (DDI_PROP_SUCCESS); 3023 } 3024 3025 /* 3026 * OBP 1275 integer, string and byte operators. 
3027 * 3028 * DDI_PROP_CMD_DECODE: 3029 * 3030 * DDI_PROP_RESULT_ERROR: cannot decode the data 3031 * DDI_PROP_RESULT_EOF: end of data 3032 * DDI_PROP_OK: data was decoded 3033 * 3034 * DDI_PROP_CMD_ENCODE: 3035 * 3036 * DDI_PROP_RESULT_ERROR: cannot encode the data 3037 * DDI_PROP_RESULT_EOF: end of data 3038 * DDI_PROP_OK: data was encoded 3039 * 3040 * DDI_PROP_CMD_SKIP: 3041 * 3042 * DDI_PROP_RESULT_ERROR: cannot skip the data 3043 * DDI_PROP_RESULT_EOF: end of data 3044 * DDI_PROP_OK: data was skipped 3045 * 3046 * DDI_PROP_CMD_GET_ESIZE: 3047 * 3048 * DDI_PROP_RESULT_ERROR: cannot get encoded size 3049 * DDI_PROP_RESULT_EOF: end of data 3050 * > 0: the encoded size 3051 * 3052 * DDI_PROP_CMD_GET_DSIZE: 3053 * 3054 * DDI_PROP_RESULT_ERROR: cannot get decoded size 3055 * DDI_PROP_RESULT_EOF: end of data 3056 * > 0: the decoded size 3057 */ 3058 3059 /* 3060 * OBP 1275 integer operator 3061 * 3062 * OBP properties are a byte stream of data, so integers may not be 3063 * properly aligned. Therefore we need to copy them one byte at a time. 3064 */ 3065 int 3066 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data) 3067 { 3068 int i; 3069 3070 switch (cmd) { 3071 case DDI_PROP_CMD_DECODE: 3072 /* 3073 * Check that there is encoded data 3074 */ 3075 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) 3076 return (DDI_PROP_RESULT_ERROR); 3077 if (ph->ph_flags & PH_FROM_PROM) { 3078 i = MIN(ph->ph_size, PROP_1275_INT_SIZE); 3079 if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 3080 ph->ph_size - i)) 3081 return (DDI_PROP_RESULT_ERROR); 3082 } else { 3083 if (ph->ph_size < sizeof (int) || 3084 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 3085 ph->ph_size - sizeof (int)))) 3086 return (DDI_PROP_RESULT_ERROR); 3087 } 3088 3089 /* 3090 * Copy the integer, using the implementation-specific 3091 * copy function if the property is coming from the PROM. 
3092 */ 3093 if (ph->ph_flags & PH_FROM_PROM) { 3094 *data = impl_ddi_prop_int_from_prom( 3095 (uchar_t *)ph->ph_cur_pos, 3096 (ph->ph_size < PROP_1275_INT_SIZE) ? 3097 ph->ph_size : PROP_1275_INT_SIZE); 3098 } else { 3099 bcopy(ph->ph_cur_pos, data, sizeof (int)); 3100 } 3101 3102 /* 3103 * Move the current location to the start of the next 3104 * bit of undecoded data. 3105 */ 3106 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 3107 PROP_1275_INT_SIZE; 3108 return (DDI_PROP_RESULT_OK); 3109 3110 case DDI_PROP_CMD_ENCODE: 3111 /* 3112 * Check that there is room to encoded the data 3113 */ 3114 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3115 ph->ph_size < PROP_1275_INT_SIZE || 3116 ((int *)ph->ph_cur_pos > ((int *)ph->ph_data + 3117 ph->ph_size - sizeof (int)))) 3118 return (DDI_PROP_RESULT_ERROR); 3119 3120 /* 3121 * Encode the integer into the byte stream one byte at a 3122 * time. 3123 */ 3124 bcopy(data, ph->ph_cur_pos, sizeof (int)); 3125 3126 /* 3127 * Move the current location to the start of the next bit of 3128 * space where we can store encoded data. 3129 */ 3130 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE; 3131 return (DDI_PROP_RESULT_OK); 3132 3133 case DDI_PROP_CMD_SKIP: 3134 /* 3135 * Check that there is encoded data 3136 */ 3137 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3138 ph->ph_size < PROP_1275_INT_SIZE) 3139 return (DDI_PROP_RESULT_ERROR); 3140 3141 3142 if ((caddr_t)ph->ph_cur_pos == 3143 (caddr_t)ph->ph_data + ph->ph_size) { 3144 return (DDI_PROP_RESULT_EOF); 3145 } else if ((caddr_t)ph->ph_cur_pos > 3146 (caddr_t)ph->ph_data + ph->ph_size) { 3147 return (DDI_PROP_RESULT_EOF); 3148 } 3149 3150 /* 3151 * Move the current location to the start of the next bit of 3152 * undecoded data. 
3153 */ 3154 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE; 3155 return (DDI_PROP_RESULT_OK); 3156 3157 case DDI_PROP_CMD_GET_ESIZE: 3158 /* 3159 * Return the size of an encoded integer on OBP 3160 */ 3161 return (PROP_1275_INT_SIZE); 3162 3163 case DDI_PROP_CMD_GET_DSIZE: 3164 /* 3165 * Return the size of a decoded integer on the system. 3166 */ 3167 return (sizeof (int)); 3168 3169 default: 3170 #ifdef DEBUG 3171 panic("ddi_prop_1275_int: %x impossible", cmd); 3172 /*NOTREACHED*/ 3173 #else 3174 return (DDI_PROP_RESULT_ERROR); 3175 #endif /* DEBUG */ 3176 } 3177 } 3178 3179 /* 3180 * 64 bit integer operator. 3181 * 3182 * This is an extension, defined by Sun, to the 1275 integer 3183 * operator. This routine handles the encoding/decoding of 3184 * 64 bit integer properties. 3185 */ 3186 int 3187 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data) 3188 { 3189 3190 switch (cmd) { 3191 case DDI_PROP_CMD_DECODE: 3192 /* 3193 * Check that there is encoded data 3194 */ 3195 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) 3196 return (DDI_PROP_RESULT_ERROR); 3197 if (ph->ph_flags & PH_FROM_PROM) { 3198 return (DDI_PROP_RESULT_ERROR); 3199 } else { 3200 if (ph->ph_size < sizeof (int64_t) || 3201 ((int64_t *)ph->ph_cur_pos > 3202 ((int64_t *)ph->ph_data + 3203 ph->ph_size - sizeof (int64_t)))) 3204 return (DDI_PROP_RESULT_ERROR); 3205 } 3206 /* 3207 * Copy the integer, using the implementation-specific 3208 * copy function if the property is coming from the PROM. 3209 */ 3210 if (ph->ph_flags & PH_FROM_PROM) { 3211 return (DDI_PROP_RESULT_ERROR); 3212 } else { 3213 bcopy(ph->ph_cur_pos, data, sizeof (int64_t)); 3214 } 3215 3216 /* 3217 * Move the current location to the start of the next 3218 * bit of undecoded data. 
3219 */ 3220 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 3221 sizeof (int64_t); 3222 return (DDI_PROP_RESULT_OK); 3223 3224 case DDI_PROP_CMD_ENCODE: 3225 /* 3226 * Check that there is room to encoded the data 3227 */ 3228 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3229 ph->ph_size < sizeof (int64_t) || 3230 ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data + 3231 ph->ph_size - sizeof (int64_t)))) 3232 return (DDI_PROP_RESULT_ERROR); 3233 3234 /* 3235 * Encode the integer into the byte stream one byte at a 3236 * time. 3237 */ 3238 bcopy(data, ph->ph_cur_pos, sizeof (int64_t)); 3239 3240 /* 3241 * Move the current location to the start of the next bit of 3242 * space where we can store encoded data. 3243 */ 3244 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 3245 sizeof (int64_t); 3246 return (DDI_PROP_RESULT_OK); 3247 3248 case DDI_PROP_CMD_SKIP: 3249 /* 3250 * Check that there is encoded data 3251 */ 3252 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3253 ph->ph_size < sizeof (int64_t)) 3254 return (DDI_PROP_RESULT_ERROR); 3255 3256 if ((caddr_t)ph->ph_cur_pos == 3257 (caddr_t)ph->ph_data + ph->ph_size) { 3258 return (DDI_PROP_RESULT_EOF); 3259 } else if ((caddr_t)ph->ph_cur_pos > 3260 (caddr_t)ph->ph_data + ph->ph_size) { 3261 return (DDI_PROP_RESULT_EOF); 3262 } 3263 3264 /* 3265 * Move the current location to the start of 3266 * the next bit of undecoded data. 3267 */ 3268 ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + 3269 sizeof (int64_t); 3270 return (DDI_PROP_RESULT_OK); 3271 3272 case DDI_PROP_CMD_GET_ESIZE: 3273 /* 3274 * Return the size of an encoded integer on OBP 3275 */ 3276 return (sizeof (int64_t)); 3277 3278 case DDI_PROP_CMD_GET_DSIZE: 3279 /* 3280 * Return the size of a decoded integer on the system. 
3281 */ 3282 return (sizeof (int64_t)); 3283 3284 default: 3285 #ifdef DEBUG 3286 panic("ddi_prop_int64_op: %x impossible", cmd); 3287 /*NOTREACHED*/ 3288 #else 3289 return (DDI_PROP_RESULT_ERROR); 3290 #endif /* DEBUG */ 3291 } 3292 } 3293 3294 /* 3295 * OBP 1275 string operator. 3296 * 3297 * OBP strings are NULL terminated. 3298 */ 3299 int 3300 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data) 3301 { 3302 int n; 3303 char *p; 3304 char *end; 3305 3306 switch (cmd) { 3307 case DDI_PROP_CMD_DECODE: 3308 /* 3309 * Check that there is encoded data 3310 */ 3311 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 3312 return (DDI_PROP_RESULT_ERROR); 3313 } 3314 3315 /* 3316 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and 3317 * how to NULL terminate result. 3318 */ 3319 p = (char *)ph->ph_cur_pos; 3320 end = (char *)ph->ph_data + ph->ph_size; 3321 if (p >= end) 3322 return (DDI_PROP_RESULT_EOF); 3323 3324 while (p < end) { 3325 *data++ = *p; 3326 if (*p++ == 0) { /* NULL from OBP */ 3327 ph->ph_cur_pos = p; 3328 return (DDI_PROP_RESULT_OK); 3329 } 3330 } 3331 3332 /* 3333 * If OBP did not NULL terminate string, which happens 3334 * (at least) for 'true'/'false' boolean values, account for 3335 * the space and store null termination on decode. 3336 */ 3337 ph->ph_cur_pos = p; 3338 *data = 0; 3339 return (DDI_PROP_RESULT_OK); 3340 3341 case DDI_PROP_CMD_ENCODE: 3342 /* 3343 * Check that there is room to encoded the data 3344 */ 3345 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 3346 return (DDI_PROP_RESULT_ERROR); 3347 } 3348 3349 n = strlen(data) + 1; 3350 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3351 ph->ph_size - n)) { 3352 return (DDI_PROP_RESULT_ERROR); 3353 } 3354 3355 /* 3356 * Copy the NULL terminated string 3357 */ 3358 bcopy(data, ph->ph_cur_pos, n); 3359 3360 /* 3361 * Move the current location to the start of the next bit of 3362 * space where we can store encoded data. 
3363 */ 3364 ph->ph_cur_pos = (char *)ph->ph_cur_pos + n; 3365 return (DDI_PROP_RESULT_OK); 3366 3367 case DDI_PROP_CMD_SKIP: 3368 /* 3369 * Check that there is encoded data 3370 */ 3371 if (ph->ph_cur_pos == NULL || ph->ph_size == 0) { 3372 return (DDI_PROP_RESULT_ERROR); 3373 } 3374 3375 /* 3376 * Return the string length plus one for the NULL 3377 * We know the size of the property, we need to 3378 * ensure that the string is properly formatted, 3379 * since we may be looking up random OBP data. 3380 */ 3381 p = (char *)ph->ph_cur_pos; 3382 end = (char *)ph->ph_data + ph->ph_size; 3383 if (p >= end) 3384 return (DDI_PROP_RESULT_EOF); 3385 3386 while (p < end) { 3387 if (*p++ == 0) { /* NULL from OBP */ 3388 ph->ph_cur_pos = p; 3389 return (DDI_PROP_RESULT_OK); 3390 } 3391 } 3392 3393 /* 3394 * Accommodate the fact that OBP does not always NULL 3395 * terminate strings. 3396 */ 3397 ph->ph_cur_pos = p; 3398 return (DDI_PROP_RESULT_OK); 3399 3400 case DDI_PROP_CMD_GET_ESIZE: 3401 /* 3402 * Return the size of the encoded string on OBP. 3403 */ 3404 return (strlen(data) + 1); 3405 3406 case DDI_PROP_CMD_GET_DSIZE: 3407 /* 3408 * Return the string length plus one for the NULL. 3409 * We know the size of the property, we need to 3410 * ensure that the string is properly formatted, 3411 * since we may be looking up random OBP data. 3412 */ 3413 p = (char *)ph->ph_cur_pos; 3414 end = (char *)ph->ph_data + ph->ph_size; 3415 if (p >= end) 3416 return (DDI_PROP_RESULT_EOF); 3417 3418 for (n = 0; p < end; n++) { 3419 if (*p++ == 0) { /* NULL from OBP */ 3420 ph->ph_cur_pos = p; 3421 return (n + 1); 3422 } 3423 } 3424 3425 /* 3426 * If OBP did not NULL terminate string, which happens for 3427 * 'true'/'false' boolean values, account for the space 3428 * to store null termination here. 
3429 */ 3430 ph->ph_cur_pos = p; 3431 return (n + 1); 3432 3433 default: 3434 #ifdef DEBUG 3435 panic("ddi_prop_1275_string: %x impossible", cmd); 3436 /*NOTREACHED*/ 3437 #else 3438 return (DDI_PROP_RESULT_ERROR); 3439 #endif /* DEBUG */ 3440 } 3441 } 3442 3443 /* 3444 * OBP 1275 byte operator 3445 * 3446 * Caller must specify the number of bytes to get. OBP encodes bytes 3447 * as a byte so there is a 1-to-1 translation. 3448 */ 3449 int 3450 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data, 3451 uint_t nelements) 3452 { 3453 switch (cmd) { 3454 case DDI_PROP_CMD_DECODE: 3455 /* 3456 * Check that there is encoded data 3457 */ 3458 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3459 ph->ph_size < nelements || 3460 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3461 ph->ph_size - nelements))) 3462 return (DDI_PROP_RESULT_ERROR); 3463 3464 /* 3465 * Copy out the bytes 3466 */ 3467 bcopy(ph->ph_cur_pos, data, nelements); 3468 3469 /* 3470 * Move the current location 3471 */ 3472 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3473 return (DDI_PROP_RESULT_OK); 3474 3475 case DDI_PROP_CMD_ENCODE: 3476 /* 3477 * Check that there is room to encode the data 3478 */ 3479 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3480 ph->ph_size < nelements || 3481 ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3482 ph->ph_size - nelements))) 3483 return (DDI_PROP_RESULT_ERROR); 3484 3485 /* 3486 * Copy in the bytes 3487 */ 3488 bcopy(data, ph->ph_cur_pos, nelements); 3489 3490 /* 3491 * Move the current location to the start of the next bit of 3492 * space where we can store encoded data. 
3493 */ 3494 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3495 return (DDI_PROP_RESULT_OK); 3496 3497 case DDI_PROP_CMD_SKIP: 3498 /* 3499 * Check that there is encoded data 3500 */ 3501 if (ph->ph_cur_pos == NULL || ph->ph_size == 0 || 3502 ph->ph_size < nelements) 3503 return (DDI_PROP_RESULT_ERROR); 3504 3505 if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data + 3506 ph->ph_size - nelements)) 3507 return (DDI_PROP_RESULT_EOF); 3508 3509 /* 3510 * Move the current location 3511 */ 3512 ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements; 3513 return (DDI_PROP_RESULT_OK); 3514 3515 case DDI_PROP_CMD_GET_ESIZE: 3516 /* 3517 * The size in bytes of the encoded size is the 3518 * same as the decoded size provided by the caller. 3519 */ 3520 return (nelements); 3521 3522 case DDI_PROP_CMD_GET_DSIZE: 3523 /* 3524 * Just return the number of bytes specified by the caller. 3525 */ 3526 return (nelements); 3527 3528 default: 3529 #ifdef DEBUG 3530 panic("ddi_prop_1275_bytes: %x impossible", cmd); 3531 /*NOTREACHED*/ 3532 #else 3533 return (DDI_PROP_RESULT_ERROR); 3534 #endif /* DEBUG */ 3535 } 3536 } 3537 3538 /* 3539 * Used for properties that come from the OBP, hardware configuration files, 3540 * or that are created by calls to ddi_prop_update(9F). 3541 */ 3542 static struct prop_handle_ops prop_1275_ops = { 3543 ddi_prop_1275_int, 3544 ddi_prop_1275_string, 3545 ddi_prop_1275_bytes, 3546 ddi_prop_int64_op 3547 }; 3548 3549 3550 /* 3551 * Interface to create/modify a managed property on child's behalf... 3552 * Flags interpreted are: 3553 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep. 3554 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list. 3555 * 3556 * Use same dev_t when modifying or undefining a property. 3557 * Search for properties with DDI_DEV_T_ANY to match first named 3558 * property on the list. 3559 * 3560 * Properties are stored LIFO and subsequently will match the first 3561 * `matching' instance. 
 */

/*
 * ddi_prop_add: Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

/*
 * Allocate a new ddi_prop_t, copy in the name and (unless this is an
 * explicit undefine) the value, and link it LIFO onto the appropriate
 * property list of dip (driver list by default; system or hardware list
 * when DDI_PROP_SYSTEM_DEF/DDI_PROP_HW_DEF is set in flags).
 * The caller retains ownership of `name' and `value'; both are copied.
 * Returns DDI_PROP_SUCCESS, DDI_PROP_INVAL_ARG or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *new_propp, *propp;
	ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int km_flags = KM_NOSLEEP;
	int name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know
	 * their major number.  They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */
	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */
	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				/* unwind: free name then the prop itself */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 * devi_lock protects the per-devinfo property lists.
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change: Modify a software managed property value
 *
 * Set new length and value if found.
 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 * input name is the NULL string.
 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 * Note: an undef can be modified to be a define,
 * (you can't go the other way.)
 */
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (allocating before taking devi_lock keeps the lock hold short
	 * and avoids sleeping allocations under the mutex)
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver. See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists. If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found it: fill the preallocated buffer and swap it in
		 * for the old value, which is freed.  (The old buffer is
		 * always replaced, never reused in place.)
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		/* a previous explicit undefine becomes a define again */
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* not found: drop the unused preallocation and create the prop */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}

/*
 * Common update routine used to update and encode a property.  Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.  Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t ph;
	int rval;
	uint_t ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.  If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change copy the value, so it is safe
	 * to free the encode buffer here.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}


/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
 */

int
ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP)) {
		/* old interface: no-sleep is the default (see above) */
		flag |= DDI_PROP_DONTSLEEP;
#ifdef DDI_PROP_DEBUG
		if (length != 0)
			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
			    "use ddi_prop_update (prop = %s, node = %s%d)",
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
#endif /* DDI_PROP_DEBUG */
	}
	flag &= ~DDI_PROP_SYSTEM_DEF;
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag, name,
	    value, length, ddi_prop_fm_encode_bytes));
}

/*
 * e_ddi_prop_create: as ddi_prop_create(), but operates on the
 * system-defined property list (DDI_PROP_SYSTEM_DEF).
 */
int
e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, value, length, ddi_prop_fm_encode_bytes));
}

/*
 * ddi_prop_modify: update an existing driver-defined property.
 * Unlike ddi_prop_create(), fails with DDI_PROP_NOT_FOUND if the
 * property was not previously defined (s/w properties only: the
 * existence check passes DDI_PROP_NOTPROM).
 */
int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* caller must not pass type bits; the type is fixed below */
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}

/*
 * e_ddi_prop_modify: as ddi_prop_modify(), but operates on the
 * system-defined property list.
 */
int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}


/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it. No driver should be calling this routine.
3936 */ 3937 int 3938 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip, 3939 uint_t flags, char *name, void *data, uint_t *nelements, 3940 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3941 { 3942 int rval; 3943 uint_t ourflags; 3944 prop_handle_t ph; 3945 3946 if ((match_dev == DDI_DEV_T_NONE) || 3947 (name == NULL) || (strlen(name) == 0)) 3948 return (DDI_PROP_INVAL_ARG); 3949 3950 ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags : 3951 flags | DDI_PROP_CANSLEEP; 3952 3953 /* 3954 * Get the encoded data 3955 */ 3956 bzero(&ph, sizeof (prop_handle_t)); 3957 3958 if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) { 3959 /* 3960 * For rootnex and unbound dlpi style-2 devices, index into 3961 * the devnames' array and search the global 3962 * property list. 3963 */ 3964 ourflags &= ~DDI_UNBND_DLPI2; 3965 rval = i_ddi_prop_search_global(match_dev, 3966 ourflags, name, &ph.ph_data, &ph.ph_size); 3967 } else { 3968 rval = ddi_prop_search_common(match_dev, dip, 3969 PROP_LEN_AND_VAL_ALLOC, ourflags, name, 3970 &ph.ph_data, &ph.ph_size); 3971 3972 } 3973 3974 if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) { 3975 ASSERT(ph.ph_data == NULL); 3976 ASSERT(ph.ph_size == 0); 3977 return (rval); 3978 } 3979 3980 /* 3981 * If the encoded data came from a OBP or software 3982 * use the 1275 OBP decode/encode routines. 3983 */ 3984 ph.ph_cur_pos = ph.ph_data; 3985 ph.ph_save_pos = ph.ph_data; 3986 ph.ph_ops = &prop_1275_ops; 3987 ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0; 3988 3989 rval = (*prop_decoder)(&ph, data, nelements); 3990 3991 /* 3992 * Free the encoded data 3993 */ 3994 if (ph.ph_size != 0) 3995 kmem_free(ph.ph_data, ph.ph_size); 3996 3997 return (rval); 3998 } 3999 4000 /* 4001 * Lookup and return an array of composite properties. The driver must 4002 * provide the decode routine. 
 */
int
ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
	    data, nelements, prop_decoder));
}

/*
 * Return 1 if a property exists (no type checking done).
 * Return 0 if it does not exist.
 */
int
ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
{
	int i;
	uint_t x = 0;

	/* DDI_PROP_TYPE_MASK matches a property of any type */
	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
}


/*
 * Update an array of composite properties.  The driver must
 * provide the encode routine.
 */
int
ddi_prop_update(dev_t match_dev, dev_info_t *dip,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
	    name, data, nelements, prop_create));
}

/*
 * Get a single integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * Strip the invalid bits and continue.
		 * NOTE(review): this mask also drops
		 * DDI_PROP_ROOTNEX_GLOBAL, which the check above treats
		 * as valid — confirm whether that is intentional.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/*
		 * DDI_PROP_END_OF_DATA means the property exists but has
		 * no value — a boolean property — so report it as 1 (true).
		 */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* unlike ddi_prop_get_int, invalid flags fail outright */
		return (DDI_PROP_INVAL_ARG);
	}

	/* DDI_PROP_NOTPROM is forced: 64-bit ints are s/w properties only */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* valueless (boolean) property decodes as 1 (true) */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/* strip invalid bits and continue */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}

/*
 * Get an array of 64 bit integer properties
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/* 64-bit int arrays are never PROM properties */
	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}

/*
 * Update a single integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * System-defined (DDI_PROP_SYSTEM_DEF) flavor of ddi_prop_update_int().
 */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * System-defined flavor of ddi_prop_update_int64().
 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
4217 */ 4218 int 4219 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4220 char *name, int *data, uint_t nelements) 4221 { 4222 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 4223 name, data, nelements, ddi_prop_fm_encode_ints)); 4224 } 4225 4226 /* 4227 * Update an array of 64 bit integer properties. 4228 * Update the driver property list if it exists, else create it. 4229 */ 4230 int 4231 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4232 char *name, int64_t *data, uint_t nelements) 4233 { 4234 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 4235 name, data, nelements, ddi_prop_fm_encode_int64)); 4236 } 4237 4238 int 4239 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 4240 char *name, int64_t *data, uint_t nelements) 4241 { 4242 return (ddi_prop_update_common(match_dev, dip, 4243 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 4244 name, data, nelements, ddi_prop_fm_encode_int64)); 4245 } 4246 4247 int 4248 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 4249 char *name, int *data, uint_t nelements) 4250 { 4251 return (ddi_prop_update_common(match_dev, dip, 4252 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 4253 name, data, nelements, ddi_prop_fm_encode_ints)); 4254 } 4255 4256 /* 4257 * Get a single string property. 
4258 */ 4259 int 4260 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags, 4261 char *name, char **data) 4262 { 4263 uint_t x; 4264 4265 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4266 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 4267 #ifdef DEBUG 4268 if (dip != NULL) { 4269 cmn_err(CE_WARN, "%s: invalid flag 0x%x " 4270 "(prop = %s, node = %s%d); invalid bits ignored", 4271 "ddi_prop_lookup_string", flags, name, 4272 ddi_driver_name(dip), ddi_get_instance(dip)); 4273 } 4274 #endif /* DEBUG */ 4275 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4276 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4277 } 4278 4279 return (ddi_prop_lookup_common(match_dev, dip, 4280 (flags | DDI_PROP_TYPE_STRING), name, data, 4281 &x, ddi_prop_fm_decode_string)); 4282 } 4283 4284 /* 4285 * Get an array of strings property. 4286 */ 4287 int 4288 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4289 char *name, char ***data, uint_t *nelements) 4290 { 4291 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4292 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 4293 #ifdef DEBUG 4294 if (dip != NULL) { 4295 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: " 4296 "invalid flag 0x%x (prop = %s, node = %s%d)", 4297 flags, name, ddi_driver_name(dip), 4298 ddi_get_instance(dip)); 4299 } 4300 #endif /* DEBUG */ 4301 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4302 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4303 } 4304 4305 return (ddi_prop_lookup_common(match_dev, dip, 4306 (flags | DDI_PROP_TYPE_STRING), name, data, 4307 nelements, ddi_prop_fm_decode_strings)); 4308 } 4309 4310 /* 4311 * Update a single string property. 
4312 */ 4313 int 4314 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4315 char *name, char *data) 4316 { 4317 return (ddi_prop_update_common(match_dev, dip, 4318 DDI_PROP_TYPE_STRING, name, &data, 1, 4319 ddi_prop_fm_encode_string)); 4320 } 4321 4322 int 4323 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4324 char *name, char *data) 4325 { 4326 return (ddi_prop_update_common(match_dev, dip, 4327 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4328 name, &data, 1, ddi_prop_fm_encode_string)); 4329 } 4330 4331 4332 /* 4333 * Update an array of strings property. 4334 */ 4335 int 4336 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4337 char *name, char **data, uint_t nelements) 4338 { 4339 return (ddi_prop_update_common(match_dev, dip, 4340 DDI_PROP_TYPE_STRING, name, data, nelements, 4341 ddi_prop_fm_encode_strings)); 4342 } 4343 4344 int 4345 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4346 char *name, char **data, uint_t nelements) 4347 { 4348 return (ddi_prop_update_common(match_dev, dip, 4349 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4350 name, data, nelements, 4351 ddi_prop_fm_encode_strings)); 4352 } 4353 4354 4355 /* 4356 * Get an array of bytes property. 
4357 */ 4358 int 4359 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4360 char *name, uchar_t **data, uint_t *nelements) 4361 { 4362 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4363 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 4364 #ifdef DEBUG 4365 if (dip != NULL) { 4366 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: " 4367 " invalid flag 0x%x (prop = %s, node = %s%d)", 4368 flags, name, ddi_driver_name(dip), 4369 ddi_get_instance(dip)); 4370 } 4371 #endif /* DEBUG */ 4372 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4373 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4374 } 4375 4376 return (ddi_prop_lookup_common(match_dev, dip, 4377 (flags | DDI_PROP_TYPE_BYTE), name, data, 4378 nelements, ddi_prop_fm_decode_bytes)); 4379 } 4380 4381 /* 4382 * Update an array of bytes property. 4383 */ 4384 int 4385 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4386 char *name, uchar_t *data, uint_t nelements) 4387 { 4388 if (nelements == 0) 4389 return (DDI_PROP_INVAL_ARG); 4390 4391 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE, 4392 name, data, nelements, ddi_prop_fm_encode_bytes)); 4393 } 4394 4395 4396 int 4397 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4398 char *name, uchar_t *data, uint_t nelements) 4399 { 4400 if (nelements == 0) 4401 return (DDI_PROP_INVAL_ARG); 4402 4403 return (ddi_prop_update_common(match_dev, dip, 4404 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE, 4405 name, data, nelements, ddi_prop_fm_encode_bytes)); 4406 } 4407 4408 4409 /* 4410 * ddi_prop_remove_common: Undefine a managed property: 4411 * Input dev_t must match dev_t when defined. 4412 * Returns DDI_PROP_NOT_FOUND, possibly. 4413 * DDI_PROP_INVAL_ARG is also possible if dev is 4414 * DDI_DEV_T_ANY or incoming name is the NULL string. 
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t *propp;
	ddi_prop_t *lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* walk the singly-linked list, removing the first exact match */
	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */
			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 * (safe to free after dropping devi_lock: propp
			 * is already unlinked and unreachable)
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}

int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}

/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t **list_head;

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */
void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* also drop the dynamic-property registration for this driver */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */
void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}


/*
 * ddi_prop_undefine:	Explicitly undefine a property.  Property
 *			searches which match this property return
 *			the error code DDI_PROP_UNDEFINED.
 *
 *			Use ddi_prop_remove to negate effect of
 *			ddi_prop_undefine
 *
 *			See above for error returns.
4546 */ 4547 4548 int 4549 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4550 { 4551 if (!(flag & DDI_PROP_CANSLEEP)) 4552 flag |= DDI_PROP_DONTSLEEP; 4553 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4554 return (ddi_prop_update_common(dev, dip, flag, 4555 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4556 } 4557 4558 int 4559 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4560 { 4561 if (!(flag & DDI_PROP_CANSLEEP)) 4562 flag |= DDI_PROP_DONTSLEEP; 4563 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | 4564 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4565 return (ddi_prop_update_common(dev, dip, flag, 4566 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4567 } 4568 4569 /* 4570 * Support for gathering dynamic properties in devinfo snapshot. 4571 */ 4572 void 4573 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4574 { 4575 DEVI(dip)->devi_prop_dyn_driver = dp; 4576 } 4577 4578 i_ddi_prop_dyn_t * 4579 i_ddi_prop_dyn_driver_get(dev_info_t *dip) 4580 { 4581 return (DEVI(dip)->devi_prop_dyn_driver); 4582 } 4583 4584 void 4585 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4586 { 4587 DEVI(dip)->devi_prop_dyn_parent = dp; 4588 } 4589 4590 i_ddi_prop_dyn_t * 4591 i_ddi_prop_dyn_parent_get(dev_info_t *dip) 4592 { 4593 return (DEVI(dip)->devi_prop_dyn_parent); 4594 } 4595 4596 void 4597 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4598 { 4599 /* for now we invalidate the entire cached snapshot */ 4600 if (dip && dp) 4601 i_ddi_di_cache_invalidate(); 4602 } 4603 4604 /* ARGSUSED */ 4605 void 4606 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags) 4607 { 4608 /* for now we invalidate the entire cached snapshot */ 4609 i_ddi_di_cache_invalidate(); 4610 } 4611 4612 4613 /* 4614 * Code to search hardware layer (PROM), if it exists, on behalf of child. 
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */
	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */
		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			/*
			 * NOTE(review): `buffer' is only assigned in the
			 * two cases above; PROP_EXISTS and PROP_LEN have
			 * already returned, so this path is presumably
			 * unreachable — confirm no other prop_op values
			 * can arrive here.
			 */
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}

/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int error;

	/* Try the PROM (hardware) layer first, on behalf of the child. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	/*
	 * Any definitive answer (found, found-in-prom, or a buffer that
	 * is too small for a real value) terminates the search here.
	 */
	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node()) {
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}

/*
 * External property functions used by other parts of the kernel...
 */

/*
 * e_ddi_getlongprop: See comments for ddi_get_longprop.
 */

/*
 * Look up a property by dev_t, allocating the result buffer on behalf
 * of the caller (PROP_LEN_AND_VAL_ALLOC).  The devinfo node is held for
 * the duration of the cdev_prop_op() call and released before return.
 */
int
e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf.
 *
 * As above, but copies into a caller-supplied buffer
 * (PROP_LEN_AND_VAL_BUF); *lengthp is the buffer size on entry.
 */
int
e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getprop: See comments for ddi_getprop.
 *
 * Integer property lookup; returns defvalue when the device cannot be
 * held or the property is absent.
 */
int
e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int propvalue = defvalue;
	int proplength = sizeof (int);
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op,
	    flags, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* A zero-length (boolean) property reads as "true". */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getprop_int64:
 *
 * This is a typed interfaces, but predates typed properties. With the
 * introduction of typed properties the framework tries to ensure
 * consistent use of typed interfaces. This is why TYPE_INT64 is not
 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support
 * this type of lookup as a single operation we invoke the legacy
 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
 * framework ddi_prop_op(9F) implementation is expected to check for
 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
 * (currently TYPE_INT64).
 */
int64_t
e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
    int flags, int64_t defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int64_t propvalue = defvalue;
	int proplength = sizeof (propvalue);
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op, flags |
	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* A zero-length (boolean) property reads as "true". */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getproplen: See comments for ddi_getproplen.
 */
int
e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	/* length-only request: no value buffer is needed */
	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * Routines to get at elements of the dev_info structure
 */

/*
 * ddi_binding_name: Return the driver binding name of the devinfo node
 * This is the name the OS used to bind the node to a driver.
 */
char *
ddi_binding_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_driver_major: Return the major number of the driver that
 * the supplied devinfo is bound to. If not yet bound,
 * DDI_MAJOR_T_NONE.
 *
 * When used by the driver bound to 'devi', this
 * function will reliably return the driver major number.
 * Other ways of determining the driver major number, such as
 * major = ddi_name_to_major(ddi_get_name(devi));
 * major = ddi_name_to_major(ddi_binding_name(devi));
 * can return a different result as the driver/alias binding
 * can change dynamically, and thus should be avoided.
 */
major_t
ddi_driver_major(dev_info_t *devi)
{
	return (DEVI(devi)->devi_major);
}

/*
 * ddi_driver_name: Return the normalized driver name. this is the
 * actual driver name
 */
const char *
ddi_driver_name(dev_info_t *devi)
{
	major_t major;

	/* fall back to the node name when the node is not yet bound */
	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
		return (ddi_major_to_name(major));

	return (ddi_node_name(devi));
}

/*
 * i_ddi_set_binding_name: Set binding name.
 *
 * Set the binding name to the given name.
 * This routine is for use by the ddi implementation, not by drivers.
 */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	DEVI(dip)->devi_binding_name = name;

}

/*
 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
 * the implementation has used to bind the node to a driver.
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 * This may differ from ddi_binding_name if the node name
 * does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}


/*
 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node: Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}

/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

/*
 * Announce a device to the console via the REPORTDEV ctlop; if the node
 * has cb_ops (i.e. is user-accessible) also log its full /devices path.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}

/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	/* normalize any non-success ctlops status to DDI_FAILURE */
	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/*
 * ddi_dev_is_sid: "self-identifying device" query, via SIDDEV ctlop.
 */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * A node is a STREAMS driver iff it is attached and its cb_ops carries
 * a streamtab pointer.
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}

/*
 * callback free list
 */

static int ncallbacks;
static int nc_low = 170;
static int nc_med = 512;
static int nc_high = 2048;
static struct ddi_callback *callbackq;
static struct ddi_callback *callbackqfree;

/*
 * set/run callback lists
 */
struct cbstats {
	kstat_named_t cb_asked;
	kstat_named_t cb_new;
	kstat_named_t cb_run;
	kstat_named_t cb_delete;
	kstat_named_t cb_maxreq;
	kstat_named_t cb_maxlist;
	kstat_named_t cb_alloc;
	kstat_named_t cb_runouts;
	kstat_named_t cb_L2;
	kstat_named_t cb_grow;
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};

/* shorthand accessors for the ui32 counters above */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* protects the callback lists and the cbstats counters */
static kmutex_t ddi_callback_mutex;

/*
 * callbacks are handled using a L1/L2 cache. The L1 cache
 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
 * we can't get callbacks from the L1 cache [because pageout is doing
 * I/O at the time freemem is 0], we allocate callbacks out of the
 * L2 cache. The L2 cache is static and depends on the memory size.
 * [We might also count the number of devices at probe time and
 * allocate one structure per device and adjust for deferred attach]
 */
void
impl_ddi_callback_init(void)
{
	int i;
	uint_t physmegs;
	kstat_t *ksp;

	/* size the static L2 pool by physical memory (in MB) */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}

}

/*
 * Insert a (funcp, arg) callback request onto the list headed at
 * *listid, coalescing with an existing entry for the same pair by
 * bumping its count.  Falls back from kmem to the static L2 pool, and
 * finally to kmem_alloc_tryhard, when memory is tight.
 * Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			/* already queued: just accumulate the count */
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			/* last resort: KM_PANIC guarantees an allocation */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}

/*
 * Queue a callback (see ddi_set_callback(9F)); the list head *listid
 * is owned by the caller and passed later to ddi_run_callback().
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}

/*
 * Drain the callback list headed at *Queue (softcall context).  Each
 * entry is unlinked and its function invoked up to c_count times; a
 * zero return from the callback means "resources still unavailable",
 * so the remaining count is re-queued via callback_insert().
 * The mutex is dropped around each callback invocation.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* snapshot total outstanding count, first pass only */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* return L2-pool entries to the free list, free the rest */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* callback ran out of resources: re-queue */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}

/*
 * Schedule the callback list to be drained from softcall context.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}

/*
 * ddi_periodic_t
 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
 *     int level)
 *
 * INTERFACE LEVEL
 *      Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *      func: the callback function
 *
 *            The callback function will be invoked. The function is invoked
 *            in kernel context if the argument level passed is the zero.
 *            Otherwise it's invoked in interrupt context at the specified
 *            level.
 *
 *       arg: the argument passed to the callback function
 *
 *  interval: interval time
 *
 *    level : callback interrupt level
 *
 *            If the value is the zero, the callback function is invoked
 *            in kernel context. If the value is more than the zero, but
 *            less than or equal to ten, the callback function is invoked in
 *            interrupt context at the specified interrupt level, which may
 *            be used for real time applications.
 *
 *            This value must be in range of 0-10, which can be a numeric
 *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
 *
 * DESCRIPTION
 *      ddi_periodic_add(9F) schedules the specified function to be
 *      periodically invoked in the interval time.
 *
 *      As well as timeout(9F), the exact time interval over which the function
 *      takes effect cannot be guaranteed, but the value given is a close
 *      approximation.
 *
 *      Drivers waiting on behalf of processes with real-time constraints must
 *      pass non-zero value with the level argument to ddi_periodic_add(9F).
 *
 * RETURN VALUES
 *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
 *      which must be used for ddi_periodic_delete(9F) to specify the request.
 *
 * CONTEXT
 *      ddi_periodic_add(9F) can be called in user or kernel context, but
 *      it cannot be called in interrupt context, which is different from
 *      timeout(9F).
 */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* delegate to the cyclic-backed timer implementation */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}

/*
 * void
 * ddi_periodic_delete(ddi_periodic_t req)
 *
 * INTERFACE LEVEL
 *     Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
 *     previously.
 *
 * DESCRIPTION
 *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
 *     previously requested.
 *
 *     ddi_periodic_delete(9F) will not return until the pending request
 *     is canceled or executed.
 *
 *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
 *     timeout which is either running on another CPU, or has already
 *     completed causes no problems. However, unlike untimeout(9F), there is
 *     no restrictions on the lock which might be held across the call to
 *     ddi_periodic_delete(9F).
 *
 *     Drivers should be structured with the understanding that the arrival of
 *     both an interrupt and a timeout for that interrupt can occasionally
 *     occur, in either order.
 *
 * CONTEXT
 *     ddi_periodic_delete(9F) can be called in user or kernel context, but
 *     it cannot be called in interrupt context, which is different from
 *     untimeout(9F).
 */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	i_untimeout((timeout_t)req);
}

/*
 * Stub getinfo(9E)-style helper: always reports "no devinfo node".
 */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers where the minor number
 * is the instance. Drivers that do not have 1:1 mapping must implement
 * their own getinfo(9E) function.
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int instance;

	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* minor number == instance by this function's contract */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}

/*
 * Stub attach/detach entry point that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/*
 * The ddi_no_dma_* functions below are stub DMA bus-ops for nexus
 * drivers that do not support DMA; each simply fails the request.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

void
ddivoid(void)
{}

/*
 * Stub chpoll(9E) entry point for drivers that do not support polling.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}

int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}

/*
 * Current time in seconds; falls back to reading the TOD clock
 * directly when gethrestime_sec() has not been initialized yet.
 */
time_t
ddi_get_time(void)
{
	time_t	now;

	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}

pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}

/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal.  If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
5679 * 5680 * Currently, a thread can receive a signal if it's not a kernel thread and it 5681 * is not in the middle of exit(2) tear-down. Threads that are in that 5682 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to 5683 * cv_timedwait, and qwait_sig to qwait. 5684 */ 5685 boolean_t 5686 ddi_can_receive_sig(void) 5687 { 5688 proc_t *pp; 5689 5690 if (curthread->t_proc_flag & TP_LWPEXIT) 5691 return (B_FALSE); 5692 if ((pp = ttoproc(curthread)) == NULL) 5693 return (B_FALSE); 5694 return (pp->p_as != &kas); 5695 } 5696 5697 /* 5698 * Swap bytes in 16-bit [half-]words 5699 */ 5700 void 5701 swab(void *src, void *dst, size_t nbytes) 5702 { 5703 uchar_t *pf = (uchar_t *)src; 5704 uchar_t *pt = (uchar_t *)dst; 5705 uchar_t tmp; 5706 int nshorts; 5707 5708 nshorts = nbytes >> 1; 5709 5710 while (--nshorts >= 0) { 5711 tmp = *pf++; 5712 *pt++ = *pf++; 5713 *pt++ = tmp; 5714 } 5715 } 5716 5717 static void 5718 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp) 5719 { 5720 int circ; 5721 struct ddi_minor_data *dp; 5722 5723 ndi_devi_enter(ddip, &circ); 5724 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) { 5725 DEVI(ddip)->devi_minor = dmdp; 5726 } else { 5727 while (dp->next != (struct ddi_minor_data *)NULL) 5728 dp = dp->next; 5729 dp->next = dmdp; 5730 } 5731 ndi_devi_exit(ddip, circ); 5732 } 5733 5734 /* 5735 * Part of the obsolete SunCluster DDI Hooks. 5736 * Keep for binary compatibility 5737 */ 5738 minor_t 5739 ddi_getiminor(dev_t dev) 5740 { 5741 return (getminor(dev)); 5742 } 5743 5744 static int 5745 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name) 5746 { 5747 int se_flag; 5748 int kmem_flag; 5749 int se_err; 5750 char *pathname, *class_name; 5751 sysevent_t *ev = NULL; 5752 sysevent_id_t eid; 5753 sysevent_value_t se_val; 5754 sysevent_attr_list_t *ev_attr_list = NULL; 5755 5756 /* determine interrupt context */ 5757 se_flag = (servicing_interrupt()) ? 
SE_NOSLEEP : SE_SLEEP; 5758 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 5759 5760 i_ddi_di_cache_invalidate(); 5761 5762 #ifdef DEBUG 5763 if ((se_flag == SE_NOSLEEP) && sunddi_debug) { 5764 cmn_err(CE_CONT, "ddi_create_minor_node: called from " 5765 "interrupt level by driver %s", 5766 ddi_driver_name(dip)); 5767 } 5768 #endif /* DEBUG */ 5769 5770 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag); 5771 if (ev == NULL) { 5772 goto fail; 5773 } 5774 5775 pathname = kmem_alloc(MAXPATHLEN, kmem_flag); 5776 if (pathname == NULL) { 5777 sysevent_free(ev); 5778 goto fail; 5779 } 5780 5781 (void) ddi_pathname(dip, pathname); 5782 ASSERT(strlen(pathname)); 5783 se_val.value_type = SE_DATA_TYPE_STRING; 5784 se_val.value.sv_string = pathname; 5785 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5786 &se_val, se_flag) != 0) { 5787 kmem_free(pathname, MAXPATHLEN); 5788 sysevent_free(ev); 5789 goto fail; 5790 } 5791 kmem_free(pathname, MAXPATHLEN); 5792 5793 /* add the device class attribute */ 5794 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5795 se_val.value_type = SE_DATA_TYPE_STRING; 5796 se_val.value.sv_string = class_name; 5797 if (sysevent_add_attr(&ev_attr_list, 5798 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5799 sysevent_free_attr(ev_attr_list); 5800 goto fail; 5801 } 5802 } 5803 5804 /* 5805 * allow for NULL minor names 5806 */ 5807 if (minor_name != NULL) { 5808 se_val.value.sv_string = minor_name; 5809 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5810 &se_val, se_flag) != 0) { 5811 sysevent_free_attr(ev_attr_list); 5812 sysevent_free(ev); 5813 goto fail; 5814 } 5815 } 5816 5817 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5818 sysevent_free_attr(ev_attr_list); 5819 sysevent_free(ev); 5820 goto fail; 5821 } 5822 5823 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) { 5824 if (se_err == SE_NO_TRANSPORT) { 5825 cmn_err(CE_WARN, "/devices or /dev may not be current " 5826 "for driver %s (%s). 
Run devfsadm -i %s", 5827 ddi_driver_name(dip), "syseventd not responding", 5828 ddi_driver_name(dip)); 5829 } else { 5830 sysevent_free(ev); 5831 goto fail; 5832 } 5833 } 5834 5835 sysevent_free(ev); 5836 return (DDI_SUCCESS); 5837 fail: 5838 cmn_err(CE_WARN, "/devices or /dev may not be current " 5839 "for driver %s. Run devfsadm -i %s", 5840 ddi_driver_name(dip), ddi_driver_name(dip)); 5841 return (DDI_SUCCESS); 5842 } 5843 5844 /* 5845 * failing to remove a minor node is not of interest 5846 * therefore we do not generate an error message 5847 */ 5848 static int 5849 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name) 5850 { 5851 char *pathname, *class_name; 5852 sysevent_t *ev; 5853 sysevent_id_t eid; 5854 sysevent_value_t se_val; 5855 sysevent_attr_list_t *ev_attr_list = NULL; 5856 5857 /* 5858 * only log ddi_remove_minor_node() calls outside the scope 5859 * of attach/detach reconfigurations and when the dip is 5860 * still initialized. 5861 */ 5862 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) || 5863 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 5864 return (DDI_SUCCESS); 5865 } 5866 5867 i_ddi_di_cache_invalidate(); 5868 5869 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP); 5870 if (ev == NULL) { 5871 return (DDI_SUCCESS); 5872 } 5873 5874 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5875 if (pathname == NULL) { 5876 sysevent_free(ev); 5877 return (DDI_SUCCESS); 5878 } 5879 5880 (void) ddi_pathname(dip, pathname); 5881 ASSERT(strlen(pathname)); 5882 se_val.value_type = SE_DATA_TYPE_STRING; 5883 se_val.value.sv_string = pathname; 5884 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5885 &se_val, SE_SLEEP) != 0) { 5886 kmem_free(pathname, MAXPATHLEN); 5887 sysevent_free(ev); 5888 return (DDI_SUCCESS); 5889 } 5890 5891 kmem_free(pathname, MAXPATHLEN); 5892 5893 /* 5894 * allow for NULL minor names 5895 */ 5896 if (minor_name != NULL) { 5897 se_val.value.sv_string = minor_name; 5898 if 
(sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5899 &se_val, SE_SLEEP) != 0) { 5900 sysevent_free_attr(ev_attr_list); 5901 goto fail; 5902 } 5903 } 5904 5905 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5906 /* add the device class, driver name and instance attributes */ 5907 5908 se_val.value_type = SE_DATA_TYPE_STRING; 5909 se_val.value.sv_string = class_name; 5910 if (sysevent_add_attr(&ev_attr_list, 5911 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5912 sysevent_free_attr(ev_attr_list); 5913 goto fail; 5914 } 5915 5916 se_val.value_type = SE_DATA_TYPE_STRING; 5917 se_val.value.sv_string = (char *)ddi_driver_name(dip); 5918 if (sysevent_add_attr(&ev_attr_list, 5919 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 5920 sysevent_free_attr(ev_attr_list); 5921 goto fail; 5922 } 5923 5924 se_val.value_type = SE_DATA_TYPE_INT32; 5925 se_val.value.sv_int32 = ddi_get_instance(dip); 5926 if (sysevent_add_attr(&ev_attr_list, 5927 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 5928 sysevent_free_attr(ev_attr_list); 5929 goto fail; 5930 } 5931 5932 } 5933 5934 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5935 sysevent_free_attr(ev_attr_list); 5936 } else { 5937 (void) log_sysevent(ev, SE_SLEEP, &eid); 5938 } 5939 fail: 5940 sysevent_free(ev); 5941 return (DDI_SUCCESS); 5942 } 5943 5944 /* 5945 * Derive the device class of the node. 5946 * Device class names aren't defined yet. Until this is done we use 5947 * devfs event subclass names as device class names. 
5948 */ 5949 static int 5950 derive_devi_class(dev_info_t *dip, char *node_type, int flag) 5951 { 5952 int rv = DDI_SUCCESS; 5953 5954 if (i_ddi_devi_class(dip) == NULL) { 5955 if (strncmp(node_type, DDI_NT_BLOCK, 5956 sizeof (DDI_NT_BLOCK) - 1) == 0 && 5957 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' || 5958 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') && 5959 strcmp(node_type, DDI_NT_FD) != 0) { 5960 5961 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag); 5962 5963 } else if (strncmp(node_type, DDI_NT_NET, 5964 sizeof (DDI_NT_NET) - 1) == 0 && 5965 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' || 5966 node_type[sizeof (DDI_NT_NET) - 1] == ':')) { 5967 5968 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag); 5969 5970 } else if (strncmp(node_type, DDI_NT_PRINTER, 5971 sizeof (DDI_NT_PRINTER) - 1) == 0 && 5972 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' || 5973 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) { 5974 5975 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag); 5976 5977 } else if (strncmp(node_type, DDI_PSEUDO, 5978 sizeof (DDI_PSEUDO) -1) == 0 && 5979 (strncmp(ESC_LOFI, ddi_node_name(dip), 5980 sizeof (ESC_LOFI) -1) == 0)) { 5981 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag); 5982 } 5983 } 5984 5985 return (rv); 5986 } 5987 5988 /* 5989 * Check compliance with PSARC 2003/375: 5990 * 5991 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not 5992 * exceed IFNAMSIZ (16) characters in length. 5993 */ 5994 static boolean_t 5995 verify_name(char *name) 5996 { 5997 size_t len = strlen(name); 5998 char *cp; 5999 6000 if (len == 0 || len > IFNAMSIZ) 6001 return (B_FALSE); 6002 6003 for (cp = name; *cp != '\0'; cp++) { 6004 if (!isalnum(*cp) && *cp != '_') 6005 return (B_FALSE); 6006 } 6007 6008 return (B_TRUE); 6009 } 6010 6011 /* 6012 * ddi_create_minor_common: Create a ddi_minor_data structure and 6013 * attach it to the given devinfo node. 
6014 */ 6015 6016 int 6017 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type, 6018 minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype, 6019 const char *read_priv, const char *write_priv, mode_t priv_mode) 6020 { 6021 struct ddi_minor_data *dmdp; 6022 major_t major; 6023 6024 if (spec_type != S_IFCHR && spec_type != S_IFBLK) 6025 return (DDI_FAILURE); 6026 6027 if (name == NULL) 6028 return (DDI_FAILURE); 6029 6030 /* 6031 * Log a message if the minor number the driver is creating 6032 * is not expressible on the on-disk filesystem (currently 6033 * this is limited to 18 bits both by UFS). The device can 6034 * be opened via devfs, but not by device special files created 6035 * via mknod(). 6036 */ 6037 if (minor_num > L_MAXMIN32) { 6038 cmn_err(CE_WARN, 6039 "%s%d:%s minor 0x%x too big for 32-bit applications", 6040 ddi_driver_name(dip), ddi_get_instance(dip), 6041 name, minor_num); 6042 return (DDI_FAILURE); 6043 } 6044 6045 /* dip must be bound and attached */ 6046 major = ddi_driver_major(dip); 6047 ASSERT(major != DDI_MAJOR_T_NONE); 6048 6049 /* 6050 * Default node_type to DDI_PSEUDO and issue notice in debug mode 6051 */ 6052 if (node_type == NULL) { 6053 node_type = DDI_PSEUDO; 6054 NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d " 6055 " minor node %s; default to DDI_PSEUDO", 6056 ddi_driver_name(dip), ddi_get_instance(dip), name)); 6057 } 6058 6059 /* 6060 * If the driver is a network driver, ensure that the name falls within 6061 * the interface naming constraints specified by PSARC/2003/375. 
6062 */ 6063 if (strcmp(node_type, DDI_NT_NET) == 0) { 6064 if (!verify_name(name)) 6065 return (DDI_FAILURE); 6066 6067 if (mtype == DDM_MINOR) { 6068 struct devnames *dnp = &devnamesp[major]; 6069 6070 /* Mark driver as a network driver */ 6071 LOCK_DEV_OPS(&dnp->dn_lock); 6072 dnp->dn_flags |= DN_NETWORK_DRIVER; 6073 6074 /* 6075 * If this minor node is created during the device 6076 * attachment, this is a physical network device. 6077 * Mark the driver as a physical network driver. 6078 */ 6079 if (DEVI_IS_ATTACHING(dip)) 6080 dnp->dn_flags |= DN_NETWORK_PHYSDRIVER; 6081 UNLOCK_DEV_OPS(&dnp->dn_lock); 6082 } 6083 } 6084 6085 if (mtype == DDM_MINOR) { 6086 if (derive_devi_class(dip, node_type, KM_NOSLEEP) != 6087 DDI_SUCCESS) 6088 return (DDI_FAILURE); 6089 } 6090 6091 /* 6092 * Take care of minor number information for the node. 6093 */ 6094 6095 if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data), 6096 KM_NOSLEEP)) == NULL) { 6097 return (DDI_FAILURE); 6098 } 6099 if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) { 6100 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 6101 return (DDI_FAILURE); 6102 } 6103 dmdp->dip = dip; 6104 dmdp->ddm_dev = makedevice(major, minor_num); 6105 dmdp->ddm_spec_type = spec_type; 6106 dmdp->ddm_node_type = node_type; 6107 dmdp->type = mtype; 6108 if (flag & CLONE_DEV) { 6109 dmdp->type = DDM_ALIAS; 6110 dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major); 6111 } 6112 if (flag & PRIVONLY_DEV) { 6113 dmdp->ddm_flags |= DM_NO_FSPERM; 6114 } 6115 if (read_priv || write_priv) { 6116 dmdp->ddm_node_priv = 6117 devpolicy_priv_by_name(read_priv, write_priv); 6118 } 6119 dmdp->ddm_priv_mode = priv_mode; 6120 6121 ddi_append_minor_node(dip, dmdp); 6122 6123 /* 6124 * only log ddi_create_minor_node() calls which occur 6125 * outside the scope of attach(9e)/detach(9e) reconfigurations 6126 */ 6127 if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) && 6128 mtype != DDM_INTERNAL_PATH) { 6129 (void) 
i_log_devfs_minor_create(dip, name); 6130 } 6131 6132 /* 6133 * Check if any dacf rules match the creation of this minor node 6134 */ 6135 dacfc_match_create_minor(name, node_type, dip, dmdp, flag); 6136 return (DDI_SUCCESS); 6137 } 6138 6139 int 6140 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type, 6141 minor_t minor_num, char *node_type, int flag) 6142 { 6143 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6144 node_type, flag, DDM_MINOR, NULL, NULL, 0)); 6145 } 6146 6147 int 6148 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type, 6149 minor_t minor_num, char *node_type, int flag, 6150 const char *rdpriv, const char *wrpriv, mode_t priv_mode) 6151 { 6152 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6153 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode)); 6154 } 6155 6156 int 6157 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type, 6158 minor_t minor_num, char *node_type, int flag) 6159 { 6160 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6161 node_type, flag, DDM_DEFAULT, NULL, NULL, 0)); 6162 } 6163 6164 /* 6165 * Internal (non-ddi) routine for drivers to export names known 6166 * to the kernel (especially ddi_pathname_to_dev_t and friends) 6167 * but not exported externally to /dev 6168 */ 6169 int 6170 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type, 6171 minor_t minor_num) 6172 { 6173 return (ddi_create_minor_common(dip, name, spec_type, minor_num, 6174 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0)); 6175 } 6176 6177 void 6178 ddi_remove_minor_node(dev_info_t *dip, char *name) 6179 { 6180 int circ; 6181 struct ddi_minor_data *dmdp, *dmdp1; 6182 struct ddi_minor_data **dmdp_prev; 6183 6184 ndi_devi_enter(dip, &circ); 6185 dmdp_prev = &DEVI(dip)->devi_minor; 6186 dmdp = DEVI(dip)->devi_minor; 6187 while (dmdp != NULL) { 6188 dmdp1 = dmdp->next; 6189 if ((name == NULL || (dmdp->ddm_name != NULL && 6190 strcmp(name, 
dmdp->ddm_name) == 0))) { 6191 if (dmdp->ddm_name != NULL) { 6192 if (dmdp->type != DDM_INTERNAL_PATH) 6193 (void) i_log_devfs_minor_remove(dip, 6194 dmdp->ddm_name); 6195 kmem_free(dmdp->ddm_name, 6196 strlen(dmdp->ddm_name) + 1); 6197 } 6198 /* 6199 * Release device privilege, if any. 6200 * Release dacf client data associated with this minor 6201 * node by storing NULL. 6202 */ 6203 if (dmdp->ddm_node_priv) 6204 dpfree(dmdp->ddm_node_priv); 6205 dacf_store_info((dacf_infohdl_t)dmdp, NULL); 6206 kmem_free(dmdp, sizeof (struct ddi_minor_data)); 6207 *dmdp_prev = dmdp1; 6208 /* 6209 * OK, we found it, so get out now -- if we drive on, 6210 * we will strcmp against garbage. See 1139209. 6211 */ 6212 if (name != NULL) 6213 break; 6214 } else { 6215 dmdp_prev = &dmdp->next; 6216 } 6217 dmdp = dmdp1; 6218 } 6219 ndi_devi_exit(dip, circ); 6220 } 6221 6222 6223 int 6224 ddi_in_panic() 6225 { 6226 return (panicstr != NULL); 6227 } 6228 6229 6230 /* 6231 * Find first bit set in a mask (returned counting from 1 up) 6232 */ 6233 6234 int 6235 ddi_ffs(long mask) 6236 { 6237 return (ffs(mask)); 6238 } 6239 6240 /* 6241 * Find last bit set. Take mask and clear 6242 * all but the most significant bit, and 6243 * then let ffs do the rest of the work. 6244 * 6245 * Algorithm courtesy of Steve Chessin. 6246 */ 6247 6248 int 6249 ddi_fls(long mask) 6250 { 6251 while (mask) { 6252 long nx; 6253 6254 if ((nx = (mask & (mask - 1))) == 0) 6255 break; 6256 mask = nx; 6257 } 6258 return (ffs(mask)); 6259 } 6260 6261 /* 6262 * The ddi_soft_state_* routines comprise generic storage management utilities 6263 * for driver soft state structures (in "the old days," this was done with 6264 * statically sized array - big systems and dynamic loading and unloading 6265 * make heap allocation more attractive). 6266 */ 6267 6268 /* 6269 * Allocate a set of pointers to 'n_items' objects of size 'size' 6270 * bytes. Each pointer is initialized to nil. 
6271 * 6272 * The 'size' and 'n_items' values are stashed in the opaque 6273 * handle returned to the caller. 6274 * 6275 * This implementation interprets 'set of pointers' to mean 'array 6276 * of pointers' but note that nothing in the interface definition 6277 * precludes an implementation that uses, for example, a linked list. 6278 * However there should be a small efficiency gain from using an array 6279 * at lookup time. 6280 * 6281 * NOTE As an optimization, we make our growable array allocations in 6282 * powers of two (bytes), since that's how much kmem_alloc (currently) 6283 * gives us anyway. It should save us some free/realloc's .. 6284 * 6285 * As a further optimization, we make the growable array start out 6286 * with MIN_N_ITEMS in it. 6287 */ 6288 6289 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */ 6290 6291 int 6292 ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 6293 { 6294 i_ddi_soft_state *ss; 6295 6296 if (state_p == NULL || size == 0) 6297 return (EINVAL); 6298 6299 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP); 6300 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL); 6301 ss->size = size; 6302 6303 if (n_items < MIN_N_ITEMS) 6304 ss->n_items = MIN_N_ITEMS; 6305 else { 6306 int bitlog; 6307 6308 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items)) 6309 bitlog--; 6310 ss->n_items = 1 << bitlog; 6311 } 6312 6313 ASSERT(ss->n_items >= n_items); 6314 6315 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP); 6316 6317 *state_p = ss; 6318 return (0); 6319 } 6320 6321 /* 6322 * Allocate a state structure of size 'size' to be associated 6323 * with item 'item'. 6324 * 6325 * In this implementation, the array is extended to 6326 * allow the requested offset, if needed. 
6327 */ 6328 int 6329 ddi_soft_state_zalloc(void *state, int item) 6330 { 6331 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 6332 void **array; 6333 void *new_element; 6334 6335 if ((state == NULL) || (item < 0)) 6336 return (DDI_FAILURE); 6337 6338 mutex_enter(&ss->lock); 6339 if (ss->size == 0) { 6340 mutex_exit(&ss->lock); 6341 cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s", 6342 mod_containing_pc(caller())); 6343 return (DDI_FAILURE); 6344 } 6345 6346 array = ss->array; /* NULL if ss->n_items == 0 */ 6347 ASSERT(ss->n_items != 0 && array != NULL); 6348 6349 /* 6350 * refuse to tread on an existing element 6351 */ 6352 if (item < ss->n_items && array[item] != NULL) { 6353 mutex_exit(&ss->lock); 6354 return (DDI_FAILURE); 6355 } 6356 6357 /* 6358 * Allocate a new element to plug in 6359 */ 6360 new_element = kmem_zalloc(ss->size, KM_SLEEP); 6361 6362 /* 6363 * Check if the array is big enough, if not, grow it. 6364 */ 6365 if (item >= ss->n_items) { 6366 void **new_array; 6367 size_t new_n_items; 6368 struct i_ddi_soft_state *dirty; 6369 6370 /* 6371 * Allocate a new array of the right length, copy 6372 * all the old pointers to the new array, then 6373 * if it exists at all, put the old array on the 6374 * dirty list. 6375 * 6376 * Note that we can't kmem_free() the old array. 6377 * 6378 * Why -- well the 'get' operation is 'mutex-free', so we 6379 * can't easily catch a suspended thread that is just about 6380 * to dereference the array we just grew out of. So we 6381 * cons up a header and put it on a list of 'dirty' 6382 * pointer arrays. (Dirty in the sense that there may 6383 * be suspended threads somewhere that are in the middle 6384 * of referencing them). Fortunately, we -can- garbage 6385 * collect it all at ddi_soft_state_fini time. 6386 */ 6387 new_n_items = ss->n_items; 6388 while (new_n_items < (1 + item)) 6389 new_n_items <<= 1; /* double array size .. */ 6390 6391 ASSERT(new_n_items >= (1 + item)); /* sanity check! 
*/ 6392 6393 new_array = kmem_zalloc(new_n_items * sizeof (void *), 6394 KM_SLEEP); 6395 /* 6396 * Copy the pointers into the new array 6397 */ 6398 bcopy(array, new_array, ss->n_items * sizeof (void *)); 6399 6400 /* 6401 * Save the old array on the dirty list 6402 */ 6403 dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP); 6404 dirty->array = ss->array; 6405 dirty->n_items = ss->n_items; 6406 dirty->next = ss->next; 6407 ss->next = dirty; 6408 6409 ss->array = (array = new_array); 6410 ss->n_items = new_n_items; 6411 } 6412 6413 ASSERT(array != NULL && item < ss->n_items && array[item] == NULL); 6414 6415 array[item] = new_element; 6416 6417 mutex_exit(&ss->lock); 6418 return (DDI_SUCCESS); 6419 } 6420 6421 /* 6422 * Fetch a pointer to the allocated soft state structure. 6423 * 6424 * This is designed to be cheap. 6425 * 6426 * There's an argument that there should be more checking for 6427 * nil pointers and out of bounds on the array.. but we do a lot 6428 * of that in the alloc/free routines. 6429 * 6430 * An array has the convenience that we don't need to lock read-access 6431 * to it c.f. a linked list. However our "expanding array" strategy 6432 * means that we should hold a readers lock on the i_ddi_soft_state 6433 * structure. 6434 * 6435 * However, from a performance viewpoint, we need to do it without 6436 * any locks at all -- this also makes it a leaf routine. The algorithm 6437 * is 'lock-free' because we only discard the pointer arrays at 6438 * ddi_soft_state_fini() time. 6439 */ 6440 void * 6441 ddi_get_soft_state(void *state, int item) 6442 { 6443 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 6444 6445 ASSERT((ss != NULL) && (item >= 0)); 6446 6447 if (item < ss->n_items && ss->array != NULL) 6448 return (ss->array[item]); 6449 return (NULL); 6450 } 6451 6452 /* 6453 * Free the state structure corresponding to 'item.' Freeing an 6454 * element that has either gone or was never allocated is not 6455 * considered an error. 
Note that we free the state structure, but 6456 * we don't shrink our pointer array, or discard 'dirty' arrays, 6457 * since even a few pointers don't really waste too much memory. 6458 * 6459 * Passing an item number that is out of bounds, or a null pointer will 6460 * provoke an error message. 6461 */ 6462 void 6463 ddi_soft_state_free(void *state, int item) 6464 { 6465 i_ddi_soft_state *ss = (i_ddi_soft_state *)state; 6466 void **array; 6467 void *element; 6468 static char msg[] = "ddi_soft_state_free:"; 6469 6470 if (ss == NULL) { 6471 cmn_err(CE_WARN, "%s null handle: %s", 6472 msg, mod_containing_pc(caller())); 6473 return; 6474 } 6475 6476 element = NULL; 6477 6478 mutex_enter(&ss->lock); 6479 6480 if ((array = ss->array) == NULL || ss->size == 0) { 6481 cmn_err(CE_WARN, "%s bad handle: %s", 6482 msg, mod_containing_pc(caller())); 6483 } else if (item < 0 || item >= ss->n_items) { 6484 cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s", 6485 msg, item, ss->n_items - 1, mod_containing_pc(caller())); 6486 } else if (array[item] != NULL) { 6487 element = array[item]; 6488 array[item] = NULL; 6489 } 6490 6491 mutex_exit(&ss->lock); 6492 6493 if (element) 6494 kmem_free(element, ss->size); 6495 } 6496 6497 /* 6498 * Free the entire set of pointers, and any 6499 * soft state structures contained therein. 6500 * 6501 * Note that we don't grab the ss->lock mutex, even though 6502 * we're inspecting the various fields of the data structure. 6503 * 6504 * There is an implicit assumption that this routine will 6505 * never run concurrently with any of the above on this 6506 * particular state structure i.e. by the time the driver 6507 * calls this routine, there should be no other threads 6508 * running in the driver. 
6509 */ 6510 void 6511 ddi_soft_state_fini(void **state_p) 6512 { 6513 i_ddi_soft_state *ss, *dirty; 6514 int item; 6515 static char msg[] = "ddi_soft_state_fini:"; 6516 6517 if (state_p == NULL || 6518 (ss = (i_ddi_soft_state *)(*state_p)) == NULL) { 6519 cmn_err(CE_WARN, "%s null handle: %s", 6520 msg, mod_containing_pc(caller())); 6521 return; 6522 } 6523 6524 if (ss->size == 0) { 6525 cmn_err(CE_WARN, "%s bad handle: %s", 6526 msg, mod_containing_pc(caller())); 6527 return; 6528 } 6529 6530 if (ss->n_items > 0) { 6531 for (item = 0; item < ss->n_items; item++) 6532 ddi_soft_state_free(ss, item); 6533 kmem_free(ss->array, ss->n_items * sizeof (void *)); 6534 } 6535 6536 /* 6537 * Now delete any dirty arrays from previous 'grow' operations 6538 */ 6539 for (dirty = ss->next; dirty; dirty = ss->next) { 6540 ss->next = dirty->next; 6541 kmem_free(dirty->array, dirty->n_items * sizeof (void *)); 6542 kmem_free(dirty, sizeof (*dirty)); 6543 } 6544 6545 mutex_destroy(&ss->lock); 6546 kmem_free(ss, sizeof (*ss)); 6547 6548 *state_p = NULL; 6549 } 6550 6551 #define SS_N_ITEMS_PER_HASH 16 6552 #define SS_MIN_HASH_SZ 16 6553 #define SS_MAX_HASH_SZ 4096 6554 6555 int 6556 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size, 6557 int n_items) 6558 { 6559 i_ddi_soft_state_bystr *sss; 6560 int hash_sz; 6561 6562 ASSERT(state_p && size && n_items); 6563 if ((state_p == NULL) || (size == 0) || (n_items == 0)) 6564 return (EINVAL); 6565 6566 /* current implementation is based on hash, convert n_items to hash */ 6567 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6568 if (hash_sz < SS_MIN_HASH_SZ) 6569 hash_sz = SS_MIN_HASH_SZ; 6570 else if (hash_sz > SS_MAX_HASH_SZ) 6571 hash_sz = SS_MAX_HASH_SZ; 6572 6573 /* allocate soft_state pool */ 6574 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP); 6575 sss->ss_size = size; 6576 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr", 6577 hash_sz, mod_hash_null_valdtor); 6578 *state_p = (ddi_soft_state_bystr *)sss; 6579 
return (0); 6580 } 6581 6582 int 6583 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str) 6584 { 6585 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6586 void *sso; 6587 char *dup_str; 6588 6589 ASSERT(sss && str && sss->ss_mod_hash); 6590 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6591 return (DDI_FAILURE); 6592 sso = kmem_zalloc(sss->ss_size, KM_SLEEP); 6593 dup_str = i_ddi_strdup((char *)str, KM_SLEEP); 6594 if (mod_hash_insert(sss->ss_mod_hash, 6595 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0) 6596 return (DDI_SUCCESS); 6597 6598 /* 6599 * The only error from an strhash insert is caused by a duplicate key. 6600 * We refuse to tread on an existing elements, so free and fail. 6601 */ 6602 kmem_free(dup_str, strlen(dup_str) + 1); 6603 kmem_free(sso, sss->ss_size); 6604 return (DDI_FAILURE); 6605 } 6606 6607 void * 6608 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str) 6609 { 6610 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6611 void *sso; 6612 6613 ASSERT(sss && str && sss->ss_mod_hash); 6614 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6615 return (NULL); 6616 6617 if (mod_hash_find(sss->ss_mod_hash, 6618 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0) 6619 return (sso); 6620 return (NULL); 6621 } 6622 6623 void 6624 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str) 6625 { 6626 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6627 void *sso; 6628 6629 ASSERT(sss && str && sss->ss_mod_hash); 6630 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6631 return; 6632 6633 (void) mod_hash_remove(sss->ss_mod_hash, 6634 (mod_hash_key_t)str, (mod_hash_val_t *)&sso); 6635 kmem_free(sso, sss->ss_size); 6636 } 6637 6638 void 6639 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p) 6640 { 6641 i_ddi_soft_state_bystr *sss; 6642 6643 ASSERT(state_p); 6644 if (state_p == NULL) 6645 return; 
6646 6647 sss = (i_ddi_soft_state_bystr *)(*state_p); 6648 if (sss == NULL) 6649 return; 6650 6651 ASSERT(sss->ss_mod_hash); 6652 if (sss->ss_mod_hash) { 6653 mod_hash_destroy_strhash(sss->ss_mod_hash); 6654 sss->ss_mod_hash = NULL; 6655 } 6656 6657 kmem_free(sss, sizeof (*sss)); 6658 *state_p = NULL; 6659 } 6660 6661 /* 6662 * The ddi_strid_* routines provide string-to-index management utilities. 6663 */ 6664 /* allocate and initialize an strid set */ 6665 int 6666 ddi_strid_init(ddi_strid **strid_p, int n_items) 6667 { 6668 i_ddi_strid *ss; 6669 int hash_sz; 6670 6671 if (strid_p == NULL) 6672 return (DDI_FAILURE); 6673 6674 /* current implementation is based on hash, convert n_items to hash */ 6675 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6676 if (hash_sz < SS_MIN_HASH_SZ) 6677 hash_sz = SS_MIN_HASH_SZ; 6678 else if (hash_sz > SS_MAX_HASH_SZ) 6679 hash_sz = SS_MAX_HASH_SZ; 6680 6681 ss = kmem_alloc(sizeof (*ss), KM_SLEEP); 6682 ss->strid_space = id_space_create("strid", 1, n_items); 6683 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz, 6684 mod_hash_null_valdtor); 6685 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz, 6686 mod_hash_null_valdtor); 6687 *strid_p = (ddi_strid *)ss; 6688 return (DDI_SUCCESS); 6689 } 6690 6691 #define ID_FIXED_SIZE 0x1 6692 6693 /* allocate an id mapping within the specified set for str, return id */ 6694 static id_t 6695 i_ddi_strid_alloc(ddi_strid *strid, char *str, int flags) 6696 { 6697 i_ddi_strid *ss = (i_ddi_strid *)strid; 6698 id_t id; 6699 char *s; 6700 6701 ASSERT(ss && str); 6702 if ((ss == NULL) || (str == NULL)) 6703 return (0); 6704 6705 /* 6706 * Allocate an id using VM_FIRSTFIT in order to keep allocated id 6707 * range as compressed as possible. This is important to minimize 6708 * the amount of space used when the id is used as a ddi_soft_state 6709 * index by the caller. 
6710 * 6711 * If ID_FIXED_SIZE, use the _nosleep variant to fail rather 6712 * than sleep in id_allocff() 6713 */ 6714 if (flags & ID_FIXED_SIZE) { 6715 id = id_allocff_nosleep(ss->strid_space); 6716 if (id == (id_t)-1) 6717 return (0); 6718 } else { 6719 id = id_allocff(ss->strid_space); 6720 } 6721 6722 /* 6723 * NOTE: since we create and destroy in unison we can save space by 6724 * using bystr key as the byid value. This means destroy must occur 6725 * in (byid, bystr) order. 6726 */ 6727 s = i_ddi_strdup(str, KM_SLEEP); 6728 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s, 6729 (mod_hash_val_t)(intptr_t)id) != 0) { 6730 ddi_strid_free(strid, id); 6731 return (0); 6732 } 6733 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id, 6734 (mod_hash_val_t)s) != 0) { 6735 ddi_strid_free(strid, id); 6736 return (0); 6737 } 6738 6739 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */ 6740 return (id); 6741 } 6742 6743 /* allocate an id mapping within the specified set for str, return id */ 6744 id_t 6745 ddi_strid_alloc(ddi_strid *strid, char *str) 6746 { 6747 return (i_ddi_strid_alloc(strid, str, 0)); 6748 } 6749 6750 /* allocate an id mapping within the specified set for str, return id */ 6751 id_t 6752 ddi_strid_fixed_alloc(ddi_strid *strid, char *str) 6753 { 6754 return (i_ddi_strid_alloc(strid, str, ID_FIXED_SIZE)); 6755 } 6756 6757 /* return the id within the specified strid given the str */ 6758 id_t 6759 ddi_strid_str2id(ddi_strid *strid, char *str) 6760 { 6761 i_ddi_strid *ss = (i_ddi_strid *)strid; 6762 id_t id = 0; 6763 mod_hash_val_t hv; 6764 6765 ASSERT(ss && str); 6766 if (ss && str && (mod_hash_find(ss->strid_bystr, 6767 (mod_hash_key_t)str, &hv) == 0)) 6768 id = (int)(intptr_t)hv; 6769 return (id); 6770 } 6771 6772 /* return str within the specified strid given the id */ 6773 char * 6774 ddi_strid_id2str(ddi_strid *strid, id_t id) 6775 { 6776 i_ddi_strid *ss = (i_ddi_strid *)strid; 6777 char *str = NULL; 6778 
mod_hash_val_t hv; 6779 6780 ASSERT(ss && id > 0); 6781 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid, 6782 (mod_hash_key_t)(uintptr_t)id, &hv) == 0)) 6783 str = (char *)hv; 6784 return (str); 6785 } 6786 6787 /* free the id mapping within the specified strid */ 6788 void 6789 ddi_strid_free(ddi_strid *strid, id_t id) 6790 { 6791 i_ddi_strid *ss = (i_ddi_strid *)strid; 6792 char *str; 6793 6794 ASSERT(ss && id > 0); 6795 if ((ss == NULL) || (id <= 0)) 6796 return; 6797 6798 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6799 str = ddi_strid_id2str(strid, id); 6800 (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id); 6801 id_free(ss->strid_space, id); 6802 6803 if (str) 6804 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str); 6805 } 6806 6807 /* destroy the strid set */ 6808 void 6809 ddi_strid_fini(ddi_strid **strid_p) 6810 { 6811 i_ddi_strid *ss; 6812 6813 ASSERT(strid_p); 6814 if (strid_p == NULL) 6815 return; 6816 6817 ss = (i_ddi_strid *)(*strid_p); 6818 if (ss == NULL) 6819 return; 6820 6821 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6822 if (ss->strid_byid) 6823 mod_hash_destroy_hash(ss->strid_byid); 6824 if (ss->strid_byid) 6825 mod_hash_destroy_hash(ss->strid_bystr); 6826 if (ss->strid_space) 6827 id_space_destroy(ss->strid_space); 6828 kmem_free(ss, sizeof (*ss)); 6829 *strid_p = NULL; 6830 } 6831 6832 /* 6833 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'. 6834 * Storage is double buffered to prevent updates during devi_addr use - 6835 * double buffering is adaquate for reliable ddi_deviname() consumption. 6836 * The double buffer is not freed until dev_info structure destruction 6837 * (by i_ddi_free_node). 
6838 */ 6839 void 6840 ddi_set_name_addr(dev_info_t *dip, char *name) 6841 { 6842 char *buf = DEVI(dip)->devi_addr_buf; 6843 char *newaddr; 6844 6845 if (buf == NULL) { 6846 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP); 6847 DEVI(dip)->devi_addr_buf = buf; 6848 } 6849 6850 if (name) { 6851 ASSERT(strlen(name) < MAXNAMELEN); 6852 newaddr = (DEVI(dip)->devi_addr == buf) ? 6853 (buf + MAXNAMELEN) : buf; 6854 (void) strlcpy(newaddr, name, MAXNAMELEN); 6855 } else 6856 newaddr = NULL; 6857 6858 DEVI(dip)->devi_addr = newaddr; 6859 } 6860 6861 char * 6862 ddi_get_name_addr(dev_info_t *dip) 6863 { 6864 return (DEVI(dip)->devi_addr); 6865 } 6866 6867 void 6868 ddi_set_parent_data(dev_info_t *dip, void *pd) 6869 { 6870 DEVI(dip)->devi_parent_data = pd; 6871 } 6872 6873 void * 6874 ddi_get_parent_data(dev_info_t *dip) 6875 { 6876 return (DEVI(dip)->devi_parent_data); 6877 } 6878 6879 /* 6880 * ddi_name_to_major: returns the major number of a named module, 6881 * derived from the current driver alias binding. 6882 * 6883 * Caveat: drivers should avoid the use of this function, in particular 6884 * together with ddi_get_name/ddi_binding name, as per 6885 * major = ddi_name_to_major(ddi_get_name(devi)); 6886 * ddi_name_to_major() relies on the state of the device/alias binding, 6887 * which can and does change dynamically as aliases are administered 6888 * over time. An attached device instance cannot rely on the major 6889 * number returned by ddi_name_to_major() to match its own major number. 6890 * 6891 * For driver use, ddi_driver_major() reliably returns the major number 6892 * for the module to which the device was bound at attach time over 6893 * the life of the instance. 6894 * major = ddi_driver_major(dev_info_t *) 6895 */ 6896 major_t 6897 ddi_name_to_major(char *name) 6898 { 6899 return (mod_name_to_major(name)); 6900 } 6901 6902 /* 6903 * ddi_major_to_name: Returns the module name bound to a major number. 
6904 */ 6905 char * 6906 ddi_major_to_name(major_t major) 6907 { 6908 return (mod_major_to_name(major)); 6909 } 6910 6911 /* 6912 * Return the name of the devinfo node pointed at by 'dip' in the buffer 6913 * pointed at by 'name.' A devinfo node is named as a result of calling 6914 * ddi_initchild(). 6915 * 6916 * Note: the driver must be held before calling this function! 6917 */ 6918 char * 6919 ddi_deviname(dev_info_t *dip, char *name) 6920 { 6921 char *addrname; 6922 char none = '\0'; 6923 6924 if (dip == ddi_root_node()) { 6925 *name = '\0'; 6926 return (name); 6927 } 6928 6929 if (i_ddi_node_state(dip) < DS_BOUND) { 6930 addrname = &none; 6931 } else { 6932 /* 6933 * Use ddi_get_name_addr() without checking state so we get 6934 * a unit-address if we are called after ddi_set_name_addr() 6935 * by nexus DDI_CTL_INITCHILD code, but before completing 6936 * node promotion to DS_INITIALIZED. We currently have 6937 * two situations where we are called in this state: 6938 * o For framework processing of a path-oriented alias. 6939 * o If a SCSA nexus driver calls ddi_devid_register() 6940 * from it's tran_tgt_init(9E) implementation. 6941 */ 6942 addrname = ddi_get_name_addr(dip); 6943 if (addrname == NULL) 6944 addrname = &none; 6945 } 6946 6947 if (*addrname == '\0') { 6948 (void) sprintf(name, "/%s", ddi_node_name(dip)); 6949 } else { 6950 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname); 6951 } 6952 6953 return (name); 6954 } 6955 6956 /* 6957 * Spits out the name of device node, typically name@addr, for a given node, 6958 * using the driver name, not the nodename. 6959 * 6960 * Used by match_parent. Not to be used elsewhere. 
6961 */ 6962 char * 6963 i_ddi_parname(dev_info_t *dip, char *name) 6964 { 6965 char *addrname; 6966 6967 if (dip == ddi_root_node()) { 6968 *name = '\0'; 6969 return (name); 6970 } 6971 6972 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED); 6973 6974 if (*(addrname = ddi_get_name_addr(dip)) == '\0') 6975 (void) sprintf(name, "%s", ddi_binding_name(dip)); 6976 else 6977 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname); 6978 return (name); 6979 } 6980 6981 static char * 6982 pathname_work(dev_info_t *dip, char *path) 6983 { 6984 char *bp; 6985 6986 if (dip == ddi_root_node()) { 6987 *path = '\0'; 6988 return (path); 6989 } 6990 (void) pathname_work(ddi_get_parent(dip), path); 6991 bp = path + strlen(path); 6992 (void) ddi_deviname(dip, bp); 6993 return (path); 6994 } 6995 6996 char * 6997 ddi_pathname(dev_info_t *dip, char *path) 6998 { 6999 return (pathname_work(dip, path)); 7000 } 7001 7002 char * 7003 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path) 7004 { 7005 if (dmdp->dip == NULL) 7006 *path = '\0'; 7007 else { 7008 (void) ddi_pathname(dmdp->dip, path); 7009 if (dmdp->ddm_name) { 7010 (void) strcat(path, ":"); 7011 (void) strcat(path, dmdp->ddm_name); 7012 } 7013 } 7014 return (path); 7015 } 7016 7017 static char * 7018 pathname_work_obp(dev_info_t *dip, char *path) 7019 { 7020 char *bp; 7021 char *obp_path; 7022 7023 /* 7024 * look up the "obp-path" property, return the path if it exists 7025 */ 7026 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7027 "obp-path", &obp_path) == DDI_PROP_SUCCESS) { 7028 (void) strcpy(path, obp_path); 7029 ddi_prop_free(obp_path); 7030 return (path); 7031 } 7032 7033 /* 7034 * stop at root, no obp path 7035 */ 7036 if (dip == ddi_root_node()) { 7037 return (NULL); 7038 } 7039 7040 obp_path = pathname_work_obp(ddi_get_parent(dip), path); 7041 if (obp_path == NULL) 7042 return (NULL); 7043 7044 /* 7045 * append our component to parent's obp path 7046 */ 7047 bp = path + strlen(path); 7048 
if (*(bp - 1) != '/') 7049 (void) strcat(bp++, "/"); 7050 (void) ddi_deviname(dip, bp); 7051 return (path); 7052 } 7053 7054 /* 7055 * return the 'obp-path' based path for the given node, or NULL if the node 7056 * does not have a different obp path. NOTE: Unlike ddi_pathname, this 7057 * function can't be called from interrupt context (since we need to 7058 * lookup a string property). 7059 */ 7060 char * 7061 ddi_pathname_obp(dev_info_t *dip, char *path) 7062 { 7063 ASSERT(!servicing_interrupt()); 7064 if (dip == NULL || path == NULL) 7065 return (NULL); 7066 7067 /* split work into a separate function to aid debugging */ 7068 return (pathname_work_obp(dip, path)); 7069 } 7070 7071 int 7072 ddi_pathname_obp_set(dev_info_t *dip, char *component) 7073 { 7074 dev_info_t *pdip; 7075 char *obp_path = NULL; 7076 int rc = DDI_FAILURE; 7077 7078 if (dip == NULL) 7079 return (DDI_FAILURE); 7080 7081 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7082 7083 pdip = ddi_get_parent(dip); 7084 7085 if (ddi_pathname_obp(pdip, obp_path) == NULL) { 7086 (void) ddi_pathname(pdip, obp_path); 7087 } 7088 7089 if (component) { 7090 (void) strncat(obp_path, "/", MAXPATHLEN); 7091 (void) strncat(obp_path, component, MAXPATHLEN); 7092 } 7093 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path", 7094 obp_path); 7095 7096 if (obp_path) 7097 kmem_free(obp_path, MAXPATHLEN); 7098 7099 return (rc); 7100 } 7101 7102 /* 7103 * Given a dev_t, return the pathname of the corresponding device in the 7104 * buffer pointed at by "path." The buffer is assumed to be large enough 7105 * to hold the pathname of the device (MAXPATHLEN). 7106 * 7107 * The pathname of a device is the pathname of the devinfo node to which 7108 * the device "belongs," concatenated with the character ':' and the name 7109 * of the minor node corresponding to the dev_t. If spec_type is 0 then 7110 * just the pathname of the devinfo node is returned without driving attach 7111 * of that node. 
For a non-zero spec_type, an attach is performed and a 7112 * search of the minor list occurs. 7113 * 7114 * It is possible that the path associated with the dev_t is not 7115 * currently available in the devinfo tree. In order to have a 7116 * dev_t, a device must have been discovered before, which means 7117 * that the path is always in the instance tree. The one exception 7118 * to this is if the dev_t is associated with a pseudo driver, in 7119 * which case the device must exist on the pseudo branch of the 7120 * devinfo tree as a result of parsing .conf files. 7121 */ 7122 int 7123 ddi_dev_pathname(dev_t devt, int spec_type, char *path) 7124 { 7125 int circ; 7126 major_t major = getmajor(devt); 7127 int instance; 7128 dev_info_t *dip; 7129 char *minorname; 7130 char *drvname; 7131 7132 if (major >= devcnt) 7133 goto fail; 7134 if (major == clone_major) { 7135 /* clone has no minor nodes, manufacture the path here */ 7136 if ((drvname = ddi_major_to_name(getminor(devt))) == NULL) 7137 goto fail; 7138 7139 (void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname); 7140 return (DDI_SUCCESS); 7141 } 7142 7143 /* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */ 7144 if ((instance = dev_to_instance(devt)) == -1) 7145 goto fail; 7146 7147 /* reconstruct the path given the major/instance */ 7148 if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS) 7149 goto fail; 7150 7151 /* if spec_type given we must drive attach and search minor nodes */ 7152 if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) { 7153 /* attach the path so we can search minors */ 7154 if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL) 7155 goto fail; 7156 7157 /* Add minorname to path. 
*/ 7158 ndi_devi_enter(dip, &circ); 7159 minorname = i_ddi_devtspectype_to_minorname(dip, 7160 devt, spec_type); 7161 if (minorname) { 7162 (void) strcat(path, ":"); 7163 (void) strcat(path, minorname); 7164 } 7165 ndi_devi_exit(dip, circ); 7166 ddi_release_devi(dip); 7167 if (minorname == NULL) 7168 goto fail; 7169 } 7170 ASSERT(strlen(path) < MAXPATHLEN); 7171 return (DDI_SUCCESS); 7172 7173 fail: *path = 0; 7174 return (DDI_FAILURE); 7175 } 7176 7177 /* 7178 * Given a major number and an instance, return the path. 7179 * This interface does NOT drive attach. 7180 */ 7181 int 7182 e_ddi_majorinstance_to_path(major_t major, int instance, char *path) 7183 { 7184 struct devnames *dnp; 7185 dev_info_t *dip; 7186 7187 if ((major >= devcnt) || (instance == -1)) { 7188 *path = 0; 7189 return (DDI_FAILURE); 7190 } 7191 7192 /* look for the major/instance in the instance tree */ 7193 if (e_ddi_instance_majorinstance_to_path(major, instance, 7194 path) == DDI_SUCCESS) { 7195 ASSERT(strlen(path) < MAXPATHLEN); 7196 return (DDI_SUCCESS); 7197 } 7198 7199 /* 7200 * Not in instance tree, find the instance on the per driver list and 7201 * construct path to instance via ddi_pathname(). This is how paths 7202 * down the 'pseudo' branch are constructed. 7203 */ 7204 dnp = &(devnamesp[major]); 7205 LOCK_DEV_OPS(&(dnp->dn_lock)); 7206 for (dip = dnp->dn_head; dip; 7207 dip = (dev_info_t *)DEVI(dip)->devi_next) { 7208 /* Skip if instance does not match. */ 7209 if (DEVI(dip)->devi_instance != instance) 7210 continue; 7211 7212 /* 7213 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND 7214 * node demotion, so it is not an effective way of ensuring 7215 * that the ddi_pathname result has a unit-address. Instead, 7216 * we reverify the node state after calling ddi_pathname(). 
7217 */ 7218 if (i_ddi_node_state(dip) >= DS_INITIALIZED) { 7219 (void) ddi_pathname(dip, path); 7220 if (i_ddi_node_state(dip) < DS_INITIALIZED) 7221 continue; 7222 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 7223 ASSERT(strlen(path) < MAXPATHLEN); 7224 return (DDI_SUCCESS); 7225 } 7226 } 7227 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 7228 7229 /* can't reconstruct the path */ 7230 *path = 0; 7231 return (DDI_FAILURE); 7232 } 7233 7234 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa" 7235 7236 /* 7237 * Given the dip for a network interface return the ppa for that interface. 7238 * 7239 * In all cases except GLD v0 drivers, the ppa == instance. 7240 * In the case of GLD v0 drivers, the ppa is equal to the attach order. 7241 * So for these drivers when the attach routine calls gld_register(), 7242 * the GLD framework creates an integer property called "gld_driver_ppa" 7243 * that can be queried here. 7244 * 7245 * The only time this function is used is when a system is booting over nfs. 7246 * In this case the system has to resolve the pathname of the boot device 7247 * to it's ppa. 7248 */ 7249 int 7250 i_ddi_devi_get_ppa(dev_info_t *dip) 7251 { 7252 return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 7253 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 7254 GLD_DRIVER_PPA, ddi_get_instance(dip))); 7255 } 7256 7257 /* 7258 * i_ddi_devi_set_ppa() should only be called from gld_register() 7259 * and only for GLD v0 drivers 7260 */ 7261 void 7262 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa) 7263 { 7264 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa); 7265 } 7266 7267 7268 /* 7269 * Private DDI Console bell functions. 
7270 */ 7271 void 7272 ddi_ring_console_bell(clock_t duration) 7273 { 7274 if (ddi_console_bell_func != NULL) 7275 (*ddi_console_bell_func)(duration); 7276 } 7277 7278 void 7279 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 7280 { 7281 ddi_console_bell_func = bellfunc; 7282 } 7283 7284 int 7285 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 7286 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 7287 { 7288 int (*funcp)() = ddi_dma_allochdl; 7289 ddi_dma_attr_t dma_attr; 7290 struct bus_ops *bop; 7291 7292 if (attr == (ddi_dma_attr_t *)0) 7293 return (DDI_DMA_BADATTR); 7294 7295 dma_attr = *attr; 7296 7297 bop = DEVI(dip)->devi_ops->devo_bus_ops; 7298 if (bop && bop->bus_dma_allochdl) 7299 funcp = bop->bus_dma_allochdl; 7300 7301 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 7302 } 7303 7304 void 7305 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 7306 { 7307 ddi_dma_handle_t h = *handlep; 7308 (void) ddi_dma_freehdl(HD, HD, h); 7309 } 7310 7311 static uintptr_t dma_mem_list_id = 0; 7312 7313 7314 int 7315 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 7316 ddi_device_acc_attr_t *accattrp, uint_t flags, 7317 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 7318 size_t *real_length, ddi_acc_handle_t *handlep) 7319 { 7320 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7321 dev_info_t *dip = hp->dmai_rdip; 7322 ddi_acc_hdl_t *ap; 7323 ddi_dma_attr_t *attrp = &hp->dmai_attr; 7324 uint_t sleepflag, xfermodes; 7325 int (*fp)(caddr_t); 7326 int rval; 7327 7328 if (waitfp == DDI_DMA_SLEEP) 7329 fp = (int (*)())KM_SLEEP; 7330 else if (waitfp == DDI_DMA_DONTWAIT) 7331 fp = (int (*)())KM_NOSLEEP; 7332 else 7333 fp = waitfp; 7334 *handlep = impl_acc_hdl_alloc(fp, arg); 7335 if (*handlep == NULL) 7336 return (DDI_FAILURE); 7337 7338 /* check if the cache attributes are supported */ 7339 if (i_ddi_check_cache_attr(flags) == B_FALSE) 7340 return (DDI_FAILURE); 7341 7342 /* 7343 * Transfer the meaningful bits to 
xfermodes. 7344 * Double-check if the 3rd party driver correctly sets the bits. 7345 * If not, set DDI_DMA_STREAMING to keep compatibility. 7346 */ 7347 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 7348 if (xfermodes == 0) { 7349 xfermodes = DDI_DMA_STREAMING; 7350 } 7351 7352 /* 7353 * initialize the common elements of data access handle 7354 */ 7355 ap = impl_acc_hdl_get(*handlep); 7356 ap->ah_vers = VERS_ACCHDL; 7357 ap->ah_dip = dip; 7358 ap->ah_offset = 0; 7359 ap->ah_len = 0; 7360 ap->ah_xfermodes = flags; 7361 ap->ah_acc = *accattrp; 7362 7363 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0); 7364 if (xfermodes == DDI_DMA_CONSISTENT) { 7365 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7366 flags, accattrp, kaddrp, NULL, ap); 7367 *real_length = length; 7368 } else { 7369 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7370 flags, accattrp, kaddrp, real_length, ap); 7371 } 7372 if (rval == DDI_SUCCESS) { 7373 ap->ah_len = (off_t)(*real_length); 7374 ap->ah_addr = *kaddrp; 7375 } else { 7376 impl_acc_hdl_free(*handlep); 7377 *handlep = (ddi_acc_handle_t)NULL; 7378 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 7379 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 7380 } 7381 rval = DDI_FAILURE; 7382 } 7383 return (rval); 7384 } 7385 7386 void 7387 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 7388 { 7389 ddi_acc_hdl_t *ap; 7390 7391 ap = impl_acc_hdl_get(*handlep); 7392 ASSERT(ap); 7393 7394 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 7395 7396 /* 7397 * free the handle 7398 */ 7399 impl_acc_hdl_free(*handlep); 7400 *handlep = (ddi_acc_handle_t)NULL; 7401 7402 if (dma_mem_list_id != 0) { 7403 ddi_run_callback(&dma_mem_list_id); 7404 } 7405 } 7406 7407 int 7408 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 7409 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 7410 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7411 { 7412 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7413 dev_info_t *dip, *rdip; 7414 
struct ddi_dma_req dmareq; 7415 int (*funcp)(); 7416 7417 dmareq.dmar_flags = flags; 7418 dmareq.dmar_fp = waitfp; 7419 dmareq.dmar_arg = arg; 7420 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 7421 7422 if (bp->b_flags & B_PAGEIO) { 7423 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 7424 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 7425 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 7426 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 7427 } else { 7428 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 7429 if (bp->b_flags & B_SHADOW) { 7430 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 7431 bp->b_shadow; 7432 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 7433 } else { 7434 dmareq.dmar_object.dmao_type = 7435 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 7436 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 7437 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7438 } 7439 7440 /* 7441 * If the buffer has no proc pointer, or the proc 7442 * struct has the kernel address space, or the buffer has 7443 * been marked B_REMAPPED (meaning that it is now 7444 * mapped into the kernel's address space), then 7445 * the address space is kas (kernel address space). 
7446 */ 7447 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 7448 (bp->b_flags & B_REMAPPED)) { 7449 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 7450 } else { 7451 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 7452 bp->b_proc->p_as; 7453 } 7454 } 7455 7456 dip = rdip = hp->dmai_rdip; 7457 if (dip != ddi_root_node()) 7458 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7459 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7460 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7461 } 7462 7463 int 7464 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 7465 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 7466 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7467 { 7468 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7469 dev_info_t *dip, *rdip; 7470 struct ddi_dma_req dmareq; 7471 int (*funcp)(); 7472 7473 if (len == (uint_t)0) { 7474 return (DDI_DMA_NOMAPPING); 7475 } 7476 dmareq.dmar_flags = flags; 7477 dmareq.dmar_fp = waitfp; 7478 dmareq.dmar_arg = arg; 7479 dmareq.dmar_object.dmao_size = len; 7480 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 7481 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 7482 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 7483 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7484 7485 dip = rdip = hp->dmai_rdip; 7486 if (dip != ddi_root_node()) 7487 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7488 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7489 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7490 } 7491 7492 void 7493 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 7494 { 7495 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7496 ddi_dma_cookie_t *cp; 7497 7498 cp = hp->dmai_cookie; 7499 ASSERT(cp); 7500 7501 cookiep->dmac_notused = cp->dmac_notused; 7502 cookiep->dmac_type = cp->dmac_type; 7503 cookiep->dmac_address = cp->dmac_address; 7504 cookiep->dmac_size = cp->dmac_size; 7505 hp->dmai_cookie++; 7506 } 7507 7508 int 
7509 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 7510 { 7511 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7512 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 7513 return (DDI_FAILURE); 7514 } else { 7515 *nwinp = hp->dmai_nwin; 7516 return (DDI_SUCCESS); 7517 } 7518 } 7519 7520 int 7521 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 7522 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7523 { 7524 int (*funcp)() = ddi_dma_win; 7525 struct bus_ops *bop; 7526 7527 bop = DEVI(HD)->devi_ops->devo_bus_ops; 7528 if (bop && bop->bus_dma_win) 7529 funcp = bop->bus_dma_win; 7530 7531 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 7532 } 7533 7534 int 7535 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 7536 { 7537 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 7538 &burstsizes, 0, 0)); 7539 } 7540 7541 int 7542 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 7543 { 7544 return (hp->dmai_fault); 7545 } 7546 7547 int 7548 ddi_check_dma_handle(ddi_dma_handle_t handle) 7549 { 7550 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7551 int (*check)(ddi_dma_impl_t *); 7552 7553 if ((check = hp->dmai_fault_check) == NULL) 7554 check = i_ddi_dma_fault_check; 7555 7556 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 7557 } 7558 7559 void 7560 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 7561 { 7562 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7563 void (*notify)(ddi_dma_impl_t *); 7564 7565 if (!hp->dmai_fault) { 7566 hp->dmai_fault = 1; 7567 if ((notify = hp->dmai_fault_notify) != NULL) 7568 (*notify)(hp); 7569 } 7570 } 7571 7572 void 7573 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 7574 { 7575 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7576 void (*notify)(ddi_dma_impl_t *); 7577 7578 if (hp->dmai_fault) { 7579 hp->dmai_fault = 0; 7580 if ((notify = hp->dmai_fault_notify) != NULL) 7581 (*notify)(hp); 7582 } 7583 } 7584 7585 /* 7586 * register mapping routines. 
7587 */ 7588 int 7589 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp, 7590 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp, 7591 ddi_acc_handle_t *handle) 7592 { 7593 ddi_map_req_t mr; 7594 ddi_acc_hdl_t *hp; 7595 int result; 7596 7597 /* 7598 * Allocate and initialize the common elements of data access handle. 7599 */ 7600 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 7601 hp = impl_acc_hdl_get(*handle); 7602 hp->ah_vers = VERS_ACCHDL; 7603 hp->ah_dip = dip; 7604 hp->ah_rnumber = rnumber; 7605 hp->ah_offset = offset; 7606 hp->ah_len = len; 7607 hp->ah_acc = *accattrp; 7608 7609 /* 7610 * Set up the mapping request and call to parent. 7611 */ 7612 mr.map_op = DDI_MO_MAP_LOCKED; 7613 mr.map_type = DDI_MT_RNUMBER; 7614 mr.map_obj.rnumber = rnumber; 7615 mr.map_prot = PROT_READ | PROT_WRITE; 7616 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7617 mr.map_handlep = hp; 7618 mr.map_vers = DDI_MAP_VERSION; 7619 result = ddi_map(dip, &mr, offset, len, addrp); 7620 7621 /* 7622 * check for end result 7623 */ 7624 if (result != DDI_SUCCESS) { 7625 impl_acc_hdl_free(*handle); 7626 *handle = (ddi_acc_handle_t)NULL; 7627 } else { 7628 hp->ah_addr = *addrp; 7629 } 7630 7631 return (result); 7632 } 7633 7634 void 7635 ddi_regs_map_free(ddi_acc_handle_t *handlep) 7636 { 7637 ddi_map_req_t mr; 7638 ddi_acc_hdl_t *hp; 7639 7640 hp = impl_acc_hdl_get(*handlep); 7641 ASSERT(hp); 7642 7643 mr.map_op = DDI_MO_UNMAP; 7644 mr.map_type = DDI_MT_RNUMBER; 7645 mr.map_obj.rnumber = hp->ah_rnumber; 7646 mr.map_prot = PROT_READ | PROT_WRITE; 7647 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7648 mr.map_handlep = hp; 7649 mr.map_vers = DDI_MAP_VERSION; 7650 7651 /* 7652 * Call my parent to unmap my regs. 
7653 */ 7654 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 7655 hp->ah_len, &hp->ah_addr); 7656 /* 7657 * free the handle 7658 */ 7659 impl_acc_hdl_free(*handlep); 7660 *handlep = (ddi_acc_handle_t)NULL; 7661 } 7662 7663 int 7664 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount, 7665 ssize_t dev_advcnt, uint_t dev_datasz) 7666 { 7667 uint8_t *b; 7668 uint16_t *w; 7669 uint32_t *l; 7670 uint64_t *ll; 7671 7672 /* check for total byte count is multiple of data transfer size */ 7673 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7674 return (DDI_FAILURE); 7675 7676 switch (dev_datasz) { 7677 case DDI_DATA_SZ01_ACC: 7678 for (b = (uint8_t *)dev_addr; 7679 bytecount != 0; bytecount -= 1, b += dev_advcnt) 7680 ddi_put8(handle, b, 0); 7681 break; 7682 case DDI_DATA_SZ02_ACC: 7683 for (w = (uint16_t *)dev_addr; 7684 bytecount != 0; bytecount -= 2, w += dev_advcnt) 7685 ddi_put16(handle, w, 0); 7686 break; 7687 case DDI_DATA_SZ04_ACC: 7688 for (l = (uint32_t *)dev_addr; 7689 bytecount != 0; bytecount -= 4, l += dev_advcnt) 7690 ddi_put32(handle, l, 0); 7691 break; 7692 case DDI_DATA_SZ08_ACC: 7693 for (ll = (uint64_t *)dev_addr; 7694 bytecount != 0; bytecount -= 8, ll += dev_advcnt) 7695 ddi_put64(handle, ll, 0x0ll); 7696 break; 7697 default: 7698 return (DDI_FAILURE); 7699 } 7700 return (DDI_SUCCESS); 7701 } 7702 7703 int 7704 ddi_device_copy( 7705 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt, 7706 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt, 7707 size_t bytecount, uint_t dev_datasz) 7708 { 7709 uint8_t *b_src, *b_dst; 7710 uint16_t *w_src, *w_dst; 7711 uint32_t *l_src, *l_dst; 7712 uint64_t *ll_src, *ll_dst; 7713 7714 /* check for total byte count is multiple of data transfer size */ 7715 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7716 return (DDI_FAILURE); 7717 7718 switch (dev_datasz) { 7719 case DDI_DATA_SZ01_ACC: 7720 b_src = (uint8_t *)src_addr; 7721 b_dst = (uint8_t 
*)dest_addr; 7722 7723 for (; bytecount != 0; bytecount -= 1) { 7724 ddi_put8(dest_handle, b_dst, 7725 ddi_get8(src_handle, b_src)); 7726 b_dst += dest_advcnt; 7727 b_src += src_advcnt; 7728 } 7729 break; 7730 case DDI_DATA_SZ02_ACC: 7731 w_src = (uint16_t *)src_addr; 7732 w_dst = (uint16_t *)dest_addr; 7733 7734 for (; bytecount != 0; bytecount -= 2) { 7735 ddi_put16(dest_handle, w_dst, 7736 ddi_get16(src_handle, w_src)); 7737 w_dst += dest_advcnt; 7738 w_src += src_advcnt; 7739 } 7740 break; 7741 case DDI_DATA_SZ04_ACC: 7742 l_src = (uint32_t *)src_addr; 7743 l_dst = (uint32_t *)dest_addr; 7744 7745 for (; bytecount != 0; bytecount -= 4) { 7746 ddi_put32(dest_handle, l_dst, 7747 ddi_get32(src_handle, l_src)); 7748 l_dst += dest_advcnt; 7749 l_src += src_advcnt; 7750 } 7751 break; 7752 case DDI_DATA_SZ08_ACC: 7753 ll_src = (uint64_t *)src_addr; 7754 ll_dst = (uint64_t *)dest_addr; 7755 7756 for (; bytecount != 0; bytecount -= 8) { 7757 ddi_put64(dest_handle, ll_dst, 7758 ddi_get64(src_handle, ll_src)); 7759 ll_dst += dest_advcnt; 7760 ll_src += src_advcnt; 7761 } 7762 break; 7763 default: 7764 return (DDI_FAILURE); 7765 } 7766 return (DDI_SUCCESS); 7767 } 7768 7769 #define swap16(value) \ 7770 ((((value) & 0xff) << 8) | ((value) >> 8)) 7771 7772 #define swap32(value) \ 7773 (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \ 7774 (uint32_t)swap16((uint16_t)((value) >> 16))) 7775 7776 #define swap64(value) \ 7777 (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \ 7778 << 32) | \ 7779 (uint64_t)swap32((uint32_t)((value) >> 32))) 7780 7781 uint16_t 7782 ddi_swap16(uint16_t value) 7783 { 7784 return (swap16(value)); 7785 } 7786 7787 uint32_t 7788 ddi_swap32(uint32_t value) 7789 { 7790 return (swap32(value)); 7791 } 7792 7793 uint64_t 7794 ddi_swap64(uint64_t value) 7795 { 7796 return (swap64(value)); 7797 } 7798 7799 /* 7800 * Convert a binding name to a driver name. 
7801 * A binding name is the name used to determine the driver for a 7802 * device - it may be either an alias for the driver or the name 7803 * of the driver itself. 7804 */ 7805 char * 7806 i_binding_to_drv_name(char *bname) 7807 { 7808 major_t major_no; 7809 7810 ASSERT(bname != NULL); 7811 7812 if ((major_no = ddi_name_to_major(bname)) == -1) 7813 return (NULL); 7814 return (ddi_major_to_name(major_no)); 7815 } 7816 7817 /* 7818 * Search for minor name that has specified dev_t and spec_type. 7819 * If spec_type is zero then any dev_t match works. Since we 7820 * are returning a pointer to the minor name string, we require the 7821 * caller to do the locking. 7822 */ 7823 char * 7824 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7825 { 7826 struct ddi_minor_data *dmdp; 7827 7828 /* 7829 * The did layered driver currently intentionally returns a 7830 * devinfo ptr for an underlying sd instance based on a did 7831 * dev_t. In this case it is not an error. 7832 * 7833 * The did layered driver is associated with Sun Cluster. 7834 */ 7835 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7836 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7837 7838 ASSERT(DEVI_BUSY_OWNED(dip)); 7839 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7840 if (((dmdp->type == DDM_MINOR) || 7841 (dmdp->type == DDM_INTERNAL_PATH) || 7842 (dmdp->type == DDM_DEFAULT)) && 7843 (dmdp->ddm_dev == dev) && 7844 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7845 (dmdp->ddm_spec_type == spec_type))) 7846 return (dmdp->ddm_name); 7847 } 7848 7849 return (NULL); 7850 } 7851 7852 /* 7853 * Find the devt and spectype of the specified minor_name. 7854 * Return DDI_FAILURE if minor_name not found. Since we are 7855 * returning everything via arguments we can do the locking. 
7856 */ 7857 int 7858 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name, 7859 dev_t *devtp, int *spectypep) 7860 { 7861 int circ; 7862 struct ddi_minor_data *dmdp; 7863 7864 /* deal with clone minor nodes */ 7865 if (dip == clone_dip) { 7866 major_t major; 7867 /* 7868 * Make sure minor_name is a STREAMS driver. 7869 * We load the driver but don't attach to any instances. 7870 */ 7871 7872 major = ddi_name_to_major(minor_name); 7873 if (major == DDI_MAJOR_T_NONE) 7874 return (DDI_FAILURE); 7875 7876 if (ddi_hold_driver(major) == NULL) 7877 return (DDI_FAILURE); 7878 7879 if (STREAMSTAB(major) == NULL) { 7880 ddi_rele_driver(major); 7881 return (DDI_FAILURE); 7882 } 7883 ddi_rele_driver(major); 7884 7885 if (devtp) 7886 *devtp = makedevice(clone_major, (minor_t)major); 7887 7888 if (spectypep) 7889 *spectypep = S_IFCHR; 7890 7891 return (DDI_SUCCESS); 7892 } 7893 7894 ndi_devi_enter(dip, &circ); 7895 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7896 if (((dmdp->type != DDM_MINOR) && 7897 (dmdp->type != DDM_INTERNAL_PATH) && 7898 (dmdp->type != DDM_DEFAULT)) || 7899 strcmp(minor_name, dmdp->ddm_name)) 7900 continue; 7901 7902 if (devtp) 7903 *devtp = dmdp->ddm_dev; 7904 7905 if (spectypep) 7906 *spectypep = dmdp->ddm_spec_type; 7907 7908 ndi_devi_exit(dip, circ); 7909 return (DDI_SUCCESS); 7910 } 7911 ndi_devi_exit(dip, circ); 7912 7913 return (DDI_FAILURE); 7914 } 7915 7916 static kmutex_t devid_gen_mutex; 7917 static short devid_gen_number; 7918 7919 #ifdef DEBUG 7920 7921 static int devid_register_corrupt = 0; 7922 static int devid_register_corrupt_major = 0; 7923 static int devid_register_corrupt_hint = 0; 7924 static int devid_register_corrupt_hint_major = 0; 7925 7926 static int devid_lyr_debug = 0; 7927 7928 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \ 7929 if (devid_lyr_debug) \ 7930 ddi_debug_devid_devts(msg, ndevs, devs) 7931 7932 #else 7933 7934 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) 7935 7936 #endif /* DEBUG 
*/ 7937 7938 7939 #ifdef DEBUG 7940 7941 static void 7942 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs) 7943 { 7944 int i; 7945 7946 cmn_err(CE_CONT, "%s:\n", msg); 7947 for (i = 0; i < ndevs; i++) { 7948 cmn_err(CE_CONT, " 0x%lx\n", devs[i]); 7949 } 7950 } 7951 7952 static void 7953 ddi_debug_devid_paths(char *msg, int npaths, char **paths) 7954 { 7955 int i; 7956 7957 cmn_err(CE_CONT, "%s:\n", msg); 7958 for (i = 0; i < npaths; i++) { 7959 cmn_err(CE_CONT, " %s\n", paths[i]); 7960 } 7961 } 7962 7963 static void 7964 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs) 7965 { 7966 int i; 7967 7968 cmn_err(CE_CONT, "dev_ts per path %s\n", path); 7969 for (i = 0; i < ndevs; i++) { 7970 cmn_err(CE_CONT, " 0x%lx\n", devs[i]); 7971 } 7972 } 7973 7974 #endif /* DEBUG */ 7975 7976 /* 7977 * Register device id into DDI framework. 7978 * Must be called when device is attached. 7979 */ 7980 static int 7981 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid) 7982 { 7983 impl_devid_t *i_devid = (impl_devid_t *)devid; 7984 size_t driver_len; 7985 const char *driver_name; 7986 char *devid_str; 7987 major_t major; 7988 7989 if ((dip == NULL) || 7990 ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE)) 7991 return (DDI_FAILURE); 7992 7993 /* verify that the devid is valid */ 7994 if (ddi_devid_valid(devid) != DDI_SUCCESS) 7995 return (DDI_FAILURE); 7996 7997 /* Updating driver name hint in devid */ 7998 driver_name = ddi_driver_name(dip); 7999 driver_len = strlen(driver_name); 8000 if (driver_len > DEVID_HINT_SIZE) { 8001 /* Pick up last four characters of driver name */ 8002 driver_name += driver_len - DEVID_HINT_SIZE; 8003 driver_len = DEVID_HINT_SIZE; 8004 } 8005 bzero(i_devid->did_driver, DEVID_HINT_SIZE); 8006 bcopy(driver_name, i_devid->did_driver, driver_len); 8007 8008 #ifdef DEBUG 8009 /* Corrupt the devid for testing. 
*/ 8010 if (devid_register_corrupt) 8011 i_devid->did_id[0] += devid_register_corrupt; 8012 if (devid_register_corrupt_major && 8013 (major == devid_register_corrupt_major)) 8014 i_devid->did_id[0] += 1; 8015 if (devid_register_corrupt_hint) 8016 i_devid->did_driver[0] += devid_register_corrupt_hint; 8017 if (devid_register_corrupt_hint_major && 8018 (major == devid_register_corrupt_hint_major)) 8019 i_devid->did_driver[0] += 1; 8020 #endif /* DEBUG */ 8021 8022 /* encode the devid as a string */ 8023 if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL) 8024 return (DDI_FAILURE); 8025 8026 /* add string as a string property */ 8027 if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, 8028 DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) { 8029 cmn_err(CE_WARN, "%s%d: devid property update failed", 8030 ddi_driver_name(dip), ddi_get_instance(dip)); 8031 ddi_devid_str_free(devid_str); 8032 return (DDI_FAILURE); 8033 } 8034 8035 /* keep pointer to devid string for interrupt context fma code */ 8036 if (DEVI(dip)->devi_devid_str) 8037 ddi_devid_str_free(DEVI(dip)->devi_devid_str); 8038 DEVI(dip)->devi_devid_str = devid_str; 8039 return (DDI_SUCCESS); 8040 } 8041 8042 int 8043 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid) 8044 { 8045 int rval; 8046 8047 rval = i_ddi_devid_register(dip, devid); 8048 if (rval == DDI_SUCCESS) { 8049 /* 8050 * Register devid in devid-to-path cache 8051 */ 8052 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) { 8053 mutex_enter(&DEVI(dip)->devi_lock); 8054 DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID; 8055 mutex_exit(&DEVI(dip)->devi_lock); 8056 } else { 8057 cmn_err(CE_WARN, "%s%d: failed to cache devid", 8058 ddi_driver_name(dip), ddi_get_instance(dip)); 8059 } 8060 } else { 8061 cmn_err(CE_WARN, "%s%d: failed to register devid", 8062 ddi_driver_name(dip), ddi_get_instance(dip)); 8063 } 8064 return (rval); 8065 } 8066 8067 /* 8068 * Remove (unregister) device id from DDI framework. 
 * Must be called when device is detached.
 */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	/* release the cached devid string kept for interrupt-context fma */
	if (DEVI(dip)->devi_devid_str) {
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
		DEVI(dip)->devi_devid_str = NULL;
	}

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}

/*
 * Public devid unregistration: clear DEVI_REGISTERED_DEVID (under
 * devi_lock), remove the devid from the devid-to-path cache, then drop
 * the property and cached string via i_ddi_devid_unregister.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}

/*
 * Allocate and initialize a device id.
 *
 * devid_type selects the encoding; for DEVID_FAB the caller supplies no
 * payload (nbytes == 0, id == NULL) and a fabricated id is built from
 * hostid + timestamp + generation number.  For the other accepted types
 * the caller-provided (id, nbytes) payload is copied verbatim.
 *
 * Returns DDI_SUCCESS with *ret_devid set, or DDI_FAILURE on bad
 * arguments.  The devid must eventually be freed by the caller
 * (presumably via ddi_devid_free — not visible in this chunk).
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/*
	 * sizeof (*i_devid) already includes one byte of did_id payload,
	 * hence the "- sizeof (char)".
	 */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* fabricated payload: hostid + timestamp + generation */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last DEVID_HINT_SIZE characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}

/*
 * Return (a copy of) the devid of dip, looked up without a specific dev_t.
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}

/*
 * Look up the devid string property on dip (dev_t-specific first, then
 * wildcard) and decode it into binary form.  The decoded devid is
 * returned via *ret_devid; the property string is always freed.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		/* fall back to a wildcard lookup unless we already did one */
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}

/*
 * Return a copy of the device id for dev_t
 */
int
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for
dev_t and spec_type 8263 */ 8264 int 8265 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name) 8266 { 8267 char *buf; 8268 int circ; 8269 dev_info_t *dip; 8270 char *nm; 8271 int rval; 8272 8273 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) { 8274 *minor_name = NULL; 8275 return (DDI_FAILURE); 8276 } 8277 8278 /* Find the minor name and copy into max size buf */ 8279 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 8280 ndi_devi_enter(dip, &circ); 8281 nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type); 8282 if (nm) 8283 (void) strcpy(buf, nm); 8284 ndi_devi_exit(dip, circ); 8285 ddi_release_devi(dip); /* e_ddi_hold_devi_by_dev() */ 8286 8287 if (nm) { 8288 /* duplicate into min size buf for return result */ 8289 *minor_name = i_ddi_strdup(buf, KM_SLEEP); 8290 rval = DDI_SUCCESS; 8291 } else { 8292 *minor_name = NULL; 8293 rval = DDI_FAILURE; 8294 } 8295 8296 /* free max size buf and return */ 8297 kmem_free(buf, MAXNAMELEN); 8298 return (rval); 8299 } 8300 8301 int 8302 ddi_lyr_devid_to_devlist( 8303 ddi_devid_t devid, 8304 char *minor_name, 8305 int *retndevs, 8306 dev_t **retdevs) 8307 { 8308 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS); 8309 8310 if (e_devid_cache_to_devt_list(devid, minor_name, 8311 retndevs, retdevs) == DDI_SUCCESS) { 8312 ASSERT(*retndevs > 0); 8313 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist", 8314 *retndevs, *retdevs); 8315 return (DDI_SUCCESS); 8316 } 8317 8318 if (e_ddi_devid_discovery(devid) == DDI_FAILURE) { 8319 return (DDI_FAILURE); 8320 } 8321 8322 if (e_devid_cache_to_devt_list(devid, minor_name, 8323 retndevs, retdevs) == DDI_SUCCESS) { 8324 ASSERT(*retndevs > 0); 8325 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist", 8326 *retndevs, *retdevs); 8327 return (DDI_SUCCESS); 8328 } 8329 8330 return (DDI_FAILURE); 8331 } 8332 8333 void 8334 ddi_lyr_free_devlist(dev_t *devlist, int ndevs) 8335 { 8336 kmem_free(devlist, sizeof (dev_t) * ndevs); 8337 } 8338 8339 /* 8340 * Note: This will need to be fixed if we ever 
allow processes to 8341 * have more than one data model per exec. 8342 */ 8343 model_t 8344 ddi_mmap_get_model(void) 8345 { 8346 return (get_udatamodel()); 8347 } 8348 8349 model_t 8350 ddi_model_convert_from(model_t model) 8351 { 8352 return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE); 8353 } 8354 8355 /* 8356 * ddi interfaces managing storage and retrieval of eventcookies. 8357 */ 8358 8359 /* 8360 * Invoke bus nexus driver's implementation of the 8361 * (*bus_remove_eventcall)() interface to remove a registered 8362 * callback handler for "event". 8363 */ 8364 int 8365 ddi_remove_event_handler(ddi_callback_id_t id) 8366 { 8367 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id; 8368 dev_info_t *ddip; 8369 8370 ASSERT(cb); 8371 if (!cb) { 8372 return (DDI_FAILURE); 8373 } 8374 8375 ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie); 8376 return (ndi_busop_remove_eventcall(ddip, id)); 8377 } 8378 8379 /* 8380 * Invoke bus nexus driver's implementation of the 8381 * (*bus_add_eventcall)() interface to register a callback handler 8382 * for "event". 8383 */ 8384 int 8385 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event, 8386 void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *), 8387 void *arg, ddi_callback_id_t *id) 8388 { 8389 return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id)); 8390 } 8391 8392 8393 /* 8394 * Return a handle for event "name" by calling up the device tree 8395 * hierarchy via (*bus_get_eventcookie)() interface until claimed 8396 * by a bus nexus or top of dev_info tree is reached. 8397 */ 8398 int 8399 ddi_get_eventcookie(dev_info_t *dip, char *name, 8400 ddi_eventcookie_t *event_cookiep) 8401 { 8402 return (ndi_busop_get_eventcookie(dip, dip, 8403 name, event_cookiep)); 8404 } 8405 8406 /* 8407 * This procedure is provided as the general callback function when 8408 * umem_lockmemory calls as_add_callback for long term memory locking. 
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* fails (ENOMEM) when the locked-memory rctl would be exceeded */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
 * Returns 0 if successful otherwise returns error code
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	procp = cookie->procp;
	ASSERT(procp);

	/* charge cookie->size bytes against the process' locked-mem rctl */
	if ((ret = i_ddi_incr_locked_memory(procp,
	    cookie->size)) != 0) {
		return (ret);
	}
	return (0);
}

/*
 * Decrements the max-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	proc = (proc_t *)cookie->procp;
	/* procp may already be NULL (e.g. KMEM-type cookies); nothing to do */
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM must be set when the locking will
 *	be maintained for an indefinitely long period (essentially permanent),
 *	rather than for what would be required for a typical I/O completion.
 *	When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 *	if the memory pertains to a regular file which is mapped MAP_SHARED.
 *	This is to prevent a deadlock if a file truncation is attempted
 *	after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EFAULT - memory pertains to a regular file mapped shared and
 *	DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern	struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie. Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.) as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that is was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and, and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take if off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/* double-checked by callers; the mutex makes the check definitive */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.  This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 * EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context.  Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to.  Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size.  It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
    int direction, dev_t dev, daddr_t blkno,
    int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	/* a page-list array marks a locked (shadowed) cookie */
	if (p->pparray != NULL) {
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}

/*
 * Fault-handling and related routines
 */

/*
 * Map the devinfo state flags to the coarse DDI device-state values,
 * checked in priority order (offline > down > quiesced > degraded > up).
 */
ddi_devstate_t
ddi_get_devstate(dev_info_t *dip)
{
	if (DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_DEVSTATE_OFFLINE);
	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
		return (DDI_DEVSTATE_DOWN);
	else if (DEVI_IS_BUS_QUIESCED(dip))
		return (DDI_DEVSTATE_QUIESCED);
	else if (DEVI_IS_DEVICE_DEGRADED(dip))
		return (DDI_DEVSTATE_DEGRADED);
	else
		return (DDI_DEVSTATE_UP);
}

/*
 * Report a device fault: package the impact/location/message plus the
 * current devstate into a fault-event and post it via the parent's
 * DDI_DEVI_FAULT_EVENT cookie.  Silently does nothing if no parent
 * defines the fault event.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}

/*
 * Return the device-class string of dip (may be NULL if never set).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}

/*
 * Replace the device-class string of dip with a copy of devi_class,
 * allocated per flag (KM_SLEEP/KM_NOSLEEP).  The old string, if any,
 * is freed under devi_lock.  Returns DDI_FAILURE if the copy could not
 * be allocated.
 */
int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}


/*
 * Task Queues DDI interfaces.
 */

/* ARGSUSED */
ddi_taskq_t *
ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
    pri_t pri, uint_t cflags)
{
	char full_name[TASKQ_NAMELEN];
	const char *tq_name;
	int nodeid = 0;

	/* with a dip, the queue is named "<driver>_<name>" per instance */
	if (dip == NULL)
		tq_name = name;
	else {
		nodeid = ddi_get_instance(dip);

		if (name == NULL)
			name = "tq";

		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
		    ddi_driver_name(dip), name);

		tq_name = full_name;
	}

	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
}

void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}

/*
 * Dispatch func(arg) on the taskq; DDI_SLEEP maps to TQ_SLEEP,
 * anything else to TQ_NOSLEEP.
 */
int
ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
    void *arg, uint_t dflags)
{
	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);

	return (id != 0 ?
DDI_SUCCESS : DDI_FAILURE); 9198 } 9199 9200 void 9201 ddi_taskq_wait(ddi_taskq_t *tq) 9202 { 9203 taskq_wait((taskq_t *)tq); 9204 } 9205 9206 void 9207 ddi_taskq_suspend(ddi_taskq_t *tq) 9208 { 9209 taskq_suspend((taskq_t *)tq); 9210 } 9211 9212 boolean_t 9213 ddi_taskq_suspended(ddi_taskq_t *tq) 9214 { 9215 return (taskq_suspended((taskq_t *)tq)); 9216 } 9217 9218 void 9219 ddi_taskq_resume(ddi_taskq_t *tq) 9220 { 9221 taskq_resume((taskq_t *)tq); 9222 } 9223 9224 int 9225 ddi_parse( 9226 const char *ifname, 9227 char *alnum, 9228 uint_t *nump) 9229 { 9230 const char *p; 9231 int l; 9232 ulong_t num; 9233 boolean_t nonum = B_TRUE; 9234 char c; 9235 9236 l = strlen(ifname); 9237 for (p = ifname + l; p != ifname; l--) { 9238 c = *--p; 9239 if (!isdigit(c)) { 9240 (void) strlcpy(alnum, ifname, l + 1); 9241 if (ddi_strtoul(p + 1, NULL, 10, &num) != 0) 9242 return (DDI_FAILURE); 9243 break; 9244 } 9245 nonum = B_FALSE; 9246 } 9247 if (l == 0 || nonum) 9248 return (DDI_FAILURE); 9249 9250 *nump = num; 9251 return (DDI_SUCCESS); 9252 } 9253 9254 /* 9255 * Default initialization function for drivers that don't need to quiesce. 9256 */ 9257 /* ARGSUSED */ 9258 int 9259 ddi_quiesce_not_needed(dev_info_t *dip) 9260 { 9261 return (DDI_SUCCESS); 9262 } 9263 9264 /* 9265 * Initialization function for drivers that should implement quiesce() 9266 * but haven't yet. 
9267 */ 9268 /* ARGSUSED */ 9269 int 9270 ddi_quiesce_not_supported(dev_info_t *dip) 9271 { 9272 return (DDI_FAILURE); 9273 } 9274 9275 char * 9276 ddi_strdup(const char *str, int flag) 9277 { 9278 int n; 9279 char *ptr; 9280 9281 ASSERT(str != NULL); 9282 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP)); 9283 9284 n = strlen(str); 9285 if ((ptr = kmem_alloc(n + 1, flag)) == NULL) 9286 return (NULL); 9287 bcopy(str, ptr, n + 1); 9288 return (ptr); 9289 } 9290 9291 char * 9292 strdup(const char *str) 9293 { 9294 return (ddi_strdup(str, KM_SLEEP)); 9295 } 9296 9297 void 9298 strfree(char *str) 9299 { 9300 ASSERT(str != NULL); 9301 kmem_free(str, strlen(str) + 1); 9302 } 9303 9304 /* 9305 * Generic DDI callback interfaces. 9306 */ 9307 9308 int 9309 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc, 9310 void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp) 9311 { 9312 ddi_cb_t *cbp; 9313 9314 ASSERT(dip != NULL); 9315 ASSERT(DDI_CB_FLAG_VALID(flags)); 9316 ASSERT(cbfunc != NULL); 9317 ASSERT(ret_hdlp != NULL); 9318 9319 /* Sanity check the context */ 9320 ASSERT(!servicing_interrupt()); 9321 if (servicing_interrupt()) 9322 return (DDI_FAILURE); 9323 9324 /* Validate parameters */ 9325 if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) || 9326 (cbfunc == NULL) || (ret_hdlp == NULL)) 9327 return (DDI_EINVAL); 9328 9329 /* Check for previous registration */ 9330 if (DEVI(dip)->devi_cb_p != NULL) 9331 return (DDI_EALREADY); 9332 9333 /* Allocate and initialize callback */ 9334 cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP); 9335 cbp->cb_dip = dip; 9336 cbp->cb_func = cbfunc; 9337 cbp->cb_arg1 = arg1; 9338 cbp->cb_arg2 = arg2; 9339 cbp->cb_flags = flags; 9340 DEVI(dip)->devi_cb_p = cbp; 9341 9342 /* If adding an IRM callback, notify IRM */ 9343 if (flags & DDI_CB_FLAG_INTR) 9344 i_ddi_irm_set_cb(dip, B_TRUE); 9345 9346 *ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p); 9347 return (DDI_SUCCESS); 9348 } 9349 9350 int 9351 
ddi_cb_unregister(ddi_cb_handle_t hdl) 9352 { 9353 ddi_cb_t *cbp; 9354 dev_info_t *dip; 9355 9356 ASSERT(hdl != NULL); 9357 9358 /* Sanity check the context */ 9359 ASSERT(!servicing_interrupt()); 9360 if (servicing_interrupt()) 9361 return (DDI_FAILURE); 9362 9363 /* Validate parameters */ 9364 if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) || 9365 ((dip = cbp->cb_dip) == NULL)) 9366 return (DDI_EINVAL); 9367 9368 /* If removing an IRM callback, notify IRM */ 9369 if (cbp->cb_flags & DDI_CB_FLAG_INTR) 9370 i_ddi_irm_set_cb(dip, B_FALSE); 9371 9372 /* Destroy the callback */ 9373 kmem_free(cbp, sizeof (ddi_cb_t)); 9374 DEVI(dip)->devi_cb_p = NULL; 9375 9376 return (DDI_SUCCESS); 9377 } 9378