1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
28 */
29
30 /*
31 * Copyright 2023 Oxide Computer Company
32 */
33
34 #include <sys/types.h>
35 #include <sys/cmn_err.h>
36 #include <sys/conf.h>
37 #include <sys/debug.h>
38 #include <sys/errno.h>
39 #include <sys/note.h>
40 #include <sys/dditypes.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/sunndi.h>
44 #include <sys/ddi_impldefs.h>
45 #include <sys/ndi_impldefs.h>
46 #include <sys/varargs.h>
47 #include <sys/modctl.h>
48 #include <sys/kmem.h>
49 #include <sys/cpuvar.h>
50 #include <sys/cpupart.h>
51 #include <sys/mem_config.h>
52 #include <sys/mem_cage.h>
53 #include <sys/memnode.h>
54 #include <sys/callb.h>
55 #include <sys/ontrap.h>
56 #include <sys/obpdefs.h>
57 #include <sys/promif.h>
58 #include <sys/synch.h>
59 #include <sys/systm.h>
60 #include <sys/sysmacros.h>
61 #include <sys/archsystm.h>
62 #include <sys/machsystm.h>
63 #include <sys/x_call.h>
64 #include <sys/x86_archext.h>
65 #include <sys/fastboot_impl.h>
66 #include <sys/sysevent.h>
67 #include <sys/sysevent/dr.h>
68 #include <sys/sysevent/eventdefs.h>
69 #include <sys/acpi/acpi.h>
70 #include <sys/acpica.h>
71 #include <sys/acpidev.h>
72 #include <sys/acpidev_rsc.h>
73 #include <sys/acpidev_dr.h>
74 #include <sys/dr.h>
75 #include <sys/dr_util.h>
76 #include <sys/drmach.h>
77 #include "drmach_acpi.h"
78
/* utility */
#define MBYTE (1048576ull)
/* Convert between page counts and 64-bit byte counts. */
#define _ptob64(p) ((uint64_t)(p) << PAGESHIFT)
#define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))

/* Module init/fini and helpers private to this file. */
static int drmach_init(void);
static void drmach_fini(void);
/* Map an ACPI device node name to an index into drmach_name2type[]. */
static int drmach_name2type_idx(char *);
static sbd_error_t *drmach_mem_update_lgrp(drmachid_t);

/* Board object dispose/release/status callbacks. */
static void drmach_board_dispose(drmachid_t id);
static sbd_error_t *drmach_board_release(drmachid_t);
static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *);

/* I/O device dispose/release/status callbacks. */
static void drmach_io_dispose(drmachid_t);
static sbd_error_t *drmach_io_release(drmachid_t);
static sbd_error_t *drmach_io_status(drmachid_t, drmach_status_t *);

/* CPU device dispose/release/status callbacks. */
static void drmach_cpu_dispose(drmachid_t);
static sbd_error_t *drmach_cpu_release(drmachid_t);
static sbd_error_t *drmach_cpu_status(drmachid_t, drmach_status_t *);

/* Memory device dispose/release/status callbacks. */
static void drmach_mem_dispose(drmachid_t);
static sbd_error_t *drmach_mem_release(drmachid_t);
static sbd_error_t *drmach_mem_status(drmachid_t, drmach_status_t *);
104
#ifdef DEBUG
int drmach_debug = 1; /* set to non-zero to enable debug messages */
#endif /* DEBUG */

/* Domain-wide DR state; allow_dr is set from acpidev_dr_capable(). */
drmach_domain_info_t drmach_domain;

/* Format string used when constructing internal error messages. */
static char *drmach_ie_fmt = "drmach_acpi.c %d";
/* Array of drmach_board_t pointers, indexed by board number. */
static drmach_array_t *drmach_boards;

/* rwlock to protect drmach_boards. */
static krwlock_t drmach_boards_rwlock;

/* rwlock to block out CPR thread. */
static krwlock_t drmach_cpr_rwlock;

/* CPR callb id. */
static callb_id_t drmach_cpr_cid;

/*
 * Table mapping an ACPI device node name to the drmach device type and
 * the type-specific constructor used by drmach_device_new().
 */
static struct {
	const char *name;
	const char *type;
	sbd_error_t *(*new)(drmach_device_t *, drmachid_t *);
} drmach_name2type[] = {
	{ ACPIDEV_NODE_NAME_CPU, DRMACH_DEVTYPE_CPU, drmach_cpu_new },
	{ ACPIDEV_NODE_NAME_MEMORY, DRMACH_DEVTYPE_MEM, drmach_mem_new },
	{ ACPIDEV_NODE_NAME_PCI, DRMACH_DEVTYPE_PCI, drmach_io_new },
};
132
/*
 * drmach autoconfiguration data structures and interfaces
 */
/* Miscellaneous (non-driver) loadable module descriptor. */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"ACPI based DR v1.0"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
146
147 int
_init(void)148 _init(void)
149 {
150 int err;
151
152 if ((err = drmach_init()) != 0) {
153 return (err);
154 }
155
156 if ((err = mod_install(&modlinkage)) != 0) {
157 drmach_fini();
158 }
159
160 return (err);
161 }
162
163 int
_fini(void)164 _fini(void)
165 {
166 int err;
167
168 if ((err = mod_remove(&modlinkage)) == 0) {
169 drmach_fini();
170 }
171
172 return (err);
173 }
174
/*
 * Report module information via the standard modctl interface.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
180
181 /*
182 * Internal support functions.
183 */
/*
 * Return the ACPI handle cached in the node's "here" field.
 */
static DRMACH_HANDLE
drmach_node_acpi_get_dnode(drmach_node_t *np)
{
	return ((DRMACH_HANDLE)(uintptr_t)np->here);
}
189
190 static dev_info_t *
drmach_node_acpi_get_dip(drmach_node_t * np)191 drmach_node_acpi_get_dip(drmach_node_t *np)
192 {
193 dev_info_t *dip = NULL;
194
195 if (ACPI_FAILURE(acpica_get_devinfo((DRMACH_HANDLE)(np->here), &dip))) {
196 return (NULL);
197 }
198
199 return (dip);
200 }
201
202 static int
drmach_node_acpi_get_prop(drmach_node_t * np,char * name,void * buf,int len)203 drmach_node_acpi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
204 {
205 int rv = 0;
206 DRMACH_HANDLE hdl;
207
208 hdl = np->get_dnode(np);
209 if (hdl == NULL) {
210 DRMACH_PR("!drmach_node_acpi_get_prop: NULL handle");
211 rv = -1;
212 } else {
213 rv = acpidev_dr_device_getprop(hdl, name, buf, len);
214 if (rv >= 0) {
215 ASSERT(rv <= len);
216 rv = 0;
217 }
218 }
219
220 return (rv);
221 }
222
223 static int
drmach_node_acpi_get_proplen(drmach_node_t * np,char * name,int * len)224 drmach_node_acpi_get_proplen(drmach_node_t *np, char *name, int *len)
225 {
226 int rv = 0;
227 DRMACH_HANDLE hdl;
228
229 hdl = np->get_dnode(np);
230 if (hdl == NULL) {
231 DRMACH_PR("!drmach_node_acpi_get_proplen: NULL handle");
232 rv = -1;
233 } else {
234 rv = acpidev_dr_device_getprop(hdl, name, NULL, 0);
235 if (rv >= 0) {
236 *len = rv;
237 return (0);
238 }
239 }
240
241 return (-1);
242 }
243
/*
 * Per-object callback run by acpidev_dr_device_walk_device() for each ACPI
 * object under a board.  Invokes the caller's walker function (stashed in
 * the walk args) on each powered object that has a devinfo node, pruning
 * unpowered subtrees and the descendants of PCI/PCIex host bridges.
 */
static ACPI_STATUS
drmach_node_acpi_callback(ACPI_HANDLE hdl, uint_t lvl, void *ctx, void **retval)
{
	_NOTE(ARGUNUSED(lvl));

	int rv;
	dev_info_t *dip;
	drmach_node_walk_args_t *argp = ctx;
	int (*cb)(drmach_node_walk_args_t *args);
	acpidev_class_id_t clsid;

	ASSERT(hdl != NULL);
	ASSERT(ctx != NULL);
	ASSERT(retval != NULL);

	/* Skip subtree if the device is not powered. */
	if (!acpidev_dr_device_is_powered(hdl)) {
		return (AE_CTRL_DEPTH);
	}

	/*
	 * Keep scanning subtree if it fails to lookup device node.
	 * There may be some ACPI objects without device nodes created.
	 */
	if (ACPI_FAILURE(acpica_get_devinfo(hdl, &dip))) {
		return (AE_OK);
	}

	/* Point the walk node at this object only for the callback's run. */
	argp->node->here = hdl;
	cb = (int (*)(drmach_node_walk_args_t *args))argp->func;
	rv = (*cb)(argp);
	argp->node->here = NULL;
	/* A non-zero callback result stops the walk and is passed back. */
	if (rv) {
		*(int *)retval = rv;
		return (AE_CTRL_TERMINATE);
	}

	/*
	 * Skip descendants of PCI/PCIex host bridges.
	 * PCI/PCIex devices will be handled by pcihp.
	 */
	clsid = acpidev_dr_device_get_class(hdl);
	if (clsid == ACPIDEV_CLASS_ID_PCI || clsid == ACPIDEV_CLASS_ID_PCIEX) {
		return (AE_CTRL_DEPTH);
	}

	return (AE_OK);
}
292
/*
 * Walk all powered ACPI devices below np, applying cb to each via
 * drmach_node_acpi_callback().  Returns 0 on success, cb's non-zero
 * result if it terminated the walk, or an EX86_* error code.
 */
static int
drmach_node_acpi_walk(drmach_node_t *np, void *data,
    int (*cb)(drmach_node_walk_args_t *args))
{
	DRMACH_HANDLE hdl;
	int rv = 0;
	drmach_node_walk_args_t args;

	/* initialize the args structure for callback */
	args.node = np;
	args.data = data;
	args.func = (void *)cb;

	/* save the handle, it will be modified when walking the tree. */
	hdl = np->get_dnode(np);
	if (hdl == NULL) {
		DRMACH_PR("!drmach_node_acpi_walk: failed to get device node.");
		return (EX86_INAPPROP);
	}

	if (ACPI_FAILURE(acpidev_dr_device_walk_device(hdl,
	    ACPIDEV_MAX_ENUM_LEVELS, drmach_node_acpi_callback,
	    &args, (void *)&rv))) {
		/*
		 * If acpidev_dr_device_walk_device() itself fails, rv won't
		 * be set to suitable error code. Set it here.
		 */
		if (rv == 0) {
			cmn_err(CE_WARN, "!drmach_node_acpi_walk: failed to "
			    "walk ACPI namespace.");
			rv = EX86_ACPIWALK;
		}
	}

	/* restore the handle to original value after walking the tree. */
	np->here = (void *)hdl;

	return ((int)rv);
}
332
333 static drmach_node_t *
drmach_node_new(void)334 drmach_node_new(void)
335 {
336 drmach_node_t *np;
337
338 np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
339
340 np->get_dnode = drmach_node_acpi_get_dnode;
341 np->getdip = drmach_node_acpi_get_dip;
342 np->getproplen = drmach_node_acpi_get_proplen;
343 np->getprop = drmach_node_acpi_get_prop;
344 np->walk = drmach_node_acpi_walk;
345
346 return (np);
347 }
348
349 static drmachid_t
drmach_node_dup(drmach_node_t * np)350 drmach_node_dup(drmach_node_t *np)
351 {
352 drmach_node_t *dup;
353
354 dup = drmach_node_new();
355 dup->here = np->here;
356 dup->get_dnode = np->get_dnode;
357 dup->getdip = np->getdip;
358 dup->getproplen = np->getproplen;
359 dup->getprop = np->getprop;
360 dup->walk = np->walk;
361
362 return (dup);
363 }
364
/*
 * Free a node allocated by drmach_node_new()/drmach_node_dup().
 */
static void
drmach_node_dispose(drmach_node_t *np)
{
	kmem_free(np, sizeof (*np));
}
370
/*
 * Thin dispatcher onto the node's walk method.
 */
static int
drmach_node_walk(drmach_node_t *np, void *param,
    int (*cb)(drmach_node_walk_args_t *args))
{
	return (np->walk(np, param, cb));
}
377
/*
 * Thin dispatcher onto the node's get_dnode method.
 */
static DRMACH_HANDLE
drmach_node_get_dnode(drmach_node_t *np)
{
	return (np->get_dnode(np));
}
383
384 /*
385 * drmach_array provides convenient array construction, access,
386 * bounds checking and array destruction logic.
387 */
388 static drmach_array_t *
drmach_array_new(uint_t min_index,uint_t max_index)389 drmach_array_new(uint_t min_index, uint_t max_index)
390 {
391 drmach_array_t *arr;
392
393 arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
394
395 arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
396 if (arr->arr_sz > 0) {
397 arr->min_index = min_index;
398 arr->max_index = max_index;
399
400 arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
401 return (arr);
402 } else {
403 kmem_free(arr, sizeof (*arr));
404 return (0);
405 }
406 }
407
408 static int
drmach_array_set(drmach_array_t * arr,uint_t idx,drmachid_t val)409 drmach_array_set(drmach_array_t *arr, uint_t idx, drmachid_t val)
410 {
411 if (idx < arr->min_index || idx > arr->max_index)
412 return (-1);
413 arr->arr[idx - arr->min_index] = val;
414 return (0);
415 }
416
417 /*
418 * Get the item with index idx.
419 * Return 0 with the value stored in val if succeeds, otherwise return -1.
420 */
421 static int
drmach_array_get(drmach_array_t * arr,uint_t idx,drmachid_t * val)422 drmach_array_get(drmach_array_t *arr, uint_t idx, drmachid_t *val)
423 {
424 if (idx < arr->min_index || idx > arr->max_index)
425 return (-1);
426 *val = arr->arr[idx - arr->min_index];
427 return (0);
428 }
429
430 static int
drmach_array_first(drmach_array_t * arr,uint_t * idx,drmachid_t * val)431 drmach_array_first(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
432 {
433 int rv;
434
435 *idx = arr->min_index;
436 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
437 *idx += 1;
438
439 return (rv);
440 }
441
442 static int
drmach_array_next(drmach_array_t * arr,uint_t * idx,drmachid_t * val)443 drmach_array_next(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
444 {
445 int rv;
446
447 *idx += 1;
448 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
449 *idx += 1;
450
451 return (rv);
452 }
453
454 static void
drmach_array_dispose(drmach_array_t * arr,void (* disposer)(drmachid_t))455 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
456 {
457 drmachid_t val;
458 uint_t idx;
459 int rv;
460
461 rv = drmach_array_first(arr, &idx, &val);
462 while (rv == 0) {
463 (*disposer)(val);
464 rv = drmach_array_next(arr, &idx, &val);
465 }
466
467 kmem_free(arr->arr, arr->arr_sz);
468 kmem_free(arr, sizeof (*arr));
469 }
470
471 static drmach_board_t *
drmach_get_board_by_bnum(uint_t bnum)472 drmach_get_board_by_bnum(uint_t bnum)
473 {
474 drmachid_t id;
475
476 if (drmach_array_get(drmach_boards, bnum, &id) == 0)
477 return ((drmach_board_t *)id);
478 else
479 return (NULL);
480 }
481
/*
 * Create a drmach device object for the given ACPI node.  The node's
 * device-name property selects the type-specific constructor from
 * drmach_name2type[]; nodes of no interest to DR yield *idp == NULL with
 * a NULL (success) return.
 */
sbd_error_t *
drmach_device_new(drmach_node_t *node,
    drmach_board_t *bp, int portid, drmachid_t *idp)
{
	int i;
	int rv;
	drmach_device_t proto;
	sbd_error_t *err;
	char name[OBP_MAXDRVNAME];

	rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME, name, OBP_MAXDRVNAME);
	if (rv) {
		/* every node is expected to have a name */
		err = drerr_new(1, EX86_GETPROP, "device node %s: property %s",
		    ddi_node_name(node->getdip(node)),
		    ACPIDEV_DR_PROP_DEVNAME);
		return (err);
	}

	/*
	 * The node currently being examined is not listed in the name2type[]
	 * array. In this case, the node is no interest to drmach. Both
	 * dp and err are initialized here to yield nothing (no device or
	 * error structure) for this case.
	 */
	i = drmach_name2type_idx(name);
	if (i < 0) {
		*idp = (drmachid_t)0;
		return (NULL);
	}

	/* device specific new function will set unum */
	bzero(&proto, sizeof (proto));
	proto.type = drmach_name2type[i].type;
	proto.bp = bp;
	proto.node = node;
	proto.portid = portid;

	return (drmach_name2type[i].new(&proto, idp));
}
522
/*
 * Dispose a device object by delegating to its type-specific dispose
 * callback (set by drmach_cpu_new/drmach_mem_new/drmach_io_new).
 */
static void
drmach_device_dispose(drmachid_t id)
{
	drmach_device_t *self = id;

	self->cm.dispose(id);
}
530
531 static sbd_error_t *
drmach_device_status(drmachid_t id,drmach_status_t * stat)532 drmach_device_status(drmachid_t id, drmach_status_t *stat)
533 {
534 drmach_common_t *cp;
535
536 if (!DRMACH_IS_ID(id))
537 return (drerr_new(0, EX86_NOTID, NULL));
538 cp = id;
539
540 return (cp->status(id, stat));
541 }
542
/*
 * Create and register the board object for board number bnum, resolving
 * its ACPI handle and initial power/assignment/connection state.
 * boot_board is non-zero for boards present at boot.  Returns NULL when
 * the board's ACPI handle or name cannot be obtained.
 */
drmach_board_t *
drmach_board_new(uint_t bnum, int boot_board)
{
	sbd_error_t *err;
	drmach_board_t *bp;
	dev_info_t *dip = NULL;

	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
	/* cm.isa serves as a type tag checked by DRMACH_IS_BOARD_ID(). */
	bp->cm.isa = (void *)drmach_board_new;
	bp->cm.release = drmach_board_release;
	bp->cm.status = drmach_board_status;

	bp->bnum = bnum;
	bp->devices = NULL;
	bp->tree = drmach_node_new();

	/* Resolve the board's ACPI handle under the acpidev DR locks. */
	acpidev_dr_lock_all();
	if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &bp->tree->here))) {
		acpidev_dr_unlock_all();
		drmach_board_dispose(bp);
		return (NULL);
	}
	acpidev_dr_unlock_all();
	ASSERT(bp->tree->here != NULL);

	err = drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
	if (err != NULL) {
		sbd_err_clear(&err);
		drmach_board_dispose(bp);
		return (NULL);
	}

	/* Only a powered board can be a boot board. */
	if (acpidev_dr_device_is_powered(bp->tree->here)) {
		bp->boot_board = boot_board;
		bp->powered = 1;
	} else {
		bp->boot_board = 0;
		bp->powered = 0;
	}
	bp->assigned = boot_board;
	/* Connected means a devinfo node exists for the board's handle. */
	if (ACPI_SUCCESS(acpica_get_devinfo(bp->tree->here, &dip))) {
		bp->connected = 1;
	} else {
		bp->connected = 0;
	}

	(void) drmach_array_set(drmach_boards, bnum, bp);

	return (bp);
}
593
594 static void
drmach_board_dispose(drmachid_t id)595 drmach_board_dispose(drmachid_t id)
596 {
597 drmach_board_t *bp;
598
599 ASSERT(DRMACH_IS_BOARD_ID(id));
600 bp = id;
601
602 if (bp->tree)
603 drmach_node_dispose(bp->tree);
604
605 if (bp->devices)
606 drmach_array_dispose(bp->devices, drmach_device_dispose);
607
608 kmem_free(bp, sizeof (drmach_board_t));
609 }
610
611 static sbd_error_t *
drmach_board_release(drmachid_t id)612 drmach_board_release(drmachid_t id)
613 {
614 if (!DRMACH_IS_BOARD_ID(id))
615 return (drerr_new(0, EX86_INAPPROP, NULL));
616
617 return (NULL);
618 }
619
620 static int
drmach_board_check_power(drmach_board_t * bp)621 drmach_board_check_power(drmach_board_t *bp)
622 {
623 DRMACH_HANDLE hdl;
624
625 hdl = drmach_node_get_dnode(bp->tree);
626
627 return (acpidev_dr_device_is_powered(hdl));
628 }
629
/*
 * Scratch state threaded through the ACPI eject/dependency list walks to
 * build a space-separated board-name string in a caller-supplied buffer.
 */
struct drmach_board_list_dep_arg {
	int count;		/* number of board names appended so far */
	size_t len;		/* total size of buf */
	ssize_t off;		/* current write offset into buf */
	char *buf;		/* caller-supplied output buffer */
	char temp[MAXPATHLEN];	/* scratch space for one board name */
};
637
/*
 * acpidev walk callback: append the board name of each board-class device
 * to the buffer carried in ctx, stopping the walk once the buffer fills.
 */
static ACPI_STATUS
drmach_board_generate_name(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
    void **retval)
{
	_NOTE(ARGUNUSED(retval));

	struct drmach_board_list_dep_arg *argp = ctx;

	ASSERT(hdl != NULL);
	ASSERT(lvl == UINT32_MAX);
	ASSERT(ctx != NULL);

	/* Skip non-board devices. */
	if (!acpidev_dr_device_is_board(hdl)) {
		return (AE_OK);
	}

	if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, argp->temp,
	    sizeof (argp->temp)))) {
		DRMACH_PR("!drmach_board_generate_name: failed to "
		    "generate board name for handle %p.", hdl);
		/* Keep on walking. */
		return (AE_OK);
	}
	argp->count++;
	/* snprintf returns the would-be length, so off can exceed len. */
	argp->off += snprintf(argp->buf + argp->off, argp->len - argp->off,
	    " %s", argp->temp);
	if (argp->off >= argp->len) {
		return (AE_CTRL_TERMINATE);
	}

	return (AE_OK);
}
671
/*
 * Build into buf the list of boards that hdl's device depends on.  With
 * edl set, the ACPI eject device list (_EDL) is walked; otherwise the
 * eject dependency (_EJD) chain.  prefix is written first.  Returns the
 * number of characters written, 0 when there are no dependencies, or -1
 * on failure or buffer overflow.
 */
static ssize_t
drmach_board_list_dependency(ACPI_HANDLE hdl, boolean_t edl, char *prefix,
    char *buf, size_t len)
{
	ACPI_STATUS rc;
	ssize_t off;
	struct drmach_board_list_dep_arg *ap;

	ASSERT(buf != NULL && len != 0);
	if (buf == NULL || len == 0) {
		return (-1);
	}

	/* Heap-allocated because the arg embeds a MAXPATHLEN buffer. */
	ap = kmem_zalloc(sizeof (*ap), KM_SLEEP);
	ap->buf = buf;
	ap->len = len;
	ap->off = snprintf(buf, len, "%s", prefix);
	if (ap->off >= len) {
		*buf = '\0';
		kmem_free(ap, sizeof (*ap));
		return (-1);
	}

	/* Generate the device dependency list. */
	if (edl) {
		rc = acpidev_dr_device_walk_edl(hdl,
		    drmach_board_generate_name, ap, NULL);
	} else {
		rc = acpidev_dr_device_walk_ejd(hdl,
		    drmach_board_generate_name, ap, NULL);
	}
	if (ACPI_FAILURE(rc)) {
		*buf = '\0';
		ap->off = -1;
		/* No device has dependency on this board. */
	} else if (ap->count == 0) {
		*buf = '\0';
		ap->off = 0;
	}

	off = ap->off;
	kmem_free(ap, sizeof (*ap));

	return (off);
}
717
/*
 * Fill in stat for a board: power/presence/condition straight from ACPI
 * (also refreshed into bp), the eject device/dependency lists in the info
 * field, a board-type string, and busy/configured state aggregated from
 * the board's devices.
 */
static sbd_error_t *
drmach_board_status(drmachid_t id, drmach_status_t *stat)
{
	sbd_error_t *err = NULL;
	drmach_board_t *bp;
	DRMACH_HANDLE hdl;
	size_t off;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	if (bp->tree == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));
	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	stat->busy = 0;			/* assume not busy */
	stat->configured = 0;		/* assume not configured */
	stat->assigned = bp->assigned;
	/* Refresh cached power and condition state while reporting it. */
	stat->powered = bp->powered = acpidev_dr_device_is_powered(hdl);
	stat->empty = !acpidev_dr_device_is_present(hdl);
	if (ACPI_SUCCESS(acpidev_dr_device_check_status(hdl))) {
		stat->cond = bp->cond = SBD_COND_OK;
	} else {
		stat->cond = bp->cond = SBD_COND_FAILED;
	}
	stat->info[0] = '\0';

	/* Generate the eject device list. */
	if (drmach_board_list_dependency(hdl, B_TRUE, "EDL:",
	    stat->info, sizeof (stat->info)) < 0) {
		DRMACH_PR("!drmach_board_status: failed to generate "
		    "eject device list for board %d.", bp->bnum);
		stat->info[0] = '\0';
	}
	/* Append the eject dependency list after whatever EDL produced. */
	off = strlen(stat->info);
	if (off < sizeof (stat->info)) {
		if (drmach_board_list_dependency(hdl, B_FALSE,
		    off ? ", EJD:" : "EJD:",
		    stat->info + off, sizeof (stat->info) - off) < 0) {
			DRMACH_PR("!drmach_board_status: failed to generate "
			    "eject dependent device for board %d.", bp->bnum);
			stat->info[off] = '\0';
		}
	}

	switch (acpidev_dr_get_board_type(bp->tree->get_dnode(bp->tree))) {
	case ACPIDEV_CPU_BOARD:
		(void) strlcpy(stat->type, "CPU Board", sizeof (stat->type));
		break;
	case ACPIDEV_MEMORY_BOARD:
		(void) strlcpy(stat->type, "MemoryBoard", sizeof (stat->type));
		break;
	case ACPIDEV_IO_BOARD:
		(void) strlcpy(stat->type, "IO Board", sizeof (stat->type));
		break;
	case ACPIDEV_SYSTEM_BOARD:
		/*FALLTHROUGH*/
	default:
		(void) strlcpy(stat->type, "SystemBoard", sizeof (stat->type));
		break;
	}

	/* A board is busy/configured if any of its devices is. */
	if (bp->devices) {
		int rv;
		uint_t d_idx;
		drmachid_t d_id;

		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			drmach_status_t d_stat;

			err = drmach_device_status(d_id, &d_stat);
			if (err)
				break;

			stat->busy |= d_stat.busy;
			stat->configured |= d_stat.configured;

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	return (err);
}
805
806 /*
807 * When DR is initialized, we walk the device tree and acquire a hold on
808 * all the nodes that are interesting to DR. This is so that the corresponding
809 * branches cannot be deleted.
810 */
/*
 * ddi_walk_devs() callback: hold (arg != 0) or release (arg == 0) the
 * devinfo branch rooted at rdip when it corresponds to a DR-capable
 * acpidev device.  Subtrees not created by acpidev are pruned.
 */
static int
drmach_hold_rele_devtree(dev_info_t *rdip, void *arg)
{
	int *holdp = (int *)arg;
	ACPI_HANDLE hdl = NULL;
	acpidev_data_handle_t dhdl;

	/* Skip nodes and subtrees which are not created by acpidev. */
	if (ACPI_FAILURE(acpica_get_handle(rdip, &hdl))) {
		return (DDI_WALK_PRUNECHILD);
	}
	ASSERT(hdl != NULL);
	dhdl = acpidev_data_get_handle(hdl);
	if (dhdl == NULL) {
		return (DDI_WALK_PRUNECHILD);
	}

	/* Hold/release devices which are interesting to DR operations. */
	if (acpidev_data_dr_ready(dhdl)) {
		if (*holdp) {
			ASSERT(!e_ddi_branch_held(rdip));
			e_ddi_branch_hold(rdip);
		} else {
			ASSERT(e_ddi_branch_held(rdip));
			e_ddi_branch_rele(rdip);
		}
	}

	return (DDI_WALK_CONTINUE);
}
841
842 static void
drmach_hold_devtree(void)843 drmach_hold_devtree(void)
844 {
845 dev_info_t *dip;
846 int hold = 1;
847
848 dip = ddi_root_node();
849 ndi_devi_enter(dip);
850 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
851 ndi_devi_exit(dip);
852 }
853
854 static void
drmach_release_devtree(void)855 drmach_release_devtree(void)
856 {
857 dev_info_t *dip;
858 int hold = 0;
859
860 dip = ddi_root_node();
861 ndi_devi_enter(dip);
862 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
863 ndi_devi_exit(dip);
864 }
865
866 static boolean_t
drmach_cpr_callb(void * arg,int code)867 drmach_cpr_callb(void *arg, int code)
868 {
869 _NOTE(ARGUNUSED(arg));
870
871 if (code == CB_CODE_CPR_CHKPT) {
872 /*
873 * Temporarily block CPR operations if there are DR operations
874 * ongoing.
875 */
876 rw_enter(&drmach_cpr_rwlock, RW_WRITER);
877 } else {
878 rw_exit(&drmach_cpr_rwlock);
879 }
880
881 return (B_TRUE);
882 }
883
/*
 * One-time drmach initialization: validate platform limits against what
 * the DR framework can represent, set up the CPR callback and board
 * array, create board objects for all boards with ACPI handles, and hold
 * the DR-relevant devinfo branches.  Returns 0 or ENXIO.
 */
static int
drmach_init(void)
{
	DRMACH_HANDLE hdl;
	drmachid_t id;
	uint_t bnum;

	/* Refuse to load on configurations the DR driver cannot handle. */
	if (MAX_BOARDS > SHRT_MAX) {
		cmn_err(CE_WARN, "!drmach_init: system has too many (%d) "
		    "hotplug capable boards.", MAX_BOARDS);
		return (ENXIO);
	} else if (MAX_CMP_UNITS_PER_BOARD > 1) {
		cmn_err(CE_WARN, "!drmach_init: DR doesn't support multiple "
		    "(%d) physical processors on one board.",
		    MAX_CMP_UNITS_PER_BOARD);
		return (ENXIO);
	} else if (!ISP2(MAX_CORES_PER_CMP)) {
		cmn_err(CE_WARN, "!drmach_init: number of logical CPUs (%d) in "
		    "physical processor is not power of 2.",
		    MAX_CORES_PER_CMP);
		return (ENXIO);
	} else if (MAX_CPU_UNITS_PER_BOARD > DEVSET_CPU_NUMBER ||
	    MAX_MEM_UNITS_PER_BOARD > DEVSET_MEM_NUMBER ||
	    MAX_IO_UNITS_PER_BOARD > DEVSET_IO_NUMBER) {
		cmn_err(CE_WARN, "!drmach_init: system has more CPU/memory/IO "
		    "units than the DR driver can handle.");
		return (ENXIO);
	}

	rw_init(&drmach_cpr_rwlock, NULL, RW_DEFAULT, NULL);
	drmach_cpr_cid = callb_add(drmach_cpr_callb, NULL,
	    CB_CL_CPR_PM, "drmach");

	rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);
	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
	drmach_domain.allow_dr = acpidev_dr_capable();

	/* Create a board object for every board that has an ACPI handle. */
	for (bnum = 0; bnum < MAX_BOARDS; bnum++) {
		hdl = NULL;
		if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl)) ||
		    hdl == NULL) {
			cmn_err(CE_WARN, "!drmach_init: failed to lookup ACPI "
			    "handle for board %d.", bnum);
			continue;
		}
		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
			DRMACH_PR("!drmach_init: failed to get handle "
			    "for board %d.", bnum);
			ASSERT(0);
			goto error;
		} else if (id == NULL) {
			/* Boards found here were present at boot. */
			(void) drmach_board_new(bnum, 1);
		}
	}

	/*
	 * Walk descendants of the devinfo root node and hold
	 * all devinfo branches of interest.
	 */
	drmach_hold_devtree();

	return (0);

error:
	drmach_array_dispose(drmach_boards, drmach_board_dispose);
	rw_destroy(&drmach_boards_rwlock);
	rw_destroy(&drmach_cpr_rwlock);
	return (ENXIO);
}
953
/*
 * Tear down everything drmach_init() set up: the board array, the devinfo
 * branch holds, the CPR callback, and the locks.
 */
static void
drmach_fini(void)
{
	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	if (drmach_boards != NULL) {
		drmach_array_dispose(drmach_boards, drmach_board_dispose);
		drmach_boards = NULL;
	}
	rw_exit(&drmach_boards_rwlock);

	/*
	 * Walk descendants of the root devinfo node
	 * release holds acquired on branches in drmach_init()
	 */
	drmach_release_devtree();

	(void) callb_delete(drmach_cpr_cid);
	rw_destroy(&drmach_cpr_rwlock);
	rw_destroy(&drmach_boards_rwlock);
}
974
975 sbd_error_t *
drmach_io_new(drmach_device_t * proto,drmachid_t * idp)976 drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
977 {
978 drmach_io_t *ip;
979 int portid;
980
981 portid = proto->portid;
982 ASSERT(portid != -1);
983 proto->unum = portid;
984
985 ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
986 bcopy(proto, &ip->dev, sizeof (ip->dev));
987 ip->dev.node = drmach_node_dup(proto->node);
988 ip->dev.cm.isa = (void *)drmach_io_new;
989 ip->dev.cm.dispose = drmach_io_dispose;
990 ip->dev.cm.release = drmach_io_release;
991 ip->dev.cm.status = drmach_io_status;
992 (void) snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
993 ip->dev.type, ip->dev.unum);
994
995 *idp = (drmachid_t)ip;
996
997 return (NULL);
998 }
999
1000 static void
drmach_io_dispose(drmachid_t id)1001 drmach_io_dispose(drmachid_t id)
1002 {
1003 drmach_io_t *self;
1004
1005 ASSERT(DRMACH_IS_IO_ID(id));
1006
1007 self = id;
1008 if (self->dev.node)
1009 drmach_node_dispose(self->dev.node);
1010
1011 kmem_free(self, sizeof (*self));
1012 }
1013
1014 static sbd_error_t *
drmach_io_release(drmachid_t id)1015 drmach_io_release(drmachid_t id)
1016 {
1017 if (!DRMACH_IS_IO_ID(id))
1018 return (drerr_new(0, EX86_INAPPROP, NULL));
1019
1020 return (NULL);
1021 }
1022
/*
 * Fill in stat for an I/O device.  Assigned/powered state comes from the
 * parent board; configured state from drmach_io_is_attached().
 */
static sbd_error_t *
drmach_io_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_device_t *dp;
	sbd_error_t *err;
	int configured;

	ASSERT(DRMACH_IS_IO_ID(id));
	dp = id;

	err = drmach_io_is_attached(id, &configured);
	if (err)
		return (err);

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	stat->configured = (configured != 0);
	stat->busy = dp->busy;
	(void) strlcpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
1046
/*
 * Construct a CPU device object from the prototype filled in by
 * drmach_device_new().  The port id is the CPU's APIC ID; the logical
 * cpuid is resolved from the ACPI object (or -1 when not yet mapped).
 * Always succeeds (returns NULL) and stores the new object in *idp.
 */
sbd_error_t *
drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
{
	int portid;
	processorid_t cpuid;
	drmach_cpu_t *cp = NULL;

	/* the portid is APIC ID of the node */
	portid = proto->portid;
	ASSERT(portid != -1);

	/*
	 * Assume all CPUs are homogeneous and have the same number of
	 * cores/threads.
	 */
	proto->unum = portid % MAX_CPU_UNITS_PER_BOARD;

	cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
	bcopy(proto, &cp->dev, sizeof (cp->dev));
	cp->dev.node = drmach_node_dup(proto->node);
	cp->dev.cm.isa = (void *)drmach_cpu_new;
	cp->dev.cm.dispose = drmach_cpu_dispose;
	cp->dev.cm.release = drmach_cpu_release;
	cp->dev.cm.status = drmach_cpu_status;
	(void) snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
	    cp->dev.type, cp->dev.unum);

	cp->apicid = portid;
	if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
	    drmach_node_get_dnode(proto->node), &cpuid))) {
		cp->cpuid = cpuid;
	} else {
		cp->cpuid = -1;
	}

	/* Mark CPU0 as busy, many other components have dependency on it. */
	if (cp->cpuid == 0) {
		cp->dev.busy = 1;
	}

	*idp = (drmachid_t)cp;

	return (NULL);
}
1091
1092 static void
drmach_cpu_dispose(drmachid_t id)1093 drmach_cpu_dispose(drmachid_t id)
1094 {
1095 drmach_cpu_t *self;
1096
1097 ASSERT(DRMACH_IS_CPU_ID(id));
1098
1099 self = id;
1100 if (self->dev.node)
1101 drmach_node_dispose(self->dev.node);
1102
1103 kmem_free(self, sizeof (*self));
1104 }
1105
1106 static sbd_error_t *
drmach_cpu_release(drmachid_t id)1107 drmach_cpu_release(drmachid_t id)
1108 {
1109 if (!DRMACH_IS_CPU_ID(id))
1110 return (drerr_new(0, EX86_INAPPROP, NULL));
1111
1112 return (NULL);
1113 }
1114
/*
 * Fill in stat for a CPU device.  Assigned/powered state comes from the
 * parent board; the CPU counts as configured when it has a cpu_t.
 */
static sbd_error_t *
drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_cpu_t *cp;
	drmach_device_t *dp;

	ASSERT(DRMACH_IS_CPU_ID(id));
	cp = (drmach_cpu_t *)id;
	dp = &cp->dev;

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	/*
	 * cpuid may be -1 when no logical CPU mapping exists yet --
	 * NOTE(review): assumes cpu_get() handles out-of-range ids by
	 * returning NULL; confirm against the cpu_get() implementation.
	 */
	mutex_enter(&cpu_lock);
	stat->configured = (cpu_get(cp->cpuid) != NULL);
	mutex_exit(&cpu_lock);
	stat->busy = dp->busy;
	(void) strlcpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
1136
/*
 * Gather memory-controller information for memory device 'mp' from the
 * ACPI object 'hdl': the slice alignment, the min/max physical address
 * range spanned by the device, and a memlist describing its memory.
 * Returns 0 on success, -1 on failure.
 */
static int
drmach_setup_mc_info(DRMACH_HANDLE hdl, drmach_mem_t *mp)
{
	uint_t i, j, count;
	struct memlist *ml = NULL, *ml2 = NULL;
	acpidev_regspec_t *regp;
	uint64_t align, addr_min, addr_max, total_size, skipped_size;

	if (hdl == NULL) {
		return (-1);
	} else if (ACPI_FAILURE(acpidev_dr_get_mem_alignment(hdl, &align))) {
		return (-1);
	} else {
		/* Alignment must be a power of two. */
		ASSERT((align & (align - 1)) == 0);
		mp->mem_alignment = align;
	}

	addr_min = UINT64_MAX;
	addr_max = 0;
	total_size = 0;
	skipped_size = 0;
	/*
	 * There's a memory hole just below 4G on x86, which needs special
	 * handling. All other addresses assigned to a specific memory device
	 * should be contiguous.
	 */
	if (ACPI_FAILURE(acpidev_dr_device_get_regspec(hdl, TRUE, &regp,
	    &count))) {
		return (-1);
	}
	/*
	 * Walk the register specs accumulating total size and address
	 * bounds.  'j' counts non-empty segments; segments beyond the
	 * per-device limit are dropped (unless the board was present at
	 * boot) and the dropped size is reported after the loop.
	 */
	for (i = 0, j = 0; i < count; i++) {
		uint64_t addr, size;

		addr = (uint64_t)regp[i].phys_mid << 32;
		addr |= (uint64_t)regp[i].phys_low;
		size = (uint64_t)regp[i].size_hi << 32;
		size |= (uint64_t)regp[i].size_low;
		if (size == 0)
			continue;
		else
			j++;

		total_size += size;
		if (addr < addr_min)
			addr_min = addr;
		if (addr + size > addr_max)
			addr_max = addr + size;
		if (mp->dev.bp->boot_board ||
		    j <= acpidev_dr_max_segments_per_mem_device()) {
			ml = memlist_add_span(ml, addr, size);
		} else {
			skipped_size += size;
		}
	}
	acpidev_dr_device_free_regspec(regp, count);

	if (skipped_size != 0) {
		cmn_err(CE_WARN, "!drmach: too many (%d) segments on memory "
		    "device, max (%d) segments supported, 0x%" PRIx64 " bytes "
		    "of memory skipped.",
		    j, acpidev_dr_max_segments_per_mem_device(), skipped_size);
	}

	mp->slice_base = addr_min;
	mp->slice_top = addr_max;
	mp->slice_size = total_size;

	if (mp->dev.bp->boot_board) {
		uint64_t endpa = _ptob64(physmax + 1);

		/*
		 * we intersect phys_install to get base_pa.
		 * This only works at boot-up time.
		 */
		memlist_read_lock();
		ml2 = memlist_dup(phys_install);
		memlist_read_unlock();

		/* Trim installed memory down to this device's window. */
		ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
		if (ml2 && endpa > addr_max) {
			ml2 = memlist_del_span(ml2, addr_max, endpa - addr_max);
		}
	}

	/*
	 * Create a memlist for the memory board.
	 * The created memlist only contains configured memory if there's
	 * configured memory on the board, otherwise it contains all memory
	 * on the board.
	 */
	if (ml2) {
		uint64_t nbytes = 0;
		struct memlist *p;

		for (p = ml2; p; p = p->ml_next) {
			nbytes += p->ml_size;
		}
		if (nbytes == 0) {
			memlist_delete(ml2);
			ml2 = NULL;
		} else {
			/* Node has configured memory at boot time. */
			mp->base_pa = ml2->ml_address;
			mp->nbytes = nbytes;
			mp->memlist = ml2;
			if (ml)
				memlist_delete(ml);
		}
	}
	if (ml2 == NULL) {
		/* Not configured at boot time. */
		mp->base_pa = UINT64_MAX;
		mp->nbytes = 0;
		mp->memlist = ml;
	}

	return (0);
}
1255
1256 sbd_error_t *
drmach_mem_new(drmach_device_t * proto,drmachid_t * idp)1257 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
1258 {
1259 DRMACH_HANDLE hdl;
1260 drmach_mem_t *mp;
1261 int portid;
1262
1263 mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
1264 portid = proto->portid;
1265 ASSERT(portid != -1);
1266 proto->unum = portid;
1267
1268 bcopy(proto, &mp->dev, sizeof (mp->dev));
1269 mp->dev.node = drmach_node_dup(proto->node);
1270 mp->dev.cm.isa = (void *)drmach_mem_new;
1271 mp->dev.cm.dispose = drmach_mem_dispose;
1272 mp->dev.cm.release = drmach_mem_release;
1273 mp->dev.cm.status = drmach_mem_status;
1274
1275 (void) snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s%d",
1276 mp->dev.type, proto->unum);
1277 hdl = mp->dev.node->get_dnode(mp->dev.node);
1278 ASSERT(hdl != NULL);
1279 if (drmach_setup_mc_info(hdl, mp) != 0) {
1280 kmem_free(mp, sizeof (drmach_mem_t));
1281 *idp = (drmachid_t)NULL;
1282 return (drerr_new(1, EX86_MC_SETUP, NULL));
1283 }
1284
1285 /* make sure we do not create memoryless nodes */
1286 if (mp->nbytes == 0 && mp->slice_size == 0) {
1287 kmem_free(mp, sizeof (drmach_mem_t));
1288 *idp = (drmachid_t)NULL;
1289 } else
1290 *idp = (drmachid_t)mp;
1291
1292 return (NULL);
1293 }
1294
1295 static void
drmach_mem_dispose(drmachid_t id)1296 drmach_mem_dispose(drmachid_t id)
1297 {
1298 drmach_mem_t *mp;
1299
1300 ASSERT(DRMACH_IS_MEM_ID(id));
1301
1302 mp = id;
1303
1304 if (mp->dev.node)
1305 drmach_node_dispose(mp->dev.node);
1306
1307 if (mp->memlist) {
1308 memlist_delete(mp->memlist);
1309 mp->memlist = NULL;
1310 }
1311
1312 kmem_free(mp, sizeof (*mp));
1313 }
1314
1315 static sbd_error_t *
drmach_mem_release(drmachid_t id)1316 drmach_mem_release(drmachid_t id)
1317 {
1318 if (!DRMACH_IS_MEM_ID(id))
1319 return (drerr_new(0, EX86_INAPPROP, NULL));
1320
1321 return (NULL);
1322 }
1323
1324 static sbd_error_t *
drmach_mem_status(drmachid_t id,drmach_status_t * stat)1325 drmach_mem_status(drmachid_t id, drmach_status_t *stat)
1326 {
1327 uint64_t pa;
1328 drmach_mem_t *dp;
1329 struct memlist *ml = NULL;
1330
1331 ASSERT(DRMACH_IS_MEM_ID(id));
1332 dp = id;
1333
1334 /* get starting physical address of target memory */
1335 pa = dp->base_pa;
1336 /* round down to slice boundary */
1337 pa &= ~(dp->mem_alignment - 1);
1338
1339 /* stop at first span that is in slice */
1340 memlist_read_lock();
1341 for (ml = phys_install; ml; ml = ml->ml_next)
1342 if (ml->ml_address >= pa && ml->ml_address < dp->slice_top)
1343 break;
1344 memlist_read_unlock();
1345
1346 stat->assigned = dp->dev.bp->assigned;
1347 stat->powered = dp->dev.bp->powered;
1348 stat->configured = (ml != NULL);
1349 stat->busy = dp->dev.busy;
1350 (void) strlcpy(stat->type, dp->dev.type, sizeof (stat->type));
1351 stat->info[0] = '\0';
1352
1353 return (NULL);
1354 }
1355
1356 /*
1357 * Public interfaces exported to support platform independent dr driver.
1358 */
/* Maximum number of DR-capable boards, as reported by acpidev. */
uint_t
drmach_max_boards(void)
{
	return (acpidev_dr_max_boards());
}
1364
/* Maximum number of IO units per board, as reported by acpidev. */
uint_t
drmach_max_io_units_per_board(void)
{
	return (acpidev_dr_max_io_units_per_board());
}
1370
/* Maximum number of CMP (CPU module) units per board. */
uint_t
drmach_max_cmp_units_per_board(void)
{
	return (acpidev_dr_max_cmp_units_per_board());
}
1376
/* Maximum number of memory units per board, as reported by acpidev. */
uint_t
drmach_max_mem_units_per_board(void)
{
	return (acpidev_dr_max_mem_units_per_board());
}
1382
/* Maximum number of CPU cores per CMP, as reported by acpidev. */
uint_t
drmach_max_core_per_cmp(void)
{
	return (acpidev_dr_max_cpu_units_per_cmp());
}
1388
/*
 * Machine-specific pre-op hook, invoked before the common dr driver
 * processes 'cmd'.  Status queries succeed unconditionally; every other
 * command takes drmach_cpr_rwlock as reader (released by
 * drmach_post_op()), must carry the ACPI OST option string, is checked
 * against the board's connect state, and - for (un)configure - has its
 * devset filtered down to the device types currently supported.
 */
sbd_error_t *
drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts, void *argp)
{
	drmach_board_t *bp = (drmach_board_t *)id;
	sbd_error_t *err = NULL;

	/* allow status and ncm operations to always succeed */
	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
		return (NULL);
	}

	switch (cmd) {
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
		/*
		 * Disable fast reboot if CPU/MEM/IOH hotplug event happens.
		 * Note: this is a temporary solution and will be revised when
		 * fast reboot can support CPU/MEM/IOH DR operations in future.
		 *
		 * ACPI BIOS generates some static ACPI tables, such as MADT,
		 * SRAT and SLIT, to describe system hardware configuration on
		 * power-on. When CPU/MEM/IOH hotplug event happens, those
		 * static tables won't be updated and will become stale.
		 *
		 * If we reset system by fast reboot, BIOS will have no chance
		 * to regenerate those staled static tables. Fast reboot can't
		 * tolerate such inconsistency between staled ACPI tables and
		 * real hardware configuration yet.
		 *
		 * A temporary solution is introduced to disable fast reboot if
		 * CPU/MEM/IOH hotplug event happens. This solution should be
		 * revised when fast reboot is enhanced to support CPU/MEM/IOH
		 * DR operations.
		 */
		fastreboot_disable(FBNS_HOTPLUG);
		/*FALLTHROUGH*/

	default:
		/* Block out the CPR thread. */
		rw_enter(&drmach_cpr_rwlock, RW_READER);
		break;
	}

	/* check all other commands for the required option string */
	if ((opts->size > 0) && (opts->copts != NULL)) {
		if (strstr(opts->copts, ACPIDEV_CMD_OST_PREFIX) == NULL) {
			err = drerr_new(1, EX86_SUPPORT, NULL);
		}
	} else {
		err = drerr_new(1, EX86_SUPPORT, NULL);
	}

	/* Validate board state transitions for board-level commands. */
	if (!err && id && DRMACH_IS_BOARD_ID(id)) {
		switch (cmd) {
		case SBD_CMD_TEST:
			break;
		case SBD_CMD_CONNECT:
			/* A connected board can't be connected again. */
			if (bp->connected)
				err = drerr_new(0, ESBD_STATE, NULL);
			else if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;
		case SBD_CMD_DISCONNECT:
			/* Only a connected board can be disconnected. */
			if (!bp->connected)
				err = drerr_new(0, ESBD_STATE, NULL);
			else if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;
		default:
			if (!drmach_domain.allow_dr)
				err = drerr_new(1, EX86_SUPPORT, NULL);
			break;

		}
	}

	/*
	 * CPU/memory/IO DR operations will be supported in stages on x86.
	 * With early versions, some operations should be blocked here.
	 * This temporary hook will be removed when all CPU/memory/IO DR
	 * operations are supported on x86 systems.
	 *
	 * We only need to filter unsupported device types for
	 * SBD_CMD_CONFIGURE/SBD_CMD_UNCONFIGURE commands, all other
	 * commands are supported by all device types.
	 */
	if (!err && (cmd == SBD_CMD_CONFIGURE || cmd == SBD_CMD_UNCONFIGURE)) {
		int i;
		dr_devset_t *devsetp = (dr_devset_t *)argp;
		dr_devset_t devset = *devsetp;

		switch (cmd) {
		case SBD_CMD_CONFIGURE:
			if (!plat_dr_support_cpu()) {
				DEVSET_DEL(devset, SBD_COMP_CPU,
				    DEVSET_ANYUNIT);
			} else {
				/* Trim units beyond the per-board maximum. */
				for (i = MAX_CPU_UNITS_PER_BOARD;
				    i < DEVSET_CPU_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_CPU, i);
				}
			}

			if (!plat_dr_support_memory()) {
				DEVSET_DEL(devset, SBD_COMP_MEM,
				    DEVSET_ANYUNIT);
			} else {
				for (i = MAX_MEM_UNITS_PER_BOARD;
				    i < DEVSET_MEM_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_MEM, i);
				}
			}

			/* No support of configuring IOH devices yet. */
			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			break;

		case SBD_CMD_UNCONFIGURE:
			if (!plat_dr_support_cpu()) {
				DEVSET_DEL(devset, SBD_COMP_CPU,
				    DEVSET_ANYUNIT);
			} else {
				for (i = MAX_CPU_UNITS_PER_BOARD;
				    i < DEVSET_CPU_NUMBER; i++) {
					DEVSET_DEL(devset, SBD_COMP_CPU, i);
				}
			}

			/* No support of unconfiguring MEM/IOH devices yet. */
			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			break;
		}

		*devsetp = devset;
		/* Fail if filtering left nothing to operate on. */
		if (DEVSET_IS_NULL(devset)) {
			err = drerr_new(1, EX86_SUPPORT, NULL);
		}
	}

	return (err);
}
1531
1532 sbd_error_t *
drmach_post_op(int cmd,drmachid_t id,drmach_opts_t * opts,int rv)1533 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts, int rv)
1534 {
1535 _NOTE(ARGUNUSED(id, opts, rv));
1536
1537 switch (cmd) {
1538 case SBD_CMD_STATUS:
1539 case SBD_CMD_GETNCM:
1540 break;
1541
1542 default:
1543 rw_exit(&drmach_cpr_rwlock);
1544 break;
1545 }
1546
1547 return (NULL);
1548 }
1549
/*
 * Configure (attach) the device identified by 'id'.  For CPU devices
 * this only allocates a cpuid via ACPI and returns without configuring
 * the devinfo branch; memory devices have their lgrp information
 * refreshed first.  'flags' is unused on x86.
 */
sbd_error_t *
drmach_configure(drmachid_t id, int flags)
{
	_NOTE(ARGUNUSED(flags));

	drmach_device_t *dp;
	sbd_error_t *err = NULL;
	dev_info_t *rdip;
	dev_info_t *fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	dp = id;

	rdip = dp->node->getdip(dp->node);
	ASSERT(rdip);
	ASSERT(e_ddi_branch_held(rdip));

	/* allocate cpu id for the CPU device. */
	if (DRMACH_IS_CPU_ID(id)) {
		DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
		ASSERT(hdl != NULL);
		if (ACPI_FAILURE(acpidev_dr_allocate_cpuid(hdl, NULL))) {
			err = drerr_new(1, EX86_ALLOC_CPUID, NULL);
		}
		/* CPUs skip the branch-configure step below. */
		return (err);
	}

	if (DRMACH_IS_MEM_ID(id)) {
		err = drmach_mem_update_lgrp(id);
		if (err)
			return (err);
	}

	if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		/* Report the failing dip when one was handed back. */
		dev_info_t *dip = (fdip != NULL) ? fdip : rdip;

		(void) ddi_pathname(dip, path);
		err = drerr_new(1, EX86_DRVFAIL, path);
		kmem_free(path, MAXPATHLEN);

		/* If non-NULL, fdip is returned held and must be released */
		if (fdip != NULL)
			ddi_release_devi(fdip);
	}

	return (err);
}
1599
/*
 * Unconfigure (detach) the device identified by 'id'.  For CPU devices
 * this only releases the cpuid back to ACPI and returns without touching
 * the devinfo branch.  'flags' is unused on x86.
 */
sbd_error_t *
drmach_unconfigure(drmachid_t id, int flags)
{
	_NOTE(ARGUNUSED(flags));

	drmach_device_t *dp;
	sbd_error_t *err = NULL;
	dev_info_t *rdip, *fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	dp = id;

	rdip = dp->node->getdip(dp->node);
	ASSERT(rdip);
	ASSERT(e_ddi_branch_held(rdip));

	/* CPUs: free the cpuid and skip the branch unconfigure below. */
	if (DRMACH_IS_CPU_ID(id)) {
		DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
		ASSERT(hdl != NULL);
		if (ACPI_FAILURE(acpidev_dr_free_cpuid(hdl))) {
			err = drerr_new(1, EX86_FREE_CPUID, NULL);
		}
		return (err);
	}

	/*
	 * Note: FORCE flag is no longer necessary under devfs
	 */
	if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/*
		 * If non-NULL, fdip is returned held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ndi_rele_devi(fdip);
		} else {
			(void) ddi_pathname(rdip, path);
		}

		err = drerr_new(1, EX86_DRVFAIL, path);

		kmem_free(path, MAXPATHLEN);
	}

	return (err);
}
1649
1650 sbd_error_t *
drmach_get_dip(drmachid_t id,dev_info_t ** dip)1651 drmach_get_dip(drmachid_t id, dev_info_t **dip)
1652 {
1653 drmach_device_t *dp;
1654
1655 if (!DRMACH_IS_DEVICE_ID(id))
1656 return (drerr_new(0, EX86_INAPPROP, NULL));
1657 dp = id;
1658
1659 *dip = dp->node->getdip(dp->node);
1660
1661 return (NULL);
1662 }
1663
1664 sbd_error_t *
drmach_release(drmachid_t id)1665 drmach_release(drmachid_t id)
1666 {
1667 drmach_common_t *cp;
1668
1669 if (!DRMACH_IS_DEVICE_ID(id))
1670 return (drerr_new(0, EX86_INAPPROP, NULL));
1671 cp = id;
1672
1673 return (cp->release(id));
1674 }
1675
1676 sbd_error_t *
drmach_status(drmachid_t id,drmach_status_t * stat)1677 drmach_status(drmachid_t id, drmach_status_t *stat)
1678 {
1679 drmach_common_t *cp;
1680 sbd_error_t *err;
1681
1682 rw_enter(&drmach_boards_rwlock, RW_READER);
1683 if (!DRMACH_IS_ID(id)) {
1684 rw_exit(&drmach_boards_rwlock);
1685 return (drerr_new(0, EX86_NOTID, NULL));
1686 }
1687 cp = (drmach_common_t *)id;
1688 err = cp->status(id, stat);
1689 rw_exit(&drmach_boards_rwlock);
1690
1691 return (err);
1692 }
1693
/*
 * Passthru handler that reports hot-plug operation status to firmware by
 * evaluating the board's ACPI _OST method.  The option string carries a
 * status keyword (in-progress/success/failure/noop) followed by an
 * "acpi-event-type=<event>" attribute; both are matched by prefix and
 * mapped onto ACPI _OST event and status codes.
 */
static sbd_error_t *
drmach_update_acpi_status(drmachid_t id, drmach_opts_t *opts)
{
	char *copts;
	drmach_board_t *bp;
	DRMACH_HANDLE hdl;
	int event, code;
	boolean_t inprogress = B_FALSE;

	if (DRMACH_NULL_ID(id) || !DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = (drmach_board_t *)id;
	hdl = drmach_node_get_dnode(bp->tree);
	ASSERT(hdl != NULL);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Get the status code. */
	copts = opts->copts;
	if (strncmp(copts, ACPIDEV_CMD_OST_INPROGRESS,
	    strlen(ACPIDEV_CMD_OST_INPROGRESS)) == 0) {
		inprogress = B_TRUE;
		code = ACPI_OST_STA_INSERT_IN_PROGRESS;
		copts += strlen(ACPIDEV_CMD_OST_INPROGRESS);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_SUCCESS,
	    strlen(ACPIDEV_CMD_OST_SUCCESS)) == 0) {
		code = ACPI_OST_STA_SUCCESS;
		copts += strlen(ACPIDEV_CMD_OST_SUCCESS);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_FAILURE,
	    strlen(ACPIDEV_CMD_OST_FAILURE)) == 0) {
		code = ACPI_OST_STA_FAILURE;
		copts += strlen(ACPIDEV_CMD_OST_FAILURE);
	} else if (strncmp(copts, ACPIDEV_CMD_OST_NOOP,
	    strlen(ACPIDEV_CMD_OST_NOOP)) == 0) {
		/* Nothing to report for a no-op. */
		return (NULL);
	} else {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}

	/* Get the event type. */
	copts = strstr(copts, ACPIDEV_EVENT_TYPE_ATTR_NAME);
	if (copts == NULL) {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}
	copts += strlen(ACPIDEV_EVENT_TYPE_ATTR_NAME);
	if (copts[0] != '=') {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}
	copts += strlen("=");
	if (strncmp(copts, ACPIDEV_EVENT_TYPE_BUS_CHECK,
	    strlen(ACPIDEV_EVENT_TYPE_BUS_CHECK)) == 0) {
		event = ACPI_NOTIFY_BUS_CHECK;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK,
	    strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK)) == 0) {
		event = ACPI_NOTIFY_DEVICE_CHECK;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT,
	    strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT)) == 0) {
		event = ACPI_NOTIFY_DEVICE_CHECK_LIGHT;
	} else if (strncmp(copts, ACPIDEV_EVENT_TYPE_EJECT_REQUEST,
	    strlen(ACPIDEV_EVENT_TYPE_EJECT_REQUEST)) == 0) {
		event = ACPI_NOTIFY_EJECT_REQUEST;
		/* Ejection uses a distinct in-progress status code. */
		if (inprogress) {
			code = ACPI_OST_STA_EJECT_IN_PROGRESS;
		}
	} else {
		return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
	}

	(void) acpidev_eval_ost(hdl, event, code, NULL, 0);

	return (NULL);
}
1766
/*
 * Dispatch table for machine-specific passthru commands, matched by
 * command-name prefix in drmach_passthru().
 */
static struct {
	const char *name;
	sbd_error_t *(*handler)(drmachid_t id, drmach_opts_t *opts);
} drmach_pt_arr[] = {
	{ ACPIDEV_CMD_OST_PREFIX, &drmach_update_acpi_status },
	/* the following line must always be last */
	{ NULL, NULL }
};
1775
1776 sbd_error_t *
drmach_passthru(drmachid_t id,drmach_opts_t * opts)1777 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
1778 {
1779 int i;
1780 sbd_error_t *err;
1781
1782 i = 0;
1783 while (drmach_pt_arr[i].name != NULL) {
1784 int len = strlen(drmach_pt_arr[i].name);
1785
1786 if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
1787 break;
1788
1789 i += 1;
1790 }
1791
1792 if (drmach_pt_arr[i].name == NULL)
1793 err = drerr_new(0, EX86_UNKPTCMD, opts->copts);
1794 else
1795 err = (*drmach_pt_arr[i].handler)(id, opts);
1796
1797 return (err);
1798 }
1799
1800 /*
1801 * Board specific interfaces to support dr driver
1802 */
1803 static int
drmach_get_portid(drmach_node_t * np)1804 drmach_get_portid(drmach_node_t *np)
1805 {
1806 uint32_t portid;
1807
1808 if (np->getprop(np, ACPIDEV_DR_PROP_PORTID,
1809 &portid, sizeof (portid)) == 0) {
1810 /*
1811 * acpidev returns portid as uint32_t, validates it.
1812 */
1813 if (portid > INT_MAX) {
1814 return (-1);
1815 } else {
1816 return (portid);
1817 }
1818 }
1819
1820 return (-1);
1821 }
1822
1823 /*
1824 * This is a helper function to determine if a given
1825 * node should be considered for a dr operation according
1826 * to predefined dr type nodes and the node's name.
1827 * Formal Parameter : The name of a device node.
1828 * Return Value: -1, name does not map to a valid dr type.
1829 * A value greater or equal to 0, name is a valid dr type.
1830 */
1831 static int
drmach_name2type_idx(char * name)1832 drmach_name2type_idx(char *name)
1833 {
1834 int index, ntypes;
1835
1836 if (name == NULL)
1837 return (-1);
1838
1839 /*
1840 * Determine how many possible types are currently supported
1841 * for dr.
1842 */
1843 ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
1844
1845 /* Determine if the node's name correspond to a predefined type. */
1846 for (index = 0; index < ntypes; index++) {
1847 if (strcmp(drmach_name2type[index].name, name) == 0)
1848 /* The node is an allowed type for dr. */
1849 return (index);
1850 }
1851
1852 /*
1853 * If the name of the node does not map to any of the
1854 * types in the array drmach_name2type then the node is not of
1855 * interest to dr.
1856 */
1857 return (-1);
1858 }
1859
/*
 * Per-node callback for drmach_board_find_devices().  For each node on
 * the target board whose name maps to a supported DR type, create a
 * drmach device object, record it in the board's device array and report
 * it through the caller-supplied 'found' callback.
 *
 * Returns 0 to continue the walk (node skipped or handled successfully),
 * -1 to terminate it (data->err then holds the error).
 */
static int
drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
{
	drmach_node_t *node = args->node;
	drmach_board_cb_data_t *data = args->data;
	drmach_board_t *obj = data->obj;

	int rv, portid;
	uint32_t bnum;
	drmachid_t id;
	drmach_device_t *device;
	char name[OBP_MAXDRVNAME];

	portid = drmach_get_portid(node);
	/* Skip nodes without a device name property. */
	rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME,
	    name, OBP_MAXDRVNAME);
	if (rv)
		return (0);

	rv = node->getprop(node, ACPIDEV_DR_PROP_BOARDNUM,
	    &bnum, sizeof (bnum));
	if (rv) {
		return (0);
	}
	/* Board numbers wider than int are not representable; skip. */
	if (bnum > INT_MAX) {
		return (0);
	}

	/* Only nodes on the board being walked are of interest. */
	if (bnum != obj->bnum)
		return (0);

	if (drmach_name2type_idx(name) < 0) {
		return (0);
	}

	/*
	 * Create a device data structure from this node data.
	 * The call may yield nothing if the node is not of interest
	 * to drmach.
	 */
	data->err = drmach_device_new(node, obj, portid, &id);
	if (data->err)
		return (-1);
	else if (!id) {
		/*
		 * drmach_device_new examined the node we passed in
		 * and determined that it was one not of interest to
		 * drmach. So, it is skipped.
		 */
		return (0);
	}

	rv = drmach_array_set(obj->devices, data->ndevs++, id);
	if (rv) {
		data->err = DRMACH_INTERNAL_ERROR();
		return (-1);
	}
	device = id;

	data->err = (*data->found)(data->a, device->type, device->unum, id);

	return (data->err == NULL ? 0 : -1);
}
1923
/*
 * Walk the board's device tree and report every DR-capable device
 * (CPU/memory/IO) through the 'found' callback.  The ACPI DR locks are
 * held across the walk to keep the tree stable.  On failure the
 * partially built device array is disposed.
 */
sbd_error_t *
drmach_board_find_devices(drmachid_t id, void *a,
    sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
{
	drmach_board_t *bp = (drmach_board_t *)id;
	sbd_error_t *err;
	int max_devices;
	int rv;
	drmach_board_cb_data_t data;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Size the array for the largest possible device population. */
	max_devices = MAX_CPU_UNITS_PER_BOARD;
	max_devices += MAX_MEM_UNITS_PER_BOARD;
	max_devices += MAX_IO_UNITS_PER_BOARD;

	if (bp->devices == NULL)
		bp->devices = drmach_array_new(0, max_devices);
	ASSERT(bp->tree != NULL);

	data.obj = bp;
	data.ndevs = 0;
	data.found = found;
	data.a = a;
	data.err = NULL;

	acpidev_dr_lock_all();
	rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
	acpidev_dr_unlock_all();
	if (rv == 0) {
		err = NULL;
	} else {
		drmach_array_dispose(bp->devices, drmach_device_dispose);
		bp->devices = NULL;

		if (data.err)
			err = data.err;
		else
			err = DRMACH_INTERNAL_ERROR();
	}

	return (err);
}
1968
1969 int
drmach_board_lookup(int bnum,drmachid_t * id)1970 drmach_board_lookup(int bnum, drmachid_t *id)
1971 {
1972 int rv = 0;
1973
1974 if (bnum < 0) {
1975 *id = 0;
1976 return (-1);
1977 }
1978
1979 rw_enter(&drmach_boards_rwlock, RW_READER);
1980 if (drmach_array_get(drmach_boards, (uint_t)bnum, id)) {
1981 *id = 0;
1982 rv = -1;
1983 }
1984 rw_exit(&drmach_boards_rwlock);
1985
1986 return (rv);
1987 }
1988
1989 sbd_error_t *
drmach_board_name(int bnum,char * buf,int buflen)1990 drmach_board_name(int bnum, char *buf, int buflen)
1991 {
1992 ACPI_HANDLE hdl;
1993 sbd_error_t *err = NULL;
1994
1995 if (bnum < 0) {
1996 return (drerr_new(1, EX86_BNUM, "%d", bnum));
1997 }
1998
1999 acpidev_dr_lock_all();
2000 if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl))) {
2001 DRMACH_PR("!drmach_board_name: failed to lookup ACPI handle "
2002 "for board %d.", bnum);
2003 err = drerr_new(1, EX86_BNUM, "%d", bnum);
2004 } else if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, buf, buflen))) {
2005 DRMACH_PR("!drmach_board_name: failed to generate board name "
2006 "for board %d.", bnum);
2007 err = drerr_new(0, EX86_INVALID_ARG,
2008 ": buffer is too small for board name.");
2009 }
2010 acpidev_dr_unlock_all();
2011
2012 return (err);
2013 }
2014
2015 int
drmach_board_is_floating(drmachid_t id)2016 drmach_board_is_floating(drmachid_t id)
2017 {
2018 drmach_board_t *bp;
2019
2020 if (!DRMACH_IS_BOARD_ID(id))
2021 return (0);
2022
2023 bp = (drmach_board_t *)id;
2024
2025 return ((drmach_domain.floating & (1ULL << bp->bnum)) ? 1 : 0);
2026 }
2027
/*
 * Callback invoked for each board on an ejection-dependency chain to
 * verify that DR command 'ctx' on the originating board is compatible
 * with this dependent board's state.  On a violation, the offending
 * board's ACPI handle is stored in *retval and AE_ERROR is returned to
 * abort the walk; AE_OK continues it.
 */
static ACPI_STATUS
drmach_board_check_dependent_cb(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
    void **retval)
{
	uint32_t bdnum;
	drmach_board_t *bp;
	ACPI_STATUS rc = AE_OK;
	int cmd = (int)(intptr_t)ctx;

	ASSERT(hdl != NULL);
	ASSERT(lvl == UINT32_MAX);
	ASSERT(retval != NULL);

	/* Skip non-board devices. */
	if (!acpidev_dr_device_is_board(hdl)) {
		return (AE_OK);
	} else if (ACPI_FAILURE(acpidev_dr_get_board_number(hdl, &bdnum))) {
		DRMACH_PR("!drmach_board_check_dependent_cb: failed to get "
		    "board number for object %p.\n", hdl);
		return (AE_ERROR);
	} else if (bdnum > MAX_BOARDS) {
		/*
		 * NOTE(review): if board numbers are 0-based indices this
		 * bound looks off by one (>= would be expected) -- confirm
		 * against acpidev_dr_get_board_number()'s range.
		 */
		DRMACH_PR("!drmach_board_check_dependent_cb: board number %u "
		    "is too big, max %u.", bdnum, MAX_BOARDS);
		return (AE_ERROR);
	}

	/* May be NULL if the board object hasn't been created yet. */
	bp = drmach_get_board_by_bnum(bdnum);
	switch (cmd) {
	case SBD_CMD_CONNECT:
		/*
		 * Its parent board should be present, assigned, powered and
		 * connected when connecting the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (!bp->connected || !bp->powered || !bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_POWERON:
		/*
		 * Its parent board should be present, assigned and powered when
		 * powering on the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (!bp->powered || !bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_ASSIGN:
		/*
		 * Its parent board should be present and assigned when
		 * assigning the child board.
		 */
		if (bp == NULL) {
			*retval = hdl;
			rc = AE_ERROR;
		} else if (!bp->assigned) {
			*retval = hdl;
			rc = AE_ERROR;
		}
		break;

	case SBD_CMD_DISCONNECT:
		/*
		 * The child board should be disconnected if present when
		 * disconnecting its parent board.
		 */
		if (bp != NULL && bp->connected) {
			*retval = hdl;
			rc = AE_ERROR;
		}
		break;

	case SBD_CMD_POWEROFF:
		/*
		 * The child board should be disconnected and powered off if
		 * present when powering off its parent board.
		 */
		if (bp != NULL) {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (bp->connected || bp->powered) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	case SBD_CMD_UNASSIGN:
		/*
		 * The child board should be disconnected, powered off and
		 * unassigned if present when unassigning its parent board.
		 */
		if (bp != NULL) {
			bp->powered = acpidev_dr_device_is_powered(hdl);
			if (bp->connected || bp->powered || bp->assigned) {
				*retval = hdl;
				rc = AE_ERROR;
			}
		}
		break;

	default:
		/* Return success for all other commands. */
		break;
	}

	return (rc);
}
2150
/*
 * Verify ejection-dependency constraints for 'cmd' on board 'bp'.
 * Assign/poweron/connect walk the board's _EJD (parent) chain; the
 * reverse commands walk the list of boards depending on it.  On a
 * violation, an error naming the dependency direction and the other
 * board is returned.  Caller must hold drmach_boards_rwlock.
 */
sbd_error_t *
drmach_board_check_dependent(int cmd, drmach_board_t *bp)
{
	int reverse;
	char *name;
	sbd_error_t *err = NULL;
	DRMACH_HANDLE hdl;
	DRMACH_HANDLE dp = NULL;

	ASSERT(bp != NULL);
	ASSERT(DRMACH_IS_BOARD_ID(bp));
	ASSERT(RW_LOCK_HELD(&drmach_boards_rwlock));

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	switch (cmd) {
	case SBD_CMD_ASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_CONNECT:
		/* Walk the boards this one depends on (parents). */
		if (ACPI_SUCCESS(acpidev_dr_device_walk_ejd(hdl,
		    &drmach_board_check_dependent_cb,
		    (void *)(intptr_t)cmd, &dp))) {
			return (NULL);
		}
		reverse = 0;
		break;

	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_DISCONNECT:
		/* Walk the boards depending on this one (children). */
		if (ACPI_SUCCESS(acpidev_dr_device_walk_edl(hdl,
		    &drmach_board_check_dependent_cb,
		    (void *)(intptr_t)cmd, &dp))) {
			return (NULL);
		}
		reverse = 1;
		break;

	default:
		return (drerr_new(0, EX86_INAPPROP, NULL));
	}

	/* dp is the offending board's handle; NULL means the walk failed. */
	if (dp == NULL) {
		return (drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name));
	}
	name = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	if (ACPI_FAILURE(acpidev_dr_get_board_name(dp, name, MAXPATHLEN))) {
		err = drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name);
	} else if (reverse == 0) {
		err = drerr_new(1, EX86_WALK_DEPENDENCY,
		    "%s, depends on board %s", bp->cm.name, name);
	} else {
		err = drerr_new(1, EX86_WALK_DEPENDENCY,
		    "board %s depends on %s", name, bp->cm.name);
	}
	kmem_free(name, MAXPATHLEN);

	return (err);
}
2212
/*
 * Assign board 'bnum' to the domain, creating the board object on first
 * use.  drmach_boards_rwlock is taken as writer so a new board can be
 * inserted; it is downgraded to reader when the board already exists.
 */
sbd_error_t *
drmach_board_assign(int bnum, drmachid_t *id)
{
	sbd_error_t *err = NULL;

	if (bnum < 0) {
		return (drerr_new(1, EX86_BNUM, "%d", bnum));
	}

	rw_enter(&drmach_boards_rwlock, RW_WRITER);

	if (drmach_array_get(drmach_boards, bnum, id) == -1) {
		err = drerr_new(1, EX86_BNUM, "%d", bnum);
	} else {
		drmach_board_t *bp;

		/*
		 * Board has already been created, downgrade to reader.
		 */
		if (*id)
			rw_downgrade(&drmach_boards_rwlock);

		bp = *id;
		if (!(*id))
			bp = *id =
			    (drmachid_t)drmach_board_new(bnum, 0);

		if (bp == NULL) {
			DRMACH_PR("!drmach_board_assign: failed to create "
			    "object for board %d.", bnum);
			err = drerr_new(1, EX86_BNUM, "%d", bnum);
		} else {
			/* Only mark assigned after dependency checks pass. */
			err = drmach_board_check_dependent(SBD_CMD_ASSIGN, bp);
			if (err == NULL)
				bp->assigned = 1;
		}
	}

	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2255
/*
 * Unassign board 'id' from the domain and destroy its board object.
 * Fails while the board is still configured, busy, connected or
 * powered, or while an ejection dependent requires it.
 */
sbd_error_t *
drmach_board_unassign(drmachid_t id)
{
	drmach_board_t *bp;
	sbd_error_t *err;
	drmach_status_t stat;

	/* Unassigning a nonexistent board is a no-op. */
	if (DRMACH_NULL_ID(id))
		return (NULL);

	if (!DRMACH_IS_BOARD_ID(id)) {
		return (drerr_new(0, EX86_INAPPROP, NULL));
	}
	bp = id;

	rw_enter(&drmach_boards_rwlock, RW_WRITER);

	err = drmach_board_status(id, &stat);
	if (err) {
		rw_exit(&drmach_boards_rwlock);
		return (err);
	}

	if (stat.configured || stat.busy) {
		err = drerr_new(0, EX86_CONFIGBUSY, bp->cm.name);
	} else if (bp->connected) {
		err = drerr_new(0, EX86_CONNECTBUSY, bp->cm.name);
	} else if (stat.powered) {
		err = drerr_new(0, EX86_POWERBUSY, bp->cm.name);
	} else {
		err = drmach_board_check_dependent(SBD_CMD_UNASSIGN, bp);
		if (err == NULL) {
			/* Remove from the array before disposing. */
			if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
				err = DRMACH_INTERNAL_ERROR();
			else
				drmach_board_dispose(bp);
		}
	}

	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2299
/*
 * Power on the board identified by id via ACPI.
 * Returns NULL on success (including when the board is already powered),
 * otherwise an sbd_error_t describing the failure.
 */
sbd_error_t *
drmach_board_poweron(drmachid_t id)
{
	drmach_board_t *bp;
	sbd_error_t *err = NULL;
	DRMACH_HANDLE hdl;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Nothing to do if the board is already powered on. */
	bp->powered = drmach_board_check_power(bp);
	if (bp->powered) {
		return (NULL);
	}

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_POWERON, bp);
	if (err == NULL) {
		/* The global ACPI DR lock serializes device power changes. */
		acpidev_dr_lock_all();
		if (ACPI_FAILURE(acpidev_dr_device_poweron(hdl)))
			err = drerr_new(0, EX86_POWERON, NULL);
		acpidev_dr_unlock_all();

		/* Check whether the board is powered on. */
		bp->powered = drmach_board_check_power(bp);
		if (err == NULL && bp->powered == 0)
			err = drerr_new(0, EX86_POWERON, NULL);
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2337
/*
 * Power off the board identified by id via ACPI.  The board must be
 * unconfigured, not busy and disconnected before it may be powered off.
 * Returns NULL on success (including when the board is already off).
 */
sbd_error_t *
drmach_board_poweroff(drmachid_t id)
{
	sbd_error_t *err = NULL;
	drmach_board_t *bp;
	drmach_status_t stat;
	DRMACH_HANDLE hdl;

	/* Powering off a NULL id is treated as a successful no-op. */
	if (DRMACH_NULL_ID(id))
		return (NULL);

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Check whether the board is busy, configured or connected. */
	err = drmach_board_status(id, &stat);
	if (err != NULL)
		return (err);
	if (stat.configured || stat.busy) {
		return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));
	} else if (bp->connected) {
		return (drerr_new(0, EX86_CONNECTBUSY, bp->cm.name));
	}

	/* Nothing to do if the board is already powered off. */
	bp->powered = drmach_board_check_power(bp);
	if (bp->powered == 0) {
		return (NULL);
	}

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_POWEROFF, bp);
	if (err == NULL) {
		/* The global ACPI DR lock serializes device power changes. */
		acpidev_dr_lock_all();
		if (ACPI_FAILURE(acpidev_dr_device_poweroff(hdl)))
			err = drerr_new(0, EX86_POWEROFF, NULL);
		acpidev_dr_unlock_all();

		/* Verify that the power-off actually took effect. */
		bp->powered = drmach_board_check_power(bp);
		if (err == NULL && bp->powered != 0)
			err = drerr_new(0, EX86_POWEROFF, NULL);
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2388
2389 sbd_error_t *
drmach_board_test(drmachid_t id,drmach_opts_t * opts,int force)2390 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
2391 {
2392 _NOTE(ARGUNUSED(opts, force));
2393
2394 drmach_board_t *bp;
2395 DRMACH_HANDLE hdl;
2396
2397 if (DRMACH_NULL_ID(id))
2398 return (NULL);
2399
2400 if (!DRMACH_IS_BOARD_ID(id))
2401 return (drerr_new(0, EX86_INAPPROP, NULL));
2402 bp = id;
2403
2404 hdl = drmach_node_get_dnode(bp->tree);
2405 if (hdl == NULL)
2406 return (drerr_new(0, EX86_INAPPROP, NULL));
2407
2408 if (ACPI_FAILURE(acpidev_dr_device_check_status(hdl)))
2409 return (drerr_new(0, EX86_IN_FAILURE, NULL));
2410
2411 return (NULL);
2412 }
2413
2414 sbd_error_t *
drmach_board_connect(drmachid_t id,drmach_opts_t * opts)2415 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
2416 {
2417 _NOTE(ARGUNUSED(opts));
2418
2419 sbd_error_t *err = NULL;
2420 drmach_board_t *bp = (drmach_board_t *)id;
2421 DRMACH_HANDLE hdl;
2422
2423 if (!DRMACH_IS_BOARD_ID(id))
2424 return (drerr_new(0, EX86_INAPPROP, NULL));
2425 bp = (drmach_board_t *)id;
2426
2427 hdl = drmach_node_get_dnode(bp->tree);
2428 if (hdl == NULL)
2429 return (drerr_new(0, EX86_INAPPROP, NULL));
2430
2431 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2432 err = drmach_board_check_dependent(SBD_CMD_CONNECT, bp);
2433 if (err == NULL) {
2434 acpidev_dr_lock_all();
2435 if (ACPI_FAILURE(acpidev_dr_device_insert(hdl))) {
2436 (void) acpidev_dr_device_remove(hdl);
2437 err = drerr_new(1, EX86_PROBE, NULL);
2438 } else {
2439 bp->connected = 1;
2440 }
2441 acpidev_dr_unlock_all();
2442 }
2443 rw_exit(&drmach_boards_rwlock);
2444
2445 return (err);
2446 }
2447
/*
 * Disconnect (deprobe) a board: remove its devices from the system via
 * ACPI.  The board must not be configured or busy.  Returns NULL on
 * success, otherwise an sbd_error_t.
 */
sbd_error_t *
drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
{
	_NOTE(ARGUNUSED(opts));

	DRMACH_HANDLE hdl;
	drmach_board_t *bp;
	drmach_status_t stat;
	sbd_error_t *err = NULL;

	/* Disconnecting a NULL id is treated as a successful no-op. */
	if (DRMACH_NULL_ID(id))
		return (NULL);
	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	bp = (drmach_board_t *)id;

	hdl = drmach_node_get_dnode(bp->tree);
	if (hdl == NULL)
		return (drerr_new(0, EX86_INAPPROP, NULL));

	/* Check whether the board is busy or configured. */
	err = drmach_board_status(id, &stat);
	if (err != NULL)
		return (err);
	if (stat.configured || stat.busy)
		return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));

	rw_enter(&drmach_boards_rwlock, RW_WRITER);
	err = drmach_board_check_dependent(SBD_CMD_DISCONNECT, bp);
	if (err == NULL) {
		/* The global ACPI DR lock serializes insert/remove. */
		acpidev_dr_lock_all();
		if (ACPI_SUCCESS(acpidev_dr_device_remove(hdl))) {
			bp->connected = 0;
		} else {
			err = drerr_new(1, EX86_DEPROBE, bp->cm.name);
		}
		acpidev_dr_unlock_all();
	}
	rw_exit(&drmach_boards_rwlock);

	return (err);
}
2490
2491 sbd_error_t *
drmach_board_deprobe(drmachid_t id)2492 drmach_board_deprobe(drmachid_t id)
2493 {
2494 drmach_board_t *bp;
2495
2496 if (!DRMACH_IS_BOARD_ID(id))
2497 return (drerr_new(0, EX86_INAPPROP, NULL));
2498 bp = id;
2499
2500 cmn_err(CE_CONT, "DR: detach board %d\n", bp->bnum);
2501
2502 if (bp->devices) {
2503 drmach_array_dispose(bp->devices, drmach_device_dispose);
2504 bp->devices = NULL;
2505 }
2506
2507 bp->boot_board = 0;
2508
2509 return (NULL);
2510 }
2511
2512 /*
2513 * CPU specific interfaces to support dr driver
2514 */
2515 sbd_error_t *
drmach_cpu_disconnect(drmachid_t id)2516 drmach_cpu_disconnect(drmachid_t id)
2517 {
2518 if (!DRMACH_IS_CPU_ID(id))
2519 return (drerr_new(0, EX86_INAPPROP, NULL));
2520
2521 return (NULL);
2522 }
2523
2524 sbd_error_t *
drmach_cpu_get_id(drmachid_t id,processorid_t * cpuid)2525 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
2526 {
2527 drmach_cpu_t *cpu;
2528
2529 if (!DRMACH_IS_CPU_ID(id))
2530 return (drerr_new(0, EX86_INAPPROP, NULL));
2531 cpu = (drmach_cpu_t *)id;
2532
2533 if (cpu->cpuid == -1) {
2534 if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
2535 drmach_node_get_dnode(cpu->dev.node), cpuid))) {
2536 cpu->cpuid = *cpuid;
2537 } else {
2538 *cpuid = -1;
2539 }
2540 } else {
2541 *cpuid = cpu->cpuid;
2542 }
2543
2544 return (NULL);
2545 }
2546
2547 sbd_error_t *
drmach_cpu_get_impl(drmachid_t id,int * ip)2548 drmach_cpu_get_impl(drmachid_t id, int *ip)
2549 {
2550 if (!DRMACH_IS_CPU_ID(id))
2551 return (drerr_new(0, EX86_INAPPROP, NULL));
2552
2553 /* Assume all CPUs in system are homogeneous. */
2554 *ip = X86_CPU_IMPL_UNKNOWN;
2555
2556 kpreempt_disable();
2557 if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
2558 /* NHM-EX CPU */
2559 if (cpuid_getfamily(CPU) == 0x6 &&
2560 cpuid_getmodel(CPU) == 0x2e) {
2561 *ip = X86_CPU_IMPL_NEHALEM_EX;
2562 }
2563 }
2564 kpreempt_enable();
2565
2566 return (NULL);
2567 }
2568
2569 /*
2570 * Memory specific interfaces to support dr driver
2571 */
2572
2573 /*
2574 * When drmach_mem_new() is called, the mp->base_pa field is set to the base
2575 * address of configured memory if there's configured memory on the board,
2576 * otherwise set to UINT64_MAX. For hot-added memory board, there's no
2577 * configured memory when drmach_mem_new() is called, so mp->base_pa is set
2578 * to UINT64_MAX and we need to set a correct value for it after memory
2579 * hot-add operations.
2580 * A hot-added memory board may contain multiple memory segments,
2581 * drmach_mem_add_span() will be called once for each segment, so we can't
2582 * rely on the basepa argument. And it's possible that only part of a memory
2583 * segment is added into OS, so need to intersect with phys_installed list
2584 * to get the real base address of configured memory on the board.
2585 */
sbd_error_t *
drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
{
	_NOTE(ARGUNUSED(basepa));

	uint64_t nbytes = 0;
	uint64_t endpa;
	drmach_mem_t *mp;
	struct memlist *ml2;
	struct memlist *p;

	ASSERT(size != 0);

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mp = (drmach_mem_t *)id;

	/* Compute basepa and size of installed memory. */
	endpa = _ptob64(physmax + 1);
	memlist_read_lock();
	ml2 = memlist_dup(phys_install);
	memlist_read_unlock();
	/*
	 * Trim the copy of phys_install down to just this board's slice
	 * [slice_base, slice_top) by deleting everything outside it.
	 */
	ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
	if (ml2 && endpa > mp->slice_top) {
		ml2 = memlist_del_span(ml2, mp->slice_top,
		    endpa - mp->slice_top);
	}

	ASSERT(ml2);
	if (ml2) {
		/*
		 * Sum the configured bytes in the slice and track the
		 * lowest installed address as the board's base_pa (see
		 * the block comment above for why basepa can't be used).
		 */
		for (p = ml2; p; p = p->ml_next) {
			nbytes += p->ml_size;
			if (mp->base_pa > p->ml_address)
				mp->base_pa = p->ml_address;
		}
		ASSERT(nbytes > 0);
		mp->nbytes += nbytes;
		memlist_delete(ml2);
	}

	return (NULL);
}
2628
/*
 * Register a hot-added memory board's address range with the lgroup
 * subsystem so NUMA placement knows about it.  Skipped when memory is
 * already installed on the board or when lgroups are disabled.
 */
static sbd_error_t *
drmach_mem_update_lgrp(drmachid_t id)
{
	ACPI_STATUS rc;
	DRMACH_HANDLE hdl;
	void *hdlp;
	drmach_mem_t *mp;
	update_membounds_t umb;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mp = (drmach_mem_t *)id;
	/* No need to update lgrp if memory is already installed. */
	if (mp->nbytes != 0)
		return (NULL);
	/* No need to update lgrp if lgrp is disabled. */
	if (max_mem_nodes == 1)
		return (NULL);

	/* Add memory to lgroup */
	hdl = mp->dev.node->get_dnode(mp->dev.node);
	rc = acpidev_dr_device_get_memory_index(hdl, &umb.u_device_id);
	ASSERT(ACPI_SUCCESS(rc));
	if (ACPI_FAILURE(rc)) {
		cmn_err(CE_WARN, "drmach: failed to get device id of memory, "
		    "can't update lgrp information.");
		return (drerr_new(0, EX86_INTERNAL, NULL));
	}
	/* Query proximity-domain/latency info for the memory from ACPI. */
	rc = acpidev_dr_get_mem_numa_info(hdl, mp->memlist, &hdlp,
	    &umb.u_domain, &umb.u_sli_cnt, &umb.u_sli_ptr);
	ASSERT(ACPI_SUCCESS(rc));
	if (ACPI_FAILURE(rc)) {
		cmn_err(CE_WARN, "drmach: failed to get lgrp info of memory, "
		    "can't update lgrp information.");
		return (drerr_new(0, EX86_INTERNAL, NULL));
	}
	/* Hand the whole slice range to the platform lgroup code. */
	umb.u_base = (uint64_t)mp->slice_base;
	umb.u_length = (uint64_t)(mp->slice_top - mp->slice_base);
	lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb);
	acpidev_dr_free_mem_numa_info(hdlp);

	return (NULL);
}
2672
2673 sbd_error_t *
drmach_mem_enable(drmachid_t id)2674 drmach_mem_enable(drmachid_t id)
2675 {
2676 if (!DRMACH_IS_MEM_ID(id))
2677 return (drerr_new(0, EX86_INAPPROP, NULL));
2678 else
2679 return (NULL);
2680 }
2681
2682 sbd_error_t *
drmach_mem_get_info(drmachid_t id,drmach_mem_info_t * mem)2683 drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
2684 {
2685 drmach_mem_t *mp;
2686
2687 if (!DRMACH_IS_MEM_ID(id))
2688 return (drerr_new(0, EX86_INAPPROP, NULL));
2689 mp = (drmach_mem_t *)id;
2690
2691 /*
2692 * This is only used by dr to round up/down the memory
2693 * for copying.
2694 */
2695 mem->mi_alignment_mask = mp->mem_alignment - 1;
2696 mem->mi_basepa = mp->base_pa;
2697 mem->mi_size = mp->nbytes;
2698 mem->mi_slice_base = mp->slice_base;
2699 mem->mi_slice_top = mp->slice_top;
2700 mem->mi_slice_size = mp->slice_size;
2701
2702 return (NULL);
2703 }
2704
2705 sbd_error_t *
drmach_mem_get_slice_info(drmachid_t id,uint64_t * bp,uint64_t * ep,uint64_t * sp)2706 drmach_mem_get_slice_info(drmachid_t id,
2707 uint64_t *bp, uint64_t *ep, uint64_t *sp)
2708 {
2709 drmach_mem_t *mp;
2710
2711 if (!DRMACH_IS_MEM_ID(id))
2712 return (drerr_new(0, EX86_INAPPROP, NULL));
2713 mp = (drmach_mem_t *)id;
2714
2715 if (bp)
2716 *bp = mp->slice_base;
2717 if (ep)
2718 *ep = mp->slice_top;
2719 if (sp)
2720 *sp = mp->slice_size;
2721
2722 return (NULL);
2723 }
2724
/*
 * Return (in *ml) a caller-owned copy of the device's memlist.  On
 * DEBUG kernels, additionally verify that the list does not overlap
 * memory already present in phys_install.
 */
sbd_error_t *
drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
{
#ifdef DEBUG
	int rv;
#endif
	drmach_mem_t *mem;
	struct memlist *mlist;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EX86_INAPPROP, NULL));
	mem = (drmach_mem_t *)id;

	/* Caller receives a private copy and is responsible for freeing it. */
	mlist = memlist_dup(mem->memlist);
	*ml = mlist;

#ifdef DEBUG
	/*
	 * Make sure the incoming memlist doesn't already
	 * intersect with what's present in the system (phys_install).
	 */
	memlist_read_lock();
	rv = memlist_intersect(phys_install, mlist);
	memlist_read_unlock();
	if (rv) {
		DRMACH_PR("Derived memlist intersects with phys_install\n");
		memlist_dump(mlist);

		DRMACH_PR("phys_install memlist:\n");
		memlist_dump(phys_install);

		memlist_delete(mlist);
		return (DRMACH_INTERNAL_ERROR());
	}

	DRMACH_PR("Derived memlist:");
	memlist_dump(mlist);
#endif

	return (NULL);
}
2766
/*
 * No board-local CPU preference exists for memory operations on this
 * platform; any current CPU is acceptable.
 */
processorid_t
drmach_mem_cpu_affinity(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (CPU_CURRENT);
}
2774
/*
 * Copy-rename on this platform never requires an OS suspend;
 * always report "no suspend needed" (0).
 */
int
drmach_copy_rename_need_suspend(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (0);
}
2782
2783 /*
2784 * IO specific interfaces to support dr driver
2785 */
2786 sbd_error_t *
drmach_io_pre_release(drmachid_t id)2787 drmach_io_pre_release(drmachid_t id)
2788 {
2789 if (!DRMACH_IS_IO_ID(id))
2790 return (drerr_new(0, EX86_INAPPROP, NULL));
2791
2792 return (NULL);
2793 }
2794
2795 sbd_error_t *
drmach_io_unrelease(drmachid_t id)2796 drmach_io_unrelease(drmachid_t id)
2797 {
2798 if (!DRMACH_IS_IO_ID(id))
2799 return (drerr_new(0, EX86_INAPPROP, NULL));
2800
2801 return (NULL);
2802 }
2803
/*
 * No machine-specific work is needed after releasing an IO device;
 * the id is not even inspected.
 */
sbd_error_t *
drmach_io_post_release(drmachid_t id)
{
	_NOTE(ARGUNUSED(id));

	return (NULL);
}
2811
2812 sbd_error_t *
drmach_io_post_attach(drmachid_t id)2813 drmach_io_post_attach(drmachid_t id)
2814 {
2815 if (!DRMACH_IS_IO_ID(id))
2816 return (drerr_new(0, EX86_INAPPROP, NULL));
2817
2818 return (NULL);
2819 }
2820
2821 sbd_error_t *
drmach_io_is_attached(drmachid_t id,int * yes)2822 drmach_io_is_attached(drmachid_t id, int *yes)
2823 {
2824 drmach_device_t *dp;
2825 dev_info_t *dip;
2826 int state;
2827
2828 if (!DRMACH_IS_IO_ID(id))
2829 return (drerr_new(0, EX86_INAPPROP, NULL));
2830 dp = id;
2831
2832 dip = dp->node->getdip(dp->node);
2833 if (dip == NULL) {
2834 *yes = 0;
2835 return (NULL);
2836 }
2837
2838 state = ddi_get_devstate(dip);
2839 *yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
2840 (state == DDI_DEVSTATE_UP));
2841
2842 return (NULL);
2843 }
2844
2845 /*
2846 * Miscellaneous interfaces to support dr driver
2847 */
/*
 * No machine-specific suspend/resume restrictions apply; always
 * report success (0).  Both arguments are ignored.
 */
int
drmach_verify_sr(dev_info_t *dip, int sflag)
{
	_NOTE(ARGUNUSED(dip, sflag));

	return (0);
}
2855
/* No machine-specific work is required at the end of suspend. */
void
drmach_suspend_last(void)
{
}
2860
/* No machine-specific work is required at the start of resume. */
void
drmach_resume_first(void)
{
}
2865
2866 /*
2867 * Log a DR sysevent.
2868 * Return value: 0 success, non-zero failure.
2869 */
int
drmach_log_sysevent(int board, char *hint, int flag, int verbose)
{
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	int rv, km_flag;
	sysevent_value_t evnt_val;
	sysevent_attr_list_t *evnt_attr_list = NULL;
	sbd_error_t *err;
	char attach_pnt[MAXNAMELEN];

	/* Honor the caller's blocking preference for allocations. */
	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
	attach_pnt[0] = '\0';
	/* Map the board number to its attachment-point name. */
	err = drmach_board_name(board, attach_pnt, MAXNAMELEN);
	if (err != NULL) {
		sbd_err_clear(&err);
		rv = -1;
		goto logexit;
	}
	if (verbose) {
		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
		    attach_pnt, hint, flag, verbose);
	}

	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
	    SUNW_KERN_PUB"dr", km_flag)) == NULL) {
		rv = -2;
		goto logexit;
	}
	/* Attach the attachment-point id attribute. */
	evnt_val.value_type = SE_DATA_TYPE_STRING;
	evnt_val.value.sv_string = attach_pnt;
	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, &evnt_val,
	    km_flag)) != 0)
		goto logexit;

	/* Attach the caller-supplied hint attribute. */
	evnt_val.value_type = SE_DATA_TYPE_STRING;
	evnt_val.value.sv_string = hint;
	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, &evnt_val,
	    km_flag)) != 0) {
		/* The partially built attribute list must be freed here. */
		sysevent_free_attr(evnt_attr_list);
		goto logexit;
	}

	/* Ownership of the attribute list passes to the event. */
	(void) sysevent_attach_attributes(ev, evnt_attr_list);

	/*
	 * Log the event but do not sleep waiting for its
	 * delivery. This provides insulation from syseventd.
	 */
	rv = log_sysevent(ev, SE_NOSLEEP, &eid);

logexit:
	if (ev)
		sysevent_free(ev);
	if ((rv != 0) && verbose)
		cmn_err(CE_WARN, "!drmach_log_sysevent failed (rv %d) for %s "
		    " %s\n", rv, attach_pnt, hint);

	return (rv);
}
2930