125cf1a30Sjl /* 225cf1a30Sjl * CDDL HEADER START 325cf1a30Sjl * 425cf1a30Sjl * The contents of this file are subject to the terms of the 525cf1a30Sjl * Common Development and Distribution License (the "License"). 625cf1a30Sjl * You may not use this file except in compliance with the License. 725cf1a30Sjl * 825cf1a30Sjl * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 925cf1a30Sjl * or http://www.opensolaris.org/os/licensing. 1025cf1a30Sjl * See the License for the specific language governing permissions 1125cf1a30Sjl * and limitations under the License. 1225cf1a30Sjl * 1325cf1a30Sjl * When distributing Covered Code, include this CDDL HEADER in each 1425cf1a30Sjl * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1525cf1a30Sjl * If applicable, add the following below this CDDL HEADER, with the 1625cf1a30Sjl * fields enclosed by brackets "[]" replaced with your own identifying 1725cf1a30Sjl * information: Portions Copyright [yyyy] [name of copyright owner] 1825cf1a30Sjl * 1925cf1a30Sjl * CDDL HEADER END 2025cf1a30Sjl */ 2125cf1a30Sjl /* 222850d85bSmv * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 2325cf1a30Sjl * Use is subject to license terms. 
2425cf1a30Sjl */ 2525cf1a30Sjl 2625cf1a30Sjl #pragma ident "%Z%%M% %I% %E% SMI" 2725cf1a30Sjl 2825cf1a30Sjl #include <sys/cpuvar.h> 2925cf1a30Sjl #include <sys/systm.h> 3025cf1a30Sjl #include <sys/sysmacros.h> 3125cf1a30Sjl #include <sys/promif.h> 3225cf1a30Sjl #include <sys/platform_module.h> 3325cf1a30Sjl #include <sys/cmn_err.h> 3425cf1a30Sjl #include <sys/errno.h> 3525cf1a30Sjl #include <sys/machsystm.h> 3625cf1a30Sjl #include <sys/bootconf.h> 3725cf1a30Sjl #include <sys/nvpair.h> 3825cf1a30Sjl #include <sys/kobj.h> 3925cf1a30Sjl #include <sys/mem_cage.h> 4025cf1a30Sjl #include <sys/opl.h> 4125cf1a30Sjl #include <sys/scfd/scfostoescf.h> 4225cf1a30Sjl #include <sys/cpu_sgnblk_defs.h> 4325cf1a30Sjl #include <sys/utsname.h> 4425cf1a30Sjl #include <sys/ddi.h> 4525cf1a30Sjl #include <sys/sunndi.h> 4625cf1a30Sjl #include <sys/lgrp.h> 4725cf1a30Sjl #include <sys/memnode.h> 4825cf1a30Sjl #include <sys/sysmacros.h> 49e603b7d4Spm #include <sys/time.h> 50e603b7d4Spm #include <sys/cpu.h> 5125cf1a30Sjl #include <vm/vm_dep.h> 5225cf1a30Sjl 5325cf1a30Sjl int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *); 540cc8ae86Sav int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp); 550cc8ae86Sav int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp); 560cc8ae86Sav int (*opl_get_mem_addr)(char *unum, char *sid, 570cc8ae86Sav uint64_t offset, uint64_t *paddr); 5825cf1a30Sjl 5925cf1a30Sjl /* Memory for fcode claims. 
16k times # maximum possible IO units */ 6025cf1a30Sjl #define EFCODE_SIZE (OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000) 6125cf1a30Sjl int efcode_size = EFCODE_SIZE; 6225cf1a30Sjl 6325cf1a30Sjl #define OPL_MC_MEMBOARD_SHIFT 38 /* Boards on 256BG boundary */ 6425cf1a30Sjl 6525cf1a30Sjl /* Set the maximum number of boards for DR */ 6625cf1a30Sjl int opl_boards = OPL_MAX_BOARDS; 6725cf1a30Sjl 6825cf1a30Sjl void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t); 6925cf1a30Sjl 7025cf1a30Sjl extern int tsb_lgrp_affinity; 7125cf1a30Sjl 7225cf1a30Sjl int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) * 7325cf1a30Sjl (OPL_MAX_TSBS_PER_PCICH); 7425cf1a30Sjl 7525cf1a30Sjl pgcnt_t opl_startup_cage_size = 0; 7625cf1a30Sjl 773f1fa9a7Sjfrank /* 783f1fa9a7Sjfrank * The length of the delay in seconds in communication with XSCF after 793f1fa9a7Sjfrank * which the warning message will be logged. 803f1fa9a7Sjfrank */ 813f1fa9a7Sjfrank uint_t xscf_connect_delay = 60 * 15; 823f1fa9a7Sjfrank 831e2e7a75Shuah static opl_model_info_t opl_models[] = { 84195196c6Ssubhan { "FF1", OPL_MAX_BOARDS_FF1, FF1, STD_DISPATCH_TABLE }, 85195196c6Ssubhan { "FF2", OPL_MAX_BOARDS_FF2, FF2, STD_DISPATCH_TABLE }, 86195196c6Ssubhan { "DC1", OPL_MAX_BOARDS_DC1, DC1, STD_DISPATCH_TABLE }, 87195196c6Ssubhan { "DC2", OPL_MAX_BOARDS_DC2, DC2, EXT_DISPATCH_TABLE }, 88195196c6Ssubhan { "DC3", OPL_MAX_BOARDS_DC3, DC3, EXT_DISPATCH_TABLE }, 8978ed97a7Sjl { "IKKAKU", OPL_MAX_BOARDS_IKKAKU, IKKAKU, STD_DISPATCH_TABLE }, 901e2e7a75Shuah }; 911e2e7a75Shuah static int opl_num_models = sizeof (opl_models)/sizeof (opl_model_info_t); 921e2e7a75Shuah 93195196c6Ssubhan /* 9472b9fce9Ssubhan * opl_cur_model 95195196c6Ssubhan */ 9672b9fce9Ssubhan static opl_model_info_t *opl_cur_model = NULL; 971e2e7a75Shuah 9825cf1a30Sjl static struct memlist *opl_memlist_per_board(struct memlist *ml); 993f1fa9a7Sjfrank static void post_xscf_msg(char *, int); 1003f1fa9a7Sjfrank static void pass2xscf_thread(); 
10125cf1a30Sjl 102e603b7d4Spm /* 103e603b7d4Spm * Note FF/DC out-of-order instruction engine takes only a 104e603b7d4Spm * single cycle to execute each spin loop 105e603b7d4Spm * for comparison, Panther takes 6 cycles for same loop 106575a7426Spt * OPL_BOFF_SPIN = base spin loop, roughly one memory reference time 107575a7426Spt * OPL_BOFF_TM = approx nsec for OPL sleep instruction (1600 for OPL-C) 108575a7426Spt * OPL_BOFF_SLEEP = approx number of SPIN iterations to equal one sleep 109575a7426Spt * OPL_BOFF_MAX_SCALE - scaling factor for max backoff based on active cpus 110575a7426Spt * Listed values tuned for 2.15GHz to 2.64GHz systems 111e603b7d4Spm * Value may change for future systems 112e603b7d4Spm */ 113575a7426Spt #define OPL_BOFF_SPIN 7 114575a7426Spt #define OPL_BOFF_SLEEP 4 115575a7426Spt #define OPL_BOFF_TM 1600 116575a7426Spt #define OPL_BOFF_MAX_SCALE 8 117e603b7d4Spm 1182850d85bSmv #define OPL_CLOCK_TICK_THRESHOLD 128 1192850d85bSmv #define OPL_CLOCK_TICK_NCPUS 64 1202850d85bSmv 1212850d85bSmv extern int clock_tick_threshold; 1222850d85bSmv extern int clock_tick_ncpus; 1232850d85bSmv 12425cf1a30Sjl int 12525cf1a30Sjl set_platform_max_ncpus(void) 12625cf1a30Sjl { 12725cf1a30Sjl return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS); 12825cf1a30Sjl } 12925cf1a30Sjl 13025cf1a30Sjl int 13125cf1a30Sjl set_platform_tsb_spares(void) 13225cf1a30Sjl { 13325cf1a30Sjl return (MIN(opl_tsb_spares, MAX_UPA)); 13425cf1a30Sjl } 13525cf1a30Sjl 1361e2e7a75Shuah static void 1371e2e7a75Shuah set_model_info() 1381e2e7a75Shuah { 139195196c6Ssubhan extern int ts_dispatch_extended; 1401e2e7a75Shuah char name[MAXSYSNAME]; 1411e2e7a75Shuah int i; 1421e2e7a75Shuah 1431e2e7a75Shuah /* 1441e2e7a75Shuah * Get model name from the root node. 1451e2e7a75Shuah * 1461e2e7a75Shuah * We are using the prom device tree since, at this point, 1471e2e7a75Shuah * the Solaris device tree is not yet setup. 
1481e2e7a75Shuah */ 1491e2e7a75Shuah (void) prom_getprop(prom_rootnode(), "model", (caddr_t)name); 1501e2e7a75Shuah 1511e2e7a75Shuah for (i = 0; i < opl_num_models; i++) { 1521e2e7a75Shuah if (strncmp(name, opl_models[i].model_name, MAXSYSNAME) == 0) { 1531e2e7a75Shuah opl_cur_model = &opl_models[i]; 1541e2e7a75Shuah break; 1551e2e7a75Shuah } 1561e2e7a75Shuah } 157195196c6Ssubhan 1589b71d8e9Swh /* 1599b71d8e9Swh * If model not matched, it's an unknown model. 16078ed97a7Sjl * Just return. It will default to standard dispatch tables. 1619b71d8e9Swh */ 1621e2e7a75Shuah if (i == opl_num_models) 1639b71d8e9Swh return; 164195196c6Ssubhan 165195196c6Ssubhan if ((opl_cur_model->model_cmds & EXT_DISPATCH_TABLE) && 166e98fafb9Sjl (ts_dispatch_extended == -1)) { 167195196c6Ssubhan /* 168195196c6Ssubhan * Based on a platform model, select a dispatch table. 169195196c6Ssubhan * Only DC2 and DC3 systems uses the alternate/extended 170195196c6Ssubhan * TS dispatch table. 17178ed97a7Sjl * IKKAKU, FF1, FF2 and DC1 systems use standard dispatch 17278ed97a7Sjl * tables. 173195196c6Ssubhan */ 174195196c6Ssubhan ts_dispatch_extended = 1; 175195196c6Ssubhan } 176195196c6Ssubhan 1771e2e7a75Shuah } 1781e2e7a75Shuah 1791e2e7a75Shuah static void 1801e2e7a75Shuah set_max_mmu_ctxdoms() 1811e2e7a75Shuah { 1821e2e7a75Shuah extern uint_t max_mmu_ctxdoms; 1831e2e7a75Shuah int max_boards; 1841e2e7a75Shuah 1851e2e7a75Shuah /* 1861e2e7a75Shuah * From the model, get the maximum number of boards 1871e2e7a75Shuah * supported and set the value accordingly. If the model 1881e2e7a75Shuah * could not be determined or recognized, we assume the max value. 1891e2e7a75Shuah */ 1901e2e7a75Shuah if (opl_cur_model == NULL) 1911e2e7a75Shuah max_boards = OPL_MAX_BOARDS; 1921e2e7a75Shuah else 1931e2e7a75Shuah max_boards = opl_cur_model->model_max_boards; 1941e2e7a75Shuah 1951e2e7a75Shuah /* 1961e2e7a75Shuah * On OPL, cores and MMUs are one-to-one. 
1971e2e7a75Shuah */ 1981e2e7a75Shuah max_mmu_ctxdoms = OPL_MAX_CORE_UNITS_PER_BOARD * max_boards; 1991e2e7a75Shuah } 2001e2e7a75Shuah 20125cf1a30Sjl #pragma weak mmu_init_large_pages 20225cf1a30Sjl 20325cf1a30Sjl void 20425cf1a30Sjl set_platform_defaults(void) 20525cf1a30Sjl { 20625cf1a30Sjl extern char *tod_module_name; 20725cf1a30Sjl extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int); 20825cf1a30Sjl extern void mmu_init_large_pages(size_t); 20925cf1a30Sjl 21025cf1a30Sjl /* Set the CPU signature function pointer */ 21125cf1a30Sjl cpu_sgn_func = cpu_sgn_update; 21225cf1a30Sjl 21325cf1a30Sjl /* Set appropriate tod module for OPL platform */ 21425cf1a30Sjl ASSERT(tod_module_name == NULL); 21525cf1a30Sjl tod_module_name = "todopl"; 21625cf1a30Sjl 21725cf1a30Sjl if ((mmu_page_sizes == max_mmu_page_sizes) && 218e12a8a13Ssusans (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) { 21925cf1a30Sjl if (&mmu_init_large_pages) 22025cf1a30Sjl mmu_init_large_pages(mmu_ism_pagesize); 22125cf1a30Sjl } 22225cf1a30Sjl 22325cf1a30Sjl tsb_lgrp_affinity = 1; 2241e2e7a75Shuah 2251e2e7a75Shuah set_max_mmu_ctxdoms(); 22625cf1a30Sjl } 22725cf1a30Sjl 22825cf1a30Sjl /* 22925cf1a30Sjl * Convert logical a board number to a physical one. 
23025cf1a30Sjl */ 23125cf1a30Sjl 23225cf1a30Sjl #define LSBPROP "board#" 23325cf1a30Sjl #define PSBPROP "physical-board#" 23425cf1a30Sjl 23525cf1a30Sjl int 23625cf1a30Sjl opl_get_physical_board(int id) 23725cf1a30Sjl { 23825cf1a30Sjl dev_info_t *root_dip, *dip = NULL; 23925cf1a30Sjl char *dname = NULL; 24025cf1a30Sjl int circ; 24125cf1a30Sjl 24225cf1a30Sjl pnode_t pnode; 24325cf1a30Sjl char pname[MAXSYSNAME] = {0}; 24425cf1a30Sjl 24525cf1a30Sjl int lsb_id; /* Logical System Board ID */ 24625cf1a30Sjl int psb_id; /* Physical System Board ID */ 24725cf1a30Sjl 24825cf1a30Sjl 24925cf1a30Sjl /* 25025cf1a30Sjl * This function is called on early stage of bootup when the 25125cf1a30Sjl * kernel device tree is not initialized yet, and also 25225cf1a30Sjl * later on when the device tree is up. We want to try 25325cf1a30Sjl * the fast track first. 25425cf1a30Sjl */ 25525cf1a30Sjl root_dip = ddi_root_node(); 25625cf1a30Sjl if (root_dip) { 25725cf1a30Sjl /* Get from devinfo node */ 25825cf1a30Sjl ndi_devi_enter(root_dip, &circ); 25925cf1a30Sjl for (dip = ddi_get_child(root_dip); dip; 26025cf1a30Sjl dip = ddi_get_next_sibling(dip)) { 26125cf1a30Sjl 26225cf1a30Sjl dname = ddi_node_name(dip); 26325cf1a30Sjl if (strncmp(dname, "pseudo-mc", 9) != 0) 26425cf1a30Sjl continue; 26525cf1a30Sjl 26625cf1a30Sjl if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip, 26725cf1a30Sjl DDI_PROP_DONTPASS, LSBPROP, -1)) == -1) 26825cf1a30Sjl continue; 26925cf1a30Sjl 27025cf1a30Sjl if (id == lsb_id) { 27125cf1a30Sjl if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY, 27225cf1a30Sjl dip, DDI_PROP_DONTPASS, PSBPROP, -1)) 27325cf1a30Sjl == -1) { 27425cf1a30Sjl ndi_devi_exit(root_dip, circ); 27525cf1a30Sjl return (-1); 27625cf1a30Sjl } else { 27725cf1a30Sjl ndi_devi_exit(root_dip, circ); 27825cf1a30Sjl return (psb_id); 27925cf1a30Sjl } 28025cf1a30Sjl } 28125cf1a30Sjl } 28225cf1a30Sjl ndi_devi_exit(root_dip, circ); 28325cf1a30Sjl } 28425cf1a30Sjl 28525cf1a30Sjl /* 28625cf1a30Sjl * We do not have the kernel device 
tree, or we did not 28725cf1a30Sjl * find the node for some reason (let's say the kernel 28825cf1a30Sjl * device tree was modified), let's try the OBP tree. 28925cf1a30Sjl */ 29025cf1a30Sjl pnode = prom_rootnode(); 29125cf1a30Sjl for (pnode = prom_childnode(pnode); pnode; 29225cf1a30Sjl pnode = prom_nextnode(pnode)) { 29325cf1a30Sjl 29425cf1a30Sjl if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) || 29525cf1a30Sjl (strncmp(pname, "pseudo-mc", 9) != 0)) 29625cf1a30Sjl continue; 29725cf1a30Sjl 29825cf1a30Sjl if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1) 29925cf1a30Sjl continue; 30025cf1a30Sjl 30125cf1a30Sjl if (id == lsb_id) { 30225cf1a30Sjl if (prom_getprop(pnode, PSBPROP, 30325cf1a30Sjl (caddr_t)&psb_id) == -1) { 30425cf1a30Sjl return (-1); 30525cf1a30Sjl } else { 30625cf1a30Sjl return (psb_id); 30725cf1a30Sjl } 30825cf1a30Sjl } 30925cf1a30Sjl } 31025cf1a30Sjl 31125cf1a30Sjl return (-1); 31225cf1a30Sjl } 31325cf1a30Sjl 31425cf1a30Sjl /* 31525cf1a30Sjl * For OPL it's possible that memory from two or more successive boards 31625cf1a30Sjl * will be contiguous across the boards, and therefore represented as a 31725cf1a30Sjl * single chunk. 31825cf1a30Sjl * This function splits such chunks down the board boundaries. 
31925cf1a30Sjl */ 32025cf1a30Sjl static struct memlist * 32125cf1a30Sjl opl_memlist_per_board(struct memlist *ml) 32225cf1a30Sjl { 32325cf1a30Sjl uint64_t ssize, low, high, boundary; 32425cf1a30Sjl struct memlist *head, *tail, *new; 32525cf1a30Sjl 32625cf1a30Sjl ssize = (1ull << OPL_MC_MEMBOARD_SHIFT); 32725cf1a30Sjl 32825cf1a30Sjl head = tail = NULL; 32925cf1a30Sjl 33025cf1a30Sjl for (; ml; ml = ml->next) { 33125cf1a30Sjl low = (uint64_t)ml->address; 33225cf1a30Sjl high = low+(uint64_t)(ml->size); 33325cf1a30Sjl while (low < high) { 33425cf1a30Sjl boundary = roundup(low+1, ssize); 33525cf1a30Sjl boundary = MIN(high, boundary); 33625cf1a30Sjl new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP); 33725cf1a30Sjl new->address = low; 33825cf1a30Sjl new->size = boundary - low; 33925cf1a30Sjl if (head == NULL) 34025cf1a30Sjl head = new; 34125cf1a30Sjl if (tail) { 34225cf1a30Sjl tail->next = new; 34325cf1a30Sjl new->prev = tail; 34425cf1a30Sjl } 34525cf1a30Sjl tail = new; 34625cf1a30Sjl low = boundary; 34725cf1a30Sjl } 34825cf1a30Sjl } 34925cf1a30Sjl return (head); 35025cf1a30Sjl } 35125cf1a30Sjl 35225cf1a30Sjl void 35325cf1a30Sjl set_platform_cage_params(void) 35425cf1a30Sjl { 35525cf1a30Sjl extern pgcnt_t total_pages; 35625cf1a30Sjl extern struct memlist *phys_avail; 35725cf1a30Sjl struct memlist *ml, *tml; 35825cf1a30Sjl 35925cf1a30Sjl if (kernel_cage_enable) { 36025cf1a30Sjl pgcnt_t preferred_cage_size; 36125cf1a30Sjl 362e98fafb9Sjl preferred_cage_size = MAX(opl_startup_cage_size, 363e98fafb9Sjl total_pages / 256); 36425cf1a30Sjl 36525cf1a30Sjl ml = opl_memlist_per_board(phys_avail); 36625cf1a30Sjl 36725cf1a30Sjl /* 36825cf1a30Sjl * Note: we are assuming that post has load the 36925cf1a30Sjl * whole show in to the high end of memory. Having 37025cf1a30Sjl * taken this leap, we copy the whole of phys_avail 37125cf1a30Sjl * the glist and arrange for the cage to grow 37225cf1a30Sjl * downward (descending pfns). 
37325cf1a30Sjl */ 37485f58038Sdp kcage_range_init(ml, KCAGE_DOWN, preferred_cage_size); 37525cf1a30Sjl 37625cf1a30Sjl /* free the memlist */ 37725cf1a30Sjl do { 37825cf1a30Sjl tml = ml->next; 37925cf1a30Sjl kmem_free(ml, sizeof (struct memlist)); 38025cf1a30Sjl ml = tml; 38125cf1a30Sjl } while (ml != NULL); 38225cf1a30Sjl } 38325cf1a30Sjl 38425cf1a30Sjl if (kcage_on) 38525cf1a30Sjl cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED"); 38625cf1a30Sjl else 38725cf1a30Sjl cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED"); 38825cf1a30Sjl } 38925cf1a30Sjl 39025cf1a30Sjl /*ARGSUSED*/ 39125cf1a30Sjl int 39225cf1a30Sjl plat_cpu_poweron(struct cpu *cp) 39325cf1a30Sjl { 39425cf1a30Sjl int (*opl_cpu_poweron)(struct cpu *) = NULL; 39525cf1a30Sjl 39625cf1a30Sjl opl_cpu_poweron = 39725cf1a30Sjl (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0); 39825cf1a30Sjl 39925cf1a30Sjl if (opl_cpu_poweron == NULL) 40025cf1a30Sjl return (ENOTSUP); 40125cf1a30Sjl else 40225cf1a30Sjl return ((opl_cpu_poweron)(cp)); 40325cf1a30Sjl 40425cf1a30Sjl } 40525cf1a30Sjl 40625cf1a30Sjl /*ARGSUSED*/ 40725cf1a30Sjl int 40825cf1a30Sjl plat_cpu_poweroff(struct cpu *cp) 40925cf1a30Sjl { 41025cf1a30Sjl int (*opl_cpu_poweroff)(struct cpu *) = NULL; 41125cf1a30Sjl 41225cf1a30Sjl opl_cpu_poweroff = 41325cf1a30Sjl (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0); 41425cf1a30Sjl 41525cf1a30Sjl if (opl_cpu_poweroff == NULL) 41625cf1a30Sjl return (ENOTSUP); 41725cf1a30Sjl else 41825cf1a30Sjl return ((opl_cpu_poweroff)(cp)); 41925cf1a30Sjl 42025cf1a30Sjl } 42125cf1a30Sjl 42225cf1a30Sjl int 42325cf1a30Sjl plat_max_boards(void) 42425cf1a30Sjl { 425*4af09fceSwh /* 426*4af09fceSwh * If the model cannot be determined, default to the max value. 427*4af09fceSwh * Otherwise, Ikkaku model only supports 1 system board. 
428*4af09fceSwh */ 429*4af09fceSwh if ((opl_cur_model != NULL) && (opl_cur_model->model_type == IKKAKU)) 430*4af09fceSwh return (OPL_MAX_BOARDS_IKKAKU); 431*4af09fceSwh else 432*4af09fceSwh return (OPL_MAX_BOARDS); 43325cf1a30Sjl } 43425cf1a30Sjl 43525cf1a30Sjl int 43625cf1a30Sjl plat_max_cpu_units_per_board(void) 43725cf1a30Sjl { 43825cf1a30Sjl return (OPL_MAX_CPU_PER_BOARD); 43925cf1a30Sjl } 44025cf1a30Sjl 44125cf1a30Sjl int 44225cf1a30Sjl plat_max_mem_units_per_board(void) 44325cf1a30Sjl { 44425cf1a30Sjl return (OPL_MAX_MEM_UNITS_PER_BOARD); 44525cf1a30Sjl } 44625cf1a30Sjl 44725cf1a30Sjl int 44825cf1a30Sjl plat_max_io_units_per_board(void) 44925cf1a30Sjl { 45025cf1a30Sjl return (OPL_MAX_IO_UNITS_PER_BOARD); 45125cf1a30Sjl } 45225cf1a30Sjl 45325cf1a30Sjl int 45425cf1a30Sjl plat_max_cmp_units_per_board(void) 45525cf1a30Sjl { 45625cf1a30Sjl return (OPL_MAX_CMP_UNITS_PER_BOARD); 45725cf1a30Sjl } 45825cf1a30Sjl 45925cf1a30Sjl int 46025cf1a30Sjl plat_max_core_units_per_board(void) 46125cf1a30Sjl { 46225cf1a30Sjl return (OPL_MAX_CORE_UNITS_PER_BOARD); 46325cf1a30Sjl } 46425cf1a30Sjl 46525cf1a30Sjl int 46625cf1a30Sjl plat_pfn_to_mem_node(pfn_t pfn) 46725cf1a30Sjl { 46825cf1a30Sjl return (pfn >> mem_node_pfn_shift); 46925cf1a30Sjl } 47025cf1a30Sjl 47125cf1a30Sjl /* ARGSUSED */ 47225cf1a30Sjl void 473986fd29aSsetje plat_build_mem_nodes(prom_memlist_t *list, size_t nelems) 47425cf1a30Sjl { 47525cf1a30Sjl size_t elem; 47625cf1a30Sjl pfn_t basepfn; 47725cf1a30Sjl pgcnt_t npgs; 47825cf1a30Sjl uint64_t boundary, ssize; 47925cf1a30Sjl uint64_t low, high; 48025cf1a30Sjl 48125cf1a30Sjl /* 48225cf1a30Sjl * OPL mem slices are always aligned on a 256GB boundary. 48325cf1a30Sjl */ 48425cf1a30Sjl mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT; 48525cf1a30Sjl mem_node_physalign = 0; 48625cf1a30Sjl 48725cf1a30Sjl /* 48825cf1a30Sjl * Boot install lists are arranged <addr, len>, <addr, len>, ... 
48925cf1a30Sjl */ 49025cf1a30Sjl ssize = (1ull << OPL_MC_MEMBOARD_SHIFT); 491986fd29aSsetje for (elem = 0; elem < nelems; list++, elem++) { 492986fd29aSsetje low = list->addr; 493986fd29aSsetje high = low + list->size; 49425cf1a30Sjl while (low < high) { 49525cf1a30Sjl boundary = roundup(low+1, ssize); 49625cf1a30Sjl boundary = MIN(high, boundary); 49725cf1a30Sjl basepfn = btop(low); 49825cf1a30Sjl npgs = btop(boundary - low); 49925cf1a30Sjl mem_node_add_slice(basepfn, basepfn + npgs - 1); 50025cf1a30Sjl low = boundary; 50125cf1a30Sjl } 50225cf1a30Sjl } 50325cf1a30Sjl } 50425cf1a30Sjl 50525cf1a30Sjl /* 50625cf1a30Sjl * Find the CPU associated with a slice at boot-time. 50725cf1a30Sjl */ 50825cf1a30Sjl void 50925cf1a30Sjl plat_fill_mc(pnode_t nodeid) 51025cf1a30Sjl { 51125cf1a30Sjl int board; 51225cf1a30Sjl int memnode; 51325cf1a30Sjl struct { 51425cf1a30Sjl uint64_t addr; 51525cf1a30Sjl uint64_t size; 51625cf1a30Sjl } mem_range; 51725cf1a30Sjl 51825cf1a30Sjl if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) { 51925cf1a30Sjl panic("Can not find board# property in mc node %x", nodeid); 52025cf1a30Sjl } 52125cf1a30Sjl if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) { 52225cf1a30Sjl panic("Can not find sb-mem-ranges property in mc node %x", 523e98fafb9Sjl nodeid); 52425cf1a30Sjl } 52525cf1a30Sjl memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT; 52625cf1a30Sjl plat_assign_lgrphand_to_mem_node(board, memnode); 52725cf1a30Sjl } 52825cf1a30Sjl 52925cf1a30Sjl /* 53025cf1a30Sjl * Return the platform handle for the lgroup containing the given CPU 53125cf1a30Sjl * 53225cf1a30Sjl * For OPL, lgroup platform handle == board #. 
53325cf1a30Sjl */ 53425cf1a30Sjl 53525cf1a30Sjl extern int mpo_disabled; 53625cf1a30Sjl extern lgrp_handle_t lgrp_default_handle; 53725cf1a30Sjl 53825cf1a30Sjl lgrp_handle_t 53925cf1a30Sjl plat_lgrp_cpu_to_hand(processorid_t id) 54025cf1a30Sjl { 54125cf1a30Sjl lgrp_handle_t plathand; 54225cf1a30Sjl 54325cf1a30Sjl /* 54425cf1a30Sjl * Return the real platform handle for the CPU until 54525cf1a30Sjl * such time as we know that MPO should be disabled. 54625cf1a30Sjl * At that point, we set the "mpo_disabled" flag to true, 54725cf1a30Sjl * and from that point on, return the default handle. 54825cf1a30Sjl * 54925cf1a30Sjl * By the time we know that MPO should be disabled, the 55025cf1a30Sjl * first CPU will have already been added to a leaf 55125cf1a30Sjl * lgroup, but that's ok. The common lgroup code will 55225cf1a30Sjl * double check that the boot CPU is in the correct place, 55325cf1a30Sjl * and in the case where mpo should be disabled, will move 55425cf1a30Sjl * it to the root if necessary. 55525cf1a30Sjl */ 55625cf1a30Sjl if (mpo_disabled) { 55725cf1a30Sjl /* If MPO is disabled, return the default (UMA) handle */ 55825cf1a30Sjl plathand = lgrp_default_handle; 55925cf1a30Sjl } else 56025cf1a30Sjl plathand = (lgrp_handle_t)LSB_ID(id); 56125cf1a30Sjl return (plathand); 56225cf1a30Sjl } 56325cf1a30Sjl 56425cf1a30Sjl /* 56525cf1a30Sjl * Platform specific lgroup initialization 56625cf1a30Sjl */ 56725cf1a30Sjl void 56825cf1a30Sjl plat_lgrp_init(void) 56925cf1a30Sjl { 57025cf1a30Sjl extern uint32_t lgrp_expand_proc_thresh; 57125cf1a30Sjl extern uint32_t lgrp_expand_proc_diff; 572cc85acdaSpm const uint_t m = LGRP_LOADAVG_THREAD_MAX; 57325cf1a30Sjl 57425cf1a30Sjl /* 57525cf1a30Sjl * Set tuneables for the OPL architecture 57625cf1a30Sjl * 577cc85acdaSpm * lgrp_expand_proc_thresh is the threshold load on the set of 578cc85acdaSpm * lgroups a process is currently using on before considering 579cc85acdaSpm * adding another lgroup to the set. 
For Oly-C and Jupiter 580cc85acdaSpm * systems, there are four sockets per lgroup. Setting 581cc85acdaSpm * lgrp_expand_proc_thresh to add lgroups when the load reaches 582cc85acdaSpm * four threads will spread the load when it exceeds one thread 583cc85acdaSpm * per socket, optimizing memory bandwidth and L2 cache space. 58425cf1a30Sjl * 585cc85acdaSpm * lgrp_expand_proc_diff determines how much less another lgroup 586cc85acdaSpm * must be loaded before shifting the start location of a thread 587cc85acdaSpm * to it. 58825cf1a30Sjl * 589cc85acdaSpm * lgrp_loadavg_tolerance is the threshold where two lgroups are 590cc85acdaSpm * considered to have different loads. It is set to be less than 591cc85acdaSpm * 1% so that even a small residual load will be considered different 592cc85acdaSpm * from no residual load. 593cc85acdaSpm * 594cc85acdaSpm * We note loadavg values are not precise. 595cc85acdaSpm * Every 1/10 of a second loadavg values are reduced by 5%. 596cc85acdaSpm * This adjustment can come in the middle of the lgroup selection 597cc85acdaSpm * process, and for larger parallel apps with many threads can 598cc85acdaSpm * frequently occur between the start of the second thread 599cc85acdaSpm * placement and the finish of the last thread placement. 600cc85acdaSpm * We also must be careful to not use too small of a threshold 601cc85acdaSpm * since the cumulative decay for 1 second idle time is 40%. 602cc85acdaSpm * That is, the residual load from completed threads will still 603cc85acdaSpm * be 60% one second after the proc goes idle or 8% after 5 seconds. 
604cc85acdaSpm * 605cc85acdaSpm * To allow for lag time in loadavg calculations 606cc85acdaSpm * remote thresh = 3.75 * LGRP_LOADAVG_THREAD_MAX 607cc85acdaSpm * local thresh = 0.75 * LGRP_LOADAVG_THREAD_MAX 608cc85acdaSpm * tolerance = 0.0078 * LGRP_LOADAVG_THREAD_MAX 609cc85acdaSpm * 610cc85acdaSpm * The load placement algorithms consider LGRP_LOADAVG_THREAD_MAX 611cc85acdaSpm * as the equivalent of a load of 1. To make the code more compact, 612cc85acdaSpm * we set m = LGRP_LOADAVG_THREAD_MAX. 61325cf1a30Sjl */ 614cc85acdaSpm lgrp_expand_proc_thresh = (m * 3) + (m >> 1) + (m >> 2); 615cc85acdaSpm lgrp_expand_proc_diff = (m >> 1) + (m >> 2); 616cc85acdaSpm lgrp_loadavg_tolerance = (m >> 7); 61725cf1a30Sjl } 61825cf1a30Sjl 61925cf1a30Sjl /* 62025cf1a30Sjl * Platform notification of lgroup (re)configuration changes 62125cf1a30Sjl */ 62225cf1a30Sjl /*ARGSUSED*/ 62325cf1a30Sjl void 62425cf1a30Sjl plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg) 62525cf1a30Sjl { 62625cf1a30Sjl update_membounds_t *umb; 62725cf1a30Sjl lgrp_config_mem_rename_t lmr; 62825cf1a30Sjl int sbd, tbd; 62925cf1a30Sjl lgrp_handle_t hand, shand, thand; 63025cf1a30Sjl int mnode, snode, tnode; 63125cf1a30Sjl pfn_t start, end; 63225cf1a30Sjl 63325cf1a30Sjl if (mpo_disabled) 63425cf1a30Sjl return; 63525cf1a30Sjl 63625cf1a30Sjl switch (evt) { 63725cf1a30Sjl 63825cf1a30Sjl case LGRP_CONFIG_MEM_ADD: 63925cf1a30Sjl /* 64025cf1a30Sjl * Establish the lgroup handle to memnode translation. 64125cf1a30Sjl */ 64225cf1a30Sjl umb = (update_membounds_t *)arg; 64325cf1a30Sjl 64425cf1a30Sjl hand = umb->u_board; 64525cf1a30Sjl mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT); 64625cf1a30Sjl plat_assign_lgrphand_to_mem_node(hand, mnode); 64725cf1a30Sjl 64825cf1a30Sjl break; 64925cf1a30Sjl 65025cf1a30Sjl case LGRP_CONFIG_MEM_DEL: 65125cf1a30Sjl /* 65225cf1a30Sjl * Special handling for possible memory holes. 
65325cf1a30Sjl */ 65425cf1a30Sjl umb = (update_membounds_t *)arg; 65525cf1a30Sjl hand = umb->u_board; 65625cf1a30Sjl if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) { 65725cf1a30Sjl if (mem_node_config[mnode].exists) { 65825cf1a30Sjl start = mem_node_config[mnode].physbase; 65925cf1a30Sjl end = mem_node_config[mnode].physmax; 66025cf1a30Sjl mem_node_pre_del_slice(start, end); 66125cf1a30Sjl mem_node_post_del_slice(start, end, 0); 66225cf1a30Sjl } 66325cf1a30Sjl } 66425cf1a30Sjl 66525cf1a30Sjl break; 66625cf1a30Sjl 66725cf1a30Sjl case LGRP_CONFIG_MEM_RENAME: 66825cf1a30Sjl /* 66925cf1a30Sjl * During a DR copy-rename operation, all of the memory 67025cf1a30Sjl * on one board is moved to another board -- but the 67125cf1a30Sjl * addresses/pfns and memnodes don't change. This means 67225cf1a30Sjl * the memory has changed locations without changing identity. 67325cf1a30Sjl * 67425cf1a30Sjl * Source is where we are copying from and target is where we 67525cf1a30Sjl * are copying to. After source memnode is copied to target 67625cf1a30Sjl * memnode, the physical addresses of the target memnode are 67725cf1a30Sjl * renamed to match what the source memnode had. Then target 67825cf1a30Sjl * memnode can be removed and source memnode can take its 67925cf1a30Sjl * place. 68025cf1a30Sjl * 68125cf1a30Sjl * To do this, swap the lgroup handle to memnode mappings for 68225cf1a30Sjl * the boards, so target lgroup will have source memnode and 68325cf1a30Sjl * source lgroup will have empty target memnode which is where 68425cf1a30Sjl * its memory will go (if any is added to it later). 68525cf1a30Sjl * 68625cf1a30Sjl * Then source memnode needs to be removed from its lgroup 68725cf1a30Sjl * and added to the target lgroup where the memory was living 68825cf1a30Sjl * but under a different name/memnode. 
The memory was in the 68925cf1a30Sjl * target memnode and now lives in the source memnode with 69025cf1a30Sjl * different physical addresses even though it is the same 69125cf1a30Sjl * memory. 69225cf1a30Sjl */ 69325cf1a30Sjl sbd = arg & 0xffff; 69425cf1a30Sjl tbd = (arg & 0xffff0000) >> 16; 69525cf1a30Sjl shand = sbd; 69625cf1a30Sjl thand = tbd; 69725cf1a30Sjl snode = plat_lgrphand_to_mem_node(shand); 69825cf1a30Sjl tnode = plat_lgrphand_to_mem_node(thand); 69925cf1a30Sjl 70025cf1a30Sjl /* 70125cf1a30Sjl * Special handling for possible memory holes. 70225cf1a30Sjl */ 70325cf1a30Sjl if (tnode != -1 && mem_node_config[tnode].exists) { 70468ac2337Sjl start = mem_node_config[tnode].physbase; 70568ac2337Sjl end = mem_node_config[tnode].physmax; 70625cf1a30Sjl mem_node_pre_del_slice(start, end); 70725cf1a30Sjl mem_node_post_del_slice(start, end, 0); 70825cf1a30Sjl } 70925cf1a30Sjl 71025cf1a30Sjl plat_assign_lgrphand_to_mem_node(thand, snode); 71125cf1a30Sjl plat_assign_lgrphand_to_mem_node(shand, tnode); 71225cf1a30Sjl 71325cf1a30Sjl lmr.lmem_rename_from = shand; 71425cf1a30Sjl lmr.lmem_rename_to = thand; 71525cf1a30Sjl 71625cf1a30Sjl /* 71725cf1a30Sjl * Remove source memnode of copy rename from its lgroup 71825cf1a30Sjl * and add it to its new target lgroup 71925cf1a30Sjl */ 72025cf1a30Sjl lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode, 72125cf1a30Sjl (uintptr_t)&lmr); 72225cf1a30Sjl 72325cf1a30Sjl break; 72425cf1a30Sjl 72525cf1a30Sjl default: 72625cf1a30Sjl break; 72725cf1a30Sjl } 72825cf1a30Sjl } 72925cf1a30Sjl 73025cf1a30Sjl /* 73125cf1a30Sjl * Return latency between "from" and "to" lgroups 73225cf1a30Sjl * 73325cf1a30Sjl * This latency number can only be used for relative comparison 73425cf1a30Sjl * between lgroups on the running system, cannot be used across platforms, 73525cf1a30Sjl * and may not reflect the actual latency. It is platform and implementation 73625cf1a30Sjl * specific, so platform gets to decide its value. 
It would be nice if the 73725cf1a30Sjl * number was at least proportional to make comparisons more meaningful though. 73825cf1a30Sjl * NOTE: The numbers below are supposed to be load latencies for uncached 73925cf1a30Sjl * memory divided by 10. 74025cf1a30Sjl * 74125cf1a30Sjl */ 74225cf1a30Sjl int 74325cf1a30Sjl plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to) 74425cf1a30Sjl { 74525cf1a30Sjl /* 74625cf1a30Sjl * Return min remote latency when there are more than two lgroups 74725cf1a30Sjl * (root and child) and getting latency between two different lgroups 74825cf1a30Sjl * or root is involved 74925cf1a30Sjl */ 75025cf1a30Sjl if (lgrp_optimizations() && (from != to || 75125cf1a30Sjl from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)) 75271b3c2ffShyw return (42); 75325cf1a30Sjl else 75471b3c2ffShyw return (35); 75525cf1a30Sjl } 75625cf1a30Sjl 75725cf1a30Sjl /* 75825cf1a30Sjl * Return platform handle for root lgroup 75925cf1a30Sjl */ 76025cf1a30Sjl lgrp_handle_t 76125cf1a30Sjl plat_lgrp_root_hand(void) 76225cf1a30Sjl { 76325cf1a30Sjl if (mpo_disabled) 76425cf1a30Sjl return (lgrp_default_handle); 76525cf1a30Sjl 76625cf1a30Sjl return (LGRP_DEFAULT_HANDLE); 76725cf1a30Sjl } 76825cf1a30Sjl 76925cf1a30Sjl /*ARGSUSED*/ 77025cf1a30Sjl void 77125cf1a30Sjl plat_freelist_process(int mnode) 77225cf1a30Sjl { 77325cf1a30Sjl } 77425cf1a30Sjl 77525cf1a30Sjl void 77625cf1a30Sjl load_platform_drivers(void) 77725cf1a30Sjl { 77825cf1a30Sjl (void) i_ddi_attach_pseudo_node("dr"); 77925cf1a30Sjl } 78025cf1a30Sjl 78125cf1a30Sjl /* 78225cf1a30Sjl * No platform drivers on this platform 78325cf1a30Sjl */ 78425cf1a30Sjl char *platform_module_list[] = { 78525cf1a30Sjl (char *)0 78625cf1a30Sjl }; 78725cf1a30Sjl 78825cf1a30Sjl /*ARGSUSED*/ 78925cf1a30Sjl void 79025cf1a30Sjl plat_tod_fault(enum tod_fault_type tod_bad) 79125cf1a30Sjl { 79225cf1a30Sjl } 79325cf1a30Sjl 79425cf1a30Sjl /*ARGSUSED*/ 79525cf1a30Sjl void 79625cf1a30Sjl cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t 
    sub_state, int cpuid)
{
	static void (*scf_panic_callback)(int);
	static void (*scf_shutdown_callback)(int);

	/*
	 * This is for notifying system panic/shutdown to SCF.
	 * In case of shutdown and panic, SCF call back
	 * function should be called.
	 * <SCF call back functions>
	 *   scf_panic_callb()   : panicsys()->panic_quiesce_hw()
	 *   scf_shutdown_callb(): halt() or power_down() or reboot_machine()
	 * cpuid should be -1 and state should be SIGST_EXIT.
	 */
	if (state == SIGST_EXIT && cpuid == -1) {

		/*
		 * find the symbol for the SCF panic callback routine in driver
		 * (resolved lazily and cached in the static pointers above;
		 * the scfd driver may not be loaded yet on earlier calls)
		 */
		if (scf_panic_callback == NULL)
			scf_panic_callback = (void (*)(int))
			    modgetsymvalue("scf_panic_callb", 0);
		if (scf_shutdown_callback == NULL)
			scf_shutdown_callback = (void (*)(int))
			    modgetsymvalue("scf_shutdown_callb", 0);

		switch (sub_state) {
		case SIGSUBST_PANIC:
			if (scf_panic_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_panic_callb not found\n");
				return;
			}
			scf_panic_callback(SIGSUBST_PANIC);
			break;

		case SIGSUBST_HALT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_HALT);
			break;

		case SIGSUBST_ENVIRON:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_ENVIRON);
			break;

		case SIGSUBST_REBOOT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_REBOOT);
			break;
		}
	}
}

/*
 * Translate a memory fault into a unum (unit name) string via the
 * opl_get_mem_unum hook registered by the memory-controller driver.
 * Returns ENOTSUP when the fault is not in memory or no hook is
 * registered; otherwise returns whatever the hook returns.
 */
/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status,
    char *buf, int buflen, int *lenp)
{
	/*
	 * check if it's a Memory error.
	 */
	if (flt_in_memory) {
		if (opl_get_mem_unum != NULL) {
			return (opl_get_mem_unum(synd_code, flt_addr, buf,
			    buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else {
		return (ENOTSUP);
	}
}

/*
 * Build the unum string for a cpu, e.g. "/MBU_A/CPUM0" or "/CMU02/CPUM1",
 * depending on the platform model.  Returns ENXIO when the LSB-to-physical-
 * board lookup fails, ENODEV when the model cannot be determined, ENOSPC
 * when the name does not fit in buf.  *lenp (if lenp is non-NULL) is set
 * only on success.
 */
/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	ret = 0;
	int	sb;
	int	plen;

	sb = opl_get_physical_board(LSB_ID(cpuid));
	if (sb == -1) {
		return (ENXIO);
	}

	/*
	 * opl_cur_model is assigned here
	 */
	if (opl_cur_model == NULL) {
		set_model_info();

		/*
		 * if not matched, return
		 */
		if (opl_cur_model == NULL)
			return (ENODEV);
	}

	/* model_type doubles as the index of the entry in opl_models[] */
	ASSERT((opl_cur_model - opl_models) == (opl_cur_model->model_type));

	switch (opl_cur_model->model_type) {
	case FF1:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_A",
		    CHIP_ID(cpuid) / 2);
		break;

	case FF2:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_B",
		    (CHIP_ID(cpuid) / 2) + (sb * 2));
		break;

	case DC1:
	case DC2:
	case DC3:
		plen = snprintf(buf, buflen, "/%s%02d/CPUM%d", "CMU", sb,
		    CHIP_ID(cpuid));
		break;

	case IKKAKU:
		plen = snprintf(buf, buflen, "/%s", "MBU_A");
		break;

	default:
		/* This should never happen */
		return (ENODEV);
	}

	/* snprintf returns the would-be length; >= buflen means truncation */
	if (plen >= buflen) {
		ret = ENOSPC;
	} else {
		if (lenp)
			*lenp = strlen(buf);
	}
	return (ret);
}

/*
 * Report the domain node name to the XSCF service processor.  Delivery is
 * asynchronous: post_xscf_msg() (defined later in this file) queues the
 * utsname structure and a worker thread passes it to the scf driver.
 */
void
plat_nodename_set(void)
{
	post_xscf_msg((char *)&utsname, sizeof (struct utsname));
}

caddr_t efcode_vaddr = NULL;

/*
 * Preallocate enough memory for fcode claims.
 */

caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * allocate the physical memory for the Oberon fcode.
 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}

/*
 * Carve the platform's early reservations out of the boot-time allocation
 * area: reserve the fcode region, then return the new allocation base
 * rounded up to the ecache alignment.
 */
caddr_t
plat_startup_memlist(caddr_t alloc_base)
{
	caddr_t tmp_alloc_base;

	tmp_alloc_base = efcode_alloc(alloc_base);
	tmp_alloc_base =
	    (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
	return (tmp_alloc_base);
}

/* need to forward declare these */
static void plat_lock_delay(uint_t);

/*
 * Late platform startup: install OPL-specific clock-tick tunables (only
 * when still zero, i.e. not already set elsewhere — presumably so an
 * /etc/system override wins; verify against tunable handling) and hook
 * the platform mutex backoff routine.
 */
void
startup_platform(void)
{
	if (clock_tick_threshold == 0)
		clock_tick_threshold = OPL_CLOCK_TICK_THRESHOLD;
	if (clock_tick_ncpus == 0)
		clock_tick_ncpus = OPL_CLOCK_TICK_NCPUS;
	mutex_lock_delay = plat_lock_delay;
	mutex_cap_factor = OPL_BOFF_MAX_SCALE;
}

/*
 * Compute a system-wide unique MMU index for a cpu from its physical
 * board, chip and core ids:
 *   pb * OPL_MAX_COREID_PER_BOARD + chip * OPL_MAX_COREID_PER_CMP + core.
 * Panics if the LSB-to-physical-board mapping is unavailable.
 */
static uint_t
get_mmu_id(processorid_t cpuid)
{
	int pb = opl_get_physical_board(LSB_ID(cpuid));

	if (pb == -1) {
		cmn_err(CE_PANIC,
		    "opl_get_physical_board failed (cpu %d LSB %u)",
		    cpuid, LSB_ID(cpuid));
	}
	return (pb * OPL_MAX_COREID_PER_BOARD) + (CHIP_ID(cpuid) *
	    OPL_MAX_COREID_PER_CMP) + CORE_ID(cpuid);
}

/*
 * Fill in the MMU context-domain info for a cpu: a unique MMU index and
 * the number of contexts (8192 for Olympus-C and Jupiter).  Panics on an
 * unrecognized processor implementation.
 */
void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *info)
{
	int	impl;

	impl = cpunodes[cpuid].implementation;
	if (IS_OLYMPUS_C(impl) || IS_JUPITER(impl)) {
		info->mmu_idx = get_mmu_id(cpuid);
		info->mmu_nctxs = 8192;
	} else {
		/* note: prints the implementation code, not the cpuid */
		cmn_err(CE_PANIC, "Unknown processor %d", impl);
	}
}

/*
 * Thin wrappers around the opl_get_mem_* hooks registered by the memory
 * driver; each returns ENOTSUP until the corresponding hook is set.
 */
int
plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	if (opl_get_mem_sid == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_sid(unum, buf, buflen, lenp));
}

int
plat_get_mem_offset(uint64_t paddr, uint64_t *offp)
{
	if (opl_get_mem_offset == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_offset(paddr, offp));
}

int
plat_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	if (opl_get_mem_addr == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_addr(unum, sid, offset, addrp));
}

/*
 * Platform specific lock delay code for OPL: installed as mutex_lock_delay
 * by startup_platform().  Uses staged linear increases in the delay.
 * The sleep instruction is the preferred method of delay, but is too
 * large of granularity for the initial backoff.
 */
void
plat_lock_delay(uint_t backoff)
{
	int i;	/* NOTE(review): int loop index compared against uint_t cnt */
	uint_t cnt, remcnt;
	int ctr;
	hrtime_t delay_start, rem_delay;

	if (backoff < 100) {
		/*
		 * Small backoff: burn it off in OPL_BOFF_SLEEP-sized
		 * chunks of cpu_smt_pause() — sleep covers most of the
		 * requested delay...
		 */
		for (cnt = backoff;
		    cnt >= OPL_BOFF_SLEEP;
		    cnt -= OPL_BOFF_SLEEP) {
			cpu_smt_pause();
		}
		/*
		 * ...then spin for the small remainder of backoff
		 */
		for (ctr = cnt * OPL_BOFF_SPIN; ctr; ctr--) {
			mutex_delay_default();
		}
	} else {
		/* backoff is large.  Fill it by sleeping */
		delay_start = gethrtime_waitfree();
		cnt = backoff / OPL_BOFF_SLEEP;
		/*
		 * use sleep instructions for delay
		 */
		for (i = 0; i < cnt; i++) {
			cpu_smt_pause();
		}

		/*
		 * Note: if the other strand executes a sleep instruction,
		 * then the sleep ends immediately with a minimum time of
		 * 42 clocks.  We check gethrtime to ensure we have
		 * waited long enough.  And we include both a short
		 * spin loop and a sleep for repeated delay times.
		 */

		rem_delay = gethrtime_waitfree() - delay_start;
		while (rem_delay < cnt * OPL_BOFF_TM) {
			/* estimate how many sleep units are still owed */
			remcnt = cnt - (rem_delay / OPL_BOFF_TM);
			for (i = 0; i < remcnt; i++) {
				cpu_smt_pause();
				for (ctr = OPL_BOFF_SPIN; ctr; ctr--) {
					mutex_delay_default();
				}
			}
			rem_delay = gethrtime_waitfree() - delay_start;
		}
	}
}

/*
 * The following code implements asynchronous call to XSCF to setup the
 * domain node name.
 */

#define	FREE_MSG(m)		kmem_free((m), NM_LEN((m)->len))

/*
 * The following three macros define all the operations on the request
 * list implementation we are using here, and hide those details from the
 */
/* prepend a message at the head of the list (newest first) */
#define	PUSH(m) \
	{ \
		(m)->next = ctl_msg.head; \
		(m)->prev = NULL; \
		if ((m)->next != NULL) \
			(m)->next->prev = (m); \
		ctl_msg.head = (m); \
	}

/* unlink a message from the list (does not free it) */
#define	REMOVE(m) \
	{ \
		if ((m)->prev != NULL) \
			(m)->prev->next = (m)->next; \
		else \
			ctl_msg.head = (m)->next; \
		if ((m)->next != NULL) \
			(m)->next->prev = (m)->prev; \
	}

/* free every message after head, leaving head as the sole list entry */
#define	FREE_THE_TAIL(head) \
	{ \
		nm_msg_t *n_msg, *m; \
		m = (head)->next; \
		(head)->next = NULL; \
		while (m != NULL) { \
			n_msg = m->next; \
			FREE_MSG(m); \
			m = n_msg; \
		} \
	}

#define	SCF_PUTINFO(f, s, p) \
	f(KEY_ESCF, 0x01, 0, s, p)

/* evaluates to non-zero on success; the return code is stored into r */
#define	PASS2XSCF(m, r)	((r = SCF_PUTINFO(ctl_msg.scf_service_function, \
	(m)->len, (m)->data)) == 0)

/*
 * The value of the following macro loosely depends on the
 * value of the "device busy" timeout used in the SCF driver.
 * (See pass2xscf_thread()).
 */
#define	SCF_DEVBUSY_DELAY	10

/*
 * The default number of attempts to contact the scf driver
 * if we cannot fetch any information about the timeout value
 * it uses.
 */

#define	REPEATS	4

/*
 * A queued node-name message.  data[1] is the old C idiom for a
 * variable-length trailing array; the real length is len bytes
 * (see NM_LEN below).
 */
typedef struct nm_msg {
	struct nm_msg *next;
	struct nm_msg *prev;
	int len;
	char data[1];
} nm_msg_t;

#define	NM_LEN(len)	(sizeof (nm_msg_t) + (len) - 1)

/*
 * Shared state between post_xscf_msg() (producer) and pass2xscf_thread()
 * (consumer).  All fields are protected by nm_lock except during the
 * actual SCF call, when the thread drops the lock.
 */
static struct ctlmsg {
	nm_msg_t *head;			/* request list, newest first */
	nm_msg_t *now_serving;		/* request currently being sent */
	kmutex_t nm_lock;		/* protects this structure */
	kthread_t *nmt;			/* worker thread; NULL if none */
	int cnt;			/* number of queued requests */
	int (*scf_service_function)(uint32_t, uint8_t,
	    uint32_t, uint32_t, void *);	/* scf_service_putinfo() */
} ctl_msg;

/*
 * Queue a copy of the len bytes at dp for asynchronous delivery to the
 * XSCF, creating the worker thread if one is not already running.
 * NOTE(review): uses pass2xscf_thread before its definition below —
 * assumes a forward declaration earlier in this file (not visible in
 * this chunk); confirm.
 */
static void
post_xscf_msg(char *dp, int len)
{
	nm_msg_t *msg;

	msg = (nm_msg_t *)kmem_zalloc(NM_LEN(len), KM_SLEEP);

	bcopy(dp, msg->data, len);
	msg->len = len;

	mutex_enter(&ctl_msg.nm_lock);
	if (ctl_msg.nmt == NULL) {
		/*
		 * The new thread is created while we still hold nm_lock,
		 * so it blocks in mutex_enter() until the message below
		 * has been pushed and cnt incremented.
		 */
		ctl_msg.nmt =  thread_create(NULL, 0, pass2xscf_thread,
		    NULL, 0, &p0, TS_RUN, minclsyspri);
	}

	PUSH(msg);
	ctl_msg.cnt++;
	mutex_exit(&ctl_msg.nm_lock);
}

/*
 * Worker thread: drain ctl_msg, always delivering only the most recent
 * request (older ones are discarded) to the XSCF via scf_service_putinfo.
 * Retries EBUSY up to a count derived from the scf driver's own timeout;
 * exits when the queue is empty, clearing ctl_msg.nmt under nm_lock so a
 * later post_xscf_msg() can start a fresh thread.
 */
static void
pass2xscf_thread()
{
	nm_msg_t *msg;
	int ret;
	uint_t i, msg_sent, xscf_driver_delay;
	static uint_t repeat_cnt;	/* computed once, reused by reruns */
	uint_t *scf_wait_cnt;

	mutex_enter(&ctl_msg.nm_lock);

	/*
	 * Find the address of the SCF put routine if it's not done yet.
	 */
	if (ctl_msg.scf_service_function == NULL) {
		if ((ctl_msg.scf_service_function =
		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
		    modgetsymvalue("scf_service_putinfo", 0)) == NULL) {
			cmn_err(CE_NOTE, "pass2xscf_thread: "
			    "scf_service_putinfo not found\n");
			ctl_msg.nmt = NULL;
			mutex_exit(&ctl_msg.nm_lock);
			return;
		}
	}

	/*
	 * Calculate the number of attempts to connect XSCF based on the
	 * scf driver delay (which is
	 * SCF_DEVBUSY_DELAY*scf_online_wait_rcnt seconds) and the value
	 * of xscf_connect_delay (the total number of seconds to wait
	 * till xscf get ready.)
	 * NOTE(review): xscf_connect_delay is defined elsewhere in this
	 * file (not visible in this chunk).
	 */
	if (repeat_cnt == 0) {
		if ((scf_wait_cnt =
		    (uint_t *)
		    modgetsymvalue("scf_online_wait_rcnt", 0)) == NULL) {
			repeat_cnt = REPEATS;
		} else {

			xscf_driver_delay = *scf_wait_cnt *
			    SCF_DEVBUSY_DELAY;
			repeat_cnt = (xscf_connect_delay/xscf_driver_delay) + 1;
		}
	}

	while (ctl_msg.cnt != 0) {

		/*
		 * Take the very last request from the queue,
		 * (head is the newest entry — see PUSH)
		 */
		ctl_msg.now_serving = ctl_msg.head;
		ASSERT(ctl_msg.now_serving != NULL);

		/*
		 * and discard all the others if any.
		 */
		FREE_THE_TAIL(ctl_msg.now_serving);
		ctl_msg.cnt = 1;
		mutex_exit(&ctl_msg.nm_lock);

		/*
		 * Pass the name to XSCF. Note please, we do not hold the
		 * mutex while we are doing this.
		 */
		msg_sent = 0;
		for (i = 0; i < repeat_cnt; i++) {
			if (PASS2XSCF(ctl_msg.now_serving, ret)) {
				msg_sent = 1;
				break;
			} else {
				/* EBUSY is expected while XSCF comes up */
				if (ret != EBUSY) {
					cmn_err(CE_NOTE, "pass2xscf_thread:"
					    " unexpected return code"
					    " from scf_service_putinfo():"
					    " %d\n", ret);
				}
			}
		}

		if (msg_sent) {

			/*
			 * Remove the request from the list
			 */
			mutex_enter(&ctl_msg.nm_lock);
			msg = ctl_msg.now_serving;
			ctl_msg.now_serving = NULL;
			REMOVE(msg);
			ctl_msg.cnt--;
			mutex_exit(&ctl_msg.nm_lock);
			FREE_MSG(msg);
		} else {

			/*
			 * If while we have tried to communicate with
			 * XSCF there were any other requests we are
			 * going to drop this one and take the latest
			 * one. Otherwise we will try to pass this one
			 * again.
			 */
			cmn_err(CE_NOTE,
			    "pass2xscf_thread: "
			    "scf_service_putinfo "
			    "not responding\n");
		}
		mutex_enter(&ctl_msg.nm_lock);
	}

	/*
	 * The request queue is empty, exit.
	 * (nmt is cleared under nm_lock, so a concurrent post_xscf_msg()
	 * either sees us still alive or starts a new thread.)
	 */
	ctl_msg.nmt = NULL;
	mutex_exit(&ctl_msg.nm_lock);
}