125cf1a30Sjl /* 225cf1a30Sjl * CDDL HEADER START 325cf1a30Sjl * 425cf1a30Sjl * The contents of this file are subject to the terms of the 525cf1a30Sjl * Common Development and Distribution License (the "License"). 625cf1a30Sjl * You may not use this file except in compliance with the License. 725cf1a30Sjl * 825cf1a30Sjl * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 925cf1a30Sjl * or http://www.opensolaris.org/os/licensing. 1025cf1a30Sjl * See the License for the specific language governing permissions 1125cf1a30Sjl * and limitations under the License. 1225cf1a30Sjl * 1325cf1a30Sjl * When distributing Covered Code, include this CDDL HEADER in each 1425cf1a30Sjl * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1525cf1a30Sjl * If applicable, add the following below this CDDL HEADER, with the 1625cf1a30Sjl * fields enclosed by brackets "[]" replaced with your own identifying 1725cf1a30Sjl * information: Portions Copyright [yyyy] [name of copyright owner] 1825cf1a30Sjl * 1925cf1a30Sjl * CDDL HEADER END 2025cf1a30Sjl */ 2125cf1a30Sjl /* 2268ac2337Sjl * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 2325cf1a30Sjl * Use is subject to license terms. 
2425cf1a30Sjl */ 2525cf1a30Sjl 2625cf1a30Sjl #pragma ident "%Z%%M% %I% %E% SMI" 2725cf1a30Sjl 2825cf1a30Sjl #include <sys/cpuvar.h> 2925cf1a30Sjl #include <sys/systm.h> 3025cf1a30Sjl #include <sys/sysmacros.h> 3125cf1a30Sjl #include <sys/promif.h> 3225cf1a30Sjl #include <sys/platform_module.h> 3325cf1a30Sjl #include <sys/cmn_err.h> 3425cf1a30Sjl #include <sys/errno.h> 3525cf1a30Sjl #include <sys/machsystm.h> 3625cf1a30Sjl #include <sys/bootconf.h> 3725cf1a30Sjl #include <sys/nvpair.h> 3825cf1a30Sjl #include <sys/kobj.h> 3925cf1a30Sjl #include <sys/mem_cage.h> 4025cf1a30Sjl #include <sys/opl.h> 4125cf1a30Sjl #include <sys/scfd/scfostoescf.h> 4225cf1a30Sjl #include <sys/cpu_sgnblk_defs.h> 4325cf1a30Sjl #include <sys/utsname.h> 4425cf1a30Sjl #include <sys/ddi.h> 4525cf1a30Sjl #include <sys/sunndi.h> 4625cf1a30Sjl #include <sys/lgrp.h> 4725cf1a30Sjl #include <sys/memnode.h> 4825cf1a30Sjl #include <sys/sysmacros.h> 49e603b7d4Spm #include <sys/time.h> 50e603b7d4Spm #include <sys/cpu.h> 5125cf1a30Sjl #include <vm/vm_dep.h> 5225cf1a30Sjl 5325cf1a30Sjl int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *); 540cc8ae86Sav int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp); 550cc8ae86Sav int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp); 560cc8ae86Sav int (*opl_get_mem_addr)(char *unum, char *sid, 570cc8ae86Sav uint64_t offset, uint64_t *paddr); 5825cf1a30Sjl 5925cf1a30Sjl /* Memory for fcode claims. 
16k times # maximum possible IO units */ 6025cf1a30Sjl #define EFCODE_SIZE (OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000) 6125cf1a30Sjl int efcode_size = EFCODE_SIZE; 6225cf1a30Sjl 6325cf1a30Sjl #define OPL_MC_MEMBOARD_SHIFT 38 /* Boards on 256BG boundary */ 6425cf1a30Sjl 6525cf1a30Sjl /* Set the maximum number of boards for DR */ 6625cf1a30Sjl int opl_boards = OPL_MAX_BOARDS; 6725cf1a30Sjl 6825cf1a30Sjl void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t); 6925cf1a30Sjl 7025cf1a30Sjl extern int tsb_lgrp_affinity; 7125cf1a30Sjl 7225cf1a30Sjl int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) * 7325cf1a30Sjl (OPL_MAX_TSBS_PER_PCICH); 7425cf1a30Sjl 7525cf1a30Sjl pgcnt_t opl_startup_cage_size = 0; 7625cf1a30Sjl 771e2e7a75Shuah static opl_model_info_t opl_models[] = { 78195196c6Ssubhan { "FF1", OPL_MAX_BOARDS_FF1, FF1, STD_DISPATCH_TABLE }, 79195196c6Ssubhan { "FF2", OPL_MAX_BOARDS_FF2, FF2, STD_DISPATCH_TABLE }, 80195196c6Ssubhan { "DC1", OPL_MAX_BOARDS_DC1, DC1, STD_DISPATCH_TABLE }, 81195196c6Ssubhan { "DC2", OPL_MAX_BOARDS_DC2, DC2, EXT_DISPATCH_TABLE }, 82195196c6Ssubhan { "DC3", OPL_MAX_BOARDS_DC3, DC3, EXT_DISPATCH_TABLE }, 831e2e7a75Shuah }; 841e2e7a75Shuah static int opl_num_models = sizeof (opl_models)/sizeof (opl_model_info_t); 851e2e7a75Shuah 86195196c6Ssubhan /* 8772b9fce9Ssubhan * opl_cur_model 88195196c6Ssubhan */ 8972b9fce9Ssubhan static opl_model_info_t *opl_cur_model = NULL; 901e2e7a75Shuah 9125cf1a30Sjl static struct memlist *opl_memlist_per_board(struct memlist *ml); 9225cf1a30Sjl 93e603b7d4Spm /* 94e603b7d4Spm * Note FF/DC out-of-order instruction engine takes only a 95e603b7d4Spm * single cycle to execute each spin loop 96e603b7d4Spm * for comparison, Panther takes 6 cycles for same loop 97e603b7d4Spm * 1500 approx nsec for OPL sleep instruction 98e603b7d4Spm * if spin count = OPL_BOFF_SLEEP*OPL_BOFF_SPIN then 99e603b7d4Spm * spin time should be equal to OPL_BOFF_TM nsecs 100e603b7d4Spm * Listed values tuned for 
2.15GHz to 2.4GHz systems 101e603b7d4Spm * Value may change for future systems 102e603b7d4Spm */ 103e603b7d4Spm #define OPL_BOFF_SPIN 720 104e603b7d4Spm #define OPL_BOFF_BASE 1 105e603b7d4Spm #define OPL_BOFF_SLEEP 5 106e603b7d4Spm #define OPL_BOFF_CAP1 20 107e603b7d4Spm #define OPL_BOFF_CAP2 60 108e603b7d4Spm #define OPL_BOFF_MAX (40 * OPL_BOFF_SLEEP) 109e603b7d4Spm #define OPL_BOFF_TM 1500 110e603b7d4Spm 11125cf1a30Sjl int 11225cf1a30Sjl set_platform_max_ncpus(void) 11325cf1a30Sjl { 11425cf1a30Sjl return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS); 11525cf1a30Sjl } 11625cf1a30Sjl 11725cf1a30Sjl int 11825cf1a30Sjl set_platform_tsb_spares(void) 11925cf1a30Sjl { 12025cf1a30Sjl return (MIN(opl_tsb_spares, MAX_UPA)); 12125cf1a30Sjl } 12225cf1a30Sjl 1231e2e7a75Shuah static void 1241e2e7a75Shuah set_model_info() 1251e2e7a75Shuah { 126195196c6Ssubhan extern int ts_dispatch_extended; 1271e2e7a75Shuah char name[MAXSYSNAME]; 1281e2e7a75Shuah int i; 1291e2e7a75Shuah 1301e2e7a75Shuah /* 1311e2e7a75Shuah * Get model name from the root node. 1321e2e7a75Shuah * 1331e2e7a75Shuah * We are using the prom device tree since, at this point, 1341e2e7a75Shuah * the Solaris device tree is not yet setup. 1351e2e7a75Shuah */ 1361e2e7a75Shuah (void) prom_getprop(prom_rootnode(), "model", (caddr_t)name); 1371e2e7a75Shuah 1381e2e7a75Shuah for (i = 0; i < opl_num_models; i++) { 1391e2e7a75Shuah if (strncmp(name, opl_models[i].model_name, MAXSYSNAME) == 0) { 1401e2e7a75Shuah opl_cur_model = &opl_models[i]; 1411e2e7a75Shuah break; 1421e2e7a75Shuah } 1431e2e7a75Shuah } 144195196c6Ssubhan 1451e2e7a75Shuah if (i == opl_num_models) 146195196c6Ssubhan halt("No valid OPL model is found!"); 147195196c6Ssubhan 148195196c6Ssubhan if ((opl_cur_model->model_cmds & EXT_DISPATCH_TABLE) && 149*e98fafb9Sjl (ts_dispatch_extended == -1)) { 150195196c6Ssubhan /* 151195196c6Ssubhan * Based on a platform model, select a dispatch table. 
152195196c6Ssubhan * Only DC2 and DC3 systems uses the alternate/extended 153195196c6Ssubhan * TS dispatch table. 154195196c6Ssubhan * FF1, FF2 and DC1 systems used standard dispatch tables. 155195196c6Ssubhan */ 156195196c6Ssubhan ts_dispatch_extended = 1; 157195196c6Ssubhan } 158195196c6Ssubhan 1591e2e7a75Shuah } 1601e2e7a75Shuah 1611e2e7a75Shuah static void 1621e2e7a75Shuah set_max_mmu_ctxdoms() 1631e2e7a75Shuah { 1641e2e7a75Shuah extern uint_t max_mmu_ctxdoms; 1651e2e7a75Shuah int max_boards; 1661e2e7a75Shuah 1671e2e7a75Shuah /* 1681e2e7a75Shuah * From the model, get the maximum number of boards 1691e2e7a75Shuah * supported and set the value accordingly. If the model 1701e2e7a75Shuah * could not be determined or recognized, we assume the max value. 1711e2e7a75Shuah */ 1721e2e7a75Shuah if (opl_cur_model == NULL) 1731e2e7a75Shuah max_boards = OPL_MAX_BOARDS; 1741e2e7a75Shuah else 1751e2e7a75Shuah max_boards = opl_cur_model->model_max_boards; 1761e2e7a75Shuah 1771e2e7a75Shuah /* 1781e2e7a75Shuah * On OPL, cores and MMUs are one-to-one. 
1791e2e7a75Shuah */ 1801e2e7a75Shuah max_mmu_ctxdoms = OPL_MAX_CORE_UNITS_PER_BOARD * max_boards; 1811e2e7a75Shuah } 1821e2e7a75Shuah 18325cf1a30Sjl #pragma weak mmu_init_large_pages 18425cf1a30Sjl 18525cf1a30Sjl void 18625cf1a30Sjl set_platform_defaults(void) 18725cf1a30Sjl { 18825cf1a30Sjl extern char *tod_module_name; 18925cf1a30Sjl extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int); 19025cf1a30Sjl extern void mmu_init_large_pages(size_t); 19125cf1a30Sjl 19225cf1a30Sjl /* Set the CPU signature function pointer */ 19325cf1a30Sjl cpu_sgn_func = cpu_sgn_update; 19425cf1a30Sjl 19525cf1a30Sjl /* Set appropriate tod module for OPL platform */ 19625cf1a30Sjl ASSERT(tod_module_name == NULL); 19725cf1a30Sjl tod_module_name = "todopl"; 19825cf1a30Sjl 19925cf1a30Sjl if ((mmu_page_sizes == max_mmu_page_sizes) && 200e12a8a13Ssusans (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) { 20125cf1a30Sjl if (&mmu_init_large_pages) 20225cf1a30Sjl mmu_init_large_pages(mmu_ism_pagesize); 20325cf1a30Sjl } 20425cf1a30Sjl 20525cf1a30Sjl tsb_lgrp_affinity = 1; 2061e2e7a75Shuah 2071e2e7a75Shuah set_max_mmu_ctxdoms(); 20825cf1a30Sjl } 20925cf1a30Sjl 21025cf1a30Sjl /* 21125cf1a30Sjl * Convert logical a board number to a physical one. 
21225cf1a30Sjl */ 21325cf1a30Sjl 21425cf1a30Sjl #define LSBPROP "board#" 21525cf1a30Sjl #define PSBPROP "physical-board#" 21625cf1a30Sjl 21725cf1a30Sjl int 21825cf1a30Sjl opl_get_physical_board(int id) 21925cf1a30Sjl { 22025cf1a30Sjl dev_info_t *root_dip, *dip = NULL; 22125cf1a30Sjl char *dname = NULL; 22225cf1a30Sjl int circ; 22325cf1a30Sjl 22425cf1a30Sjl pnode_t pnode; 22525cf1a30Sjl char pname[MAXSYSNAME] = {0}; 22625cf1a30Sjl 22725cf1a30Sjl int lsb_id; /* Logical System Board ID */ 22825cf1a30Sjl int psb_id; /* Physical System Board ID */ 22925cf1a30Sjl 23025cf1a30Sjl 23125cf1a30Sjl /* 23225cf1a30Sjl * This function is called on early stage of bootup when the 23325cf1a30Sjl * kernel device tree is not initialized yet, and also 23425cf1a30Sjl * later on when the device tree is up. We want to try 23525cf1a30Sjl * the fast track first. 23625cf1a30Sjl */ 23725cf1a30Sjl root_dip = ddi_root_node(); 23825cf1a30Sjl if (root_dip) { 23925cf1a30Sjl /* Get from devinfo node */ 24025cf1a30Sjl ndi_devi_enter(root_dip, &circ); 24125cf1a30Sjl for (dip = ddi_get_child(root_dip); dip; 24225cf1a30Sjl dip = ddi_get_next_sibling(dip)) { 24325cf1a30Sjl 24425cf1a30Sjl dname = ddi_node_name(dip); 24525cf1a30Sjl if (strncmp(dname, "pseudo-mc", 9) != 0) 24625cf1a30Sjl continue; 24725cf1a30Sjl 24825cf1a30Sjl if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip, 24925cf1a30Sjl DDI_PROP_DONTPASS, LSBPROP, -1)) == -1) 25025cf1a30Sjl continue; 25125cf1a30Sjl 25225cf1a30Sjl if (id == lsb_id) { 25325cf1a30Sjl if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY, 25425cf1a30Sjl dip, DDI_PROP_DONTPASS, PSBPROP, -1)) 25525cf1a30Sjl == -1) { 25625cf1a30Sjl ndi_devi_exit(root_dip, circ); 25725cf1a30Sjl return (-1); 25825cf1a30Sjl } else { 25925cf1a30Sjl ndi_devi_exit(root_dip, circ); 26025cf1a30Sjl return (psb_id); 26125cf1a30Sjl } 26225cf1a30Sjl } 26325cf1a30Sjl } 26425cf1a30Sjl ndi_devi_exit(root_dip, circ); 26525cf1a30Sjl } 26625cf1a30Sjl 26725cf1a30Sjl /* 26825cf1a30Sjl * We do not have the kernel device 
tree, or we did not 26925cf1a30Sjl * find the node for some reason (let's say the kernel 27025cf1a30Sjl * device tree was modified), let's try the OBP tree. 27125cf1a30Sjl */ 27225cf1a30Sjl pnode = prom_rootnode(); 27325cf1a30Sjl for (pnode = prom_childnode(pnode); pnode; 27425cf1a30Sjl pnode = prom_nextnode(pnode)) { 27525cf1a30Sjl 27625cf1a30Sjl if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) || 27725cf1a30Sjl (strncmp(pname, "pseudo-mc", 9) != 0)) 27825cf1a30Sjl continue; 27925cf1a30Sjl 28025cf1a30Sjl if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1) 28125cf1a30Sjl continue; 28225cf1a30Sjl 28325cf1a30Sjl if (id == lsb_id) { 28425cf1a30Sjl if (prom_getprop(pnode, PSBPROP, 28525cf1a30Sjl (caddr_t)&psb_id) == -1) { 28625cf1a30Sjl return (-1); 28725cf1a30Sjl } else { 28825cf1a30Sjl return (psb_id); 28925cf1a30Sjl } 29025cf1a30Sjl } 29125cf1a30Sjl } 29225cf1a30Sjl 29325cf1a30Sjl return (-1); 29425cf1a30Sjl } 29525cf1a30Sjl 29625cf1a30Sjl /* 29725cf1a30Sjl * For OPL it's possible that memory from two or more successive boards 29825cf1a30Sjl * will be contiguous across the boards, and therefore represented as a 29925cf1a30Sjl * single chunk. 30025cf1a30Sjl * This function splits such chunks down the board boundaries. 
30125cf1a30Sjl */ 30225cf1a30Sjl static struct memlist * 30325cf1a30Sjl opl_memlist_per_board(struct memlist *ml) 30425cf1a30Sjl { 30525cf1a30Sjl uint64_t ssize, low, high, boundary; 30625cf1a30Sjl struct memlist *head, *tail, *new; 30725cf1a30Sjl 30825cf1a30Sjl ssize = (1ull << OPL_MC_MEMBOARD_SHIFT); 30925cf1a30Sjl 31025cf1a30Sjl head = tail = NULL; 31125cf1a30Sjl 31225cf1a30Sjl for (; ml; ml = ml->next) { 31325cf1a30Sjl low = (uint64_t)ml->address; 31425cf1a30Sjl high = low+(uint64_t)(ml->size); 31525cf1a30Sjl while (low < high) { 31625cf1a30Sjl boundary = roundup(low+1, ssize); 31725cf1a30Sjl boundary = MIN(high, boundary); 31825cf1a30Sjl new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP); 31925cf1a30Sjl new->address = low; 32025cf1a30Sjl new->size = boundary - low; 32125cf1a30Sjl if (head == NULL) 32225cf1a30Sjl head = new; 32325cf1a30Sjl if (tail) { 32425cf1a30Sjl tail->next = new; 32525cf1a30Sjl new->prev = tail; 32625cf1a30Sjl } 32725cf1a30Sjl tail = new; 32825cf1a30Sjl low = boundary; 32925cf1a30Sjl } 33025cf1a30Sjl } 33125cf1a30Sjl return (head); 33225cf1a30Sjl } 33325cf1a30Sjl 33425cf1a30Sjl void 33525cf1a30Sjl set_platform_cage_params(void) 33625cf1a30Sjl { 33725cf1a30Sjl extern pgcnt_t total_pages; 33825cf1a30Sjl extern struct memlist *phys_avail; 33925cf1a30Sjl struct memlist *ml, *tml; 34025cf1a30Sjl 34125cf1a30Sjl if (kernel_cage_enable) { 34225cf1a30Sjl pgcnt_t preferred_cage_size; 34325cf1a30Sjl 344*e98fafb9Sjl preferred_cage_size = MAX(opl_startup_cage_size, 345*e98fafb9Sjl total_pages / 256); 34625cf1a30Sjl 34725cf1a30Sjl ml = opl_memlist_per_board(phys_avail); 34825cf1a30Sjl 34925cf1a30Sjl /* 35025cf1a30Sjl * Note: we are assuming that post has load the 35125cf1a30Sjl * whole show in to the high end of memory. Having 35225cf1a30Sjl * taken this leap, we copy the whole of phys_avail 35325cf1a30Sjl * the glist and arrange for the cage to grow 35425cf1a30Sjl * downward (descending pfns). 
35525cf1a30Sjl */ 35685f58038Sdp kcage_range_init(ml, KCAGE_DOWN, preferred_cage_size); 35725cf1a30Sjl 35825cf1a30Sjl /* free the memlist */ 35925cf1a30Sjl do { 36025cf1a30Sjl tml = ml->next; 36125cf1a30Sjl kmem_free(ml, sizeof (struct memlist)); 36225cf1a30Sjl ml = tml; 36325cf1a30Sjl } while (ml != NULL); 36425cf1a30Sjl } 36525cf1a30Sjl 36625cf1a30Sjl if (kcage_on) 36725cf1a30Sjl cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED"); 36825cf1a30Sjl else 36925cf1a30Sjl cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED"); 37025cf1a30Sjl } 37125cf1a30Sjl 37225cf1a30Sjl /*ARGSUSED*/ 37325cf1a30Sjl int 37425cf1a30Sjl plat_cpu_poweron(struct cpu *cp) 37525cf1a30Sjl { 37625cf1a30Sjl int (*opl_cpu_poweron)(struct cpu *) = NULL; 37725cf1a30Sjl 37825cf1a30Sjl opl_cpu_poweron = 37925cf1a30Sjl (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0); 38025cf1a30Sjl 38125cf1a30Sjl if (opl_cpu_poweron == NULL) 38225cf1a30Sjl return (ENOTSUP); 38325cf1a30Sjl else 38425cf1a30Sjl return ((opl_cpu_poweron)(cp)); 38525cf1a30Sjl 38625cf1a30Sjl } 38725cf1a30Sjl 38825cf1a30Sjl /*ARGSUSED*/ 38925cf1a30Sjl int 39025cf1a30Sjl plat_cpu_poweroff(struct cpu *cp) 39125cf1a30Sjl { 39225cf1a30Sjl int (*opl_cpu_poweroff)(struct cpu *) = NULL; 39325cf1a30Sjl 39425cf1a30Sjl opl_cpu_poweroff = 39525cf1a30Sjl (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0); 39625cf1a30Sjl 39725cf1a30Sjl if (opl_cpu_poweroff == NULL) 39825cf1a30Sjl return (ENOTSUP); 39925cf1a30Sjl else 40025cf1a30Sjl return ((opl_cpu_poweroff)(cp)); 40125cf1a30Sjl 40225cf1a30Sjl } 40325cf1a30Sjl 40425cf1a30Sjl int 40525cf1a30Sjl plat_max_boards(void) 40625cf1a30Sjl { 40725cf1a30Sjl return (OPL_MAX_BOARDS); 40825cf1a30Sjl } 40925cf1a30Sjl 41025cf1a30Sjl int 41125cf1a30Sjl plat_max_cpu_units_per_board(void) 41225cf1a30Sjl { 41325cf1a30Sjl return (OPL_MAX_CPU_PER_BOARD); 41425cf1a30Sjl } 41525cf1a30Sjl 41625cf1a30Sjl int 41725cf1a30Sjl plat_max_mem_units_per_board(void) 41825cf1a30Sjl { 41925cf1a30Sjl return 
(OPL_MAX_MEM_UNITS_PER_BOARD); 42025cf1a30Sjl } 42125cf1a30Sjl 42225cf1a30Sjl int 42325cf1a30Sjl plat_max_io_units_per_board(void) 42425cf1a30Sjl { 42525cf1a30Sjl return (OPL_MAX_IO_UNITS_PER_BOARD); 42625cf1a30Sjl } 42725cf1a30Sjl 42825cf1a30Sjl int 42925cf1a30Sjl plat_max_cmp_units_per_board(void) 43025cf1a30Sjl { 43125cf1a30Sjl return (OPL_MAX_CMP_UNITS_PER_BOARD); 43225cf1a30Sjl } 43325cf1a30Sjl 43425cf1a30Sjl int 43525cf1a30Sjl plat_max_core_units_per_board(void) 43625cf1a30Sjl { 43725cf1a30Sjl return (OPL_MAX_CORE_UNITS_PER_BOARD); 43825cf1a30Sjl } 43925cf1a30Sjl 44025cf1a30Sjl int 44125cf1a30Sjl plat_pfn_to_mem_node(pfn_t pfn) 44225cf1a30Sjl { 44325cf1a30Sjl return (pfn >> mem_node_pfn_shift); 44425cf1a30Sjl } 44525cf1a30Sjl 44625cf1a30Sjl /* ARGSUSED */ 44725cf1a30Sjl void 44825cf1a30Sjl plat_build_mem_nodes(u_longlong_t *list, size_t nelems) 44925cf1a30Sjl { 45025cf1a30Sjl size_t elem; 45125cf1a30Sjl pfn_t basepfn; 45225cf1a30Sjl pgcnt_t npgs; 45325cf1a30Sjl uint64_t boundary, ssize; 45425cf1a30Sjl uint64_t low, high; 45525cf1a30Sjl 45625cf1a30Sjl /* 45725cf1a30Sjl * OPL mem slices are always aligned on a 256GB boundary. 45825cf1a30Sjl */ 45925cf1a30Sjl mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT; 46025cf1a30Sjl mem_node_physalign = 0; 46125cf1a30Sjl 46225cf1a30Sjl /* 46325cf1a30Sjl * Boot install lists are arranged <addr, len>, <addr, len>, ... 
46425cf1a30Sjl */ 46525cf1a30Sjl ssize = (1ull << OPL_MC_MEMBOARD_SHIFT); 46625cf1a30Sjl for (elem = 0; elem < nelems; elem += 2) { 46725cf1a30Sjl low = (uint64_t)list[elem]; 46825cf1a30Sjl high = low+(uint64_t)(list[elem+1]); 46925cf1a30Sjl while (low < high) { 47025cf1a30Sjl boundary = roundup(low+1, ssize); 47125cf1a30Sjl boundary = MIN(high, boundary); 47225cf1a30Sjl basepfn = btop(low); 47325cf1a30Sjl npgs = btop(boundary - low); 47425cf1a30Sjl mem_node_add_slice(basepfn, basepfn + npgs - 1); 47525cf1a30Sjl low = boundary; 47625cf1a30Sjl } 47725cf1a30Sjl } 47825cf1a30Sjl } 47925cf1a30Sjl 48025cf1a30Sjl /* 48125cf1a30Sjl * Find the CPU associated with a slice at boot-time. 48225cf1a30Sjl */ 48325cf1a30Sjl void 48425cf1a30Sjl plat_fill_mc(pnode_t nodeid) 48525cf1a30Sjl { 48625cf1a30Sjl int board; 48725cf1a30Sjl int memnode; 48825cf1a30Sjl struct { 48925cf1a30Sjl uint64_t addr; 49025cf1a30Sjl uint64_t size; 49125cf1a30Sjl } mem_range; 49225cf1a30Sjl 49325cf1a30Sjl if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) { 49425cf1a30Sjl panic("Can not find board# property in mc node %x", nodeid); 49525cf1a30Sjl } 49625cf1a30Sjl if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) { 49725cf1a30Sjl panic("Can not find sb-mem-ranges property in mc node %x", 498*e98fafb9Sjl nodeid); 49925cf1a30Sjl } 50025cf1a30Sjl memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT; 50125cf1a30Sjl plat_assign_lgrphand_to_mem_node(board, memnode); 50225cf1a30Sjl } 50325cf1a30Sjl 50425cf1a30Sjl /* 50525cf1a30Sjl * Return the platform handle for the lgroup containing the given CPU 50625cf1a30Sjl * 50725cf1a30Sjl * For OPL, lgroup platform handle == board #. 
50825cf1a30Sjl */ 50925cf1a30Sjl 51025cf1a30Sjl extern int mpo_disabled; 51125cf1a30Sjl extern lgrp_handle_t lgrp_default_handle; 51225cf1a30Sjl 51325cf1a30Sjl lgrp_handle_t 51425cf1a30Sjl plat_lgrp_cpu_to_hand(processorid_t id) 51525cf1a30Sjl { 51625cf1a30Sjl lgrp_handle_t plathand; 51725cf1a30Sjl 51825cf1a30Sjl /* 51925cf1a30Sjl * Return the real platform handle for the CPU until 52025cf1a30Sjl * such time as we know that MPO should be disabled. 52125cf1a30Sjl * At that point, we set the "mpo_disabled" flag to true, 52225cf1a30Sjl * and from that point on, return the default handle. 52325cf1a30Sjl * 52425cf1a30Sjl * By the time we know that MPO should be disabled, the 52525cf1a30Sjl * first CPU will have already been added to a leaf 52625cf1a30Sjl * lgroup, but that's ok. The common lgroup code will 52725cf1a30Sjl * double check that the boot CPU is in the correct place, 52825cf1a30Sjl * and in the case where mpo should be disabled, will move 52925cf1a30Sjl * it to the root if necessary. 53025cf1a30Sjl */ 53125cf1a30Sjl if (mpo_disabled) { 53225cf1a30Sjl /* If MPO is disabled, return the default (UMA) handle */ 53325cf1a30Sjl plathand = lgrp_default_handle; 53425cf1a30Sjl } else 53525cf1a30Sjl plathand = (lgrp_handle_t)LSB_ID(id); 53625cf1a30Sjl return (plathand); 53725cf1a30Sjl } 53825cf1a30Sjl 53925cf1a30Sjl /* 54025cf1a30Sjl * Platform specific lgroup initialization 54125cf1a30Sjl */ 54225cf1a30Sjl void 54325cf1a30Sjl plat_lgrp_init(void) 54425cf1a30Sjl { 54525cf1a30Sjl extern uint32_t lgrp_expand_proc_thresh; 54625cf1a30Sjl extern uint32_t lgrp_expand_proc_diff; 54725cf1a30Sjl 54825cf1a30Sjl /* 54925cf1a30Sjl * Set tuneables for the OPL architecture 55025cf1a30Sjl * 55125cf1a30Sjl * lgrp_expand_proc_thresh is the minimum load on the lgroups 55225cf1a30Sjl * this process is currently running on before considering 55325cf1a30Sjl * expanding threads to another lgroup. 
55425cf1a30Sjl * 55525cf1a30Sjl * lgrp_expand_proc_diff determines how much less the remote lgroup 55625cf1a30Sjl * must be loaded before expanding to it. 55725cf1a30Sjl * 55825cf1a30Sjl * Since remote latencies can be costly, attempt to keep 3 threads 55925cf1a30Sjl * within the same lgroup before expanding to the next lgroup. 56025cf1a30Sjl */ 56125cf1a30Sjl lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3; 56225cf1a30Sjl lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX; 56325cf1a30Sjl } 56425cf1a30Sjl 56525cf1a30Sjl /* 56625cf1a30Sjl * Platform notification of lgroup (re)configuration changes 56725cf1a30Sjl */ 56825cf1a30Sjl /*ARGSUSED*/ 56925cf1a30Sjl void 57025cf1a30Sjl plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg) 57125cf1a30Sjl { 57225cf1a30Sjl update_membounds_t *umb; 57325cf1a30Sjl lgrp_config_mem_rename_t lmr; 57425cf1a30Sjl int sbd, tbd; 57525cf1a30Sjl lgrp_handle_t hand, shand, thand; 57625cf1a30Sjl int mnode, snode, tnode; 57725cf1a30Sjl pfn_t start, end; 57825cf1a30Sjl 57925cf1a30Sjl if (mpo_disabled) 58025cf1a30Sjl return; 58125cf1a30Sjl 58225cf1a30Sjl switch (evt) { 58325cf1a30Sjl 58425cf1a30Sjl case LGRP_CONFIG_MEM_ADD: 58525cf1a30Sjl /* 58625cf1a30Sjl * Establish the lgroup handle to memnode translation. 58725cf1a30Sjl */ 58825cf1a30Sjl umb = (update_membounds_t *)arg; 58925cf1a30Sjl 59025cf1a30Sjl hand = umb->u_board; 59125cf1a30Sjl mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT); 59225cf1a30Sjl plat_assign_lgrphand_to_mem_node(hand, mnode); 59325cf1a30Sjl 59425cf1a30Sjl break; 59525cf1a30Sjl 59625cf1a30Sjl case LGRP_CONFIG_MEM_DEL: 59725cf1a30Sjl /* 59825cf1a30Sjl * Special handling for possible memory holes. 
59925cf1a30Sjl */ 60025cf1a30Sjl umb = (update_membounds_t *)arg; 60125cf1a30Sjl hand = umb->u_board; 60225cf1a30Sjl if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) { 60325cf1a30Sjl if (mem_node_config[mnode].exists) { 60425cf1a30Sjl start = mem_node_config[mnode].physbase; 60525cf1a30Sjl end = mem_node_config[mnode].physmax; 60625cf1a30Sjl mem_node_pre_del_slice(start, end); 60725cf1a30Sjl mem_node_post_del_slice(start, end, 0); 60825cf1a30Sjl } 60925cf1a30Sjl } 61025cf1a30Sjl 61125cf1a30Sjl break; 61225cf1a30Sjl 61325cf1a30Sjl case LGRP_CONFIG_MEM_RENAME: 61425cf1a30Sjl /* 61525cf1a30Sjl * During a DR copy-rename operation, all of the memory 61625cf1a30Sjl * on one board is moved to another board -- but the 61725cf1a30Sjl * addresses/pfns and memnodes don't change. This means 61825cf1a30Sjl * the memory has changed locations without changing identity. 61925cf1a30Sjl * 62025cf1a30Sjl * Source is where we are copying from and target is where we 62125cf1a30Sjl * are copying to. After source memnode is copied to target 62225cf1a30Sjl * memnode, the physical addresses of the target memnode are 62325cf1a30Sjl * renamed to match what the source memnode had. Then target 62425cf1a30Sjl * memnode can be removed and source memnode can take its 62525cf1a30Sjl * place. 62625cf1a30Sjl * 62725cf1a30Sjl * To do this, swap the lgroup handle to memnode mappings for 62825cf1a30Sjl * the boards, so target lgroup will have source memnode and 62925cf1a30Sjl * source lgroup will have empty target memnode which is where 63025cf1a30Sjl * its memory will go (if any is added to it later). 63125cf1a30Sjl * 63225cf1a30Sjl * Then source memnode needs to be removed from its lgroup 63325cf1a30Sjl * and added to the target lgroup where the memory was living 63425cf1a30Sjl * but under a different name/memnode. 
The memory was in the 63525cf1a30Sjl * target memnode and now lives in the source memnode with 63625cf1a30Sjl * different physical addresses even though it is the same 63725cf1a30Sjl * memory. 63825cf1a30Sjl */ 63925cf1a30Sjl sbd = arg & 0xffff; 64025cf1a30Sjl tbd = (arg & 0xffff0000) >> 16; 64125cf1a30Sjl shand = sbd; 64225cf1a30Sjl thand = tbd; 64325cf1a30Sjl snode = plat_lgrphand_to_mem_node(shand); 64425cf1a30Sjl tnode = plat_lgrphand_to_mem_node(thand); 64525cf1a30Sjl 64625cf1a30Sjl /* 64725cf1a30Sjl * Special handling for possible memory holes. 64825cf1a30Sjl */ 64925cf1a30Sjl if (tnode != -1 && mem_node_config[tnode].exists) { 65068ac2337Sjl start = mem_node_config[tnode].physbase; 65168ac2337Sjl end = mem_node_config[tnode].physmax; 65225cf1a30Sjl mem_node_pre_del_slice(start, end); 65325cf1a30Sjl mem_node_post_del_slice(start, end, 0); 65425cf1a30Sjl } 65525cf1a30Sjl 65625cf1a30Sjl plat_assign_lgrphand_to_mem_node(thand, snode); 65725cf1a30Sjl plat_assign_lgrphand_to_mem_node(shand, tnode); 65825cf1a30Sjl 65925cf1a30Sjl lmr.lmem_rename_from = shand; 66025cf1a30Sjl lmr.lmem_rename_to = thand; 66125cf1a30Sjl 66225cf1a30Sjl /* 66325cf1a30Sjl * Remove source memnode of copy rename from its lgroup 66425cf1a30Sjl * and add it to its new target lgroup 66525cf1a30Sjl */ 66625cf1a30Sjl lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode, 66725cf1a30Sjl (uintptr_t)&lmr); 66825cf1a30Sjl 66925cf1a30Sjl break; 67025cf1a30Sjl 67125cf1a30Sjl default: 67225cf1a30Sjl break; 67325cf1a30Sjl } 67425cf1a30Sjl } 67525cf1a30Sjl 67625cf1a30Sjl /* 67725cf1a30Sjl * Return latency between "from" and "to" lgroups 67825cf1a30Sjl * 67925cf1a30Sjl * This latency number can only be used for relative comparison 68025cf1a30Sjl * between lgroups on the running system, cannot be used across platforms, 68125cf1a30Sjl * and may not reflect the actual latency. It is platform and implementation 68225cf1a30Sjl * specific, so platform gets to decide its value. 
It would be nice if the 68325cf1a30Sjl * number was at least proportional to make comparisons more meaningful though. 68425cf1a30Sjl * NOTE: The numbers below are supposed to be load latencies for uncached 68525cf1a30Sjl * memory divided by 10. 68625cf1a30Sjl * 68725cf1a30Sjl */ 68825cf1a30Sjl int 68925cf1a30Sjl plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to) 69025cf1a30Sjl { 69125cf1a30Sjl /* 69225cf1a30Sjl * Return min remote latency when there are more than two lgroups 69325cf1a30Sjl * (root and child) and getting latency between two different lgroups 69425cf1a30Sjl * or root is involved 69525cf1a30Sjl */ 69625cf1a30Sjl if (lgrp_optimizations() && (from != to || 69725cf1a30Sjl from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)) 69871b3c2ffShyw return (42); 69925cf1a30Sjl else 70071b3c2ffShyw return (35); 70125cf1a30Sjl } 70225cf1a30Sjl 70325cf1a30Sjl /* 70425cf1a30Sjl * Return platform handle for root lgroup 70525cf1a30Sjl */ 70625cf1a30Sjl lgrp_handle_t 70725cf1a30Sjl plat_lgrp_root_hand(void) 70825cf1a30Sjl { 70925cf1a30Sjl if (mpo_disabled) 71025cf1a30Sjl return (lgrp_default_handle); 71125cf1a30Sjl 71225cf1a30Sjl return (LGRP_DEFAULT_HANDLE); 71325cf1a30Sjl } 71425cf1a30Sjl 71525cf1a30Sjl /*ARGSUSED*/ 71625cf1a30Sjl void 71725cf1a30Sjl plat_freelist_process(int mnode) 71825cf1a30Sjl { 71925cf1a30Sjl } 72025cf1a30Sjl 72125cf1a30Sjl void 72225cf1a30Sjl load_platform_drivers(void) 72325cf1a30Sjl { 72425cf1a30Sjl (void) i_ddi_attach_pseudo_node("dr"); 72525cf1a30Sjl } 72625cf1a30Sjl 72725cf1a30Sjl /* 72825cf1a30Sjl * No platform drivers on this platform 72925cf1a30Sjl */ 73025cf1a30Sjl char *platform_module_list[] = { 73125cf1a30Sjl (char *)0 73225cf1a30Sjl }; 73325cf1a30Sjl 73425cf1a30Sjl /*ARGSUSED*/ 73525cf1a30Sjl void 73625cf1a30Sjl plat_tod_fault(enum tod_fault_type tod_bad) 73725cf1a30Sjl { 73825cf1a30Sjl } 73925cf1a30Sjl 74025cf1a30Sjl /*ARGSUSED*/ 74125cf1a30Sjl void 74225cf1a30Sjl cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t 
sub_state, int cpuid) 74325cf1a30Sjl { 74425cf1a30Sjl static void (*scf_panic_callback)(int); 74525cf1a30Sjl static void (*scf_shutdown_callback)(int); 74625cf1a30Sjl 74725cf1a30Sjl /* 74825cf1a30Sjl * This is for notifing system panic/shutdown to SCF. 74925cf1a30Sjl * In case of shutdown and panic, SCF call back 75025cf1a30Sjl * function should be called. 75125cf1a30Sjl * <SCF call back functions> 75225cf1a30Sjl * scf_panic_callb() : panicsys()->panic_quiesce_hw() 75325cf1a30Sjl * scf_shutdown_callb(): halt() or power_down() or reboot_machine() 75425cf1a30Sjl * cpuid should be -1 and state should be SIGST_EXIT. 75525cf1a30Sjl */ 75625cf1a30Sjl if (state == SIGST_EXIT && cpuid == -1) { 75725cf1a30Sjl 75825cf1a30Sjl /* 75925cf1a30Sjl * find the symbol for the SCF panic callback routine in driver 76025cf1a30Sjl */ 76125cf1a30Sjl if (scf_panic_callback == NULL) 76225cf1a30Sjl scf_panic_callback = (void (*)(int)) 763*e98fafb9Sjl modgetsymvalue("scf_panic_callb", 0); 76425cf1a30Sjl if (scf_shutdown_callback == NULL) 76525cf1a30Sjl scf_shutdown_callback = (void (*)(int)) 766*e98fafb9Sjl modgetsymvalue("scf_shutdown_callb", 0); 76725cf1a30Sjl 76825cf1a30Sjl switch (sub_state) { 76925cf1a30Sjl case SIGSUBST_PANIC: 77025cf1a30Sjl if (scf_panic_callback == NULL) { 77125cf1a30Sjl cmn_err(CE_NOTE, "!cpu_sgn_update: " 77225cf1a30Sjl "scf_panic_callb not found\n"); 77325cf1a30Sjl return; 77425cf1a30Sjl } 77525cf1a30Sjl scf_panic_callback(SIGSUBST_PANIC); 77625cf1a30Sjl break; 77725cf1a30Sjl 77825cf1a30Sjl case SIGSUBST_HALT: 77925cf1a30Sjl if (scf_shutdown_callback == NULL) { 78025cf1a30Sjl cmn_err(CE_NOTE, "!cpu_sgn_update: " 78125cf1a30Sjl "scf_shutdown_callb not found\n"); 78225cf1a30Sjl return; 78325cf1a30Sjl } 78425cf1a30Sjl scf_shutdown_callback(SIGSUBST_HALT); 78525cf1a30Sjl break; 78625cf1a30Sjl 78725cf1a30Sjl case SIGSUBST_ENVIRON: 78825cf1a30Sjl if (scf_shutdown_callback == NULL) { 78925cf1a30Sjl cmn_err(CE_NOTE, "!cpu_sgn_update: " 79025cf1a30Sjl "scf_shutdown_callb 
not found\n"); 79125cf1a30Sjl return; 79225cf1a30Sjl } 79325cf1a30Sjl scf_shutdown_callback(SIGSUBST_ENVIRON); 79425cf1a30Sjl break; 79525cf1a30Sjl 79625cf1a30Sjl case SIGSUBST_REBOOT: 79725cf1a30Sjl if (scf_shutdown_callback == NULL) { 79825cf1a30Sjl cmn_err(CE_NOTE, "!cpu_sgn_update: " 79925cf1a30Sjl "scf_shutdown_callb not found\n"); 80025cf1a30Sjl return; 80125cf1a30Sjl } 80225cf1a30Sjl scf_shutdown_callback(SIGSUBST_REBOOT); 80325cf1a30Sjl break; 80425cf1a30Sjl } 80525cf1a30Sjl } 80625cf1a30Sjl } 80725cf1a30Sjl 80825cf1a30Sjl /*ARGSUSED*/ 80925cf1a30Sjl int 81025cf1a30Sjl plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id, 81125cf1a30Sjl int flt_in_memory, ushort_t flt_status, 81225cf1a30Sjl char *buf, int buflen, int *lenp) 81325cf1a30Sjl { 81425cf1a30Sjl /* 81525cf1a30Sjl * check if it's a Memory error. 81625cf1a30Sjl */ 81725cf1a30Sjl if (flt_in_memory) { 81825cf1a30Sjl if (opl_get_mem_unum != NULL) { 819*e98fafb9Sjl return (opl_get_mem_unum(synd_code, flt_addr, buf, 820*e98fafb9Sjl buflen, lenp)); 82125cf1a30Sjl } else { 82225cf1a30Sjl return (ENOTSUP); 82325cf1a30Sjl } 82425cf1a30Sjl } else { 82525cf1a30Sjl return (ENOTSUP); 82625cf1a30Sjl } 82725cf1a30Sjl } 82825cf1a30Sjl 82925cf1a30Sjl /*ARGSUSED*/ 83025cf1a30Sjl int 83125cf1a30Sjl plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp) 83225cf1a30Sjl { 8330cc8ae86Sav int ret = 0; 8340cc8ae86Sav uint_t sb; 835195196c6Ssubhan int plen; 83625cf1a30Sjl 83725cf1a30Sjl sb = opl_get_physical_board(LSB_ID(cpuid)); 83825cf1a30Sjl if (sb == -1) { 83925cf1a30Sjl return (ENXIO); 84025cf1a30Sjl } 84125cf1a30Sjl 84272b9fce9Ssubhan /* 84372b9fce9Ssubhan * opl_cur_model is assigned here 84472b9fce9Ssubhan */ 84572b9fce9Ssubhan if (opl_cur_model == NULL) { 84672b9fce9Ssubhan set_model_info(); 84772b9fce9Ssubhan } 84872b9fce9Ssubhan 849195196c6Ssubhan ASSERT((opl_cur_model - opl_models) == (opl_cur_model->model_type)); 850195196c6Ssubhan 851195196c6Ssubhan switch (opl_cur_model->model_type) { 
852195196c6Ssubhan case FF1: 8530cc8ae86Sav plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_A", 8540cc8ae86Sav CHIP_ID(cpuid) / 2); 8550cc8ae86Sav break; 8560cc8ae86Sav 857195196c6Ssubhan case FF2: 8580cc8ae86Sav plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_B", 85911114147Sav (CHIP_ID(cpuid) / 2) + (sb * 2)); 8600cc8ae86Sav break; 8610cc8ae86Sav 862195196c6Ssubhan case DC1: 863195196c6Ssubhan case DC2: 864195196c6Ssubhan case DC3: 8650cc8ae86Sav plen = snprintf(buf, buflen, "/%s%02d/CPUM%d", "CMU", sb, 8660cc8ae86Sav CHIP_ID(cpuid)); 8670cc8ae86Sav break; 8680cc8ae86Sav 8690cc8ae86Sav default: 8700cc8ae86Sav /* This should never happen */ 8710cc8ae86Sav return (ENODEV); 8720cc8ae86Sav } 8730cc8ae86Sav 8740cc8ae86Sav if (plen >= buflen) { 8750cc8ae86Sav ret = ENOSPC; 87625cf1a30Sjl } else { 87725cf1a30Sjl if (lenp) 87825cf1a30Sjl *lenp = strlen(buf); 87925cf1a30Sjl } 8800cc8ae86Sav return (ret); 88125cf1a30Sjl } 88225cf1a30Sjl 88325cf1a30Sjl #define SCF_PUTINFO(f, s, p) \ 88425cf1a30Sjl f(KEY_ESCF, 0x01, 0, s, p) 88525cf1a30Sjl void 88625cf1a30Sjl plat_nodename_set(void) 88725cf1a30Sjl { 88825cf1a30Sjl void *datap; 88925cf1a30Sjl static int (*scf_service_function)(uint32_t, uint8_t, 89025cf1a30Sjl uint32_t, uint32_t, void *); 89125cf1a30Sjl int counter = 5; 89225cf1a30Sjl 89325cf1a30Sjl /* 89425cf1a30Sjl * find the symbol for the SCF put routine in driver 89525cf1a30Sjl */ 89625cf1a30Sjl if (scf_service_function == NULL) 897*e98fafb9Sjl scf_service_function = (int (*)(uint32_t, uint8_t, uint32_t, 898*e98fafb9Sjl uint32_t, void *)) modgetsymvalue("scf_service_putinfo", 0); 89925cf1a30Sjl 90025cf1a30Sjl /* 90125cf1a30Sjl * If the symbol was found, call it. Otherwise, log a note (but not to 90225cf1a30Sjl * the console). 
90325cf1a30Sjl */ 90425cf1a30Sjl 90525cf1a30Sjl if (scf_service_function == NULL) { 90625cf1a30Sjl cmn_err(CE_NOTE, 90725cf1a30Sjl "!plat_nodename_set: scf_service_putinfo not found\n"); 90825cf1a30Sjl return; 90925cf1a30Sjl } 91025cf1a30Sjl 91125cf1a30Sjl datap = 91225cf1a30Sjl (struct utsname *)kmem_zalloc(sizeof (struct utsname), KM_SLEEP); 91325cf1a30Sjl 91425cf1a30Sjl if (datap == NULL) { 91525cf1a30Sjl return; 91625cf1a30Sjl } 91725cf1a30Sjl 91825cf1a30Sjl bcopy((struct utsname *)&utsname, 91925cf1a30Sjl (struct utsname *)datap, sizeof (struct utsname)); 92025cf1a30Sjl 92125cf1a30Sjl while ((SCF_PUTINFO(scf_service_function, 92225cf1a30Sjl sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) { 92325cf1a30Sjl delay(10 * drv_usectohz(1000000)); 92425cf1a30Sjl } 92525cf1a30Sjl if (counter == 0) 926*e98fafb9Sjl cmn_err(CE_NOTE, "!plat_nodename_set: scf_service_putinfo not " 927*e98fafb9Sjl "responding\n"); 92825cf1a30Sjl 92925cf1a30Sjl kmem_free(datap, sizeof (struct utsname)); 93025cf1a30Sjl } 93125cf1a30Sjl 93225cf1a30Sjl caddr_t efcode_vaddr = NULL; 93325cf1a30Sjl 93425cf1a30Sjl /* 93525cf1a30Sjl * Preallocate enough memory for fcode claims. 93625cf1a30Sjl */ 93725cf1a30Sjl 93825cf1a30Sjl caddr_t 93925cf1a30Sjl efcode_alloc(caddr_t alloc_base) 94025cf1a30Sjl { 94125cf1a30Sjl caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, 94225cf1a30Sjl MMU_PAGESIZE); 94325cf1a30Sjl caddr_t vaddr; 94425cf1a30Sjl 94525cf1a30Sjl /* 94625cf1a30Sjl * allocate the physical memory for the Oberon fcode. 
94725cf1a30Sjl */ 94825cf1a30Sjl if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base, 94925cf1a30Sjl efcode_size, MMU_PAGESIZE)) == NULL) 95025cf1a30Sjl cmn_err(CE_PANIC, "Cannot allocate Efcode Memory"); 95125cf1a30Sjl 95225cf1a30Sjl efcode_vaddr = vaddr; 95325cf1a30Sjl 95425cf1a30Sjl return (efcode_alloc_base + efcode_size); 95525cf1a30Sjl } 95625cf1a30Sjl 95725cf1a30Sjl caddr_t 95825cf1a30Sjl plat_startup_memlist(caddr_t alloc_base) 95925cf1a30Sjl { 96025cf1a30Sjl caddr_t tmp_alloc_base; 96125cf1a30Sjl 96225cf1a30Sjl tmp_alloc_base = efcode_alloc(alloc_base); 96325cf1a30Sjl tmp_alloc_base = 96425cf1a30Sjl (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize); 96525cf1a30Sjl return (tmp_alloc_base); 96625cf1a30Sjl } 96725cf1a30Sjl 96825cf1a30Sjl void 96925cf1a30Sjl startup_platform(void) 97025cf1a30Sjl { 97125cf1a30Sjl } 9720cc8ae86Sav 9731e2e7a75Shuah void 9741e2e7a75Shuah plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *info) 9751e2e7a75Shuah { 9761e2e7a75Shuah int impl; 9771e2e7a75Shuah 9781e2e7a75Shuah impl = cpunodes[cpuid].implementation; 979*e98fafb9Sjl if (IS_OLYMPUS_C(impl) || IS_JUPITER(impl)) { 98031f6f5eeSmv info->mmu_idx = MMU_ID(cpuid); 9811e2e7a75Shuah info->mmu_nctxs = 8192; 9821e2e7a75Shuah } else { 9831e2e7a75Shuah cmn_err(CE_PANIC, "Unknown processor %d", impl); 9841e2e7a75Shuah } 9851e2e7a75Shuah } 9861e2e7a75Shuah 9870cc8ae86Sav int 9880cc8ae86Sav plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp) 9890cc8ae86Sav { 9900cc8ae86Sav if (opl_get_mem_sid == NULL) { 9910cc8ae86Sav return (ENOTSUP); 9920cc8ae86Sav } 9930cc8ae86Sav return (opl_get_mem_sid(unum, buf, buflen, lenp)); 9940cc8ae86Sav } 9950cc8ae86Sav 9960cc8ae86Sav int 9970cc8ae86Sav plat_get_mem_offset(uint64_t paddr, uint64_t *offp) 9980cc8ae86Sav { 9990cc8ae86Sav if (opl_get_mem_offset == NULL) { 10000cc8ae86Sav return (ENOTSUP); 10010cc8ae86Sav } 10020cc8ae86Sav return (opl_get_mem_offset(paddr, offp)); 10030cc8ae86Sav } 10040cc8ae86Sav 
/*
 * Translate a (unum, serial id, offset) triple back to a physical
 * address.  Delegates to the driver-registered opl_get_mem_addr hook;
 * ENOTSUP if not registered.
 */
int
plat_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	if (opl_get_mem_addr == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_addr(unum, sid, offset, addrp));
}

/*
 * Adaptive mutex backoff for OPL.  *backoff carries the per-caller
 * backoff state between invocations: this routine first burns roughly
 * *backoff units of time, then grows *backoff for the next attempt.
 */
void
plat_lock_delay(int *backoff)
{
	int i;
	int cnt;
	int flag;
	int ctr;
	hrtime_t delay_start;
	/*
	 * Platform specific lock delay code for OPL
	 *
	 * Using staged linear increases in the delay.
	 * The sleep instruction is the preferred method of delay,
	 * but is too large of granularity for the initial backoff.
	 */

	/* first call for this lock attempt: seed the backoff state */
	if (*backoff == 0) *backoff = OPL_BOFF_BASE;

	/*
	 * Always 0 here (backoff was just forced non-zero), so the
	 * nulldev() calls below are never made; the opaque test exists
	 * only to keep the compiler from deleting the spin loops.
	 */
	flag = !*backoff;

	if (*backoff < OPL_BOFF_CAP1) {
		/*
		 * If desired backoff is long enough,
		 * use sleep for most of it
		 */
		for (cnt = *backoff; cnt >= OPL_BOFF_SLEEP;
		    cnt -= OPL_BOFF_SLEEP) {
			cpu_smt_pause();
		}
		/*
		 * spin for small remainder of backoff
		 *
		 * fake call to nulldev included to prevent
		 * compiler from optimizing out the spin loop
		 */
		for (ctr = cnt * OPL_BOFF_SPIN; ctr; ctr--) {
			if (flag) (void) nulldev();
		}
	} else {
		/* backoff is very large.  Fill it by sleeping */
		delay_start = gethrtime();
		cnt = *backoff/OPL_BOFF_SLEEP;
		/*
		 * use sleep instructions for delay
		 */
		for (i = 0; i < cnt; i++) {
			cpu_smt_pause();
		}

		/*
		 * Note: if the other strand executes a sleep instruction,
		 * then the sleep ends immediately with a minimum time of
		 * 42 clocks.  We check gethrtime to insure we have
		 * waited long enough.  And we include both a short
		 * spin loop and a sleep for any final delay time.
		 */

		while ((gethrtime() - delay_start) < cnt * OPL_BOFF_TM) {
			cpu_smt_pause();
			for (ctr = OPL_BOFF_SPIN; ctr; ctr--) {
				if (flag) (void) nulldev();
			}
		}
	}

	/*
	 * We adjust the backoff in three linear stages
	 * The initial stage has small increases as this phase is
	 * usually handle locks with light contention.  We don't want
	 * to have a long backoff on a lock that is available.
	 *
	 * In the second stage, we are in transition, unsure whether
	 * the lock is under heavy contention.  As the failures to
	 * obtain the lock increase, we back off further.
	 *
	 * For the final stage, we are in a heavily contended or
	 * long held long so we want to reduce the number of tries.
	 */
	if (*backoff < OPL_BOFF_CAP1) {
		*backoff += 1;
	} else {
		if (*backoff < OPL_BOFF_CAP2) {
			*backoff += OPL_BOFF_SLEEP;
		} else {
			*backoff += 2 * OPL_BOFF_SLEEP;
		}
		/* clamp so the backoff never grows without bound */
		if (*backoff > OPL_BOFF_MAX) {
			*backoff = OPL_BOFF_MAX;
		}
	}
}