17bebe46cSjc /*
27bebe46cSjc * CDDL HEADER START
37bebe46cSjc *
47bebe46cSjc * The contents of this file are subject to the terms of the
57bebe46cSjc * Common Development and Distribution License (the "License").
67bebe46cSjc * You may not use this file except in compliance with the License.
77bebe46cSjc *
87bebe46cSjc * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97bebe46cSjc * or http://www.opensolaris.org/os/licensing.
107bebe46cSjc * See the License for the specific language governing permissions
117bebe46cSjc * and limitations under the License.
127bebe46cSjc *
137bebe46cSjc * When distributing Covered Code, include this CDDL HEADER in each
147bebe46cSjc * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157bebe46cSjc * If applicable, add the following below this CDDL HEADER, with the
167bebe46cSjc * fields enclosed by brackets "[]" replaced with your own identifying
177bebe46cSjc * information: Portions Copyright [yyyy] [name of copyright owner]
187bebe46cSjc *
197bebe46cSjc * CDDL HEADER END
207bebe46cSjc */
217bebe46cSjc /*
2234a79eb7SChristopher Baumbauer - Sun Microsystems - San Diego United States * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
237bebe46cSjc * Use is subject to license terms.
247bebe46cSjc */
257bebe46cSjc
267bebe46cSjc /*
277bebe46cSjc * Driver to retire/unretire L2/L3 cachelines on panther
287bebe46cSjc */
297bebe46cSjc #include <sys/types.h>
307bebe46cSjc #include <sys/types32.h>
317bebe46cSjc #include <sys/time.h>
327bebe46cSjc #include <sys/errno.h>
337bebe46cSjc #include <sys/cmn_err.h>
347bebe46cSjc #include <sys/param.h>
357bebe46cSjc #include <sys/modctl.h>
367bebe46cSjc #include <sys/conf.h>
377bebe46cSjc #include <sys/open.h>
387bebe46cSjc #include <sys/stat.h>
397bebe46cSjc #include <sys/ddi.h>
407bebe46cSjc #include <sys/sunddi.h>
417bebe46cSjc #include <sys/file.h>
427bebe46cSjc #include <sys/cpuvar.h>
437bebe46cSjc #include <sys/x_call.h>
447bebe46cSjc #include <sys/cheetahregs.h>
457bebe46cSjc #include <sys/mem_cache.h>
467bebe46cSjc #include <sys/mem_cache_ioctl.h>
477bebe46cSjc
487bebe46cSjc extern int retire_l2(uint64_t, uint64_t);
497bebe46cSjc extern int retire_l2_alternate(uint64_t, uint64_t);
507bebe46cSjc extern int unretire_l2(uint64_t, uint64_t);
517bebe46cSjc extern int unretire_l2_alternate(uint64_t, uint64_t);
527bebe46cSjc extern int retire_l3(uint64_t, uint64_t);
537bebe46cSjc extern int retire_l3_alternate(uint64_t, uint64_t);
547bebe46cSjc extern int unretire_l3(uint64_t, uint64_t);
557bebe46cSjc extern int unretire_l3_alternate(uint64_t, uint64_t);
567bebe46cSjc
577bebe46cSjc extern void retire_l2_start(uint64_t, uint64_t);
587bebe46cSjc extern void retire_l2_end(uint64_t, uint64_t);
597bebe46cSjc extern void unretire_l2_start(uint64_t, uint64_t);
607bebe46cSjc extern void unretire_l2_end(uint64_t, uint64_t);
617bebe46cSjc extern void retire_l3_start(uint64_t, uint64_t);
627bebe46cSjc extern void retire_l3_end(uint64_t, uint64_t);
637bebe46cSjc extern void unretire_l3_start(uint64_t, uint64_t);
647bebe46cSjc extern void unretire_l3_end(uint64_t, uint64_t);
657bebe46cSjc
667bebe46cSjc extern void get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
67142c9f13Sbala extern void get_l2_tag_tl1(uint64_t, uint64_t);
68142c9f13Sbala extern void get_l3_tag_tl1(uint64_t, uint64_t);
69a62774dfSSinanallur Balasubramanian extern const int _ncpu;
707bebe46cSjc
717bebe46cSjc /* Macro for putting 64-bit onto stack as two 32-bit ints */
727bebe46cSjc #define PRTF_64_TO_32(x) (uint32_t)((x)>>32), (uint32_t)(x)
737bebe46cSjc
747bebe46cSjc
757bebe46cSjc uint_t l2_flush_retries_done = 0;
767bebe46cSjc int mem_cache_debug = 0x0;
777bebe46cSjc uint64_t pattern = 0;
787bebe46cSjc uint32_t retire_failures = 0;
79a62774dfSSinanallur Balasubramanian #ifdef DEBUG
80a62774dfSSinanallur Balasubramanian int inject_anonymous_tag_error = 0;
81a62774dfSSinanallur Balasubramanian int32_t last_error_injected_way = 0;
827bebe46cSjc uint8_t last_error_injected_bit = 0;
8315cf376dSSinanallur Balasubramanian int32_t last_l3tag_error_injected_way;
8415cf376dSSinanallur Balasubramanian uint8_t last_l3tag_error_injected_bit;
8515cf376dSSinanallur Balasubramanian int32_t last_l2tag_error_injected_way;
8615cf376dSSinanallur Balasubramanian uint8_t last_l2tag_error_injected_bit;
87a62774dfSSinanallur Balasubramanian #endif
887bebe46cSjc
897bebe46cSjc /* dev_ops and cb_ops entry point function declarations */
907bebe46cSjc static int mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
917bebe46cSjc static int mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
927bebe46cSjc static int mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
937bebe46cSjc void **);
947bebe46cSjc static int mem_cache_open(dev_t *, int, int, cred_t *);
957bebe46cSjc static int mem_cache_close(dev_t, int, int, cred_t *);
967bebe46cSjc static int mem_cache_ioctl_ops(int, int, cache_info_t *);
977bebe46cSjc static int mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
987bebe46cSjc
/*
 * Character device entry points.  Only open, close and ioctl are
 * implemented; the remaining slots are stubbed with nodev.
 * (Slot labels follow the standard cb_ops(9S) layout.)
 */
struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,			/* mmap */
	ddi_segmap,		/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};
1167bebe46cSjc
/*
 * Device operations vector; see dev_ops(9S) for slot meanings.
 */
static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};
1317bebe46cSjc
/*
 * Soft state
 *
 * One instance is zalloc'd per minor node at attach time and looked
 * up through the statep soft-state handle.
 */
struct mem_cache_softc {
	dev_info_t *dip;	/* devinfo node of this instance */
	kmutex_t mutex;		/* per-instance lock */
};
/* Fetch the soft state for instance "inst"; NULL if not attached. */
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))
1417bebe46cSjc
/* module configuration stuff */
static void *statep;		/* soft-state handle shared by all instances */
extern struct mod_ops mod_driverops;

/* Linkage: identifies this loadable module as a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};
1577bebe46cSjc
15834a79eb7SChristopher Baumbauer - Sun Microsystems - San Diego United States extern const int _ncpu; /* Pull the kernel's global _ncpu definition */
15934a79eb7SChristopher Baumbauer - Sun Microsystems - San Diego United States
1607bebe46cSjc int
_init(void)1617bebe46cSjc _init(void)
1627bebe46cSjc {
1637bebe46cSjc int e;
1647bebe46cSjc
1657bebe46cSjc if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
1667bebe46cSjc MAX_MEM_CACHE_INSTANCES)) {
1677bebe46cSjc return (e);
1687bebe46cSjc }
1697bebe46cSjc
1707bebe46cSjc if ((e = mod_install(&modlinkage)) != 0)
1717bebe46cSjc ddi_soft_state_fini(&statep);
1727bebe46cSjc
1737bebe46cSjc return (e);
1747bebe46cSjc }
1757bebe46cSjc
1767bebe46cSjc int
_fini(void)1777bebe46cSjc _fini(void)
1787bebe46cSjc {
1797bebe46cSjc int e;
1807bebe46cSjc
1817bebe46cSjc if ((e = mod_remove(&modlinkage)) != 0)
1827bebe46cSjc return (e);
1837bebe46cSjc
1847bebe46cSjc ddi_soft_state_fini(&statep);
1857bebe46cSjc
1867bebe46cSjc return (DDI_SUCCESS);
1877bebe46cSjc }
1887bebe46cSjc
1897bebe46cSjc int
_info(struct modinfo * modinfop)1907bebe46cSjc _info(struct modinfo *modinfop)
1917bebe46cSjc {
1927bebe46cSjc return (mod_info(&modlinkage, modinfop));
1937bebe46cSjc }
1947bebe46cSjc
1957bebe46cSjc /*ARGSUSED*/
1967bebe46cSjc static int
mem_cache_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** result)1977bebe46cSjc mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
1987bebe46cSjc {
1997bebe46cSjc int inst;
2007bebe46cSjc int retval = DDI_SUCCESS;
2017bebe46cSjc struct mem_cache_softc *softc;
2027bebe46cSjc
2037bebe46cSjc inst = getminor((dev_t)arg);
2047bebe46cSjc
2057bebe46cSjc switch (cmd) {
2067bebe46cSjc case DDI_INFO_DEVT2DEVINFO:
2077bebe46cSjc if ((softc = getsoftc(inst)) == NULL) {
2087bebe46cSjc *result = (void *)NULL;
2097bebe46cSjc retval = DDI_FAILURE;
2107bebe46cSjc } else
2117bebe46cSjc *result = (void *)softc->dip;
2127bebe46cSjc break;
2137bebe46cSjc
2147bebe46cSjc case DDI_INFO_DEVT2INSTANCE:
2157bebe46cSjc *result = (void *)((uintptr_t)inst);
2167bebe46cSjc break;
2177bebe46cSjc
2187bebe46cSjc default:
2197bebe46cSjc retval = DDI_FAILURE;
2207bebe46cSjc }
2217bebe46cSjc
2227bebe46cSjc return (retval);
2237bebe46cSjc }
2247bebe46cSjc
2257bebe46cSjc static int
mem_cache_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)2267bebe46cSjc mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2277bebe46cSjc {
2287bebe46cSjc int inst;
2297bebe46cSjc struct mem_cache_softc *softc = NULL;
2307bebe46cSjc char name[80];
2317bebe46cSjc
2327bebe46cSjc switch (cmd) {
2337bebe46cSjc case DDI_ATTACH:
2347bebe46cSjc inst = ddi_get_instance(dip);
2357bebe46cSjc if (inst >= MAX_MEM_CACHE_INSTANCES) {
2367bebe46cSjc cmn_err(CE_WARN, "attach failed, too many instances\n");
2377bebe46cSjc return (DDI_FAILURE);
2387bebe46cSjc }
2397bebe46cSjc (void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
2407bebe46cSjc if (ddi_create_priv_minor_node(dip, name,
2417bebe46cSjc S_IFCHR,
2427bebe46cSjc inst,
2437bebe46cSjc DDI_PSEUDO,
2447bebe46cSjc 0, NULL, "all", 0640) ==
2457bebe46cSjc DDI_FAILURE) {
2467bebe46cSjc ddi_remove_minor_node(dip, NULL);
2477bebe46cSjc return (DDI_FAILURE);
2487bebe46cSjc }
2497bebe46cSjc
2507bebe46cSjc /* Allocate a soft state structure for this instance */
2517bebe46cSjc if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
2527bebe46cSjc cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
2537bebe46cSjc "for inst %d\n", inst);
2547bebe46cSjc goto attach_failed;
2557bebe46cSjc }
2567bebe46cSjc
2577bebe46cSjc /* Setup soft state */
2587bebe46cSjc softc = getsoftc(inst);
2597bebe46cSjc softc->dip = dip;
2607bebe46cSjc mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);
2617bebe46cSjc
2627bebe46cSjc /* Create main environmental node */
2637bebe46cSjc ddi_report_dev(dip);
2647bebe46cSjc
2657bebe46cSjc return (DDI_SUCCESS);
2667bebe46cSjc
2677bebe46cSjc case DDI_RESUME:
2687bebe46cSjc return (DDI_SUCCESS);
2697bebe46cSjc
2707bebe46cSjc default:
2717bebe46cSjc return (DDI_FAILURE);
2727bebe46cSjc }
2737bebe46cSjc
2747bebe46cSjc attach_failed:
2757bebe46cSjc
2767bebe46cSjc /* Free soft state, if allocated. remove minor node if added earlier */
2777bebe46cSjc if (softc)
2787bebe46cSjc ddi_soft_state_free(statep, inst);
2797bebe46cSjc
2807bebe46cSjc ddi_remove_minor_node(dip, NULL);
2817bebe46cSjc
2827bebe46cSjc return (DDI_FAILURE);
2837bebe46cSjc }
2847bebe46cSjc
2857bebe46cSjc static int
mem_cache_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)2867bebe46cSjc mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2877bebe46cSjc {
2887bebe46cSjc int inst;
2897bebe46cSjc struct mem_cache_softc *softc;
2907bebe46cSjc
2917bebe46cSjc switch (cmd) {
2927bebe46cSjc case DDI_DETACH:
2937bebe46cSjc inst = ddi_get_instance(dip);
2947bebe46cSjc if ((softc = getsoftc(inst)) == NULL)
2957bebe46cSjc return (ENXIO);
2967bebe46cSjc
2977bebe46cSjc /* Free the soft state and remove minor node added earlier */
2987bebe46cSjc mutex_destroy(&softc->mutex);
2997bebe46cSjc ddi_soft_state_free(statep, inst);
3007bebe46cSjc ddi_remove_minor_node(dip, NULL);
3017bebe46cSjc return (DDI_SUCCESS);
3027bebe46cSjc
3037bebe46cSjc case DDI_SUSPEND:
3047bebe46cSjc return (DDI_SUCCESS);
3057bebe46cSjc
3067bebe46cSjc default:
3077bebe46cSjc return (DDI_FAILURE);
3087bebe46cSjc }
3097bebe46cSjc }
3107bebe46cSjc
3117bebe46cSjc /*ARGSUSED*/
3127bebe46cSjc static int
mem_cache_open(dev_t * devp,int flag,int otyp,cred_t * credp)3137bebe46cSjc mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
3147bebe46cSjc {
3157bebe46cSjc int inst = getminor(*devp);
3167bebe46cSjc
3177bebe46cSjc return (getsoftc(inst) == NULL ? ENXIO : 0);
3187bebe46cSjc }
3197bebe46cSjc
3207bebe46cSjc /*ARGSUSED*/
3217bebe46cSjc static int
mem_cache_close(dev_t dev,int flag,int otyp,cred_t * credp)3227bebe46cSjc mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
3237bebe46cSjc {
3247bebe46cSjc int inst = getminor(dev);
3257bebe46cSjc
3267bebe46cSjc return (getsoftc(inst) == NULL ? ENXIO : 0);
3277bebe46cSjc }
3287bebe46cSjc
/*
 * Printable names for cacheline states, indexed by the low
 * CH_ECSTATE bits of a tag (see tag_state_to_desc()).
 */
static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};
3397bebe46cSjc
3407bebe46cSjc static char *
tag_state_to_desc(uint8_t tagstate)3417bebe46cSjc tag_state_to_desc(uint8_t tagstate)
3427bebe46cSjc {
3437bebe46cSjc return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
3447bebe46cSjc }
3457bebe46cSjc
3467bebe46cSjc void
print_l2_tag(uint64_t tag_addr,uint64_t l2_tag)3477bebe46cSjc print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
3487bebe46cSjc {
3497bebe46cSjc uint64_t l2_subaddr;
3507bebe46cSjc uint8_t l2_state;
3517bebe46cSjc
3527bebe46cSjc l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
3537bebe46cSjc l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);
3547bebe46cSjc
3557bebe46cSjc l2_state = (l2_tag & CH_ECSTATE_MASK);
3567bebe46cSjc cmn_err(CE_CONT,
3577bebe46cSjc "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
3587bebe46cSjc PRTF_64_TO_32(l2_subaddr),
3597bebe46cSjc PRTF_64_TO_32(l2_tag),
3607bebe46cSjc tag_state_to_desc(l2_state));
3617bebe46cSjc }
3627bebe46cSjc
/*
 * Dump every way of one L2 cache index from a captured logout
 * structure: per-way tag, reconstructed PA, line state, and each
 * 16-byte data chunk with its captured ECC.
 */
void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int i, offset;
	uint8_t way, l2_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		/* Rebuild the line's PA: tag contents + index bits. */
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* Each ec_data element holds two 16-byte chunks. */
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			/*
			 * Even chunks use the upper 9 ECC bits and words
			 * d8[0..1]; odd chunks use the lower 9 bits and
			 * d8[2..3].
			 */
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	} /* end of for way loop */
}
4177bebe46cSjc
/*
 * L3 (external cache) counterpart of print_l2cache_line(): dump each
 * way's tag, reconstructed PA, line state, and the captured data
 * chunks with their ECC.
 */
void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int i, offset;
	uint8_t way, ec_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		/* Rebuild the line's PA: tag contents + index bits. */
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* Each ec_data element holds two 16-byte chunks. */
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			/*
			 * Even chunks use the upper 9 ECC bits and words
			 * d8[0..1]; odd chunks use the lower 9 bits and
			 * d8[2..3].
			 */
			if ((i & 1) == 0) {
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low = ecdptr->ec_d8[1];
			} else {
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}
4737bebe46cSjc
4747bebe46cSjc static boolean_t
tag_addr_collides(uint64_t tag_addr,cache_id_t type,retire_func_t start_of_func,retire_func_t end_of_func)4757bebe46cSjc tag_addr_collides(uint64_t tag_addr, cache_id_t type,
4767bebe46cSjc retire_func_t start_of_func, retire_func_t end_of_func)
4777bebe46cSjc {
4787bebe46cSjc uint64_t start_paddr, end_paddr;
4797bebe46cSjc char *type_str;
4807bebe46cSjc
4817bebe46cSjc start_paddr = va_to_pa((void *)start_of_func);
4827bebe46cSjc end_paddr = va_to_pa((void *)end_of_func);
4837bebe46cSjc switch (type) {
4847bebe46cSjc case L2_CACHE_TAG:
4857bebe46cSjc case L2_CACHE_DATA:
4867bebe46cSjc tag_addr &= PN_L2_INDEX_MASK;
4877bebe46cSjc start_paddr &= PN_L2_INDEX_MASK;
4887bebe46cSjc end_paddr &= PN_L2_INDEX_MASK;
4897bebe46cSjc type_str = "L2:";
4907bebe46cSjc break;
4917bebe46cSjc case L3_CACHE_TAG:
4927bebe46cSjc case L3_CACHE_DATA:
4937bebe46cSjc tag_addr &= PN_L3_TAG_RD_MASK;
4947bebe46cSjc start_paddr &= PN_L3_TAG_RD_MASK;
4957bebe46cSjc end_paddr &= PN_L3_TAG_RD_MASK;
4967bebe46cSjc type_str = "L3:";
4977bebe46cSjc break;
4987bebe46cSjc default:
4997bebe46cSjc /*
5007bebe46cSjc * Should never reach here.
5017bebe46cSjc */
5027bebe46cSjc ASSERT(0);
5037bebe46cSjc return (B_FALSE);
5047bebe46cSjc }
5057bebe46cSjc if ((tag_addr > (start_paddr - 0x100)) &&
5067bebe46cSjc (tag_addr < (end_paddr + 0x100))) {
5077bebe46cSjc if (mem_cache_debug & 0x1)
5087bebe46cSjc cmn_err(CE_CONT,
5097bebe46cSjc "%s collision detected tag_addr = 0x%08x"
5107bebe46cSjc " start_paddr = 0x%08x end_paddr = 0x%08x\n",
5117bebe46cSjc type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
5127bebe46cSjc (uint32_t)end_paddr);
5137bebe46cSjc return (B_TRUE);
5147bebe46cSjc }
5157bebe46cSjc else
5167bebe46cSjc return (B_FALSE);
5177bebe46cSjc }
5187bebe46cSjc
5197bebe46cSjc static uint64_t
get_tag_addr(cache_info_t * cache_info)5207bebe46cSjc get_tag_addr(cache_info_t *cache_info)
5217bebe46cSjc {
5227bebe46cSjc uint64_t tag_addr, scratch;
5237bebe46cSjc
5247bebe46cSjc switch (cache_info->cache) {
5257bebe46cSjc case L2_CACHE_TAG:
5267bebe46cSjc case L2_CACHE_DATA:
5277bebe46cSjc tag_addr = (uint64_t)(cache_info->index <<
5287bebe46cSjc PN_CACHE_LINE_SHIFT);
5297bebe46cSjc scratch = (uint64_t)(cache_info->way <<
5307bebe46cSjc PN_L2_WAY_SHIFT);
5317bebe46cSjc tag_addr |= scratch;
5327bebe46cSjc tag_addr |= PN_L2_IDX_HW_ECC_EN;
5337bebe46cSjc break;
5347bebe46cSjc case L3_CACHE_TAG:
5357bebe46cSjc case L3_CACHE_DATA:
5367bebe46cSjc tag_addr = (uint64_t)(cache_info->index <<
5377bebe46cSjc PN_CACHE_LINE_SHIFT);
5387bebe46cSjc scratch = (uint64_t)(cache_info->way <<
5397bebe46cSjc PN_L3_WAY_SHIFT);
5407bebe46cSjc tag_addr |= scratch;
5417bebe46cSjc tag_addr |= PN_L3_IDX_HW_ECC_EN;
5427bebe46cSjc break;
5437bebe46cSjc default:
5447bebe46cSjc /*
5457bebe46cSjc * Should never reach here.
5467bebe46cSjc */
5477bebe46cSjc ASSERT(0);
5487bebe46cSjc return (uint64_t)(0);
5497bebe46cSjc }
5507bebe46cSjc return (tag_addr);
5517bebe46cSjc }
5527bebe46cSjc
5537bebe46cSjc static int
mem_cache_ioctl_ops(int cmd,int mode,cache_info_t * cache_info)5547bebe46cSjc mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
5557bebe46cSjc {
5567bebe46cSjc int ret_val = 0;
5577bebe46cSjc uint64_t afar, tag_addr;
5587bebe46cSjc ch_cpu_logout_t clop;
5597bebe46cSjc uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
5607bebe46cSjc int i, retire_retry_count;
5617bebe46cSjc cpu_t *cpu;
5627bebe46cSjc uint64_t tag_data;
5637bebe46cSjc uint8_t state;
5647bebe46cSjc
565a62774dfSSinanallur Balasubramanian if (cache_info->way >= PN_CACHE_NWAYS)
566a62774dfSSinanallur Balasubramanian return (EINVAL);
5677bebe46cSjc switch (cache_info->cache) {
5687bebe46cSjc case L2_CACHE_TAG:
5697bebe46cSjc case L2_CACHE_DATA:
5707bebe46cSjc if (cache_info->index >=
5717bebe46cSjc (PN_L2_SET_SIZE/PN_L2_LINESIZE))
5727bebe46cSjc return (EINVAL);
5737bebe46cSjc break;
5747bebe46cSjc case L3_CACHE_TAG:
5757bebe46cSjc case L3_CACHE_DATA:
5767bebe46cSjc if (cache_info->index >=
5777bebe46cSjc (PN_L3_SET_SIZE/PN_L3_LINESIZE))
5787bebe46cSjc return (EINVAL);
5797bebe46cSjc break;
5807bebe46cSjc default:
5817bebe46cSjc return (ENOTSUP);
5827bebe46cSjc }
5837bebe46cSjc /*
5847bebe46cSjc * Check if we have a valid cpu ID and that
5857bebe46cSjc * CPU is ONLINE.
5867bebe46cSjc */
5877bebe46cSjc mutex_enter(&cpu_lock);
5887bebe46cSjc cpu = cpu_get(cache_info->cpu_id);
5897bebe46cSjc if ((cpu == NULL) || (!cpu_is_online(cpu))) {
5907bebe46cSjc mutex_exit(&cpu_lock);
5917bebe46cSjc return (EINVAL);
5927bebe46cSjc }
5937bebe46cSjc mutex_exit(&cpu_lock);
594a62774dfSSinanallur Balasubramanian pattern = 0; /* default value of TAG PA when cacheline is retired. */
5957bebe46cSjc switch (cmd) {
5967bebe46cSjc case MEM_CACHE_RETIRE:
5977bebe46cSjc tag_addr = get_tag_addr(cache_info);
5987bebe46cSjc pattern |= PN_ECSTATE_NA;
5997bebe46cSjc retire_retry_count = 0;
6007bebe46cSjc affinity_set(cache_info->cpu_id);
6017bebe46cSjc switch (cache_info->cache) {
6027bebe46cSjc case L2_CACHE_DATA:
6037bebe46cSjc case L2_CACHE_TAG:
604a62774dfSSinanallur Balasubramanian if ((cache_info->bit & MSB_BIT_MASK) ==
605a62774dfSSinanallur Balasubramanian MSB_BIT_MASK)
606a62774dfSSinanallur Balasubramanian pattern |= PN_L2TAG_PA_MASK;
6077bebe46cSjc retry_l2_retire:
6087bebe46cSjc if (tag_addr_collides(tag_addr,
6097bebe46cSjc cache_info->cache,
6107bebe46cSjc retire_l2_start, retire_l2_end))
6117bebe46cSjc ret_val =
6127bebe46cSjc retire_l2_alternate(
6137bebe46cSjc tag_addr, pattern);
6147bebe46cSjc else
6157bebe46cSjc ret_val = retire_l2(tag_addr,
6167bebe46cSjc pattern);
6177bebe46cSjc if (ret_val == 1) {
6187bebe46cSjc /*
6197bebe46cSjc * cacheline was in retired
6207bebe46cSjc * STATE already.
6217bebe46cSjc * so return success.
6227bebe46cSjc */
6237bebe46cSjc ret_val = 0;
6247bebe46cSjc }
6257bebe46cSjc if (ret_val < 0) {
6267bebe46cSjc cmn_err(CE_WARN,
6277bebe46cSjc "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
6287bebe46cSjc cache_info->index,
6297bebe46cSjc cache_info->way);
6307bebe46cSjc if (retire_retry_count >= 2) {
6317bebe46cSjc retire_failures++;
6327bebe46cSjc affinity_clear();
6337bebe46cSjc return (EIO);
6347bebe46cSjc }
6357bebe46cSjc retire_retry_count++;
6367bebe46cSjc goto retry_l2_retire;
6377bebe46cSjc }
6387bebe46cSjc if (ret_val == 2)
6397bebe46cSjc l2_flush_retries_done++;
640142c9f13Sbala /*
641142c9f13Sbala * We bind ourself to a CPU and send cross trap to
642142c9f13Sbala * ourself. On return from xt_one we can rely on the
643142c9f13Sbala * data in tag_data being filled in. Normally one would
644142c9f13Sbala * do a xt_sync to make sure that the CPU has completed
645142c9f13Sbala * the cross trap call xt_one.
646142c9f13Sbala */
6477bebe46cSjc xt_one(cache_info->cpu_id,
6487bebe46cSjc (xcfunc_t *)(get_l2_tag_tl1),
6497bebe46cSjc tag_addr, (uint64_t)(&tag_data));
6507bebe46cSjc state = tag_data & CH_ECSTATE_MASK;
6517bebe46cSjc if (state != PN_ECSTATE_NA) {
6527bebe46cSjc retire_failures++;
6537bebe46cSjc print_l2_tag(tag_addr,
6547bebe46cSjc tag_data);
6557bebe46cSjc cmn_err(CE_WARN,
6567bebe46cSjc "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
6577bebe46cSjc cache_info->index,
6587bebe46cSjc cache_info->way);
6597bebe46cSjc if (retire_retry_count >= 2) {
6607bebe46cSjc retire_failures++;
6617bebe46cSjc affinity_clear();
6627bebe46cSjc return (EIO);
6637bebe46cSjc }
6647bebe46cSjc retire_retry_count++;
6657bebe46cSjc goto retry_l2_retire;
6667bebe46cSjc }
6677bebe46cSjc break;
6687bebe46cSjc case L3_CACHE_TAG:
6697bebe46cSjc case L3_CACHE_DATA:
670a62774dfSSinanallur Balasubramanian if ((cache_info->bit & MSB_BIT_MASK) ==
671a62774dfSSinanallur Balasubramanian MSB_BIT_MASK)
672a62774dfSSinanallur Balasubramanian pattern |= PN_L3TAG_PA_MASK;
6737bebe46cSjc if (tag_addr_collides(tag_addr,
6747bebe46cSjc cache_info->cache,
6757bebe46cSjc retire_l3_start, retire_l3_end))
6767bebe46cSjc ret_val =
6777bebe46cSjc retire_l3_alternate(
6787bebe46cSjc tag_addr, pattern);
6797bebe46cSjc else
6807bebe46cSjc ret_val = retire_l3(tag_addr,
6817bebe46cSjc pattern);
6827bebe46cSjc if (ret_val == 1) {
6837bebe46cSjc /*
6847bebe46cSjc * cacheline was in retired
6857bebe46cSjc * STATE already.
6867bebe46cSjc * so return success.
6877bebe46cSjc */
6887bebe46cSjc ret_val = 0;
6897bebe46cSjc }
6907bebe46cSjc if (ret_val < 0) {
6917bebe46cSjc cmn_err(CE_WARN,
6927bebe46cSjc "retire_l3() failed. ret_val = %d index = 0x%x\n",
6937bebe46cSjc ret_val,
6947bebe46cSjc cache_info->index);
6957bebe46cSjc retire_failures++;
6967bebe46cSjc affinity_clear();
6977bebe46cSjc return (EIO);
6987bebe46cSjc }
699142c9f13Sbala /*
700142c9f13Sbala * We bind ourself to a CPU and send cross trap to
701142c9f13Sbala * ourself. On return from xt_one we can rely on the
702142c9f13Sbala * data in tag_data being filled in. Normally one would
703142c9f13Sbala * do a xt_sync to make sure that the CPU has completed
704142c9f13Sbala * the cross trap call xt_one.
705142c9f13Sbala */
7067bebe46cSjc xt_one(cache_info->cpu_id,
7077bebe46cSjc (xcfunc_t *)(get_l3_tag_tl1),
7087bebe46cSjc tag_addr, (uint64_t)(&tag_data));
7097bebe46cSjc state = tag_data & CH_ECSTATE_MASK;
7107bebe46cSjc if (state != PN_ECSTATE_NA) {
7117bebe46cSjc cmn_err(CE_WARN,
7127bebe46cSjc "L3 RETIRE failed for index 0x%x\n",
7137bebe46cSjc cache_info->index);
7147bebe46cSjc retire_failures++;
7157bebe46cSjc affinity_clear();
7167bebe46cSjc return (EIO);
7177bebe46cSjc }
7187bebe46cSjc
7197bebe46cSjc break;
7207bebe46cSjc }
7217bebe46cSjc affinity_clear();
7227bebe46cSjc break;
7237bebe46cSjc case MEM_CACHE_UNRETIRE:
7247bebe46cSjc tag_addr = get_tag_addr(cache_info);
7257bebe46cSjc pattern = PN_ECSTATE_INV;
7267bebe46cSjc affinity_set(cache_info->cpu_id);
7277bebe46cSjc switch (cache_info->cache) {
7287bebe46cSjc case L2_CACHE_DATA:
7297bebe46cSjc case L2_CACHE_TAG:
730142c9f13Sbala /*
731142c9f13Sbala * We bind ourself to a CPU and send cross trap to
732142c9f13Sbala * ourself. On return from xt_one we can rely on the
733142c9f13Sbala * data in tag_data being filled in. Normally one would
734142c9f13Sbala * do a xt_sync to make sure that the CPU has completed
735142c9f13Sbala * the cross trap call xt_one.
736142c9f13Sbala */
7377bebe46cSjc xt_one(cache_info->cpu_id,
7387bebe46cSjc (xcfunc_t *)(get_l2_tag_tl1),
7397bebe46cSjc tag_addr, (uint64_t)(&tag_data));
7407bebe46cSjc state = tag_data & CH_ECSTATE_MASK;
7417bebe46cSjc if (state != PN_ECSTATE_NA) {
7427bebe46cSjc affinity_clear();
7437bebe46cSjc return (EINVAL);
7447bebe46cSjc }
7457bebe46cSjc if (tag_addr_collides(tag_addr,
7467bebe46cSjc cache_info->cache,
7477bebe46cSjc unretire_l2_start, unretire_l2_end))
7487bebe46cSjc ret_val =
7497bebe46cSjc unretire_l2_alternate(
7507bebe46cSjc tag_addr, pattern);
7517bebe46cSjc else
7527bebe46cSjc ret_val =
7537bebe46cSjc unretire_l2(tag_addr,
7547bebe46cSjc pattern);
7557bebe46cSjc if (ret_val != 0) {
7567bebe46cSjc cmn_err(CE_WARN,
7577bebe46cSjc "unretire_l2() failed. ret_val = %d index = 0x%x\n",
7587bebe46cSjc ret_val,
7597bebe46cSjc cache_info->index);
7607bebe46cSjc retire_failures++;
7617bebe46cSjc affinity_clear();
7627bebe46cSjc return (EIO);
7637bebe46cSjc }
7647bebe46cSjc break;
7657bebe46cSjc case L3_CACHE_TAG:
7667bebe46cSjc case L3_CACHE_DATA:
767142c9f13Sbala /*
768142c9f13Sbala * We bind ourself to a CPU and send cross trap to
769142c9f13Sbala * ourself. On return from xt_one we can rely on the
770142c9f13Sbala * data in tag_data being filled in. Normally one would
771142c9f13Sbala * do a xt_sync to make sure that the CPU has completed
772142c9f13Sbala * the cross trap call xt_one.
773142c9f13Sbala */
7747bebe46cSjc xt_one(cache_info->cpu_id,
7757bebe46cSjc (xcfunc_t *)(get_l3_tag_tl1),
7767bebe46cSjc tag_addr, (uint64_t)(&tag_data));
7777bebe46cSjc state = tag_data & CH_ECSTATE_MASK;
7787bebe46cSjc if (state != PN_ECSTATE_NA) {
7797bebe46cSjc affinity_clear();
7807bebe46cSjc return (EINVAL);
7817bebe46cSjc }
7827bebe46cSjc if (tag_addr_collides(tag_addr,
7837bebe46cSjc cache_info->cache,
7847bebe46cSjc unretire_l3_start, unretire_l3_end))
7857bebe46cSjc ret_val =
7867bebe46cSjc unretire_l3_alternate(
7877bebe46cSjc tag_addr, pattern);
7887bebe46cSjc else
7897bebe46cSjc ret_val =
7907bebe46cSjc unretire_l3(tag_addr,
7917bebe46cSjc pattern);
7927bebe46cSjc if (ret_val != 0) {
7937bebe46cSjc cmn_err(CE_WARN,
7947bebe46cSjc "unretire_l3() failed. ret_val = %d index = 0x%x\n",
7957bebe46cSjc ret_val,
7967bebe46cSjc cache_info->index);
7977bebe46cSjc affinity_clear();
7987bebe46cSjc return (EIO);
7997bebe46cSjc }
8007bebe46cSjc break;
8017bebe46cSjc }
8027bebe46cSjc affinity_clear();
8037bebe46cSjc break;
8047bebe46cSjc case MEM_CACHE_ISRETIRED:
8057bebe46cSjc case MEM_CACHE_STATE:
8067bebe46cSjc return (ENOTSUP);
8077bebe46cSjc case MEM_CACHE_READ_TAGS:
808142c9f13Sbala #ifdef DEBUG
8097bebe46cSjc case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
810142c9f13Sbala #endif
8117bebe46cSjc /*
8127bebe46cSjc * Read tag and data for all the ways at a given afar
8137bebe46cSjc */
8147bebe46cSjc afar = (uint64_t)(cache_info->index
8157bebe46cSjc << PN_CACHE_LINE_SHIFT);
816a62774dfSSinanallur Balasubramanian mutex_enter(&cpu_lock);
8177bebe46cSjc affinity_set(cache_info->cpu_id);
818*0ed5c46eSJosef 'Jeff' Sipek pause_cpus(NULL, NULL);
819a62774dfSSinanallur Balasubramanian mutex_exit(&cpu_lock);
820142c9f13Sbala /*
821142c9f13Sbala * We bind ourself to a CPU and send cross trap to
822142c9f13Sbala * ourself. On return from xt_one we can rely on the
823142c9f13Sbala * data in clop being filled in. Normally one would
824142c9f13Sbala * do a xt_sync to make sure that the CPU has completed
825142c9f13Sbala * the cross trap call xt_one.
826142c9f13Sbala */
8277bebe46cSjc xt_one(cache_info->cpu_id,
8287bebe46cSjc (xcfunc_t *)(get_ecache_dtags_tl1),
8297bebe46cSjc afar, (uint64_t)(&clop));
830a62774dfSSinanallur Balasubramanian mutex_enter(&cpu_lock);
831a62774dfSSinanallur Balasubramanian (void) start_cpus();
832a62774dfSSinanallur Balasubramanian mutex_exit(&cpu_lock);
833a62774dfSSinanallur Balasubramanian affinity_clear();
8347bebe46cSjc switch (cache_info->cache) {
8357bebe46cSjc case L2_CACHE_TAG:
8367bebe46cSjc for (i = 0; i < PN_CACHE_NWAYS; i++) {
8377bebe46cSjc Lxcache_tag_data[i] =
8387bebe46cSjc clop.clo_data.chd_l2_data
8397bebe46cSjc [i].ec_tag;
8407bebe46cSjc }
841a62774dfSSinanallur Balasubramanian #ifdef DEBUG
8427bebe46cSjc last_error_injected_bit =
8437bebe46cSjc last_l2tag_error_injected_bit;
8447bebe46cSjc last_error_injected_way =
8457bebe46cSjc last_l2tag_error_injected_way;
846a62774dfSSinanallur Balasubramanian #endif
8477bebe46cSjc break;
8487bebe46cSjc case L3_CACHE_TAG:
8497bebe46cSjc for (i = 0; i < PN_CACHE_NWAYS; i++) {
8507bebe46cSjc Lxcache_tag_data[i] =
8517bebe46cSjc clop.clo_data.chd_ec_data
8527bebe46cSjc [i].ec_tag;
8537bebe46cSjc }
854a62774dfSSinanallur Balasubramanian #ifdef DEBUG
8557bebe46cSjc last_error_injected_bit =
8567bebe46cSjc last_l3tag_error_injected_bit;
8577bebe46cSjc last_error_injected_way =
8587bebe46cSjc last_l3tag_error_injected_way;
859a62774dfSSinanallur Balasubramanian #endif
8607bebe46cSjc break;
8617bebe46cSjc default:
8627bebe46cSjc return (ENOTSUP);
8637bebe46cSjc } /* end if switch(cache) */
864142c9f13Sbala #ifdef DEBUG
865a62774dfSSinanallur Balasubramanian if ((cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) &&
866a62774dfSSinanallur Balasubramanian (inject_anonymous_tag_error == 0) &&
867a62774dfSSinanallur Balasubramanian (last_error_injected_way >= 0) &&
868a62774dfSSinanallur Balasubramanian (last_error_injected_way <= 3)) {
8697bebe46cSjc pattern = ((uint64_t)1 <<
8707bebe46cSjc last_error_injected_bit);
8717bebe46cSjc /*
8727bebe46cSjc * If error bit is ECC we need to make sure
			 * ECC on all WAYS is corrupted.
8747bebe46cSjc */
8757bebe46cSjc if ((last_error_injected_bit >= 6) &&
8767bebe46cSjc (last_error_injected_bit <= 14)) {
8777bebe46cSjc for (i = 0; i < PN_CACHE_NWAYS; i++)
8787bebe46cSjc Lxcache_tag_data[i] ^=
8797bebe46cSjc pattern;
8807bebe46cSjc } else
8817bebe46cSjc Lxcache_tag_data
8827bebe46cSjc [last_error_injected_way] ^=
8837bebe46cSjc pattern;
8847bebe46cSjc }
885142c9f13Sbala #endif
8867bebe46cSjc if (ddi_copyout((caddr_t)Lxcache_tag_data,
8877bebe46cSjc (caddr_t)cache_info->datap,
8887bebe46cSjc sizeof (Lxcache_tag_data), mode)
8897bebe46cSjc != DDI_SUCCESS) {
8907bebe46cSjc return (EFAULT);
8917bebe46cSjc }
8927bebe46cSjc break; /* end of READ_TAGS */
8937bebe46cSjc default:
8947bebe46cSjc return (ENOTSUP);
8957bebe46cSjc } /* end if switch(cmd) */
8967bebe46cSjc return (ret_val);
8977bebe46cSjc }
8987bebe46cSjc
8997bebe46cSjc /*ARGSUSED*/
9007bebe46cSjc static int
mem_cache_ioctl(dev_t dev,int cmd,intptr_t arg,int mode,cred_t * credp,int * rvalp)9017bebe46cSjc mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
9027bebe46cSjc int *rvalp)
9037bebe46cSjc {
9047bebe46cSjc int inst;
9057bebe46cSjc struct mem_cache_softc *softc;
9067bebe46cSjc cache_info_t cache_info;
9077bebe46cSjc cache_info32_t cache_info32;
9087bebe46cSjc int ret_val;
909142c9f13Sbala int is_panther;
9107bebe46cSjc
9117bebe46cSjc inst = getminor(dev);
9127bebe46cSjc if ((softc = getsoftc(inst)) == NULL)
9137bebe46cSjc return (ENXIO);
9147bebe46cSjc
9157bebe46cSjc mutex_enter(&softc->mutex);
9167bebe46cSjc
9177bebe46cSjc #ifdef _MULTI_DATAMODEL
9187bebe46cSjc if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
9197bebe46cSjc if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
9207bebe46cSjc sizeof (cache_info32), mode) != DDI_SUCCESS) {
9217bebe46cSjc mutex_exit(&softc->mutex);
9227bebe46cSjc return (EFAULT);
9237bebe46cSjc }
9247bebe46cSjc cache_info.cache = cache_info32.cache;
9257bebe46cSjc cache_info.index = cache_info32.index;
9267bebe46cSjc cache_info.way = cache_info32.way;
9277bebe46cSjc cache_info.cpu_id = cache_info32.cpu_id;
9287bebe46cSjc cache_info.bit = cache_info32.bit;
9297bebe46cSjc cache_info.datap = (void *)((uint64_t)cache_info32.datap);
9307bebe46cSjc } else
9317bebe46cSjc #endif
9327bebe46cSjc if (ddi_copyin((cache_info_t *)arg, &cache_info,
9337bebe46cSjc sizeof (cache_info), mode) != DDI_SUCCESS) {
9347bebe46cSjc mutex_exit(&softc->mutex);
9357bebe46cSjc return (EFAULT);
9367bebe46cSjc }
9371dbf84bbScb
93834a79eb7SChristopher Baumbauer - Sun Microsystems - San Diego United States if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
9391dbf84bbScb mutex_exit(&softc->mutex);
9401dbf84bbScb return (EINVAL);
9411dbf84bbScb }
942142c9f13Sbala is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
943142c9f13Sbala if (!is_panther) {
944142c9f13Sbala mutex_exit(&softc->mutex);
945142c9f13Sbala return (ENOTSUP);
946142c9f13Sbala }
9477bebe46cSjc switch (cmd) {
9487bebe46cSjc case MEM_CACHE_RETIRE:
9497bebe46cSjc case MEM_CACHE_UNRETIRE:
9507bebe46cSjc if ((mode & FWRITE) == 0) {
9517bebe46cSjc ret_val = EBADF;
9527bebe46cSjc break;
9537bebe46cSjc }
9547bebe46cSjc /*FALLTHROUGH*/
9557bebe46cSjc case MEM_CACHE_ISRETIRED:
9567bebe46cSjc case MEM_CACHE_STATE:
9577bebe46cSjc case MEM_CACHE_READ_TAGS:
958142c9f13Sbala #ifdef DEBUG
9597bebe46cSjc case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
960142c9f13Sbala #endif
9617bebe46cSjc ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
9627bebe46cSjc break;
9637bebe46cSjc default:
9647bebe46cSjc ret_val = ENOTSUP;
9657bebe46cSjc break;
9667bebe46cSjc }
9677bebe46cSjc mutex_exit(&softc->mutex);
9687bebe46cSjc return (ret_val);
9697bebe46cSjc }
970