xref: /illumos-gate/usr/src/uts/sun4u/io/mem_cache.c (revision 1dbf84bb)
17bebe46cSjc /*
27bebe46cSjc  * CDDL HEADER START
37bebe46cSjc  *
47bebe46cSjc  * The contents of this file are subject to the terms of the
57bebe46cSjc  * Common Development and Distribution License (the "License").
67bebe46cSjc  * You may not use this file except in compliance with the License.
77bebe46cSjc  *
87bebe46cSjc  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97bebe46cSjc  * or http://www.opensolaris.org/os/licensing.
107bebe46cSjc  * See the License for the specific language governing permissions
117bebe46cSjc  * and limitations under the License.
127bebe46cSjc  *
137bebe46cSjc  * When distributing Covered Code, include this CDDL HEADER in each
147bebe46cSjc  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157bebe46cSjc  * If applicable, add the following below this CDDL HEADER, with the
167bebe46cSjc  * fields enclosed by brackets "[]" replaced with your own identifying
177bebe46cSjc  * information: Portions Copyright [yyyy] [name of copyright owner]
187bebe46cSjc  *
197bebe46cSjc  * CDDL HEADER END
207bebe46cSjc  */
217bebe46cSjc /*
227bebe46cSjc  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237bebe46cSjc  * Use is subject to license terms.
247bebe46cSjc  */
257bebe46cSjc 
267bebe46cSjc #pragma ident	"%Z%%M%	%I%	%E% SMI"
277bebe46cSjc 
287bebe46cSjc /*
297bebe46cSjc  * Driver to retire/unretire L2/L3 cachelines on panther
307bebe46cSjc  */
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>
497bebe46cSjc 
507bebe46cSjc extern int	retire_l2(uint64_t, uint64_t);
517bebe46cSjc extern int	retire_l2_alternate(uint64_t, uint64_t);
527bebe46cSjc extern int	unretire_l2(uint64_t, uint64_t);
537bebe46cSjc extern int	unretire_l2_alternate(uint64_t, uint64_t);
547bebe46cSjc extern int	retire_l3(uint64_t, uint64_t);
557bebe46cSjc extern int	retire_l3_alternate(uint64_t, uint64_t);
567bebe46cSjc extern int	unretire_l3(uint64_t, uint64_t);
577bebe46cSjc extern int	unretire_l3_alternate(uint64_t, uint64_t);
587bebe46cSjc 
597bebe46cSjc extern void	retire_l2_start(uint64_t, uint64_t);
607bebe46cSjc extern void	retire_l2_end(uint64_t, uint64_t);
617bebe46cSjc extern void	unretire_l2_start(uint64_t, uint64_t);
627bebe46cSjc extern void	unretire_l2_end(uint64_t, uint64_t);
637bebe46cSjc extern void	retire_l3_start(uint64_t, uint64_t);
647bebe46cSjc extern void	retire_l3_end(uint64_t, uint64_t);
657bebe46cSjc extern void	unretire_l3_start(uint64_t, uint64_t);
667bebe46cSjc extern void	unretire_l3_end(uint64_t, uint64_t);
677bebe46cSjc 
687bebe46cSjc extern void	get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
69142c9f13Sbala extern void	get_l2_tag_tl1(uint64_t, uint64_t);
70142c9f13Sbala extern void	get_l3_tag_tl1(uint64_t, uint64_t);
717bebe46cSjc 
727bebe46cSjc 
737bebe46cSjc /* Macro for putting 64-bit onto stack as two 32-bit ints */
747bebe46cSjc #define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)
757bebe46cSjc 
767bebe46cSjc 
/* Count of retire_l2() calls that reported 2 (extra flush performed). */
uint_t l2_flush_retries_done = 0;
/* Debug verbosity bitmask; bit 0x1 enables collision diagnostics. */
int mem_cache_debug = 0x0;
/* Tag pattern written by the most recent retire/unretire operation. */
uint64_t pattern = 0;
/* Running count of failed retire/unretire attempts. */
uint32_t retire_failures = 0;
/*
 * Bookkeeping for the most recent error-injection request, both the
 * generic "last" pair and one way/bit pair per tag/data x L2/L3 type.
 * The READ_*_TAGS ioctl paths copy the per-type values into the
 * generic pair before returning tag data to the caller.
 */
uint32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
uint32_t last_l3tag_error_injected_way = 0;
uint8_t last_l3tag_error_injected_bit = 0;
uint32_t last_l2tag_error_injected_way = 0;
uint8_t last_l2tag_error_injected_bit = 0;
uint32_t last_l3data_error_injected_way = 0;
uint8_t last_l3data_error_injected_bit = 0;
uint32_t last_l2data_error_injected_way = 0;
uint8_t last_l2data_error_injected_bit = 0;
917bebe46cSjc 
927bebe46cSjc /* dev_ops and cb_ops entry point function declarations */
937bebe46cSjc static int	mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
947bebe46cSjc static int	mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
957bebe46cSjc static int	mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
967bebe46cSjc 				void **);
977bebe46cSjc static int	mem_cache_open(dev_t *, int, int, cred_t *);
987bebe46cSjc static int	mem_cache_close(dev_t, int, int, cred_t *);
997bebe46cSjc static int	mem_cache_ioctl_ops(int, int, cache_info_t *);
1007bebe46cSjc static int	mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1017bebe46cSjc 
/* Character device entry points; see cb_ops(9S) for slot order. */
struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,		/* open */
	mem_cache_close,	/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mem_cache_ioctl,	/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	ddi_segmap,		/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};
1197bebe46cSjc 
/* Driver operations; see dev_ops(9S) for slot order. */
static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,	/* getinfo */
	nulldev,		/* identify (obsolete) */
	nulldev,		/* probe */
	mem_cache_attach,	/* attach */
	mem_cache_detach,	/* detach */
	nulldev,		/* reset */
	&mem_cache_cb_ops,	/* cb_ops */
	(struct bus_ops *)NULL,	/* bus_ops -- leaf driver */
	nulldev			/* power */
};
1337bebe46cSjc 
/*
 * Soft state, one instance per minor node.
 */
struct mem_cache_softc {
	dev_info_t	*dip;	/* devinfo handle for this instance */
	kmutex_t	mutex;	/* per-instance lock */
};
/* Look up the soft state for instance `inst'; NULL if not allocated. */
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))
1437bebe46cSjc 
1447bebe46cSjc /* module configuration stuff */
1457bebe46cSjc static void *statep;
1467bebe46cSjc extern struct mod_ops mod_driverops;
1477bebe46cSjc 
/* Linkage for a loadable device driver (modldrv(9S)). */
static struct modldrv modldrv = {
	&mod_driverops,
	"mem_cache_driver (08/01/30) ",
	&mem_cache_dev_ops
};

/* Module linkage: exactly one driver linkage, NULL-terminated. */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};
1597bebe46cSjc 
1607bebe46cSjc int
1617bebe46cSjc _init(void)
1627bebe46cSjc {
1637bebe46cSjc 	int e;
1647bebe46cSjc 
1657bebe46cSjc 	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
1667bebe46cSjc 	    MAX_MEM_CACHE_INSTANCES)) {
1677bebe46cSjc 		return (e);
1687bebe46cSjc 	}
1697bebe46cSjc 
1707bebe46cSjc 	if ((e = mod_install(&modlinkage)) != 0)
1717bebe46cSjc 		ddi_soft_state_fini(&statep);
1727bebe46cSjc 
1737bebe46cSjc 	return (e);
1747bebe46cSjc }
1757bebe46cSjc 
1767bebe46cSjc int
1777bebe46cSjc _fini(void)
1787bebe46cSjc {
1797bebe46cSjc 	int e;
1807bebe46cSjc 
1817bebe46cSjc 	if ((e = mod_remove(&modlinkage)) != 0)
1827bebe46cSjc 		return (e);
1837bebe46cSjc 
1847bebe46cSjc 	ddi_soft_state_fini(&statep);
1857bebe46cSjc 
1867bebe46cSjc 	return (DDI_SUCCESS);
1877bebe46cSjc }
1887bebe46cSjc 
/*
 * Report module information; delegates entirely to mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1947bebe46cSjc 
1957bebe46cSjc /*ARGSUSED*/
1967bebe46cSjc static int
1977bebe46cSjc mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
1987bebe46cSjc {
1997bebe46cSjc 	int	inst;
2007bebe46cSjc 	int	retval = DDI_SUCCESS;
2017bebe46cSjc 	struct mem_cache_softc *softc;
2027bebe46cSjc 
2037bebe46cSjc 	inst = getminor((dev_t)arg);
2047bebe46cSjc 
2057bebe46cSjc 	switch (cmd) {
2067bebe46cSjc 	case DDI_INFO_DEVT2DEVINFO:
2077bebe46cSjc 		if ((softc = getsoftc(inst)) == NULL) {
2087bebe46cSjc 			*result = (void *)NULL;
2097bebe46cSjc 			retval = DDI_FAILURE;
2107bebe46cSjc 		} else
2117bebe46cSjc 			*result = (void *)softc->dip;
2127bebe46cSjc 		break;
2137bebe46cSjc 
2147bebe46cSjc 	case DDI_INFO_DEVT2INSTANCE:
2157bebe46cSjc 		*result = (void *)((uintptr_t)inst);
2167bebe46cSjc 		break;
2177bebe46cSjc 
2187bebe46cSjc 	default:
2197bebe46cSjc 		retval = DDI_FAILURE;
2207bebe46cSjc 	}
2217bebe46cSjc 
2227bebe46cSjc 	return (retval);
2237bebe46cSjc }
2247bebe46cSjc 
2257bebe46cSjc static int
2267bebe46cSjc mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2277bebe46cSjc {
2287bebe46cSjc 	int inst;
2297bebe46cSjc 	struct mem_cache_softc *softc = NULL;
2307bebe46cSjc 	char name[80];
2317bebe46cSjc 
2327bebe46cSjc 	switch (cmd) {
2337bebe46cSjc 	case DDI_ATTACH:
2347bebe46cSjc 		inst = ddi_get_instance(dip);
2357bebe46cSjc 		if (inst >= MAX_MEM_CACHE_INSTANCES) {
2367bebe46cSjc 			cmn_err(CE_WARN, "attach failed, too many instances\n");
2377bebe46cSjc 			return (DDI_FAILURE);
2387bebe46cSjc 		}
2397bebe46cSjc 		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
2407bebe46cSjc 		if (ddi_create_priv_minor_node(dip, name,
2417bebe46cSjc 		    S_IFCHR,
2427bebe46cSjc 		    inst,
2437bebe46cSjc 		    DDI_PSEUDO,
2447bebe46cSjc 		    0, NULL, "all", 0640) ==
2457bebe46cSjc 		    DDI_FAILURE) {
2467bebe46cSjc 			ddi_remove_minor_node(dip, NULL);
2477bebe46cSjc 			return (DDI_FAILURE);
2487bebe46cSjc 		}
2497bebe46cSjc 
2507bebe46cSjc 		/* Allocate a soft state structure for this instance */
2517bebe46cSjc 		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
2527bebe46cSjc 			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
2537bebe46cSjc 			    "for inst %d\n", inst);
2547bebe46cSjc 			goto attach_failed;
2557bebe46cSjc 		}
2567bebe46cSjc 
2577bebe46cSjc 		/* Setup soft state */
2587bebe46cSjc 		softc = getsoftc(inst);
2597bebe46cSjc 		softc->dip = dip;
2607bebe46cSjc 		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);
2617bebe46cSjc 
2627bebe46cSjc 		/* Create main environmental node */
2637bebe46cSjc 		ddi_report_dev(dip);
2647bebe46cSjc 
2657bebe46cSjc 		return (DDI_SUCCESS);
2667bebe46cSjc 
2677bebe46cSjc 	case DDI_RESUME:
2687bebe46cSjc 		return (DDI_SUCCESS);
2697bebe46cSjc 
2707bebe46cSjc 	default:
2717bebe46cSjc 		return (DDI_FAILURE);
2727bebe46cSjc 	}
2737bebe46cSjc 
2747bebe46cSjc attach_failed:
2757bebe46cSjc 
2767bebe46cSjc 	/* Free soft state, if allocated. remove minor node if added earlier */
2777bebe46cSjc 	if (softc)
2787bebe46cSjc 		ddi_soft_state_free(statep, inst);
2797bebe46cSjc 
2807bebe46cSjc 	ddi_remove_minor_node(dip, NULL);
2817bebe46cSjc 
2827bebe46cSjc 	return (DDI_FAILURE);
2837bebe46cSjc }
2847bebe46cSjc 
2857bebe46cSjc static int
2867bebe46cSjc mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2877bebe46cSjc {
2887bebe46cSjc 	int inst;
2897bebe46cSjc 	struct mem_cache_softc *softc;
2907bebe46cSjc 
2917bebe46cSjc 	switch (cmd) {
2927bebe46cSjc 	case DDI_DETACH:
2937bebe46cSjc 		inst = ddi_get_instance(dip);
2947bebe46cSjc 		if ((softc = getsoftc(inst)) == NULL)
2957bebe46cSjc 			return (ENXIO);
2967bebe46cSjc 
2977bebe46cSjc 		/* Free the soft state and remove minor node added earlier */
2987bebe46cSjc 		mutex_destroy(&softc->mutex);
2997bebe46cSjc 		ddi_soft_state_free(statep, inst);
3007bebe46cSjc 		ddi_remove_minor_node(dip, NULL);
3017bebe46cSjc 		return (DDI_SUCCESS);
3027bebe46cSjc 
3037bebe46cSjc 	case DDI_SUSPEND:
3047bebe46cSjc 		return (DDI_SUCCESS);
3057bebe46cSjc 
3067bebe46cSjc 	default:
3077bebe46cSjc 		return (DDI_FAILURE);
3087bebe46cSjc 	}
3097bebe46cSjc }
3107bebe46cSjc 
3117bebe46cSjc /*ARGSUSED*/
3127bebe46cSjc static int
3137bebe46cSjc mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
3147bebe46cSjc {
3157bebe46cSjc 	int	inst = getminor(*devp);
3167bebe46cSjc 
3177bebe46cSjc 	return (getsoftc(inst) == NULL ? ENXIO : 0);
3187bebe46cSjc }
3197bebe46cSjc 
3207bebe46cSjc /*ARGSUSED*/
3217bebe46cSjc static int
3227bebe46cSjc mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
3237bebe46cSjc {
3247bebe46cSjc 	int	inst = getminor(dev);
3257bebe46cSjc 
3267bebe46cSjc 	return (getsoftc(inst) == NULL ? ENXIO : 0);
3277bebe46cSjc }
3287bebe46cSjc 
/*
 * Human-readable names for the E$ tag state field, indexed by
 * (tagstate & CH_ECSTATE_MASK); one entry per possible value 0-7.
 */
static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};

/*
 * Map a raw tag state byte to its descriptive string.  Only the low
 * CH_ECSTATE_MASK bits are significant, so any input is in range.
 */
static char *
tag_state_to_desc(uint8_t tagstate)
{
	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}
3457bebe46cSjc 
3467bebe46cSjc void
3477bebe46cSjc print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
3487bebe46cSjc {
3497bebe46cSjc 	uint64_t l2_subaddr;
3507bebe46cSjc 	uint8_t	l2_state;
3517bebe46cSjc 
3527bebe46cSjc 	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
3537bebe46cSjc 	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);
3547bebe46cSjc 
3557bebe46cSjc 	l2_state = (l2_tag & CH_ECSTATE_MASK);
3567bebe46cSjc 	cmn_err(CE_CONT,
3577bebe46cSjc 	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
3587bebe46cSjc 	    PRTF_64_TO_32(l2_subaddr),
3597bebe46cSjc 	    PRTF_64_TO_32(l2_tag),
3607bebe46cSjc 	    tag_state_to_desc(l2_state));
3617bebe46cSjc }
3627bebe46cSjc 
/*
 * Dump, via cmn_err(CE_CONT), the tag and the captured subblock data
 * for every way of the L2 cache line snapshot held in *clop.
 */
void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;	/* PA rebuilt from tag PA bits + index */
	int i, offset;
	uint8_t	way, l2_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* each ec_data element holds two 16-byte chunks */
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				/* even chunk: ECC in ec_eccd bits <17:9> */
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				/* odd chunk: ECC in ec_eccd bits <8:0> */
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}	/* end of for way loop */
}
4177bebe46cSjc 
/*
 * Dump, via cmn_err(CE_CONT), the tag and the captured subblock data
 * for every way of the L3 (external) cache line snapshot in *clop.
 * Structure parallels print_l2cache_line() but uses the L3 logout
 * area and L3 tag/index macros.
 */
void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;	/* PA rebuilt from tag PA bits + index */
	int i, offset;
	uint8_t	way, ec_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* each ec_data element holds two 16-byte chunks */
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				/* even chunk: ECC in ec_eccd bits <17:9> */
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				/* odd chunk: ECC in ec_eccd bits <8:0> */
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}
4737bebe46cSjc 
4747bebe46cSjc static boolean_t
4757bebe46cSjc tag_addr_collides(uint64_t tag_addr, cache_id_t type,
4767bebe46cSjc     retire_func_t start_of_func, retire_func_t end_of_func)
4777bebe46cSjc {
4787bebe46cSjc 	uint64_t start_paddr, end_paddr;
4797bebe46cSjc 	char *type_str;
4807bebe46cSjc 
4817bebe46cSjc 	start_paddr = va_to_pa((void *)start_of_func);
4827bebe46cSjc 	end_paddr = va_to_pa((void *)end_of_func);
4837bebe46cSjc 	switch (type) {
4847bebe46cSjc 		case L2_CACHE_TAG:
4857bebe46cSjc 		case L2_CACHE_DATA:
4867bebe46cSjc 			tag_addr &= PN_L2_INDEX_MASK;
4877bebe46cSjc 			start_paddr &= PN_L2_INDEX_MASK;
4887bebe46cSjc 			end_paddr &= PN_L2_INDEX_MASK;
4897bebe46cSjc 			type_str = "L2:";
4907bebe46cSjc 			break;
4917bebe46cSjc 		case L3_CACHE_TAG:
4927bebe46cSjc 		case L3_CACHE_DATA:
4937bebe46cSjc 			tag_addr &= PN_L3_TAG_RD_MASK;
4947bebe46cSjc 			start_paddr &= PN_L3_TAG_RD_MASK;
4957bebe46cSjc 			end_paddr &= PN_L3_TAG_RD_MASK;
4967bebe46cSjc 			type_str = "L3:";
4977bebe46cSjc 			break;
4987bebe46cSjc 		default:
4997bebe46cSjc 			/*
5007bebe46cSjc 			 * Should never reach here.
5017bebe46cSjc 			 */
5027bebe46cSjc 			ASSERT(0);
5037bebe46cSjc 			return (B_FALSE);
5047bebe46cSjc 	}
5057bebe46cSjc 	if ((tag_addr > (start_paddr - 0x100)) &&
5067bebe46cSjc 	    (tag_addr < (end_paddr + 0x100))) {
5077bebe46cSjc 		if (mem_cache_debug & 0x1)
5087bebe46cSjc 			cmn_err(CE_CONT,
5097bebe46cSjc 			    "%s collision detected tag_addr = 0x%08x"
5107bebe46cSjc 			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
5117bebe46cSjc 			    type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
5127bebe46cSjc 			    (uint32_t)end_paddr);
5137bebe46cSjc 		return (B_TRUE);
5147bebe46cSjc 	}
5157bebe46cSjc 	else
5167bebe46cSjc 		return (B_FALSE);
5177bebe46cSjc }
5187bebe46cSjc 
5197bebe46cSjc static uint64_t
5207bebe46cSjc get_tag_addr(cache_info_t *cache_info)
5217bebe46cSjc {
5227bebe46cSjc 	uint64_t tag_addr, scratch;
5237bebe46cSjc 
5247bebe46cSjc 	switch (cache_info->cache) {
5257bebe46cSjc 		case L2_CACHE_TAG:
5267bebe46cSjc 		case L2_CACHE_DATA:
5277bebe46cSjc 			tag_addr = (uint64_t)(cache_info->index <<
5287bebe46cSjc 			    PN_CACHE_LINE_SHIFT);
5297bebe46cSjc 			scratch = (uint64_t)(cache_info->way <<
5307bebe46cSjc 			    PN_L2_WAY_SHIFT);
5317bebe46cSjc 			tag_addr |= scratch;
5327bebe46cSjc 			tag_addr |= PN_L2_IDX_HW_ECC_EN;
5337bebe46cSjc 			break;
5347bebe46cSjc 		case L3_CACHE_TAG:
5357bebe46cSjc 		case L3_CACHE_DATA:
5367bebe46cSjc 			tag_addr = (uint64_t)(cache_info->index <<
5377bebe46cSjc 			    PN_CACHE_LINE_SHIFT);
5387bebe46cSjc 			scratch = (uint64_t)(cache_info->way <<
5397bebe46cSjc 			    PN_L3_WAY_SHIFT);
5407bebe46cSjc 			tag_addr |= scratch;
5417bebe46cSjc 			tag_addr |= PN_L3_IDX_HW_ECC_EN;
5427bebe46cSjc 			break;
5437bebe46cSjc 		default:
5447bebe46cSjc 			/*
5457bebe46cSjc 			 * Should never reach here.
5467bebe46cSjc 			 */
5477bebe46cSjc 			ASSERT(0);
5487bebe46cSjc 			return (uint64_t)(0);
5497bebe46cSjc 	}
5507bebe46cSjc 	return (tag_addr);
5517bebe46cSjc }
5527bebe46cSjc 
5537bebe46cSjc static int
5547bebe46cSjc mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
5557bebe46cSjc {
5567bebe46cSjc 	int	ret_val = 0;
5577bebe46cSjc 	uint64_t afar, tag_addr;
5587bebe46cSjc 	ch_cpu_logout_t clop;
5597bebe46cSjc 	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
5607bebe46cSjc 	int	i, retire_retry_count;
5617bebe46cSjc 	cpu_t	*cpu;
5627bebe46cSjc 	uint64_t tag_data;
5637bebe46cSjc 	uint8_t state;
5647bebe46cSjc 
5657bebe46cSjc 	switch (cache_info->cache) {
5667bebe46cSjc 		case L2_CACHE_TAG:
5677bebe46cSjc 		case L2_CACHE_DATA:
5687bebe46cSjc 			if (cache_info->way >= PN_CACHE_NWAYS)
5697bebe46cSjc 				return (EINVAL);
5707bebe46cSjc 			if (cache_info->index >=
5717bebe46cSjc 			    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
5727bebe46cSjc 				return (EINVAL);
5737bebe46cSjc 			break;
5747bebe46cSjc 		case L3_CACHE_TAG:
5757bebe46cSjc 		case L3_CACHE_DATA:
5767bebe46cSjc 			if (cache_info->way >= PN_CACHE_NWAYS)
5777bebe46cSjc 				return (EINVAL);
5787bebe46cSjc 			if (cache_info->index >=
5797bebe46cSjc 			    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
5807bebe46cSjc 				return (EINVAL);
5817bebe46cSjc 			break;
5827bebe46cSjc 		default:
5837bebe46cSjc 			return (ENOTSUP);
5847bebe46cSjc 	}
5857bebe46cSjc 	/*
5867bebe46cSjc 	 * Check if we have a valid cpu ID and that
5877bebe46cSjc 	 * CPU is ONLINE.
5887bebe46cSjc 	 */
5897bebe46cSjc 	mutex_enter(&cpu_lock);
5907bebe46cSjc 	cpu = cpu_get(cache_info->cpu_id);
5917bebe46cSjc 	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
5927bebe46cSjc 		mutex_exit(&cpu_lock);
5937bebe46cSjc 		return (EINVAL);
5947bebe46cSjc 	}
5957bebe46cSjc 	mutex_exit(&cpu_lock);
5967bebe46cSjc 	switch (cmd) {
5977bebe46cSjc 		case MEM_CACHE_RETIRE:
5987bebe46cSjc 			if ((cache_info->bit & MSB_BIT_MASK) ==
5997bebe46cSjc 			    MSB_BIT_MASK) {
6007bebe46cSjc 				pattern = ((uint64_t)1 <<
6017bebe46cSjc 				    (cache_info->bit & TAG_BIT_MASK));
6027bebe46cSjc 			} else {
6037bebe46cSjc 				pattern = 0;
6047bebe46cSjc 			}
6057bebe46cSjc 			tag_addr = get_tag_addr(cache_info);
6067bebe46cSjc 			pattern |= PN_ECSTATE_NA;
6077bebe46cSjc 			retire_retry_count = 0;
6087bebe46cSjc 			affinity_set(cache_info->cpu_id);
6097bebe46cSjc 			switch (cache_info->cache) {
6107bebe46cSjc 				case L2_CACHE_DATA:
6117bebe46cSjc 				case L2_CACHE_TAG:
6127bebe46cSjc retry_l2_retire:
6137bebe46cSjc 					if (tag_addr_collides(tag_addr,
6147bebe46cSjc 					    cache_info->cache,
6157bebe46cSjc 					    retire_l2_start, retire_l2_end))
6167bebe46cSjc 						ret_val =
6177bebe46cSjc 						    retire_l2_alternate(
6187bebe46cSjc 						    tag_addr, pattern);
6197bebe46cSjc 					else
6207bebe46cSjc 						ret_val = retire_l2(tag_addr,
6217bebe46cSjc 						    pattern);
6227bebe46cSjc 					if (ret_val == 1) {
6237bebe46cSjc 						/*
6247bebe46cSjc 						 * cacheline was in retired
6257bebe46cSjc 						 * STATE already.
6267bebe46cSjc 						 * so return success.
6277bebe46cSjc 						 */
6287bebe46cSjc 						ret_val = 0;
6297bebe46cSjc 					}
6307bebe46cSjc 					if (ret_val < 0) {
6317bebe46cSjc 						cmn_err(CE_WARN,
6327bebe46cSjc 		"retire_l2() failed. index = 0x%x way %d. Retrying...\n",
6337bebe46cSjc 						    cache_info->index,
6347bebe46cSjc 						    cache_info->way);
6357bebe46cSjc 						if (retire_retry_count >= 2) {
6367bebe46cSjc 							retire_failures++;
6377bebe46cSjc 							affinity_clear();
6387bebe46cSjc 							return (EIO);
6397bebe46cSjc 						}
6407bebe46cSjc 						retire_retry_count++;
6417bebe46cSjc 						goto retry_l2_retire;
6427bebe46cSjc 					}
6437bebe46cSjc 					if (ret_val == 2)
6447bebe46cSjc 						l2_flush_retries_done++;
645142c9f13Sbala 			/*
646142c9f13Sbala 			 * We bind ourself to a CPU and send cross trap to
647142c9f13Sbala 			 * ourself. On return from xt_one we can rely on the
648142c9f13Sbala 			 * data in tag_data being filled in. Normally one would
649142c9f13Sbala 			 * do a xt_sync to make sure that the CPU has completed
650142c9f13Sbala 			 * the cross trap call xt_one.
651142c9f13Sbala 			 */
6527bebe46cSjc 					xt_one(cache_info->cpu_id,
6537bebe46cSjc 					    (xcfunc_t *)(get_l2_tag_tl1),
6547bebe46cSjc 					    tag_addr, (uint64_t)(&tag_data));
6557bebe46cSjc 					state = tag_data & CH_ECSTATE_MASK;
6567bebe46cSjc 					if (state != PN_ECSTATE_NA) {
6577bebe46cSjc 						retire_failures++;
6587bebe46cSjc 						print_l2_tag(tag_addr,
6597bebe46cSjc 						    tag_data);
6607bebe46cSjc 						cmn_err(CE_WARN,
6617bebe46cSjc 		"L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
6627bebe46cSjc 						    cache_info->index,
6637bebe46cSjc 						    cache_info->way);
6647bebe46cSjc 						if (retire_retry_count >= 2) {
6657bebe46cSjc 							retire_failures++;
6667bebe46cSjc 							affinity_clear();
6677bebe46cSjc 							return (EIO);
6687bebe46cSjc 						}
6697bebe46cSjc 						retire_retry_count++;
6707bebe46cSjc 						goto retry_l2_retire;
6717bebe46cSjc 					}
6727bebe46cSjc 					break;
6737bebe46cSjc 				case L3_CACHE_TAG:
6747bebe46cSjc 				case L3_CACHE_DATA:
6757bebe46cSjc 					if (tag_addr_collides(tag_addr,
6767bebe46cSjc 					    cache_info->cache,
6777bebe46cSjc 					    retire_l3_start, retire_l3_end))
6787bebe46cSjc 						ret_val =
6797bebe46cSjc 						    retire_l3_alternate(
6807bebe46cSjc 						    tag_addr, pattern);
6817bebe46cSjc 					else
6827bebe46cSjc 						ret_val = retire_l3(tag_addr,
6837bebe46cSjc 						    pattern);
6847bebe46cSjc 					if (ret_val == 1) {
6857bebe46cSjc 						/*
6867bebe46cSjc 						 * cacheline was in retired
6877bebe46cSjc 						 * STATE already.
6887bebe46cSjc 						 * so return success.
6897bebe46cSjc 						 */
6907bebe46cSjc 						ret_val = 0;
6917bebe46cSjc 					}
6927bebe46cSjc 					if (ret_val < 0) {
6937bebe46cSjc 						cmn_err(CE_WARN,
6947bebe46cSjc 			"retire_l3() failed. ret_val = %d index = 0x%x\n",
6957bebe46cSjc 						    ret_val,
6967bebe46cSjc 						    cache_info->index);
6977bebe46cSjc 						retire_failures++;
6987bebe46cSjc 						affinity_clear();
6997bebe46cSjc 						return (EIO);
7007bebe46cSjc 					}
701142c9f13Sbala 			/*
702142c9f13Sbala 			 * We bind ourself to a CPU and send cross trap to
703142c9f13Sbala 			 * ourself. On return from xt_one we can rely on the
704142c9f13Sbala 			 * data in tag_data being filled in. Normally one would
705142c9f13Sbala 			 * do a xt_sync to make sure that the CPU has completed
706142c9f13Sbala 			 * the cross trap call xt_one.
707142c9f13Sbala 			 */
7087bebe46cSjc 					xt_one(cache_info->cpu_id,
7097bebe46cSjc 					    (xcfunc_t *)(get_l3_tag_tl1),
7107bebe46cSjc 					    tag_addr, (uint64_t)(&tag_data));
7117bebe46cSjc 					state = tag_data & CH_ECSTATE_MASK;
7127bebe46cSjc 					if (state != PN_ECSTATE_NA) {
7137bebe46cSjc 						cmn_err(CE_WARN,
7147bebe46cSjc 					"L3 RETIRE failed for index 0x%x\n",
7157bebe46cSjc 						    cache_info->index);
7167bebe46cSjc 						retire_failures++;
7177bebe46cSjc 						affinity_clear();
7187bebe46cSjc 						return (EIO);
7197bebe46cSjc 					}
7207bebe46cSjc 
7217bebe46cSjc 					break;
7227bebe46cSjc 			}
7237bebe46cSjc 			affinity_clear();
7247bebe46cSjc 			break;
7257bebe46cSjc 		case MEM_CACHE_UNRETIRE:
7267bebe46cSjc 			tag_addr = get_tag_addr(cache_info);
7277bebe46cSjc 			pattern = PN_ECSTATE_INV;
7287bebe46cSjc 			affinity_set(cache_info->cpu_id);
7297bebe46cSjc 			switch (cache_info->cache) {
7307bebe46cSjc 				case L2_CACHE_DATA:
7317bebe46cSjc 				case L2_CACHE_TAG:
7327bebe46cSjc 					/*
7337bebe46cSjc 					 * Check if the index/way is in NA state
7347bebe46cSjc 					 */
735142c9f13Sbala 			/*
736142c9f13Sbala 			 * We bind ourself to a CPU and send cross trap to
737142c9f13Sbala 			 * ourself. On return from xt_one we can rely on the
738142c9f13Sbala 			 * data in tag_data being filled in. Normally one would
739142c9f13Sbala 			 * do a xt_sync to make sure that the CPU has completed
740142c9f13Sbala 			 * the cross trap call xt_one.
741142c9f13Sbala 			 */
7427bebe46cSjc 					xt_one(cache_info->cpu_id,
7437bebe46cSjc 					    (xcfunc_t *)(get_l2_tag_tl1),
7447bebe46cSjc 					    tag_addr, (uint64_t)(&tag_data));
7457bebe46cSjc 					state = tag_data & CH_ECSTATE_MASK;
7467bebe46cSjc 					if (state != PN_ECSTATE_NA) {
7477bebe46cSjc 						affinity_clear();
7487bebe46cSjc 						return (EINVAL);
7497bebe46cSjc 					}
7507bebe46cSjc 					if (tag_addr_collides(tag_addr,
7517bebe46cSjc 					    cache_info->cache,
7527bebe46cSjc 					    unretire_l2_start, unretire_l2_end))
7537bebe46cSjc 						ret_val =
7547bebe46cSjc 						    unretire_l2_alternate(
7557bebe46cSjc 						    tag_addr, pattern);
7567bebe46cSjc 					else
7577bebe46cSjc 						ret_val =
7587bebe46cSjc 						    unretire_l2(tag_addr,
7597bebe46cSjc 						    pattern);
7607bebe46cSjc 					if (ret_val != 0) {
7617bebe46cSjc 						cmn_err(CE_WARN,
7627bebe46cSjc 			"unretire_l2() failed. ret_val = %d index = 0x%x\n",
7637bebe46cSjc 						    ret_val,
7647bebe46cSjc 						    cache_info->index);
7657bebe46cSjc 						retire_failures++;
7667bebe46cSjc 						affinity_clear();
7677bebe46cSjc 						return (EIO);
7687bebe46cSjc 					}
7697bebe46cSjc 					break;
7707bebe46cSjc 				case L3_CACHE_TAG:
7717bebe46cSjc 				case L3_CACHE_DATA:
7727bebe46cSjc 					/*
7737bebe46cSjc 					 * Check if the index/way is in NA state
7747bebe46cSjc 					 */
775142c9f13Sbala 			/*
776142c9f13Sbala 			 * We bind ourself to a CPU and send cross trap to
777142c9f13Sbala 			 * ourself. On return from xt_one we can rely on the
778142c9f13Sbala 			 * data in tag_data being filled in. Normally one would
779142c9f13Sbala 			 * do a xt_sync to make sure that the CPU has completed
780142c9f13Sbala 			 * the cross trap call xt_one.
781142c9f13Sbala 			 */
7827bebe46cSjc 					xt_one(cache_info->cpu_id,
7837bebe46cSjc 					    (xcfunc_t *)(get_l3_tag_tl1),
7847bebe46cSjc 					    tag_addr, (uint64_t)(&tag_data));
7857bebe46cSjc 					state = tag_data & CH_ECSTATE_MASK;
7867bebe46cSjc 					if (state != PN_ECSTATE_NA) {
7877bebe46cSjc 						affinity_clear();
7887bebe46cSjc 						return (EINVAL);
7897bebe46cSjc 					}
7907bebe46cSjc 					if (tag_addr_collides(tag_addr,
7917bebe46cSjc 					    cache_info->cache,
7927bebe46cSjc 					    unretire_l3_start, unretire_l3_end))
7937bebe46cSjc 						ret_val =
7947bebe46cSjc 						    unretire_l3_alternate(
7957bebe46cSjc 						    tag_addr, pattern);
7967bebe46cSjc 					else
7977bebe46cSjc 						ret_val =
7987bebe46cSjc 						    unretire_l3(tag_addr,
7997bebe46cSjc 						    pattern);
8007bebe46cSjc 					if (ret_val != 0) {
8017bebe46cSjc 						cmn_err(CE_WARN,
8027bebe46cSjc 			"unretire_l3() failed. ret_val = %d index = 0x%x\n",
8037bebe46cSjc 						    ret_val,
8047bebe46cSjc 						    cache_info->index);
8057bebe46cSjc 						affinity_clear();
8067bebe46cSjc 						return (EIO);
8077bebe46cSjc 					}
8087bebe46cSjc 					break;
8097bebe46cSjc 			}
8107bebe46cSjc 			affinity_clear();
8117bebe46cSjc 			break;
8127bebe46cSjc 		case MEM_CACHE_ISRETIRED:
8137bebe46cSjc 		case MEM_CACHE_STATE:
8147bebe46cSjc 			return (ENOTSUP);
8157bebe46cSjc 		case MEM_CACHE_READ_TAGS:
816142c9f13Sbala #ifdef DEBUG
8177bebe46cSjc 		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
818142c9f13Sbala #endif
8197bebe46cSjc 			/*
8207bebe46cSjc 			 * Read tag and data for all the ways at a given afar
8217bebe46cSjc 			 */
8227bebe46cSjc 			afar = (uint64_t)(cache_info->index
8237bebe46cSjc 			    << PN_CACHE_LINE_SHIFT);
8247bebe46cSjc 			affinity_set(cache_info->cpu_id);
825142c9f13Sbala 			/*
826142c9f13Sbala 			 * We bind ourself to a CPU and send cross trap to
827142c9f13Sbala 			 * ourself. On return from xt_one we can rely on the
828142c9f13Sbala 			 * data in clop being filled in. Normally one would
829142c9f13Sbala 			 * do a xt_sync to make sure that the CPU has completed
830142c9f13Sbala 			 * the cross trap call xt_one.
831142c9f13Sbala 			 */
8327bebe46cSjc 			xt_one(cache_info->cpu_id,
8337bebe46cSjc 			    (xcfunc_t *)(get_ecache_dtags_tl1),
8347bebe46cSjc 			    afar, (uint64_t)(&clop));
8357bebe46cSjc 			switch (cache_info->cache) {
8367bebe46cSjc 				case L2_CACHE_TAG:
8377bebe46cSjc 					for (i = 0; i < PN_CACHE_NWAYS; i++) {
8387bebe46cSjc 						Lxcache_tag_data[i] =
8397bebe46cSjc 						    clop.clo_data.chd_l2_data
8407bebe46cSjc 						    [i].ec_tag;
8417bebe46cSjc 					}
8427bebe46cSjc 					last_error_injected_bit =
8437bebe46cSjc 					    last_l2tag_error_injected_bit;
8447bebe46cSjc 					last_error_injected_way =
8457bebe46cSjc 					    last_l2tag_error_injected_way;
8467bebe46cSjc 					break;
8477bebe46cSjc 				case L3_CACHE_TAG:
8487bebe46cSjc 					for (i = 0; i < PN_CACHE_NWAYS; i++) {
8497bebe46cSjc 						Lxcache_tag_data[i] =
8507bebe46cSjc 						    clop.clo_data.chd_ec_data
8517bebe46cSjc 						    [i].ec_tag;
8527bebe46cSjc 					}
8537bebe46cSjc 					last_error_injected_bit =
8547bebe46cSjc 					    last_l3tag_error_injected_bit;
8557bebe46cSjc 					last_error_injected_way =
8567bebe46cSjc 					    last_l3tag_error_injected_way;
8577bebe46cSjc 					break;
8587bebe46cSjc 				default:
8597bebe46cSjc 					affinity_clear();
8607bebe46cSjc 					return (ENOTSUP);
8617bebe46cSjc 			}	/* end if switch(cache) */
862142c9f13Sbala #ifdef DEBUG
8637bebe46cSjc 			if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
8647bebe46cSjc 				pattern = ((uint64_t)1 <<
8657bebe46cSjc 				    last_error_injected_bit);
8667bebe46cSjc 				/*
8677bebe46cSjc 				 * If error bit is ECC we need to make sure
8687bebe46cSjc 				 * ECC on all all WAYS are corrupted.
8697bebe46cSjc 				 */
8707bebe46cSjc 				if ((last_error_injected_bit >= 6) &&
8717bebe46cSjc 				    (last_error_injected_bit <= 14)) {
8727bebe46cSjc 					for (i = 0; i < PN_CACHE_NWAYS; i++)
8737bebe46cSjc 						Lxcache_tag_data[i] ^=
8747bebe46cSjc 						    pattern;
8757bebe46cSjc 				} else
8767bebe46cSjc 					Lxcache_tag_data
8777bebe46cSjc 					    [last_error_injected_way] ^=
8787bebe46cSjc 					    pattern;
8797bebe46cSjc 			}
880142c9f13Sbala #endif
8817bebe46cSjc 			if (ddi_copyout((caddr_t)Lxcache_tag_data,
8827bebe46cSjc 			    (caddr_t)cache_info->datap,
8837bebe46cSjc 			    sizeof (Lxcache_tag_data), mode)
8847bebe46cSjc 			    != DDI_SUCCESS) {
8857bebe46cSjc 				affinity_clear();
8867bebe46cSjc 				return (EFAULT);
8877bebe46cSjc 			}
8887bebe46cSjc 			affinity_clear();
8897bebe46cSjc 			break;	/* end of READ_TAGS */
8907bebe46cSjc 		default:
8917bebe46cSjc 			return (ENOTSUP);
8927bebe46cSjc 	}	/* end if switch(cmd) */
8937bebe46cSjc 	return (ret_val);
8947bebe46cSjc }
8957bebe46cSjc 
8967bebe46cSjc /*ARGSUSED*/
8977bebe46cSjc static int
8987bebe46cSjc mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
8997bebe46cSjc 		int *rvalp)
9007bebe46cSjc {
9017bebe46cSjc 	int	inst;
9027bebe46cSjc 	struct mem_cache_softc *softc;
9037bebe46cSjc 	cache_info_t	cache_info;
9047bebe46cSjc 	cache_info32_t	cache_info32;
9057bebe46cSjc 	int	ret_val;
906142c9f13Sbala 	int	is_panther;
9077bebe46cSjc 
9087bebe46cSjc 	inst = getminor(dev);
9097bebe46cSjc 	if ((softc = getsoftc(inst)) == NULL)
9107bebe46cSjc 		return (ENXIO);
9117bebe46cSjc 
9127bebe46cSjc 	mutex_enter(&softc->mutex);
9137bebe46cSjc 
9147bebe46cSjc #ifdef _MULTI_DATAMODEL
9157bebe46cSjc 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
9167bebe46cSjc 		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
9177bebe46cSjc 		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
9187bebe46cSjc 			mutex_exit(&softc->mutex);
9197bebe46cSjc 			return (EFAULT);
9207bebe46cSjc 		}
9217bebe46cSjc 		cache_info.cache = cache_info32.cache;
9227bebe46cSjc 		cache_info.index = cache_info32.index;
9237bebe46cSjc 		cache_info.way = cache_info32.way;
9247bebe46cSjc 		cache_info.cpu_id = cache_info32.cpu_id;
9257bebe46cSjc 		cache_info.bit = cache_info32.bit;
9267bebe46cSjc 		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
9277bebe46cSjc 	} else
9287bebe46cSjc #endif
9297bebe46cSjc 	if (ddi_copyin((cache_info_t *)arg, &cache_info,
9307bebe46cSjc 	    sizeof (cache_info), mode) != DDI_SUCCESS) {
9317bebe46cSjc 		mutex_exit(&softc->mutex);
9327bebe46cSjc 		return (EFAULT);
9337bebe46cSjc 	}
934*1dbf84bbScb 
935*1dbf84bbScb 	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= NCPU)) {
936*1dbf84bbScb 		mutex_exit(&softc->mutex);
937*1dbf84bbScb 		return (EINVAL);
938*1dbf84bbScb 	}
939*1dbf84bbScb 
940142c9f13Sbala 	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
941142c9f13Sbala 	if (!is_panther) {
942142c9f13Sbala 		mutex_exit(&softc->mutex);
943142c9f13Sbala 		return (ENOTSUP);
944142c9f13Sbala 	}
9457bebe46cSjc 	switch (cmd) {
9467bebe46cSjc 		case MEM_CACHE_RETIRE:
9477bebe46cSjc 		case MEM_CACHE_UNRETIRE:
9487bebe46cSjc 			if ((mode & FWRITE) == 0) {
9497bebe46cSjc 				ret_val = EBADF;
9507bebe46cSjc 				break;
9517bebe46cSjc 			}
9527bebe46cSjc 		/*FALLTHROUGH*/
9537bebe46cSjc 		case MEM_CACHE_ISRETIRED:
9547bebe46cSjc 		case MEM_CACHE_STATE:
9557bebe46cSjc 		case MEM_CACHE_READ_TAGS:
956142c9f13Sbala #ifdef DEBUG
9577bebe46cSjc 		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
958142c9f13Sbala #endif
9597bebe46cSjc 			ret_val =  mem_cache_ioctl_ops(cmd, mode, &cache_info);
9607bebe46cSjc 			break;
9617bebe46cSjc 		default:
9627bebe46cSjc 			ret_val = ENOTSUP;
9637bebe46cSjc 			break;
9647bebe46cSjc 	}
9657bebe46cSjc 	mutex_exit(&softc->mutex);
9667bebe46cSjc 	return (ret_val);
9677bebe46cSjc }
968