/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * The "rmc_comm" driver provides access to the RMC so that its clients need
 * not be concerned with the details of the access mechanism, which in this
 * case is implemented via a packet-based protocol over a serial link through
 * a 16550-compatible serial port.
 */


/*
 *  Header files
 */
#include <sys/conf.h>
#include <sys/membar.h>
#include <sys/modctl.h>
#include <sys/strlog.h>
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/ddi.h>
#include <sys/rmc_comm_dp_boot.h>
#include <sys/rmc_comm_dp.h>
#include <sys/rmc_comm_drvintf.h>
#include <sys/rmc_comm.h>
#include <sys/cpu_sgnblk_defs.h>

/*
 * Local definitions
 */
#define	MYNAME			"rmc_comm"
#define	NOMAJOR			(~(major_t)0)
#define	DUMMY_VALUE		(~(int8_t)0)

/*
 * Local data
 */
static void *rmc_comm_statep;
static major_t rmc_comm_major = NOMAJOR;
static kmutex_t rmc_comm_attach_lock;
static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] =
{
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
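/* records whether the hardware watchdog was active before a suspend */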
static int watchdog_was_active;
extern int watchdog_activated;
extern int watchdog_enable;

/*
 * prototypes
 */

extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t);
static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t);
static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t);
static void sio_check_fault_status(struct rmc_comm_state *);
static boolean_t sio_data_ready(struct rmc_comm_state *);
static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t);
static uint_t rmc_comm_hi_intr(caddr_t);
static uint_t rmc_comm_softint(caddr_t);
static void rmc_comm_cyclic(void *);
static void rmc_comm_hw_reset(struct rmc_comm_state *);
static void rmc_comm_offline(struct rmc_comm_state *);
static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *);
static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int,
    boolean_t, boolean_t, boolean_t);
static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t);
static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * for client leaf drivers to register their desire for rmc_comm
 * to stay attached
 */
int
rmc_comm_register()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	if ((rcs == NULL) || (!rcs->is_attached)) {
		mutex_exit(&rmc_comm_attach_lock);
		return (DDI_FAILURE);
	}
	rcs->n_registrations++;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

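/*
 * for client leaf drivers to withdraw a previous registration
 */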
void
rmc_comm_unregister()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	ASSERT(rcs != NULL);
	ASSERT(rcs->n_registrations != 0);
	rcs->n_registrations--;
	mutex_exit(&rmc_comm_attach_lock);
}

/*
 * to get the soft state structure of a specific instance
 */
struct rmc_comm_state *
rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
{
	struct rmc_comm_state *rcs = NULL;
	dev_info_t *sdip = NULL;
	major_t dmaj = NOMAJOR;

	if (dip != NULL) {
		/*
		 * Use the instance number from the <dip>; also,
		 * check that it really corresponds to this driver
		 */
		instance = ddi_get_instance(dip);
		dmaj = ddi_driver_major(dip);
		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
			rmc_comm_major = dmaj;
		else if (dmaj != rmc_comm_major) {
			cmn_err(CE_WARN,
			    "%s: major number mismatch (%d vs. %d) in %s(), "
			    "probably due to child misconfiguration",
			    MYNAME, rmc_comm_major, dmaj, caller);
			instance = -1;
		}
	}
	if (instance >= 0)
		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
	if (rcs != NULL) {
		sdip = rcs->dip;
		if (dip == NULL && sdip == NULL)
			rcs = NULL;
		else if (dip != NULL && sdip != NULL && sdip != dip) {
			cmn_err(CE_WARN,
			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
			    "probably due to child misconfiguration", MYNAME,
			    (void *)dip, (void *)sdip, caller);
			rcs = NULL;
		}
	}

	return (rcs);
}


/*
 * Lowest-level serial I/O chip register read/write
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value caching.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}

static uint8_t
sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
{
	uint8_t val;

	if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
		val = ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg);
	else
		val = DUMMY_VALUE;
	DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
	return (val);
}

static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
	rcs->sd_state.sio_fault =
	    ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}

boolean_t
rmc_comm_faulty(struct rmc_comm_state *rcs)
{
	if (!rcs->sd_state.sio_fault)
		sio_check_fault_status(rcs);
	return (rcs->sd_state.sio_fault);
}

/*
 * Check for data ready.
 */
static boolean_t
sio_data_ready(struct rmc_comm_state *rcs)
{
	uint8_t status;

	/*
	 * Data is available if the RXDA bit in the LSR is nonzero
	 * (if reading it didn't incur a fault).
	 */
	status = sio_get_reg(rcs, SIO_LSR);
	return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
}

/*
 * Enable/disable interrupts
 */
static void
rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
{
	uint8_t val;

	val = newstate ? SIO_IER_RXHDL_IE : 0;
	sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
	rcs->sd_state.hw_int_enabled = newstate;
}

/*
 * High-level interrupt handler:
 *	Checks whether initialisation is complete (to avoid a race
 *	with mutex_init()), and whether chip interrupts are enabled.
 *	If not, the interrupt's not for us, so just return UNCLAIMED.
 *	Otherwise, disable the interrupt, trigger a softint, and return
 *	CLAIMED.  The softint handler will then do all the real work.
 *
 *	NOTE: the chip interrupt capability is only re-enabled once the
 *	receive code has run, but that can be called from a poll loop
 *	or cyclic callback as well as from the softint.  So it's *not*
 *	guaranteed that there really is a chip interrupt pending here,
 *	because the work may already have been done and the reason for the
 *	interrupt gone away before we get here.
 *
 *	OTOH, if we come through here twice without the receive code
 *	having run in between, that's definitely wrong.  In such an
 *	event, we would notice that chip interrupts haven't yet been
 *	re-enabled and return UNCLAIMED, allowing the system's jabber
 *	protect code (if any) to do its job.
 */
static uint_t
rmc_comm_hi_intr(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;
	uint_t claim;

	claim = DDI_INTR_UNCLAIMED;
	if (rcs->sd_state.cycid != NULL) {
		/*
		 * Handle the case where this interrupt fires during
		 * panic processing.  If that occurs, then a thread
		 * in rmc_comm might have been idled while holding
		 * hw_mutex.  If so, that thread will never make
		 * progress, and so we do not want to unconditionally
		 * grab hw_mutex.
		 */
		if (ddi_in_panic() != 0) {
			if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
				return (claim);
			}
		} else {
			mutex_enter(rcs->sd_state.hw_mutex);
		}
		if (rcs->sd_state.hw_int_enabled) {
			rmc_comm_set_irq(rcs, B_FALSE);
			ddi_trigger_softintr(rcs->sd_state.softid);
			claim = DDI_INTR_CLAIMED;
		}
		mutex_exit(rcs->sd_state.hw_mutex);
	}
	return (claim);
}

/*
 * Packet receive handler
 *
 * This routine should be called from the low-level softint, or the
 * cyclic callback, or rmc_comm_cmd() (for polled operation), with the
 * low-level mutex already held.
 */
void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
	uint8_t data;

	DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

	/*
	 * Check for access faults before starting the receive
	 * loop (we don't want to cause bus errors or suchlike
	 * unpleasantness in the event that the SIO has died).
	 */
	if (!rmc_comm_faulty(rcs)) {

		char *rx_buf = rcs->sd_state.serdev_rx_buf;
		uint16_t rx_buflen = 0;

		/*
		 * Read bytes from the FIFO until they're all gone
		 * or our buffer overflows (which must be an error)
		 */

		/*
		 * At the moment, the receive buffer is overwritten any
		 * time data is received from the serial device.
		 * This should not pose problems (probably!), as the data
		 * protocol is half-duplex; otherwise, a circular buffer
		 * would have to be implemented.
		 */
		mutex_enter(rcs->sd_state.hw_mutex);
		while (sio_data_ready(rcs)) {
			data = sio_get_reg(rcs, SIO_RXD);
			rx_buf[rx_buflen++] = data;
			if (rx_buflen >= SIO_MAX_RXBUF_SIZE)
				break;
		}
		rcs->sd_state.serdev_rx_count = rx_buflen;

		DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

		rmc_comm_set_irq(rcs, B_TRUE);
		mutex_exit(rcs->sd_state.hw_mutex);

		/*
		 * call up the data protocol receive handler
		 */
		rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
	}
}

/*
 * Low-level softint handler
 *
 * This routine should be triggered whenever there's a byte to be read
 */
static uint_t
rmc_comm_softint(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
	return (DDI_INTR_CLAIMED);
}

/*
 * Cyclic handler: just calls the receive routine, in case interrupts
 * are not being delivered and in order to handle command timeout
 */
static void
rmc_comm_cyclic(void *arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
}

/*
 * Serial protocol
 *
 * This routine transmits a pre-built packet by writing it to the Tx FIFO.
 */
void
rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
{
	uint8_t *p;
	uint8_t status;

	/*
	 * Check and update the SIO h/w fault status before accessing
	 * the chip registers.  If there's a (new or previous) fault,
	 * we'll run through the protocol but won't really touch the
	 * hardware and all commands will timeout.  If a previously
	 * discovered fault has now gone away (!), then we can (try to)
	 * proceed with the new command (probably a probe).
	 */
	sio_check_fault_status(rcs);

	/*
	 * Send the command now by stuffing the packet into the Tx FIFO.
	 */
	DATASCOPE(rcs, 'S', buf, buflen)

	mutex_enter(rcs->sd_state.hw_mutex);
	p = (uint8_t *)buf;
	while (p < (uint8_t *)&buf[buflen]) {

		/*
		 * before writing to the TX holding register, we make sure
		 * that it is empty. This way there is no chance of
		 * overflowing the serial device FIFO (but, on the other
		 * hand, it may introduce some latency)
		 */
		status = sio_get_reg(rcs, SIO_LSR);
		while ((status & SIO_LSR_XHRE) == 0) {
			drv_usecwait(100);
			status = sio_get_reg(rcs, SIO_LSR);
		}
		sio_put_reg(rcs, SIO_TXD, *p++);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * wait for the tx fifo to drain - used for urgent nowait requests
 */
void
rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
{
	uint8_t status;

	mutex_enter(rcs->sd_state.hw_mutex);
	status = sio_get_reg(rcs, SIO_LSR);
	while ((status & SIO_LSR_XHRE) == 0) {
		drv_usecwait(100);
		status = sio_get_reg(rcs, SIO_LSR);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * Hardware setup - put the SIO chip in the required operational
 * state,  with all our favourite parameters programmed correctly.
 * This routine leaves all SIO interrupts disabled.
 */

static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
	uint16_t divisor;

	/*
	 * Disable interrupts, soft reset Tx and Rx circuitry,
	 * reselect standard modes (bits/char, parity, etc).
	 */
	rmc_comm_set_irq(rcs, B_FALSE);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
	sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

	/*
	 * Select the proper baud rate; if the value is invalid
	 * (presumably 0, i.e. not specified, but also if the
	 * "baud" property is set to some silly value), we assume
	 * the default.
	 */
	if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
		divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
		    rcs->baud_divisor_factor;
	} else {
		divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
		    rcs->baud_divisor_factor;
	}

	/*
	 * According to the datasheet, it is forbidden for the divisor
	 * register to be zero.  So when loading the register in two
	 * steps, we have to make sure that the temporary value formed
	 * between loads is nonzero.  However, we can't rely on either
	 * half already having a nonzero value, as the datasheet also
	 * says that these registers are indeterminate after a reset!
	 * So, we explicitly set the low byte to a non-zero value first;
	 * then we can safely load the high byte, and then the correct
	 * value for the low byte, without the result ever being zero.
	 */
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
	sio_put_reg(rcs, SIO_LBGDL, 0xff);
	sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
	sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

	/*
	 * Program the remaining device registers as required
	 */
	sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}

/*
 * Higher-level setup & teardown
 */
static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
	if (rcs->sd_state.sio_handle != NULL)
		ddi_regs_map_free(&rcs->sd_state.sio_handle);
	rcs->sd_state.sio_handle = NULL;
	rcs->sd_state.sio_regs = NULL;
}

static int
rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	ddi_acc_handle_t h;
	caddr_t p;
	int nregs;
	int err;

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
		nregs = 0;
	switch (nregs) {
	default:
	case 1:
		/*
		 *  regset 0 represents the SIO operating registers
		 */
		err = ddi_regs_map_setup(dip, 0, &p, 0, 0,
		    rmc_comm_dev_acc_attr, &h);
		if (err != DDI_SUCCESS)
			return (EIO);
		rcs->sd_state.sio_handle = h;
		rcs->sd_state.sio_regs = (void *)p;
		break;
	case 0:
		/*
		 *  If no registers are defined, succeed vacuously;
		 *  commands will be accepted, but we fake the accesses.
		 */
		break;
	}

	/*
	 * Now that the registers are mapped, we can initialise the SIO h/w
	 */
	rmc_comm_hw_reset(rcs);
	return (0);
}


/*
 * Initialization of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the attach routine.
 */

int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	int err = DDI_SUCCESS;

	rcs->sd_state.cycid = NULL;

	/*
	 *  Online the hardware ...
	 */
	err = rmc_comm_online(rcs, dip);
	if (err != 0)
		return (-1);

	/*
	 * call ddi_get_soft_iblock_cookie() to retrieve the
	 * interrupt block cookie so that the mutexes are initialized
	 * before adding the interrupt (to avoid a potential race condition).
	 */

	err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &rcs->dp_state.dp_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	/*
	 * initialize mutex here before adding hw/sw interrupt handlers
	 */
	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
	    rcs->dp_state.dp_iblk);

	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
	    rcs->sd_state.hw_iblk);

	/*
	 * Install soft and hard interrupt handler(s)
	 *
	 * the soft intr. handler will need the data protocol lock (dp_mutex)
	 * So, data protocol mutex and iblock cookie are created/initialized
	 * here
	 */

	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
	if (err != DDI_SUCCESS) {
		mutex_destroy(rcs->dp_state.dp_mutex);
		mutex_destroy(rcs->sd_state.hw_mutex);
		return (-1);
	}

	/*
	 * hardware interrupt
	 */

	if (rcs->sd_state.sio_handle != NULL) {
		err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
		    rmc_comm_hi_intr, (caddr_t)rcs);

		/*
		 * did we successfully install the h/w interrupt handler?
		 */
		if (err != DDI_SUCCESS) {
			ddi_remove_softintr(rcs->sd_state.softid);
			mutex_destroy(rcs->dp_state.dp_mutex);
			mutex_destroy(rcs->sd_state.hw_mutex);
			return (-1);
		}
	}

	/*
	 * Start periodic callbacks
	 */
	rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
	    5 * RMC_COMM_ONE_SEC, DDI_IPL_1);
	return (0);
}

/*
 * Termination of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the detach routine.
 */

void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	rmc_comm_hw_reset(rcs);

	if (rcs->sd_state.cycid != NULL) {
		ddi_periodic_delete(rcs->sd_state.cycid);
		rcs->sd_state.cycid = NULL;

		if (rcs->sd_state.sio_handle != NULL)
			ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

		ddi_remove_softintr(rcs->sd_state.softid);

		mutex_destroy(rcs->sd_state.hw_mutex);

		mutex_destroy(rcs->dp_state.dp_mutex);
	}
	rmc_comm_offline(rcs);
}

/*
 * device driver entry routines (init/fini, attach/detach, ...)
 */

/*
 *  Clean up on detach or failure of attach
 */
static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init)
{
	if (rcs != NULL) {
		/*
		 * disable interrupts now
		 */
		rmc_comm_set_irq(rcs, B_FALSE);

		/*
		 * driver interface termination (if it has been initialized)
		 */
		if (drvi_init)
			rmc_comm_drvintf_fini(rcs);

		/*
		 * data protocol termination (if it has been initialized)
		 */
		if (dp_init)
			rmc_comm_dp_fini(rcs);

		/*
		 * serial device termination (if it has been initialized)
		 */
		if (sd_init)
			rmc_comm_serdev_fini(rcs, dip);

		ddi_set_driver_private(dip, NULL);
	}
	ddi_soft_state_free(rmc_comm_statep, instance);
}

/*
 *  Autoconfiguration routines
 */

static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

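		/*
		 * Re-arm the hardware watchdog if it was active at the
		 * time of the preceding suspend.
		 */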
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

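		/*
		 * Reset the data protocol (back to the initial sequence
		 * number) now that the link to the RMC is up again.
		 */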
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

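		/*
		 * If a valid CPU signature has been recorded, post it
		 * again after the resume.
		 */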
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 *  Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

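	/*
	 * Leave the dip unset for now; it is recorded only once setup has
	 * completed, so lookups via rmc_comm_getstate() with a NULL dip
	 * will not find this instance until then.
	 */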
	rcs->dip = NULL;

	/*
	 *  Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 *  Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 *  All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct rmc_comm_state *rcs;
	int instance;

	instance = ddi_get_instance(dip);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
		return (DDI_FAILURE);	/* this "can't happen" */

	switch (cmd) {
	case DDI_SUSPEND:
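		/*
		 * Remember whether the hardware watchdog was running and,
		 * if so, stop it for the duration of the suspend.
		 */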
		mutex_enter(&tod_lock);
		if (watchdog_enable && watchdog_activated &&
		    tod_ops.tod_clear_watchdog_timer != NULL) {
			watchdog_was_active = 1;
			(void) tod_ops.tod_clear_watchdog_timer();
		} else {
			watchdog_was_active = 0;
		}
		mutex_exit(&tod_lock);

		rcs->dip = NULL;
		rmc_comm_hw_reset(rcs);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * reject detach if any client(s) still registered
		 */
		mutex_enter(&rmc_comm_attach_lock);
		if (rcs->n_registrations != 0) {
			mutex_exit(&rmc_comm_attach_lock);
			return (DDI_FAILURE);
		}
		/*
		 * Committed to complete the detach;
		 * mark as no longer attached, to prevent new clients
		 * registering (as part of a coincident attach)
		 */
		rcs->is_attached = B_FALSE;
		mutex_exit(&rmc_comm_attach_lock);
		rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

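/*
 * devo_reset() entry point: reprogram the SIO to its standard settings
 * and leave its interrupts disabled.
 */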
/*ARGSUSED*/
static int
rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct rmc_comm_state *rcs;

	if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
		return (DDI_FAILURE);
	rmc_comm_hw_reset(rcs);
	return (DDI_SUCCESS);
}

/*
 * System interface structures
 */
static struct dev_ops rmc_comm_dev_ops =
{
	DEVO_REV,
	0,				/* refcount		*/
	nodev,				/* getinfo		*/
	nulldev,			/* identify		*/
	nulldev,			/* probe		*/
	rmc_comm_attach,		/* attach		*/
	rmc_comm_detach,		/* detach		*/
	rmc_comm_reset,			/* reset		*/
	(struct cb_ops *)NULL,		/* driver operations	*/
	(struct bus_ops *)NULL,		/* bus operations	*/
	nulldev,			/* power()		*/
	ddi_quiesce_not_supported,	/* devo_quiesce		*/
};

static struct modldrv modldrv =
{
	&mod_driverops,
	"rmc_comm driver",
	&rmc_comm_dev_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	{
		&modldrv,
		NULL
	}
};

/*
 *  Dynamic loader interface code
 */
int
_init(void)
{
	int err;

	mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
	err = ddi_soft_state_init(&rmc_comm_statep,
	    sizeof (struct rmc_comm_state), 0);
	if (err == DDI_SUCCESS)
		if ((err = mod_install(&modlinkage)) != 0) {
			ddi_soft_state_fini(&rmc_comm_statep);
		}
	if (err != DDI_SUCCESS)
		mutex_destroy(&rmc_comm_attach_lock);
	return (err);
}

int
_info(struct modinfo *mip)
{
	return (mod_info(&modlinkage, mip));
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&rmc_comm_statep);
		rmc_comm_major = NOMAJOR;
		mutex_destroy(&rmc_comm_attach_lock);
	}
	return (err);
}