Lines Matching refs:vdc

129 static int	vdc_send(vdc_t *vdc, caddr_t pkt, size_t *msglen);
130 static int vdc_do_ldc_init(vdc_t *vdc, vdc_server_t *srvr);
131 static int vdc_start_ldc_connection(vdc_t *vdc);
132 static int vdc_create_device_nodes(vdc_t *vdc);
133 static int vdc_create_device_nodes_efi(vdc_t *vdc);
134 static int vdc_create_device_nodes_vtoc(vdc_t *vdc);
135 static void vdc_create_io_kstats(vdc_t *vdc);
136 static void vdc_create_err_kstats(vdc_t *vdc);
137 static void vdc_set_err_kstats(vdc_t *vdc);
140 static int vdc_init_ports(vdc_t *vdc, md_t *mdp, mde_cookie_t vd_nodep);
141 static void vdc_fini_ports(vdc_t *vdc);
143 static int vdc_do_ldc_up(vdc_t *vdc);
144 static void vdc_terminate_ldc(vdc_t *vdc, vdc_server_t *srvr);
145 static int vdc_init_descriptor_ring(vdc_t *vdc);
146 static void vdc_destroy_descriptor_ring(vdc_t *vdc);
147 static int vdc_setup_devid(vdc_t *vdc);
151 static void vdc_store_label_unk(vdc_t *vdc);
152 static boolean_t vdc_is_opened(vdc_t *vdc);
153 static void vdc_update_size(vdc_t *vdc, size_t, size_t, size_t);
154 static int vdc_update_vio_bsize(vdc_t *vdc, uint32_t);
157 static int vdc_init_ver_negotiation(vdc_t *vdc, vio_ver_t ver);
159 static int vdc_init_attr_negotiation(vdc_t *vdc);
161 static int vdc_init_dring_negotiate(vdc_t *vdc);
168 static void vdc_process_msg_thread(vdc_t *vdc);
169 static int vdc_recv(vdc_t *vdc, vio_msg_t *msgp, size_t *nbytesp);
172 static int vdc_process_data_msg(vdc_t *vdc, vio_msg_t *msg);
173 static int vdc_handle_ver_msg(vdc_t *vdc, vio_ver_msg_t *ver_msg);
174 static int vdc_handle_attr_msg(vdc_t *vdc, vd_attr_msg_t *attr_msg);
175 static int vdc_handle_dring_reg_msg(vdc_t *vdc, vio_dring_reg_msg_t *msg);
186 static int vdc_do_op(vdc_t *vdc, int op, caddr_t addr, size_t nbytes,
192 static int vdc_depopulate_descriptor(vdc_t *vdc, uint_t idx);
194 static int vdc_verify_seq_num(vdc_t *vdc, vio_dring_msg_t *dring_msg);
200 static void vdc_create_fake_geometry(vdc_t *vdc);
201 static int vdc_validate_geometry(vdc_t *vdc);
202 static void vdc_validate(vdc_t *vdc);
204 static int vdc_null_copy_func(vdc_t *vdc, void *from, void *to,
206 static int vdc_get_wce_convert(vdc_t *vdc, void *from, void *to,
208 static int vdc_set_wce_convert(vdc_t *vdc, void *from, void *to,
210 static int vdc_get_vtoc_convert(vdc_t *vdc, void *from, void *to,
212 static int vdc_set_vtoc_convert(vdc_t *vdc, void *from, void *to,
214 static int vdc_get_extvtoc_convert(vdc_t *vdc, void *from, void *to,
216 static int vdc_set_extvtoc_convert(vdc_t *vdc, void *from, void *to,
218 static int vdc_get_geom_convert(vdc_t *vdc, void *from, void *to,
220 static int vdc_set_geom_convert(vdc_t *vdc, void *from, void *to,
222 static int vdc_get_efi_convert(vdc_t *vdc, void *from, void *to,
224 static int vdc_set_efi_convert(vdc_t *vdc, void *from, void *to,
227 static void vdc_ownership_update(vdc_t *vdc, int ownership_flags);
228 static int vdc_access_set(vdc_t *vdc, uint64_t flags);
229 static vdc_io_t *vdc_eio_queue(vdc_t *vdc, int index);
230 static void vdc_eio_unqueue(vdc_t *vdc, clock_t deadline,
232 static int vdc_eio_check(vdc_t *vdc, int flags);
420 vdc_t *vdc = NULL; in vdc_getinfo() local
424 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_getinfo()
428 *resultp = vdc->dip; in vdc_getinfo()
446 vdc_t *vdc = NULL; in vdc_detach() local
463 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_detach()
468 if (vdc_is_opened(vdc)) { in vdc_detach()
469 DMSG(vdc, 0, "[%d] Cannot detach: device is open", instance); in vdc_detach()
473 if (vdc->dkio_flush_pending) { in vdc_detach()
474 DMSG(vdc, 0, in vdc_detach()
476 instance, vdc->dkio_flush_pending); in vdc_detach()
480 if (vdc->validate_pending) { in vdc_detach()
481 DMSG(vdc, 0, in vdc_detach()
483 instance, vdc->validate_pending); in vdc_detach()
487 DMSG(vdc, 0, "[%d] proceeding...\n", instance); in vdc_detach()
490 mutex_enter(&vdc->ownership_lock); in vdc_detach()
491 if (vdc->ownership & VDC_OWNERSHIP_GRANTED) { in vdc_detach()
492 rv = vdc_access_set(vdc, VD_ACCESS_SET_CLEAR); in vdc_detach()
494 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE); in vdc_detach()
497 mutex_exit(&vdc->ownership_lock); in vdc_detach()
500 mutex_enter(&vdc->lock); in vdc_detach()
501 vdc->lifecycle = VDC_LC_DETACHING; in vdc_detach()
502 mutex_exit(&vdc->lock); in vdc_detach()
508 for (srvr = vdc->server_list; srvr != NULL; srvr = srvr->next) { in vdc_detach()
510 DMSG(vdc, 0, "callback disabled (ldc=%lu, rv=%d)\n", in vdc_detach()
514 if (vdc->initialized & VDC_THREAD) { in vdc_detach()
515 mutex_enter(&vdc->read_lock); in vdc_detach()
516 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_detach()
517 (vdc->read_state == VDC_READ_RESET)) { in vdc_detach()
518 vdc->read_state = VDC_READ_RESET; in vdc_detach()
519 cv_signal(&vdc->read_cv); in vdc_detach()
522 mutex_exit(&vdc->read_lock); in vdc_detach()
525 mutex_enter(&vdc->lock); in vdc_detach()
526 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_detach()
527 DMSG(vdc, 0, in vdc_detach()
530 vdc->state = VDC_STATE_RESETTING; in vdc_detach()
531 cv_signal(&vdc->initwait_cv); in vdc_detach()
532 } else if (vdc->state == VDC_STATE_FAILED) { in vdc_detach()
533 vdc->io_pending = B_TRUE; in vdc_detach()
534 cv_signal(&vdc->io_pending_cv); in vdc_detach()
536 mutex_exit(&vdc->lock); in vdc_detach()
539 thread_join(vdc->msg_proc_thr->t_did); in vdc_detach()
540 ASSERT(vdc->state == VDC_STATE_DETACH); in vdc_detach()
541 DMSG(vdc, 0, "[%d] Reset thread exit and join ..\n", in vdc_detach()
542 vdc->instance); in vdc_detach()
545 mutex_enter(&vdc->lock); in vdc_detach()
547 if (vdc->initialized & VDC_DRING) in vdc_detach()
548 vdc_destroy_descriptor_ring(vdc); in vdc_detach()
550 vdc_fini_ports(vdc); in vdc_detach()
552 if (vdc->eio_thread) { in vdc_detach()
553 eio_tid = vdc->eio_thread->t_did; in vdc_detach()
554 vdc->failfast_interval = 0; in vdc_detach()
555 ASSERT(vdc->num_servers == 0); in vdc_detach()
556 cv_signal(&vdc->eio_cv); in vdc_detach()
561 if (vdc->ownership & VDC_OWNERSHIP_WANTED) { in vdc_detach()
562 ownership_tid = vdc->ownership_thread->t_did; in vdc_detach()
563 vdc->ownership = VDC_OWNERSHIP_NONE; in vdc_detach()
564 cv_signal(&vdc->ownership_cv); in vdc_detach()
569 mutex_exit(&vdc->lock); in vdc_detach()
577 if (vdc->initialized & VDC_MINOR) in vdc_detach()
580 if (vdc->io_stats) { in vdc_detach()
581 kstat_delete(vdc->io_stats); in vdc_detach()
582 vdc->io_stats = NULL; in vdc_detach()
585 if (vdc->err_stats) { in vdc_detach()
586 kstat_delete(vdc->err_stats); in vdc_detach()
587 vdc->err_stats = NULL; in vdc_detach()
590 if (vdc->initialized & VDC_LOCKS) { in vdc_detach()
591 mutex_destroy(&vdc->lock); in vdc_detach()
592 mutex_destroy(&vdc->read_lock); in vdc_detach()
593 mutex_destroy(&vdc->ownership_lock); in vdc_detach()
594 cv_destroy(&vdc->initwait_cv); in vdc_detach()
595 cv_destroy(&vdc->dring_free_cv); in vdc_detach()
596 cv_destroy(&vdc->membind_cv); in vdc_detach()
597 cv_destroy(&vdc->sync_blocked_cv); in vdc_detach()
598 cv_destroy(&vdc->read_cv); in vdc_detach()
599 cv_destroy(&vdc->running_cv); in vdc_detach()
600 cv_destroy(&vdc->io_pending_cv); in vdc_detach()
601 cv_destroy(&vdc->ownership_cv); in vdc_detach()
602 cv_destroy(&vdc->eio_cv); in vdc_detach()
605 if (vdc->minfo) in vdc_detach()
606 kmem_free(vdc->minfo, sizeof (struct dk_minfo)); in vdc_detach()
608 if (vdc->cinfo) in vdc_detach()
609 kmem_free(vdc->cinfo, sizeof (struct dk_cinfo)); in vdc_detach()
611 if (vdc->vtoc) in vdc_detach()
612 kmem_free(vdc->vtoc, sizeof (struct extvtoc)); in vdc_detach()
614 if (vdc->geom) in vdc_detach()
615 kmem_free(vdc->geom, sizeof (struct dk_geom)); in vdc_detach()
617 if (vdc->devid) { in vdc_detach()
619 ddi_devid_free(vdc->devid); in vdc_detach()
622 if (vdc->initialized & VDC_SOFT_STATE) in vdc_detach()
625 DMSG(vdc, 0, "[%d] End %p\n", instance, (void *)vdc); in vdc_detach()
635 vdc_t *vdc = NULL; in vdc_do_attach() local
649 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_do_attach()
658 vdc->initialized = VDC_SOFT_STATE; in vdc_do_attach()
663 vdc->dip = dip; in vdc_do_attach()
664 vdc->instance = instance; in vdc_do_attach()
665 vdc->vdisk_type = VD_DISK_TYPE_UNK; in vdc_do_attach()
666 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_do_attach()
667 vdc->state = VDC_STATE_INIT; in vdc_do_attach()
668 vdc->lifecycle = VDC_LC_ATTACHING; in vdc_do_attach()
669 vdc->session_id = 0; in vdc_do_attach()
670 vdc->vdisk_bsize = DEV_BSIZE; in vdc_do_attach()
671 vdc->vio_bmask = 0; in vdc_do_attach()
672 vdc->vio_bshift = 0; in vdc_do_attach()
673 vdc->max_xfer_sz = maxphys / vdc->vdisk_bsize; in vdc_do_attach()
683 vdc->operations = VD_OP_MASK_READ; in vdc_do_attach()
685 vdc->vtoc = NULL; in vdc_do_attach()
686 vdc->geom = NULL; in vdc_do_attach()
687 vdc->cinfo = NULL; in vdc_do_attach()
688 vdc->minfo = NULL; in vdc_do_attach()
690 mutex_init(&vdc->lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
691 cv_init(&vdc->initwait_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
692 cv_init(&vdc->dring_free_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
693 cv_init(&vdc->membind_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
694 cv_init(&vdc->running_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
695 cv_init(&vdc->io_pending_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
697 vdc->io_pending = B_FALSE; in vdc_do_attach()
698 vdc->threads_pending = 0; in vdc_do_attach()
699 vdc->sync_op_blocked = B_FALSE; in vdc_do_attach()
700 cv_init(&vdc->sync_blocked_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
702 mutex_init(&vdc->ownership_lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
703 cv_init(&vdc->ownership_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
704 cv_init(&vdc->eio_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
707 mutex_init(&vdc->read_lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
708 cv_init(&vdc->read_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
709 vdc->read_state = VDC_READ_IDLE; in vdc_do_attach()
711 vdc->initialized |= VDC_LOCKS; in vdc_do_attach()
720 if (vdc_init_ports(vdc, mdp, vd_node) != 0) { in vdc_do_attach()
728 vdc_create_io_kstats(vdc); in vdc_do_attach()
729 vdc_create_err_kstats(vdc); in vdc_do_attach()
732 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_do_attach()
733 vdc->vtoc = kmem_zalloc(sizeof (struct extvtoc), KM_SLEEP); in vdc_do_attach()
734 vdc->geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); in vdc_do_attach()
735 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP); in vdc_do_attach()
738 vdc->msg_proc_thr = thread_create(NULL, 0, vdc_process_msg_thread, in vdc_do_attach()
739 vdc, 0, &p0, TS_RUN, minclsyspri); in vdc_do_attach()
740 if (vdc->msg_proc_thr == NULL) { in vdc_do_attach()
749 if (vdc->num_servers > 1) { in vdc_do_attach()
750 vdc->eio_thread = thread_create(NULL, 0, vdc_eio_thread, vdc, 0, in vdc_do_attach()
752 if (vdc->eio_thread == NULL) { in vdc_do_attach()
759 vdc->initialized |= VDC_THREAD; in vdc_do_attach()
770 mutex_enter(&vdc->lock); in vdc_do_attach()
771 (void) vdc_validate_geometry(vdc); in vdc_do_attach()
772 mutex_exit(&vdc->lock); in vdc_do_attach()
777 status = vdc_create_device_nodes(vdc); in vdc_do_attach()
779 DMSG(vdc, 0, "[%d] Failed to create device nodes", in vdc_do_attach()
788 vdc_set_err_kstats(vdc); in vdc_do_attach()
790 ASSERT(vdc->lifecycle == VDC_LC_ONLINE || in vdc_do_attach()
791 vdc->lifecycle == VDC_LC_ONLINE_PENDING); in vdc_do_attach()
792 DMSG(vdc, 0, "[%d] Attach tasks successful\n", instance); in vdc_do_attach()
795 DMSG(vdc, 0, "[%d] Attach completed\n", instance); in vdc_do_attach()
818 vdc_do_ldc_init(vdc_t *vdc, vdc_server_t *srvr) in vdc_do_ldc_init() argument
824 ASSERT(vdc != NULL); in vdc_do_ldc_init()
828 ldc_attr.instance = vdc->instance; in vdc_do_ldc_init()
836 DMSG(vdc, 0, "[%d] ldc_init(chan %ld) returned %d", in vdc_do_ldc_init()
837 vdc->instance, srvr->ldc_id, status); in vdc_do_ldc_init()
844 DMSG(vdc, 0, "[%d] Cannot discover LDC status [err=%d]", in vdc_do_ldc_init()
845 vdc->instance, status); in vdc_do_ldc_init()
854 DMSG(vdc, 0, "[%d] LDC callback reg. failed (%d)", in vdc_do_ldc_init()
855 vdc->instance, status); in vdc_do_ldc_init()
868 DMSG(vdc, 0, "[%d] ldc_open(chan %ld) returned %d", in vdc_do_ldc_init()
869 vdc->instance, srvr->ldc_id, status); in vdc_do_ldc_init()
877 vdc_terminate_ldc(vdc, srvr); in vdc_do_ldc_init()
884 vdc_start_ldc_connection(vdc_t *vdc) in vdc_start_ldc_connection() argument
888 ASSERT(vdc != NULL); in vdc_start_ldc_connection()
890 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_start_ldc_connection()
892 status = vdc_do_ldc_up(vdc); in vdc_start_ldc_connection()
894 DMSG(vdc, 0, "[%d] Finished bringing up LDC\n", vdc->instance); in vdc_start_ldc_connection()
921 vdc_create_io_kstats(vdc_t *vdc) in vdc_create_io_kstats() argument
923 if (vdc->io_stats != NULL) { in vdc_create_io_kstats()
924 DMSG(vdc, 0, "[%d] I/O kstat already exists\n", vdc->instance); in vdc_create_io_kstats()
928 vdc->io_stats = kstat_create(VDC_DRIVER_NAME, vdc->instance, NULL, in vdc_create_io_kstats()
930 if (vdc->io_stats != NULL) { in vdc_create_io_kstats()
931 vdc->io_stats->ks_lock = &vdc->lock; in vdc_create_io_kstats()
932 kstat_install(vdc->io_stats); in vdc_create_io_kstats()
935 " will not be gathered", vdc->instance); in vdc_create_io_kstats()
940 vdc_create_err_kstats(vdc_t *vdc) in vdc_create_err_kstats() argument
946 int instance = vdc->instance; in vdc_create_err_kstats()
948 if (vdc->err_stats != NULL) { in vdc_create_err_kstats()
949 DMSG(vdc, 0, "[%d] ERR kstat already exists\n", vdc->instance); in vdc_create_err_kstats()
958 vdc->err_stats = kstat_create(kstatmodule_err, instance, kstatname, in vdc_create_err_kstats()
961 if (vdc->err_stats == NULL) { in vdc_create_err_kstats()
967 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_create_err_kstats()
981 vdc->err_stats->ks_update = nulldev; in vdc_create_err_kstats()
983 kstat_install(vdc->err_stats); in vdc_create_err_kstats()
987 vdc_set_err_kstats(vdc_t *vdc) in vdc_set_err_kstats() argument
991 if (vdc->err_stats == NULL) in vdc_set_err_kstats()
994 mutex_enter(&vdc->lock); in vdc_set_err_kstats()
996 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_set_err_kstats()
999 stp->vd_capacity.value.ui64 = vdc->vdisk_size * vdc->vdisk_bsize; in vdc_set_err_kstats()
1003 mutex_exit(&vdc->lock); in vdc_set_err_kstats()
1007 vdc_create_device_nodes_efi(vdc_t *vdc) in vdc_create_device_nodes_efi() argument
1009 ddi_remove_minor_node(vdc->dip, "h"); in vdc_create_device_nodes_efi()
1010 ddi_remove_minor_node(vdc->dip, "h,raw"); in vdc_create_device_nodes_efi()
1012 if (ddi_create_minor_node(vdc->dip, "wd", S_IFBLK, in vdc_create_device_nodes_efi()
1013 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_efi()
1016 vdc->instance); in vdc_create_device_nodes_efi()
1021 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes_efi()
1023 if (ddi_create_minor_node(vdc->dip, "wd,raw", S_IFCHR, in vdc_create_device_nodes_efi()
1024 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_efi()
1027 vdc->instance); in vdc_create_device_nodes_efi()
1035 vdc_create_device_nodes_vtoc(vdc_t *vdc) in vdc_create_device_nodes_vtoc() argument
1037 ddi_remove_minor_node(vdc->dip, "wd"); in vdc_create_device_nodes_vtoc()
1038 ddi_remove_minor_node(vdc->dip, "wd,raw"); in vdc_create_device_nodes_vtoc()
1040 if (ddi_create_minor_node(vdc->dip, "h", S_IFBLK, in vdc_create_device_nodes_vtoc()
1041 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_vtoc()
1044 vdc->instance); in vdc_create_device_nodes_vtoc()
1049 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes_vtoc()
1051 if (ddi_create_minor_node(vdc->dip, "h,raw", S_IFCHR, in vdc_create_device_nodes_vtoc()
1052 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_vtoc()
1055 vdc->instance); in vdc_create_device_nodes_vtoc()
1084 vdc_create_device_nodes(vdc_t *vdc) in vdc_create_device_nodes() argument
1092 ASSERT(vdc != NULL); in vdc_create_device_nodes()
1094 instance = vdc->instance; in vdc_create_device_nodes()
1095 dip = vdc->dip; in vdc_create_device_nodes()
1097 switch (vdc->vdisk_type) { in vdc_create_device_nodes()
1118 if (vdc->vdisk_label == VD_DISK_LABEL_EFI) in vdc_create_device_nodes()
1119 status = vdc_create_device_nodes_efi(vdc); in vdc_create_device_nodes()
1121 status = vdc_create_device_nodes_vtoc(vdc); in vdc_create_device_nodes()
1136 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes()
1160 vdc_t *vdc; in vdc_prop_op() local
1164 vdc = ddi_get_soft_state(vdc_state, instance); in vdc_prop_op()
1166 if (dev == DDI_DEV_T_ANY || vdc == NULL) { in vdc_prop_op()
1171 mutex_enter(&vdc->lock); in vdc_prop_op()
1172 (void) vdc_validate_geometry(vdc); in vdc_prop_op()
1173 if (vdc->vdisk_label == VD_DISK_LABEL_UNK) { in vdc_prop_op()
1174 mutex_exit(&vdc->lock); in vdc_prop_op()
1178 nblocks = vdc->slice[VDCPART(dev)].nblocks; in vdc_prop_op()
1179 blksize = vdc->vdisk_bsize; in vdc_prop_op()
1180 mutex_exit(&vdc->lock); in vdc_prop_op()
1202 vdc_is_opened(vdc_t *vdc) in vdc_is_opened() argument
1208 if (vdc->open_lyr[i] > 0) in vdc_is_opened()
1214 if (vdc->open[i] != 0) in vdc_is_opened()
1222 vdc_mark_opened(vdc_t *vdc, int slice, int flag, int otyp) in vdc_mark_opened() argument
1229 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_mark_opened()
1239 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0) in vdc_mark_opened()
1243 if (vdc->open_excl & slicemask) in vdc_mark_opened()
1248 if (vdc->open_lyr[slice] > 0) in vdc_mark_opened()
1251 if (vdc->open[i] & slicemask) in vdc_mark_opened()
1254 vdc->open_excl |= slicemask; in vdc_mark_opened()
1259 vdc->open_lyr[slice]++; in vdc_mark_opened()
1261 vdc->open[otyp] |= slicemask; in vdc_mark_opened()
1268 vdc_mark_closed(vdc_t *vdc, int slice, int flag, int otyp) in vdc_mark_closed() argument
1274 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_mark_closed()
1279 ASSERT(vdc->open_lyr[slice] > 0); in vdc_mark_closed()
1280 vdc->open_lyr[slice]--; in vdc_mark_closed()
1282 vdc->open[otyp] &= ~slicemask; in vdc_mark_closed()
1286 vdc->open_excl &= ~slicemask; in vdc_mark_closed()
1296 vdc_t *vdc; in vdc_open() local
1304 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_open()
1309 DMSG(vdc, 0, "minor = %d flag = %x, otyp = %x\n", in vdc_open()
1317 !(VD_OP_SUPPORTED(vdc->operations, VD_OP_BWRITE))) { in vdc_open()
1321 mutex_enter(&vdc->lock); in vdc_open()
1323 status = vdc_mark_opened(vdc, slice, flag, otyp); in vdc_open()
1326 mutex_exit(&vdc->lock); in vdc_open()
1335 if (vdc->vdisk_type != VD_DISK_TYPE_UNK && nodelay) { in vdc_open()
1338 if (vdc->validate_pending > 0) { in vdc_open()
1339 mutex_exit(&vdc->lock); in vdc_open()
1345 (void *)vdc, TQ_NOSLEEP) == TASKQID_INVALID) { in vdc_open()
1346 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_open()
1347 mutex_exit(&vdc->lock); in vdc_open()
1351 vdc->validate_pending++; in vdc_open()
1352 mutex_exit(&vdc->lock); in vdc_open()
1356 mutex_exit(&vdc->lock); in vdc_open()
1358 vdc_validate(vdc); in vdc_open()
1360 mutex_enter(&vdc->lock); in vdc_open()
1362 if (vdc->vdisk_type == VD_DISK_TYPE_UNK || in vdc_open()
1363 (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0) || in vdc_open()
1364 (!nodelay && (vdc->vdisk_label == VD_DISK_LABEL_UNK || in vdc_open()
1365 vdc->slice[slice].nblocks == 0))) { in vdc_open()
1366 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_open()
1370 mutex_exit(&vdc->lock); in vdc_open()
1383 vdc_t *vdc; in vdc_close() local
1390 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_close()
1395 DMSG(vdc, 0, "[%d] flag = %x, otyp = %x\n", instance, flag, otyp); in vdc_close()
1407 DMSG(vdc, 0, "[%d] flush failed with error %d on close\n", in vdc_close()
1412 mutex_enter(&vdc->lock); in vdc_close()
1413 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_close()
1414 mutex_exit(&vdc->lock); in vdc_close()
1440 vdc_t *vdc = NULL; in vdc_dump() local
1443 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_dump()
1448 DMSG(vdc, 2, "[%d] dump %ld bytes at block 0x%lx : addr=0x%p\n", in vdc_dump()
1452 if ((blkno & vdc->vio_bmask) != 0) { in vdc_dump()
1453 DMSG(vdc, 0, "Misaligned block number (%lu)\n", blkno); in vdc_dump()
1456 vio_blkno = blkno >> vdc->vio_bshift; in vdc_dump()
1464 rv = vdc_do_op(vdc, VD_OP_BWRITE, addr, nbytes, VDCPART(dev), in vdc_dump()
1468 DMSG(vdc, 0, "Failed to do a disk dump (err=%d)\n", rv); in vdc_dump()
1472 DMSG(vdc, 0, "[%d] End\n", instance); in vdc_dump()
1495 vdc_t *vdc = NULL; in vdc_strategy() local
1500 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) { in vdc_strategy()
1507 DMSG(vdc, 2, "[%d] %s %ld bytes at block %llx : b_addr=0x%p\n", in vdc_strategy()
1526 if ((buf->b_lblkno & vdc->vio_bmask) != 0) { in vdc_strategy()
1531 vio_blkno = buf->b_lblkno >> vdc->vio_bshift; in vdc_strategy()
1534 (void) vdc_do_op(vdc, op, (caddr_t)buf->b_un.b_addr, in vdc_strategy()
1557 vdc_t *vdc = NULL; in vdc_min() local
1560 vdc = ddi_get_soft_state(vdc_state, instance); in vdc_min()
1561 VERIFY(vdc != NULL); in vdc_min()
1563 if (bufp->b_bcount > (vdc->max_xfer_sz * vdc->vdisk_bsize)) { in vdc_min()
1564 bufp->b_bcount = vdc->max_xfer_sz * vdc->vdisk_bsize; in vdc_min()
1625 vdc_init_ver_negotiation(vdc_t *vdc, vio_ver_t ver) in vdc_init_ver_negotiation() argument
1631 ASSERT(vdc != NULL); in vdc_init_ver_negotiation()
1632 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_ver_negotiation()
1634 DMSG(vdc, 0, "[%d] Entered.\n", vdc->instance); in vdc_init_ver_negotiation()
1640 vdc->session_id = ((uint32_t)gettick() & 0xffffffff); in vdc_init_ver_negotiation()
1641 DMSG(vdc, 0, "[%d] Set SID to 0x%lx\n", vdc->instance, vdc->session_id); in vdc_init_ver_negotiation()
1646 pkt.tag.vio_sid = vdc->session_id; in vdc_init_ver_negotiation()
1651 status = vdc_send(vdc, (caddr_t)&pkt, &msglen); in vdc_init_ver_negotiation()
1652 DMSG(vdc, 0, "[%d] Ver info sent (status = %d)\n", in vdc_init_ver_negotiation()
1653 vdc->instance, status); in vdc_init_ver_negotiation()
1655 DMSG(vdc, 0, "[%d] Failed to send Ver negotiation info: " in vdc_init_ver_negotiation()
1656 "id(%lx) rv(%d) size(%ld)", vdc->instance, in vdc_init_ver_negotiation()
1657 vdc->curr_server->ldc_handle, status, msglen); in vdc_init_ver_negotiation()
1721 vdc_init_attr_negotiation(vdc_t *vdc) in vdc_init_attr_negotiation() argument
1727 ASSERT(vdc != NULL); in vdc_init_attr_negotiation()
1728 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_attr_negotiation()
1730 DMSG(vdc, 0, "[%d] entered\n", vdc->instance); in vdc_init_attr_negotiation()
1736 pkt.tag.vio_sid = vdc->session_id; in vdc_init_attr_negotiation()
1738 pkt.max_xfer_sz = vdc->max_xfer_sz; in vdc_init_attr_negotiation()
1739 pkt.vdisk_block_size = vdc->vdisk_bsize; in vdc_init_attr_negotiation()
1746 status = vdc_send(vdc, (caddr_t)&pkt, &msglen); in vdc_init_attr_negotiation()
1747 DMSG(vdc, 0, "Attr info sent (status = %d)\n", status); in vdc_init_attr_negotiation()
1750 DMSG(vdc, 0, "[%d] Failed to send Attr negotiation info: " in vdc_init_attr_negotiation()
1751 "id(%lx) rv(%d) size(%ld)", vdc->instance, in vdc_init_attr_negotiation()
1752 vdc->curr_server->ldc_handle, status, msglen); in vdc_init_attr_negotiation()
1817 vdc_init_dring_negotiate(vdc_t *vdc) in vdc_init_dring_negotiate() argument
1825 ASSERT(vdc != NULL); in vdc_init_dring_negotiate()
1826 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_dring_negotiate()
1829 status = vdc_init_descriptor_ring(vdc); in vdc_init_dring_negotiate()
1836 DMSG(vdc, 0, "[%d] Failed to init DRing (status = %d)\n", in vdc_init_dring_negotiate()
1837 vdc->instance, status); in vdc_init_dring_negotiate()
1841 DMSG(vdc, 0, "[%d] Init of descriptor ring completed (status = %d)\n", in vdc_init_dring_negotiate()
1842 vdc->instance, status); in vdc_init_dring_negotiate()
1848 pkt.tag.vio_sid = vdc->session_id; in vdc_init_dring_negotiate()
1851 pkt.num_descriptors = vdc->dring_len; in vdc_init_dring_negotiate()
1852 pkt.descriptor_size = vdc->dring_entry_size; in vdc_init_dring_negotiate()
1854 pkt.ncookies = vdc->dring_cookie_count; in vdc_init_dring_negotiate()
1855 pkt.cookie[0] = vdc->dring_cookie[0]; /* for now just one cookie */ in vdc_init_dring_negotiate()
1857 status = vdc_send(vdc, (caddr_t)&pkt, &msglen); in vdc_init_dring_negotiate()
1859 DMSG(vdc, 0, "[%d] Failed to register DRing (err = %d)", in vdc_init_dring_negotiate()
1860 vdc->instance, status); in vdc_init_dring_negotiate()
2025 vdc_recv(vdc_t *vdc, vio_msg_t *msgp, size_t *nbytesp) in vdc_recv() argument
2052 status = ldc_read(vdc->curr_server->ldc_handle, in vdc_recv()
2064 DMSG(vdc, 0, "ldc_read returned %d\n", status); in vdc_recv()
2073 mutex_enter(&vdc->read_lock); in vdc_recv()
2075 while (vdc->read_state != VDC_READ_PENDING) { in vdc_recv()
2078 if (vdc->read_state == VDC_READ_RESET) { in vdc_recv()
2079 mutex_exit(&vdc->read_lock); in vdc_recv()
2083 vdc->read_state = VDC_READ_WAITING; in vdc_recv()
2084 cv_wait(&vdc->read_cv, &vdc->read_lock); in vdc_recv()
2087 vdc->read_state = VDC_READ_IDLE; in vdc_recv()
2088 mutex_exit(&vdc->read_lock); in vdc_recv()
2164 vdc_send(vdc_t *vdc, caddr_t pkt, size_t *msglen) in vdc_send() argument
2170 ASSERT(vdc != NULL); in vdc_send()
2171 ASSERT(mutex_owned(&vdc->lock)); in vdc_send()
2176 vdc_decode_tag(vdc, (vio_msg_t *)(uintptr_t)pkt); in vdc_send()
2186 status = ldc_write(vdc->curr_server->ldc_handle, pkt, &size); in vdc_send()
2199 mutex_enter(&vdc->read_lock); in vdc_send()
2200 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_send()
2201 (vdc->read_state == VDC_READ_RESET)) in vdc_send()
2202 cv_signal(&vdc->read_cv); in vdc_send()
2203 vdc->read_state = VDC_READ_RESET; in vdc_send()
2204 mutex_exit(&vdc->read_lock); in vdc_send()
2207 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_send()
2208 DMSG(vdc, 0, "[%d] write reset - " in vdc_send()
2209 "vdc is resetting ..\n", vdc->instance); in vdc_send()
2210 vdc->state = VDC_STATE_RESETTING; in vdc_send()
2211 cv_signal(&vdc->initwait_cv); in vdc_send()
2365 vdc_init_ports(vdc_t *vdc, md_t *mdp, mde_cookie_t vd_nodep) in vdc_init_ports() argument
2404 vdc->num_servers = 0; in vdc_init_ports()
2410 srvr->vdcp = vdc; in vdc_init_ports()
2461 if (vdc_do_ldc_init(vdc, srvr) != 0) { in vdc_init_ports()
2470 vdc->server_list = srvr; in vdc_init_ports()
2475 vdc->num_servers++; in vdc_init_ports()
2479 if (vdc->server_list != NULL) { in vdc_init_ports()
2480 vdc->curr_server = vdc->server_list; in vdc_init_ports()
2509 vdc_do_ldc_up(vdc_t *vdc) in vdc_do_ldc_up() argument
2514 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_do_ldc_up()
2516 DMSG(vdc, 0, "[%d] Bringing up channel %lx\n", in vdc_do_ldc_up()
2517 vdc->instance, vdc->curr_server->ldc_id); in vdc_do_ldc_up()
2519 if (vdc->lifecycle == VDC_LC_DETACHING) in vdc_do_ldc_up()
2522 if ((status = ldc_up(vdc->curr_server->ldc_handle)) != 0) { in vdc_do_ldc_up()
2525 DMSG(vdc, 0, "[%d] ldc_up(%lx,...) return %d\n", in vdc_do_ldc_up()
2526 vdc->instance, vdc->curr_server->ldc_id, status); in vdc_do_ldc_up()
2530 DMSG(vdc, 0, "[%d] Failed to bring up LDC: " in vdc_do_ldc_up()
2531 "channel=%ld, err=%d", vdc->instance, in vdc_do_ldc_up()
2532 vdc->curr_server->ldc_id, status); in vdc_do_ldc_up()
2537 if (ldc_status(vdc->curr_server->ldc_handle, &ldc_state) == 0) { in vdc_do_ldc_up()
2538 vdc->curr_server->ldc_state = ldc_state; in vdc_do_ldc_up()
2540 DMSG(vdc, 0, "[%d] LDC channel already up\n", in vdc_do_ldc_up()
2541 vdc->instance); in vdc_do_ldc_up()
2542 vdc->seq_num = 1; in vdc_do_ldc_up()
2543 vdc->seq_num_reply = 0; in vdc_do_ldc_up()
2564 vdc_terminate_ldc(vdc_t *vdc, vdc_server_t *srvr) in vdc_terminate_ldc() argument
2566 int instance = ddi_get_instance(vdc->dip); in vdc_terminate_ldc()
2569 DMSG(vdc, 0, "[%d] ldc_close()\n", instance); in vdc_terminate_ldc()
2573 DMSG(vdc, 0, "[%d] ldc_unreg_callback()\n", instance); in vdc_terminate_ldc()
2577 DMSG(vdc, 0, "[%d] ldc_fini()\n", instance); in vdc_terminate_ldc()
2600 vdc_fini_ports(vdc_t *vdc) in vdc_fini_ports() argument
2602 int instance = ddi_get_instance(vdc->dip); in vdc_fini_ports()
2605 ASSERT(vdc != NULL); in vdc_fini_ports()
2606 ASSERT(mutex_owned(&vdc->lock)); in vdc_fini_ports()
2608 DMSG(vdc, 0, "[%d] initialized=%x\n", instance, vdc->initialized); in vdc_fini_ports()
2610 srvr = vdc->server_list; in vdc_fini_ports()
2614 vdc_terminate_ldc(vdc, srvr); in vdc_fini_ports()
2624 vdc->server_list = NULL; in vdc_fini_ports()
2625 vdc->num_servers = 0; in vdc_fini_ports()
2647 vdc_init_descriptor_ring(vdc_t *vdc) in vdc_init_descriptor_ring() argument
2653 DMSG(vdc, 0, "[%d] initialized=%x\n", vdc->instance, vdc->initialized); in vdc_init_descriptor_ring()
2655 ASSERT(vdc != NULL); in vdc_init_descriptor_ring()
2656 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_descriptor_ring()
2661 if ((vdc->initialized & VDC_DRING_INIT) == 0) { in vdc_init_descriptor_ring()
2662 DMSG(vdc, 0, "[%d] ldc_mem_dring_create\n", vdc->instance); in vdc_init_descriptor_ring()
2670 if ((vdc->max_xfer_sz * vdc->vdisk_bsize) < maxphys) { in vdc_init_descriptor_ring()
2671 DMSG(vdc, 0, "[%d] using minimum DRing size\n", in vdc_init_descriptor_ring()
2672 vdc->instance); in vdc_init_descriptor_ring()
2673 vdc->dring_max_cookies = maxphys / PAGESIZE; in vdc_init_descriptor_ring()
2675 vdc->dring_max_cookies = in vdc_init_descriptor_ring()
2676 (vdc->max_xfer_sz * vdc->vdisk_bsize) / PAGESIZE; in vdc_init_descriptor_ring()
2678 vdc->dring_entry_size = (sizeof (vd_dring_entry_t) + in vdc_init_descriptor_ring()
2680 (vdc->dring_max_cookies - 1))); in vdc_init_descriptor_ring()
2681 vdc->dring_len = VD_DRING_LEN; in vdc_init_descriptor_ring()
2683 status = ldc_mem_dring_create(vdc->dring_len, in vdc_init_descriptor_ring()
2684 vdc->dring_entry_size, &vdc->dring_hdl); in vdc_init_descriptor_ring()
2685 if ((vdc->dring_hdl == 0) || (status != 0)) { in vdc_init_descriptor_ring()
2686 DMSG(vdc, 0, "[%d] Descriptor ring creation failed", in vdc_init_descriptor_ring()
2687 vdc->instance); in vdc_init_descriptor_ring()
2690 vdc->initialized |= VDC_DRING_INIT; in vdc_init_descriptor_ring()
2693 if ((vdc->initialized & VDC_DRING_BOUND) == 0) { in vdc_init_descriptor_ring()
2694 DMSG(vdc, 0, "[%d] ldc_mem_dring_bind\n", vdc->instance); in vdc_init_descriptor_ring()
2695 vdc->dring_cookie = in vdc_init_descriptor_ring()
2698 status = ldc_mem_dring_bind(vdc->curr_server->ldc_handle, in vdc_init_descriptor_ring()
2699 vdc->dring_hdl, in vdc_init_descriptor_ring()
2701 &vdc->dring_cookie[0], in vdc_init_descriptor_ring()
2702 &vdc->dring_cookie_count); in vdc_init_descriptor_ring()
2704 DMSG(vdc, 0, "[%d] Failed to bind descriptor ring " in vdc_init_descriptor_ring()
2706 vdc->instance, vdc->dring_hdl, in vdc_init_descriptor_ring()
2707 vdc->curr_server->ldc_handle, status); in vdc_init_descriptor_ring()
2710 ASSERT(vdc->dring_cookie_count == 1); in vdc_init_descriptor_ring()
2711 vdc->initialized |= VDC_DRING_BOUND; in vdc_init_descriptor_ring()
2714 status = ldc_mem_dring_info(vdc->dring_hdl, &vdc->dring_mem_info); in vdc_init_descriptor_ring()
2716 DMSG(vdc, 0, in vdc_init_descriptor_ring()
2718 vdc->instance, vdc->dring_hdl); in vdc_init_descriptor_ring()
2722 if ((vdc->initialized & VDC_DRING_LOCAL) == 0) { in vdc_init_descriptor_ring()
2723 DMSG(vdc, 0, "[%d] local dring\n", vdc->instance); in vdc_init_descriptor_ring()
2726 vdc->local_dring = in vdc_init_descriptor_ring()
2727 kmem_zalloc(vdc->dring_len * sizeof (vdc_local_desc_t), in vdc_init_descriptor_ring()
2729 vdc->initialized |= VDC_DRING_LOCAL; in vdc_init_descriptor_ring()
2738 vdc->initialized |= VDC_DRING_ENTRY; in vdc_init_descriptor_ring()
2739 for (i = 0; i < vdc->dring_len; i++) { in vdc_init_descriptor_ring()
2740 dep = VDC_GET_DRING_ENTRY_PTR(vdc, i); in vdc_init_descriptor_ring()
2743 status = ldc_mem_alloc_handle(vdc->curr_server->ldc_handle, in vdc_init_descriptor_ring()
2744 &vdc->local_dring[i].desc_mhdl); in vdc_init_descriptor_ring()
2746 DMSG(vdc, 0, "![%d] Failed to alloc mem handle for" in vdc_init_descriptor_ring()
2747 " descriptor %d", vdc->instance, i); in vdc_init_descriptor_ring()
2750 vdc->local_dring[i].is_free = B_TRUE; in vdc_init_descriptor_ring()
2751 vdc->local_dring[i].dep = dep; in vdc_init_descriptor_ring()
2755 vdc->dring_curr_idx = VDC_DRING_FIRST_ENTRY; in vdc_init_descriptor_ring()
2773 vdc_destroy_descriptor_ring(vdc_t *vdc) in vdc_destroy_descriptor_ring() argument
2781 ASSERT(vdc != NULL); in vdc_destroy_descriptor_ring()
2782 ASSERT(mutex_owned(&vdc->lock)); in vdc_destroy_descriptor_ring()
2784 DMSG(vdc, 0, "[%d] Entered\n", vdc->instance); in vdc_destroy_descriptor_ring()
2786 if (vdc->initialized & VDC_DRING_ENTRY) { in vdc_destroy_descriptor_ring()
2787 DMSG(vdc, 0, in vdc_destroy_descriptor_ring()
2788 "[%d] Removing Local DRing entries\n", vdc->instance); in vdc_destroy_descriptor_ring()
2789 for (i = 0; i < vdc->dring_len; i++) { in vdc_destroy_descriptor_ring()
2790 ldep = &vdc->local_dring[i]; in vdc_destroy_descriptor_ring()
2797 DMSG(vdc, 0, in vdc_destroy_descriptor_ring()
2818 vdc->initialized &= ~VDC_DRING_ENTRY; in vdc_destroy_descriptor_ring()
2821 if (vdc->initialized & VDC_DRING_LOCAL) { in vdc_destroy_descriptor_ring()
2822 DMSG(vdc, 0, "[%d] Freeing Local DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2823 kmem_free(vdc->local_dring, in vdc_destroy_descriptor_ring()
2824 vdc->dring_len * sizeof (vdc_local_desc_t)); in vdc_destroy_descriptor_ring()
2825 vdc->initialized &= ~VDC_DRING_LOCAL; in vdc_destroy_descriptor_ring()
2828 if (vdc->initialized & VDC_DRING_BOUND) { in vdc_destroy_descriptor_ring()
2829 DMSG(vdc, 0, "[%d] Unbinding DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2830 status = ldc_mem_dring_unbind(vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2832 vdc->initialized &= ~VDC_DRING_BOUND; in vdc_destroy_descriptor_ring()
2834 DMSG(vdc, 0, "[%d] Error %d unbinding DRing %lx", in vdc_destroy_descriptor_ring()
2835 vdc->instance, status, vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2837 kmem_free(vdc->dring_cookie, sizeof (ldc_mem_cookie_t)); in vdc_destroy_descriptor_ring()
2840 if (vdc->initialized & VDC_DRING_INIT) { in vdc_destroy_descriptor_ring()
2841 DMSG(vdc, 0, "[%d] Destroying DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2842 status = ldc_mem_dring_destroy(vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2844 vdc->dring_hdl = 0; in vdc_destroy_descriptor_ring()
2845 bzero(&vdc->dring_mem_info, sizeof (ldc_mem_info_t)); in vdc_destroy_descriptor_ring()
2846 vdc->initialized &= ~VDC_DRING_INIT; in vdc_destroy_descriptor_ring()
2848 DMSG(vdc, 0, "[%d] Error %d destroying DRing (%lx)", in vdc_destroy_descriptor_ring()
2849 vdc->instance, status, vdc->dring_hdl); in vdc_destroy_descriptor_ring()
3237 vdc_do_op(vdc_t *vdc, int op, caddr_t addr, size_t nbytes, int slice, in vdc_do_op() argument
3256 rv = vdc_send_request(vdc, op, addr, nbytes, slice, offset, bufp, in vdc_do_op()
3271 rv = vdc_drain_response(vdc, bufp); in vdc_do_op()
3277 rv = vdc_wait_for_response(vdc, &vio_msg); in vdc_do_op()
3280 rv = vdc_process_data_msg(vdc, &vio_msg); in vdc_do_op()
3290 mutex_enter(&vdc->lock); in vdc_do_op()
3293 VD_KSTAT_RUNQ_BACK_TO_WAITQ(vdc); in vdc_do_op()
3295 VD_KSTAT_RUNQ_EXIT(vdc); in vdc_do_op()
3299 mutex_exit(&vdc->lock); in vdc_do_op()
3432 vdc_drain_response(vdc_t *vdc, struct buf *buf) in vdc_drain_response() argument
3441 mutex_enter(&vdc->lock); in vdc_drain_response()
3446 rv = ldc_read(vdc->curr_server->ldc_handle, (caddr_t)&dmsg, in vdc_drain_response()
3472 DMSG(vdc, 0, "discard pkt: type=%d sub=%d env=%d\n", in vdc_drain_response()
3496 if (idx >= vdc->dring_len) { in vdc_drain_response()
3497 DMSG(vdc, 0, "[%d] Bogus ack data : start %d\n", in vdc_drain_response()
3498 vdc->instance, idx); in vdc_drain_response()
3501 ldep = &vdc->local_dring[idx]; in vdc_drain_response()
3503 DMSG(vdc, 0, "[%d] Entry @ %d - state !DONE %d\n", in vdc_drain_response()
3504 vdc->instance, idx, ldep->dep->hdr.dstate); in vdc_drain_response()
3514 rv = vdc_depopulate_descriptor(vdc, idx); in vdc_drain_response()
3521 if ((idx + 1) % vdc->dring_len == vdc->dring_curr_idx) { in vdc_drain_response()
3533 mutex_exit(&vdc->lock); in vdc_drain_response()
3534 DMSG(vdc, 0, "End idx=%d\n", idx); in vdc_drain_response()
3554 vdc_depopulate_descriptor(vdc_t *vdc, uint_t idx) in vdc_depopulate_descriptor() argument
3561 ASSERT(vdc != NULL); in vdc_depopulate_descriptor()
3562 ASSERT(idx < vdc->dring_len); in vdc_depopulate_descriptor()
3563 ldep = &vdc->local_dring[idx]; in vdc_depopulate_descriptor()
3565 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_depopulate_descriptor()
3567 DTRACE_PROBE2(depopulate, int, vdc->instance, vdc_local_desc_t *, ldep); in vdc_depopulate_descriptor()
3568 DMSG(vdc, 2, ": idx = %d\n", idx); in vdc_depopulate_descriptor()
3575 VDC_MARK_DRING_ENTRY_FREE(vdc, idx); in vdc_depopulate_descriptor()
3579 DMSG(vdc, 2, ": is_free = %d : status = %d\n", ldep->is_free, status); in vdc_depopulate_descriptor()
3587 cv_signal(&vdc->dring_free_cv); in vdc_depopulate_descriptor()
3609 DMSG(vdc, 0, "?[%d] unbind mhdl 0x%lx @ idx %d failed (%d)", in vdc_depopulate_descriptor()
3610 vdc->instance, ldep->desc_mhdl, idx, rv); in vdc_depopulate_descriptor()
3620 cv_signal(&vdc->membind_cv); in vdc_depopulate_descriptor()
3621 cv_signal(&vdc->dring_free_cv); in vdc_depopulate_descriptor()
3760 vdc_t *vdc = srvr->vdcp; in vdc_handle_cb() local
3762 ASSERT(vdc != NULL); in vdc_handle_cb()
3764 DMSG(vdc, 1, "evt=%lx seqID=%ld\n", event, vdc->seq_num); in vdc_handle_cb()
3767 mutex_enter(&vdc->lock); in vdc_handle_cb()
3769 if (vdc->curr_server != srvr) { in vdc_handle_cb()
3770 DMSG(vdc, 0, "[%d] Ignoring event 0x%lx for port@%ld\n", in vdc_handle_cb()
3771 vdc->instance, event, srvr->id); in vdc_handle_cb()
3772 mutex_exit(&vdc->lock); in vdc_handle_cb()
3785 DMSG(vdc, 0, "[%d] Received LDC_EVT_UP\n", vdc->instance); in vdc_handle_cb()
3790 DMSG(vdc, 0, "[%d] Couldn't get LDC status %d", in vdc_handle_cb()
3791 vdc->instance, rv); in vdc_handle_cb()
3792 mutex_exit(&vdc->lock); in vdc_handle_cb()
3802 vdc->seq_num = 1; in vdc_handle_cb()
3803 vdc->seq_num_reply = 0; in vdc_handle_cb()
3804 vdc->io_pending = B_TRUE; in vdc_handle_cb()
3806 cv_signal(&vdc->initwait_cv); in vdc_handle_cb()
3807 cv_signal(&vdc->io_pending_cv); in vdc_handle_cb()
3812 DMSG(vdc, 1, "[%d] Received LDC_EVT_READ\n", vdc->instance); in vdc_handle_cb()
3813 mutex_enter(&vdc->read_lock); in vdc_handle_cb()
3814 cv_signal(&vdc->read_cv); in vdc_handle_cb()
3815 vdc->read_state = VDC_READ_PENDING; in vdc_handle_cb()
3816 mutex_exit(&vdc->read_lock); in vdc_handle_cb()
3817 mutex_exit(&vdc->lock); in vdc_handle_cb()
3825 DMSG(vdc, 0, "[%d] Received LDC RESET event\n", vdc->instance); in vdc_handle_cb()
3831 mutex_enter(&vdc->read_lock); in vdc_handle_cb()
3832 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_handle_cb()
3833 (vdc->read_state == VDC_READ_RESET)) in vdc_handle_cb()
3834 cv_signal(&vdc->read_cv); in vdc_handle_cb()
3835 vdc->read_state = VDC_READ_RESET; in vdc_handle_cb()
3836 mutex_exit(&vdc->read_lock); in vdc_handle_cb()
3839 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_handle_cb()
3840 vdc->state = VDC_STATE_RESETTING; in vdc_handle_cb()
3841 cv_signal(&vdc->initwait_cv); in vdc_handle_cb()
3842 } else if (vdc->state == VDC_STATE_FAILED) { in vdc_handle_cb()
3843 vdc->io_pending = B_TRUE; in vdc_handle_cb()
3844 cv_signal(&vdc->io_pending_cv); in vdc_handle_cb()
3849 mutex_exit(&vdc->lock); in vdc_handle_cb()
3852 DMSG(vdc, 0, "![%d] Unexpected LDC event (%lx) received", in vdc_handle_cb()
3853 vdc->instance, event); in vdc_handle_cb()
5108 vdc_handle_ver_msg(vdc_t *vdc, vio_ver_msg_t *ver_msg) in vdc_handle_ver_msg() argument
5112 ASSERT(vdc != NULL); in vdc_handle_ver_msg()
5113 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_ver_msg()
5131 vdc->ver.major = ver_msg->ver_major; in vdc_handle_ver_msg()
5132 vdc->ver.minor = ver_msg->ver_minor; in vdc_handle_ver_msg()
5133 ASSERT(vdc->ver.major > 0); in vdc_handle_ver_msg()
5148 ASSERT(vdc->ver.major > 0); in vdc_handle_ver_msg()
5154 status = vdc_send(vdc, (caddr_t)ver_msg, &len); in vdc_handle_ver_msg()
5155 DMSG(vdc, 0, "[%d] Resend VER info (LDC status = %d)\n", in vdc_handle_ver_msg()
5156 vdc->instance, status); in vdc_handle_ver_msg()
5160 DMSG(vdc, 0, "[%d] No common version with vDisk server", in vdc_handle_ver_msg()
5161 vdc->instance); in vdc_handle_ver_msg()
5196 vdc_handle_attr_msg(vdc_t *vdc, vd_attr_msg_t *attr_msg) in vdc_handle_attr_msg() argument
5201 ASSERT(vdc != NULL); in vdc_handle_attr_msg()
5202 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_attr_msg()
5214 DMSG(vdc, 0, "[%d] Invalid disk size from vds", in vdc_handle_attr_msg()
5215 vdc->instance); in vdc_handle_attr_msg()
5221 DMSG(vdc, 0, "[%d] Invalid transfer size from vds", in vdc_handle_attr_msg()
5222 vdc->instance); in vdc_handle_attr_msg()
5228 DMSG(vdc, 0, "[%d] Unknown disk size from vds", in vdc_handle_attr_msg()
5229 vdc->instance); in vdc_handle_attr_msg()
5235 vdc_update_vio_bsize(vdc, in vdc_handle_attr_msg()
5237 DMSG(vdc, 0, "[%d] Invalid block size (%u) from vds", in vdc_handle_attr_msg()
5238 vdc->instance, attr_msg->vdisk_block_size); in vdc_handle_attr_msg()
5244 old_type = vdc->vdisk_type; in vdc_handle_attr_msg()
5245 vdc_update_size(vdc, attr_msg->vdisk_size, in vdc_handle_attr_msg()
5247 vdc->vdisk_type = attr_msg->vdisk_type; in vdc_handle_attr_msg()
5248 vdc->operations = attr_msg->operations; in vdc_handle_attr_msg()
5249 if (vio_ver_is_supported(vdc->ver, 1, 1)) in vdc_handle_attr_msg()
5250 vdc->vdisk_media = attr_msg->vdisk_media; in vdc_handle_attr_msg()
5252 vdc->vdisk_media = 0; in vdc_handle_attr_msg()
5254 DMSG(vdc, 0, "[%d] max_xfer_sz: sent %lx acked %lx\n", in vdc_handle_attr_msg()
5255 vdc->instance, vdc->max_xfer_sz, attr_msg->max_xfer_sz); in vdc_handle_attr_msg()
5256 DMSG(vdc, 0, "[%d] vdisk_block_size: sent %lx acked %x\n", in vdc_handle_attr_msg()
5257 vdc->instance, vdc->vdisk_bsize, in vdc_handle_attr_msg()
5264 DMSG(vdc, 0, "[%d] Invalid attributes from vds", in vdc_handle_attr_msg()
5265 vdc->instance); in vdc_handle_attr_msg()
5274 vdc_create_fake_geometry(vdc); in vdc_handle_attr_msg()
5283 (vdc->initialized & VDC_MINOR) && in vdc_handle_attr_msg()
5284 vdc->vdisk_type == VD_DISK_TYPE_SLICE) { in vdc_handle_attr_msg()
5285 ddi_remove_minor_node(vdc->dip, NULL); in vdc_handle_attr_msg()
5286 (void) devfs_clean(ddi_get_parent(vdc->dip), in vdc_handle_attr_msg()
5288 if (vdc_create_device_nodes(vdc) != 0) { in vdc_handle_attr_msg()
5289 DMSG(vdc, 0, "![%d] Failed to update " in vdc_handle_attr_msg()
5290 "device nodes", vdc->instance); in vdc_handle_attr_msg()
5334 vdc_handle_dring_reg_msg(vdc_t *vdc, vio_dring_reg_msg_t *dring_msg) in vdc_handle_dring_reg_msg() argument
5338 ASSERT(vdc != NULL); in vdc_handle_dring_reg_msg()
5339 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_dring_reg_msg()
5348 vdc->dring_ident = dring_msg->dring_ident; in vdc_handle_dring_reg_msg()
5349 DMSG(vdc, 0, "[%d] Received dring ident=0x%lx\n", in vdc_handle_dring_reg_msg()
5350 vdc->instance, vdc->dring_ident); in vdc_handle_dring_reg_msg()
5358 DMSG(vdc, 0, "[%d] server could not register DRing\n", in vdc_handle_dring_reg_msg()
5359 vdc->instance); in vdc_handle_dring_reg_msg()
5401 vdc_verify_seq_num(vdc_t *vdc, vio_dring_msg_t *dring_msg) in vdc_verify_seq_num() argument
5403 ASSERT(vdc != NULL); in vdc_verify_seq_num()
5405 ASSERT(mutex_owned(&vdc->lock)); in vdc_verify_seq_num()
5411 if ((dring_msg->seq_num <= vdc->seq_num_reply) || in vdc_verify_seq_num()
5412 (dring_msg->seq_num > vdc->seq_num)) { in vdc_verify_seq_num()
5413 DMSG(vdc, 0, "?[%d] Bogus sequence_number %lu: " in vdc_verify_seq_num()
5415 vdc->instance, dring_msg->seq_num, in vdc_verify_seq_num()
5416 vdc->seq_num_reply, vdc->seq_num, in vdc_verify_seq_num()
5417 vdc->req_id_proc, vdc->req_id); in vdc_verify_seq_num()
5420 vdc->seq_num_reply = dring_msg->seq_num; in vdc_verify_seq_num()
5422 if (vdc->req_id_proc < vdc->req_id) in vdc_verify_seq_num()
5514 vdc_t *vdc; member
5533 vdc_t *vdc = NULL; in vdc_dkio_flush_cb() local
5541 vdc = dk_arg->vdc; in vdc_dkio_flush_cb()
5542 ASSERT(vdc != NULL); in vdc_dkio_flush_cb()
5544 rv = vdc_do_sync_op(vdc, VD_OP_FLUSH, NULL, 0, in vdc_dkio_flush_cb()
5547 DMSG(vdc, 0, "[%d] DKIOCFLUSHWRITECACHE failed %d : model %x\n", in vdc_dkio_flush_cb()
5548 vdc->instance, rv, in vdc_dkio_flush_cb()
5564 mutex_enter(&vdc->lock); in vdc_dkio_flush_cb()
5565 vdc->dkio_flush_pending--; in vdc_dkio_flush_cb()
5566 ASSERT(vdc->dkio_flush_pending >= 0); in vdc_dkio_flush_cb()
5567 mutex_exit(&vdc->lock); in vdc_dkio_flush_cb()
5586 vdc_dkio_gapart(vdc_t *vdc, caddr_t arg, int flag) in vdc_dkio_gapart() argument
5596 mutex_enter(&vdc->lock); in vdc_dkio_gapart()
5598 if ((rv = vdc_validate_geometry(vdc)) != 0) { in vdc_dkio_gapart()
5599 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5603 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) { in vdc_dkio_gapart()
5604 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5608 vtoc = vdc->vtoc; in vdc_dkio_gapart()
5609 geom = vdc->geom; in vdc_dkio_gapart()
5631 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5652 vdc_dkio_partition(vdc_t *vdc, caddr_t arg, int flag) in vdc_dkio_partition() argument
5665 VDC_EFI_DEV_SET(edev, vdc, vd_process_efi_ioctl); in vdc_dkio_partition()
5706 vdc_dioctl_rwcmd(vdc_t *vdc, caddr_t arg, int flag) in vdc_dioctl_rwcmd() argument
5750 auio.uio_loffset = rwcmd.blkaddr * vdc->vdisk_bsize; in vdc_dioctl_rwcmd()
5762 status = physio(vdc_strategy, buf, VD_MAKE_DEV(vdc->instance, 0), in vdc_dioctl_rwcmd()
5820 vdc_scsi_status(vdc_t *vdc, vd_scsi_t *vd_scsi, boolean_t log_error) in vdc_scsi_status() argument
5838 ddi_pathname(vdc->dip, path_str), vdc->instance, in vdc_scsi_status()
5887 if (vdc->failfast_interval != 0 && in vdc_scsi_status()
5893 ddi_pathname(vdc->dip, path_str)); in vdc_scsi_status()
5935 vdc_uscsi_cmd(vdc_t *vdc, caddr_t arg, int mode) in vdc_uscsi_cmd() argument
5961 rv = vdc_do_sync_op(vdc, VD_OP_RESET, NULL, 0, 0, 0, in vdc_uscsi_cmd()
6038 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_uscsi_cmd()
6098 rv = vdc_scsi_status(vdc, vd_scsi, in vdc_uscsi_cmd()
6188 vdc_mhd_inkeys(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_inkeys() argument
6232 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_inkeys()
6286 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_inkeys()
6300 vdc_mhd_inresv(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_inresv() argument
6346 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_inresv()
6415 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_inresv()
6428 vdc_mhd_register(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_register() argument
6451 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_register()
6455 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_register()
6467 vdc_mhd_reserve(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_reserve() argument
6492 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_reserve()
6496 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_reserve()
6508 vdc_mhd_preemptabort(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_preemptabort() argument
6537 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_preemptabort()
6541 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_preemptabort()
6553 vdc_mhd_registerignore(vdc_t *vdc, caddr_t arg, int mode) in vdc_mhd_registerignore() argument
6576 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_mhd_registerignore()
6580 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_mhd_registerignore()
6590 vdc_eio_scsi_cmd(vdc_t *vdc, uchar_t scmd, int flags) in vdc_eio_scsi_cmd() argument
6621 rv = vdc_do_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len, in vdc_eio_scsi_cmd()
6625 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE); in vdc_eio_scsi_cmd()
6641 vdc_eio_scsi_check(vdc_t *vdc, int flags) in vdc_eio_scsi_check() argument
6654 rv = vdc_eio_scsi_cmd(vdc, SCMD_TEST_UNIT_READY, flags); in vdc_eio_scsi_check()
6659 if (vdc->failfast_interval == 0) in vdc_eio_scsi_check()
6668 if (vdc_eio_scsi_cmd(vdc, SCMD_WRITE_G1, flags) != 0) in vdc_eio_scsi_check()
6682 vdc_eio_check(vdc_t *vdc, int flags) in vdc_eio_check() argument
6692 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD)) in vdc_eio_check()
6693 return (vdc_eio_scsi_check(vdc, flags)); in vdc_eio_check()
6695 ASSERT(vdc->failfast_interval == 0); in vdc_eio_check()
6706 buffer = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP); in vdc_eio_check()
6708 if (vdc->vdisk_size > 0) { in vdc_eio_check()
6713 blkno = blkno % vdc->vdisk_size; in vdc_eio_check()
6714 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer, in vdc_eio_check()
6715 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL, in vdc_eio_check()
6722 blkno = vdc->vdisk_size - 1; in vdc_eio_check()
6723 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer, in vdc_eio_check()
6724 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL, in vdc_eio_check()
6733 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer, vdc->vdisk_bsize, in vdc_eio_check()
6737 kmem_free(buffer, vdc->vdisk_bsize); in vdc_eio_check()
6748 vdc_eio_queue(vdc_t *vdc, int index) in vdc_eio_queue() argument
6752 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_eio_queue()
6755 vio->vio_next = vdc->eio_queue; in vdc_eio_queue()
6759 vdc->eio_queue = vio; in vdc_eio_queue()
6762 cv_signal(&vdc->eio_cv); in vdc_eio_queue()
6773 vdc_eio_unqueue(vdc_t *vdc, clock_t deadline, boolean_t complete_io) in vdc_eio_unqueue() argument
6779 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_eio_unqueue()
6782 vio = vdc->eio_queue; in vdc_eio_unqueue()
6801 vdc->eio_queue = NULL; in vdc_eio_unqueue()
6815 op = vdc->local_dring[index].operation; in vdc_eio_unqueue()
6816 buf = vdc->local_dring[index].buf; in vdc_eio_unqueue()
6817 (void) vdc_depopulate_descriptor(vdc, index); in vdc_eio_unqueue()
6820 VD_UPDATE_ERR_STATS(vdc, vd_softerrs); in vdc_eio_unqueue()
6821 VD_KSTAT_RUNQ_EXIT(vdc); in vdc_eio_unqueue()
6851 vdc_t *vdc = (vdc_t *)arg; in vdc_eio_thread() local
6852 clock_t starttime, timeout = drv_usectohz(vdc->failfast_interval); in vdc_eio_thread()
6854 mutex_enter(&vdc->lock); in vdc_eio_thread()
6856 while (vdc->failfast_interval != 0 || vdc->num_servers > 1) { in vdc_eio_thread()
6861 if (vdc->eio_queue == NULL || vdc->state != VDC_STATE_RUNNING) { in vdc_eio_thread()
6862 if (vdc->failfast_interval != 0) { in vdc_eio_thread()
6864 drv_usectohz(vdc->failfast_interval); in vdc_eio_thread()
6865 (void) cv_timedwait(&vdc->eio_cv, &vdc->lock, in vdc_eio_thread()
6868 ASSERT(vdc->num_servers > 1); in vdc_eio_thread()
6869 (void) cv_wait(&vdc->eio_cv, &vdc->lock); in vdc_eio_thread()
6872 if (vdc->state != VDC_STATE_RUNNING) in vdc_eio_thread()
6876 mutex_exit(&vdc->lock); in vdc_eio_thread()
6881 status = vdc_eio_check(vdc, VDC_OP_STATE_RUNNING); in vdc_eio_thread()
6883 mutex_enter(&vdc->lock); in vdc_eio_thread()
6888 if (vdc->failfast_interval == 0 && vdc->num_servers <= 1) in vdc_eio_thread()
6895 if (vdc->state != VDC_STATE_RUNNING || vdc->eio_queue == NULL) in vdc_eio_thread()
6903 vdc_eio_unqueue(vdc, starttime, B_TRUE); in vdc_eio_thread()
6905 } else if (vdc->num_servers > 1) { in vdc_eio_thread()
6912 mutex_enter(&vdc->read_lock); in vdc_eio_thread()
6913 vdc->read_state = VDC_READ_RESET; in vdc_eio_thread()
6914 cv_signal(&vdc->read_cv); in vdc_eio_thread()
6915 mutex_exit(&vdc->read_lock); in vdc_eio_thread()
6923 vdc_eio_unqueue(vdc, starttime, B_TRUE); in vdc_eio_thread()
6930 vdc_eio_unqueue(vdc, 0, B_TRUE); in vdc_eio_thread()
6931 vdc->eio_thread = NULL; in vdc_eio_thread()
6932 mutex_exit(&vdc->lock); in vdc_eio_thread()
6940 vdc_failfast(vdc_t *vdc, caddr_t arg, int mode) in vdc_failfast() argument
6947 mutex_enter(&vdc->lock); in vdc_failfast()
6948 if (mh_time != 0 && vdc->eio_thread == NULL) { in vdc_failfast()
6949 vdc->eio_thread = thread_create(NULL, 0, in vdc_failfast()
6950 vdc_eio_thread, vdc, 0, &p0, TS_RUN, in vdc_failfast()
6954 vdc->failfast_interval = ((long)mh_time) * MILLISEC; in vdc_failfast()
6955 cv_signal(&vdc->eio_cv); in vdc_failfast()
6956 mutex_exit(&vdc->lock); in vdc_failfast()
6966 vdc_access_set(vdc_t *vdc, uint64_t flags) in vdc_access_set() argument
6971 rv = vdc_do_sync_op(vdc, VD_OP_SET_ACCESS, (caddr_t)&flags, in vdc_access_set()
6982 vdc_access_get(vdc_t *vdc, uint64_t *status) in vdc_access_get() argument
6987 rv = vdc_do_sync_op(vdc, VD_OP_GET_ACCESS, (caddr_t)status, in vdc_access_get()
7007 vdc_t *vdc = (vdc_t *)arg; in vdc_ownership_thread() local
7011 mutex_enter(&vdc->ownership_lock); in vdc_ownership_thread()
7012 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7014 while (vdc->ownership & VDC_OWNERSHIP_WANTED) { in vdc_ownership_thread()
7016 if ((vdc->ownership & VDC_OWNERSHIP_RESET) || in vdc_ownership_thread()
7017 !(vdc->ownership & VDC_OWNERSHIP_GRANTED)) { in vdc_ownership_thread()
7024 DMSG(vdc, 0, "[%d] Ownership lost, recovering", in vdc_ownership_thread()
7025 vdc->instance); in vdc_ownership_thread()
7027 vdc->ownership &= ~(VDC_OWNERSHIP_RESET | in vdc_ownership_thread()
7030 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7032 status = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE | in vdc_ownership_thread()
7035 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7038 DMSG(vdc, 0, "[%d] Ownership recovered", in vdc_ownership_thread()
7039 vdc->instance); in vdc_ownership_thread()
7040 vdc->ownership |= VDC_OWNERSHIP_GRANTED; in vdc_ownership_thread()
7042 DMSG(vdc, 0, "[%d] Fail to recover ownership", in vdc_ownership_thread()
7043 vdc->instance); in vdc_ownership_thread()
7053 if (vdc->ownership & VDC_OWNERSHIP_GRANTED) in vdc_ownership_thread()
7059 mutex_exit(&vdc->ownership_lock); in vdc_ownership_thread()
7062 (void) cv_wait(&vdc->ownership_cv, &vdc->lock); in vdc_ownership_thread()
7064 (void) cv_reltimedwait(&vdc->ownership_cv, &vdc->lock, in vdc_ownership_thread()
7067 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7069 mutex_enter(&vdc->ownership_lock); in vdc_ownership_thread()
7070 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7073 vdc->ownership_thread = NULL; in vdc_ownership_thread()
7074 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7075 mutex_exit(&vdc->ownership_lock); in vdc_ownership_thread()
7081 vdc_ownership_update(vdc_t *vdc, int ownership_flags) in vdc_ownership_update() argument
7083 ASSERT(MUTEX_HELD(&vdc->ownership_lock)); in vdc_ownership_update()
7085 mutex_enter(&vdc->lock); in vdc_ownership_update()
7086 vdc->ownership = ownership_flags; in vdc_ownership_update()
7087 if ((vdc->ownership & VDC_OWNERSHIP_WANTED) && in vdc_ownership_update()
7088 vdc->ownership_thread == NULL) { in vdc_ownership_update()
7090 vdc->ownership_thread = thread_create(NULL, 0, in vdc_ownership_update()
7091 vdc_ownership_thread, vdc, 0, &p0, TS_RUN, in vdc_ownership_update()
7095 cv_signal(&vdc->ownership_cv); in vdc_ownership_update()
7097 mutex_exit(&vdc->lock); in vdc_ownership_update()
7104 vdc_get_capacity(vdc_t *vdc, size_t *dsk_size, size_t *blk_size) in vdc_get_capacity() argument
7110 ASSERT(MUTEX_NOT_HELD(&vdc->lock)); in vdc_get_capacity()
7116 rv = vdc_do_sync_op(vdc, VD_OP_GET_CAPACITY, (caddr_t)vd_cap, alloc_len, in vdc_get_capacity()
7133 vdc_check_capacity(vdc_t *vdc) in vdc_check_capacity() argument
7143 if (!VD_OP_SUPPORTED(vdc->operations, VD_OP_GET_CAPACITY)) in vdc_check_capacity()
7146 if ((rv = vdc_get_capacity(vdc, &dsk_size, &blk_size)) != 0) in vdc_check_capacity()
7152 mutex_enter(&vdc->lock); in vdc_check_capacity()
7159 rv = vdc_update_vio_bsize(vdc, blk_size); in vdc_check_capacity()
7161 vdc_update_size(vdc, dsk_size, blk_size, vdc->max_xfer_sz); in vdc_check_capacity()
7163 mutex_exit(&vdc->lock); in vdc_check_capacity()
7177 int (*convert)(vdc_t *vdc, void *vd_buf, void *ioctl_arg,
7252 vdc_t *vdc = (vdc_t *)vdisk; in vd_process_efi_ioctl() local
7256 dev = makedevice(ddi_driver_major(vdc->dip), in vd_process_efi_ioctl()
7257 VD_MAKE_DEV(vdc->instance, 0)); in vd_process_efi_ioctl()
7288 vdc_t *vdc = NULL; in vd_process_ioctl() local
7297 vdc = ddi_get_soft_state(vdc_state, instance); in vd_process_ioctl()
7298 if (vdc == NULL) { in vd_process_ioctl()
7304 DMSG(vdc, 0, "[%d] Processing ioctl(%x) for dev %lx : model %x\n", in vd_process_ioctl()
7324 DMSG(vdc, 0, "[%d] Unsupported ioctl (0x%x)\n", in vd_process_ioctl()
7325 vdc->instance, cmd); in vd_process_ioctl()
7362 if (vdc->cinfo == NULL) in vd_process_ioctl()
7364 if (vdc->cinfo->dki_ctype != DKC_SCSI_CCS) in vd_process_ioctl()
7369 if (vdc->cinfo == NULL) in vd_process_ioctl()
7371 if (vdc->cinfo->dki_ctype != DKC_DIRECT) in vd_process_ioctl()
7376 if (vdc->cinfo == NULL) in vd_process_ioctl()
7381 if (vdc->minfo == NULL) in vd_process_ioctl()
7383 if (vdc_check_capacity(vdc) != 0) in vd_process_ioctl()
7398 return (vdc_uscsi_cmd(vdc, arg, mode)); in vd_process_ioctl()
7403 mutex_enter(&vdc->ownership_lock); in vd_process_ioctl()
7409 vdc_ownership_update(vdc, VDC_OWNERSHIP_WANTED); in vd_process_ioctl()
7411 rv = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE | in vd_process_ioctl()
7414 vdc_ownership_update(vdc, VDC_OWNERSHIP_WANTED | in vd_process_ioctl()
7417 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE); in vd_process_ioctl()
7419 mutex_exit(&vdc->ownership_lock); in vd_process_ioctl()
7425 mutex_enter(&vdc->ownership_lock); in vd_process_ioctl()
7426 rv = vdc_access_set(vdc, VD_ACCESS_SET_CLEAR); in vd_process_ioctl()
7428 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE); in vd_process_ioctl()
7430 mutex_exit(&vdc->ownership_lock); in vd_process_ioctl()
7438 rv = vdc_access_get(vdc, &status); in vd_process_ioctl()
7446 rv = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE); in vd_process_ioctl()
7452 return (vdc_mhd_inkeys(vdc, arg, mode)); in vd_process_ioctl()
7457 return (vdc_mhd_inresv(vdc, arg, mode)); in vd_process_ioctl()
7462 return (vdc_mhd_register(vdc, arg, mode)); in vd_process_ioctl()
7467 return (vdc_mhd_reserve(vdc, arg, mode)); in vd_process_ioctl()
7472 return (vdc_mhd_preemptabort(vdc, arg, mode)); in vd_process_ioctl()
7477 return (vdc_mhd_registerignore(vdc, arg, mode)); in vd_process_ioctl()
7482 rv = vdc_failfast(vdc, arg, mode); in vd_process_ioctl()
7488 return (vdc_dioctl_rwcmd(vdc, arg, mode)); in vd_process_ioctl()
7493 return (vdc_dkio_gapart(vdc, arg, mode)); in vd_process_ioctl()
7498 return (vdc_dkio_partition(vdc, arg, mode)); in vd_process_ioctl()
7505 bcopy(vdc->cinfo, &cinfo, sizeof (struct dk_cinfo)); in vd_process_ioctl()
7518 ASSERT(vdc->vdisk_size != 0); in vd_process_ioctl()
7519 ASSERT(vdc->minfo->dki_capacity != 0); in vd_process_ioctl()
7520 rv = ddi_copyout(vdc->minfo, (void *)arg, in vd_process_ioctl()
7534 DMSG(vdc, 1, "[%d] Flush W$: mode %x\n", in vd_process_ioctl()
7567 mutex_enter(&vdc->lock); in vd_process_ioctl()
7568 vdc->dkio_flush_pending++; in vd_process_ioctl()
7569 dkarg->vdc = vdc; in vd_process_ioctl()
7570 mutex_exit(&vdc->lock); in vd_process_ioctl()
7577 mutex_enter(&vdc->lock); in vd_process_ioctl()
7578 vdc->dkio_flush_pending--; in vd_process_ioctl()
7579 mutex_exit(&vdc->lock); in vd_process_ioctl()
7592 if (VD_OP_SUPPORTED(vdc->operations, iop->op) == B_FALSE) { in vd_process_ioctl()
7593 DMSG(vdc, 0, "[%d] Unsupported VD_OP operation (0x%x)\n", in vd_process_ioctl()
7594 vdc->instance, iop->op); in vd_process_ioctl()
7600 DMSG(vdc, 1, "[%d] struct size %ld alloc %ld\n", in vd_process_ioctl()
7612 rv = (iop->convert)(vdc, arg, mem_p, mode, VD_COPYIN); in vd_process_ioctl()
7614 DMSG(vdc, 0, "[%d] convert func returned %d for ioctl 0x%x\n", in vd_process_ioctl()
7624 rv = vdc_do_sync_op(vdc, iop->op, mem_p, alloc_len, in vd_process_ioctl()
7633 DMSG(vdc, 0, "[%d] vds returned %d for ioctl 0x%x\n", in vd_process_ioctl()
7647 rv = (iop->convert)(vdc, mem_p, arg, mode, VD_COPYOUT); in vd_process_ioctl()
7649 DMSG(vdc, 0, "[%d] convert func returned %d for ioctl 0x%x\n", in vd_process_ioctl()
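
The DKIO/MHIOC cases above funnel into one generic path: a per-ioctl table entry carries the backend operation code, the size of the wire buffer and a convert callback that runs once in each direction around a single synchronous operation. The sketch below shows that shape under assumed names; do_sync_op() stands in for vdc_do_sync_op() and the table layout is illustrative.

	#include <errno.h>
	#include <stdlib.h>

	typedef struct disk disk_t;

	typedef enum { DIR_COPYIN, DIR_COPYOUT } dir_t;

	typedef struct ioctl_op {
		int	cmd;				/* ioctl command */
		int	op;				/* backend operation code */
		size_t	nbytes;				/* wire buffer size */
		int	(*convert)(disk_t *, void *from, void *to, dir_t dir);
	} ioctl_op_t;

	/* Stand-in for vdc_do_sync_op(): send the buffer and wait for completion. */
	extern int do_sync_op(disk_t *d, int op, void *buf, size_t nbytes);

	int
	process_ioctl(disk_t *d, const ioctl_op_t *iop, void *arg)
	{
		void *mem;
		int rv;

		if ((mem = calloc(1, iop->nbytes ? iop->nbytes : 1)) == NULL)
			return (ENOMEM);

		/* User representation -> wire representation. */
		if ((rv = iop->convert(d, arg, mem, DIR_COPYIN)) != 0)
			goto done;

		/* One synchronous round trip to the virtual-disk server. */
		if ((rv = do_sync_op(d, iop->op, mem, iop->nbytes)) != 0)
			goto done;

		/* Wire representation -> user representation. */
		rv = iop->convert(d, mem, arg, DIR_COPYOUT);
	done:
		free(mem);
		return (rv);
	}

Entries whose argument needs no translation can point convert at a pass-through, which is the role the vdc_null_copy_func() fragments just below play in the driver.
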
7670 vdc_null_copy_func(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_null_copy_func() argument
7672 _NOTE(ARGUNUSED(vdc)) in vdc_null_copy_func()
7682 vdc_get_wce_convert(vdc_t *vdc, void *from, void *to, in vdc_get_wce_convert() argument
7685 _NOTE(ARGUNUSED(vdc)) in vdc_get_wce_convert()
7697 vdc_set_wce_convert(vdc_t *vdc, void *from, void *to, in vdc_set_wce_convert() argument
7700 _NOTE(ARGUNUSED(vdc)) in vdc_set_wce_convert()
7737 vdc_get_vtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_get_vtoc_convert() argument
7751 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) in vdc_get_vtoc_convert()
7758 evtoc.timestamp[i] = vdc->vtoc->timestamp[i]; in vdc_get_vtoc_convert()
7798 vdc_set_vtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_set_vtoc_convert() argument
7809 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) in vdc_set_vtoc_convert()
7831 vdc_validate(vdc); in vdc_set_vtoc_convert()
7837 vdc->vtoc->timestamp[i] = evtoc.timestamp[i]; in vdc_set_vtoc_convert()
7848 vdc_get_extvtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_get_extvtoc_convert() argument
7863 evtoc.timestamp[i] = vdc->vtoc->timestamp[i]; in vdc_get_extvtoc_convert()
7874 vdc_set_extvtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_set_extvtoc_convert() argument
7894 vdc_validate(vdc); in vdc_set_extvtoc_convert()
7900 vdc->vtoc->timestamp[i] = evtoc.timestamp[i]; in vdc_set_extvtoc_convert()
7932 vdc_get_geom_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_get_geom_convert() argument
7934 _NOTE(ARGUNUSED(vdc)) in vdc_get_geom_convert()
7975 vdc_set_geom_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_set_geom_convert() argument
7977 _NOTE(ARGUNUSED(vdc)) in vdc_set_geom_convert()
8005 vdc_get_efi_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_get_efi_convert() argument
8007 _NOTE(ARGUNUSED(vdc)) in vdc_get_efi_convert()
8053 vdc_set_efi_convert(vdc_t *vdc, void *from, void *to, int mode, int dir) in vdc_set_efi_convert() argument
8055 _NOTE(ARGUNUSED(vdc)) in vdc_set_efi_convert()
8065 vdc_validate(vdc); in vdc_set_efi_convert()
8110 vdc_create_fake_geometry(vdc_t *vdc) in vdc_create_fake_geometry() argument
8112 ASSERT(vdc != NULL); in vdc_create_fake_geometry()
8113 ASSERT(vdc->max_xfer_sz != 0); in vdc_create_fake_geometry()
8118 if (vdc->cinfo == NULL) in vdc_create_fake_geometry()
8119 vdc->cinfo = kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); in vdc_create_fake_geometry()
8121 (void) strcpy(vdc->cinfo->dki_cname, VDC_DRIVER_NAME); in vdc_create_fake_geometry()
8122 (void) strcpy(vdc->cinfo->dki_dname, VDC_DRIVER_NAME); in vdc_create_fake_geometry()
8124 vdc->cinfo->dki_maxtransfer = vdc->max_xfer_sz; in vdc_create_fake_geometry()
8135 switch (vdc->vdisk_media) { in vdc_create_fake_geometry()
8138 vdc->cinfo->dki_ctype = DKC_CDROM; in vdc_create_fake_geometry()
8141 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD)) in vdc_create_fake_geometry()
8142 vdc->cinfo->dki_ctype = DKC_SCSI_CCS; in vdc_create_fake_geometry()
8144 vdc->cinfo->dki_ctype = DKC_DIRECT; in vdc_create_fake_geometry()
8148 vdc->cinfo->dki_ctype = DKC_DIRECT; in vdc_create_fake_geometry()
8151 vdc->cinfo->dki_flags = DKI_FMTVOL; in vdc_create_fake_geometry()
8152 vdc->cinfo->dki_cnum = 0; in vdc_create_fake_geometry()
8153 vdc->cinfo->dki_addr = 0; in vdc_create_fake_geometry()
8154 vdc->cinfo->dki_space = 0; in vdc_create_fake_geometry()
8155 vdc->cinfo->dki_prio = 0; in vdc_create_fake_geometry()
8156 vdc->cinfo->dki_vec = 0; in vdc_create_fake_geometry()
8157 vdc->cinfo->dki_unit = vdc->instance; in vdc_create_fake_geometry()
8158 vdc->cinfo->dki_slave = 0; in vdc_create_fake_geometry()
8163 vdc->cinfo->dki_partition = 0; in vdc_create_fake_geometry()
8168 if (vdc->minfo == NULL) in vdc_create_fake_geometry()
8169 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP); in vdc_create_fake_geometry()
8171 if (vio_ver_is_supported(vdc->ver, 1, 1)) { in vdc_create_fake_geometry()
8172 vdc->minfo->dki_media_type = in vdc_create_fake_geometry()
8173 VD_MEDIATYPE2DK_MEDIATYPE(vdc->vdisk_media); in vdc_create_fake_geometry()
8175 vdc->minfo->dki_media_type = DK_FIXED_DISK; in vdc_create_fake_geometry()
8178 vdc->minfo->dki_capacity = vdc->vdisk_size; in vdc_create_fake_geometry()
8179 vdc->minfo->dki_lbsize = vdc->vdisk_bsize; in vdc_create_fake_geometry()
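
vdc_create_fake_geometry() above fills in controller and media information that has no physical counterpart on a virtual disk: the controller type is derived from the media type and from whether the server accepts SCSI pass-through commands, and the reported media type falls back to a plain fixed disk when the negotiated VIO version predates 1.1. A standalone sketch of those decisions; the enums and structure below are illustrative assumptions, not the DKIO structures the driver fills.

	#include <stdbool.h>
	#include <stdint.h>

	typedef enum { MEDIA_FIXED, MEDIA_CD, MEDIA_DVD } media_t;
	typedef enum { CTYPE_DIRECT, CTYPE_CDROM, CTYPE_SCSI_CCS } ctype_t;

	typedef struct geom_info {
		ctype_t  ctype;		/* controller type reported to callers */
		media_t  media;		/* media type reported to callers */
		uint64_t capacity;	/* blocks */
		uint32_t lbsize;	/* bytes per block */
	} geom_info_t;

	void
	fake_geometry(geom_info_t *gi, media_t media, bool scsi_passthru,
	    bool ver_at_least_1_1, uint64_t capacity, uint32_t lbsize)
	{
		switch (media) {
		case MEDIA_CD:
		case MEDIA_DVD:
			gi->ctype = CTYPE_CDROM;
			break;
		case MEDIA_FIXED:
			/* A fixed disk looks like a SCSI device only when the
			 * server accepts SCSI pass-through commands. */
			gi->ctype = scsi_passthru ? CTYPE_SCSI_CCS : CTYPE_DIRECT;
			break;
		default:
			gi->ctype = CTYPE_DIRECT;
			break;
		}

		/* Older protocol versions cannot describe the media, so report
		 * a plain fixed disk in that case. */
		gi->media = ver_at_least_1_1 ? media : MEDIA_FIXED;
		gi->capacity = capacity;
		gi->lbsize = lbsize;
	}
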
8199 vdc_update_size(vdc_t *vdc, size_t dsk_size, size_t blk_size, size_t xfr_size) in vdc_update_size() argument
8203 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_update_size()
8211 (blk_size == vdc->vdisk_bsize && dsk_size == vdc->vdisk_size && in vdc_update_size()
8212 xfr_size == vdc->max_xfer_sz)) in vdc_update_size()
8223 DMSG(vdc, 0, "[%d] vds block transfer size too big;" in vdc_update_size()
8224 " using max supported by vdc", vdc->instance); in vdc_update_size()
8228 vdc->max_xfer_sz = xfr_size; in vdc_update_size()
8229 vdc->vdisk_bsize = blk_size; in vdc_update_size()
8230 vdc->vdisk_size = dsk_size; in vdc_update_size()
8232 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_update_size()
8235 vdc->minfo->dki_capacity = dsk_size; in vdc_update_size()
8236 vdc->minfo->dki_lbsize = (uint_t)blk_size; in vdc_update_size()
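
vdc_update_size() above is guarded: it returns early when the reported values match what is already cached, and clamps the transfer size the server proposes to the client-side maximum before storing the new values. A sketch of those guards, additionally assuming (my assumption, not shown in the fragments) that zero values are rejected; LOCAL_MAX_XFER is an illustrative constant.

	#include <stddef.h>
	#include <stdint.h>

	typedef struct disk {
		size_t   disk_size;	/* capacity, in blocks */
		uint32_t block_size;	/* bytes per block */
		size_t   max_xfer;	/* maximum transfer size */
	} disk_t;

	#define	LOCAL_MAX_XFER	1024	/* illustrative client-side limit */

	void
	update_size(disk_t *d, size_t dsk_size, uint32_t blk_size, size_t xfr_size)
	{
		/* Reject nonsense and avoid churn when nothing changed. */
		if (dsk_size == 0 || blk_size == 0 || xfr_size == 0 ||
		    (blk_size == d->block_size && dsk_size == d->disk_size &&
		    xfr_size == d->max_xfer))
			return;

		/* Never let the server push us past what this client supports. */
		if (xfr_size > LOCAL_MAX_XFER)
			xfr_size = LOCAL_MAX_XFER;

		d->max_xfer = xfr_size;
		d->block_size = blk_size;
		d->disk_size = dsk_size;
	}
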
8254 vdc_update_vio_bsize(vdc_t *vdc, uint32_t blk_size) in vdc_update_vio_bsize() argument
8259 vdc->vio_bmask = 0; in vdc_update_vio_bsize()
8260 vdc->vio_bshift = 0; in vdc_update_vio_bsize()
8277 vdc->vio_bshift = nshift; in vdc_update_vio_bsize()
8278 vdc->vio_bmask = ratio - 1; in vdc_update_vio_bsize()
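
vdc_update_vio_bsize() above reduces block-size conversion to a shift and a mask: the backend block size must be a power-of-two multiple of the client's 512-byte unit, the shift is log2 of that ratio, and the mask (ratio - 1) exposes misaligned requests. A self-contained sketch of the computation, with assumed names; 512 stands in for DEV_BSIZE.

	#include <stdint.h>
	#include <stdio.h>

	#define	BASE_BSIZE	512U	/* the client-side block unit */

	typedef struct bconv {
		uint32_t shift;		/* base blocks -> backend blocks: >> shift */
		uint32_t mask;		/* low bits that must be zero for alignment */
	} bconv_t;

	/* Returns 0 on success, -1 if blk_size cannot be expressed as 512 * 2^n. */
	int
	compute_bconv(uint32_t blk_size, bconv_t *bc)
	{
		uint32_t ratio, nshift;

		bc->shift = 0;
		bc->mask = 0;

		if (blk_size < BASE_BSIZE || (blk_size % BASE_BSIZE) != 0)
			return (-1);

		ratio = blk_size / BASE_BSIZE;
		if ((ratio & (ratio - 1)) != 0)		/* must be a power of two */
			return (-1);

		for (nshift = 0; (1U << nshift) < ratio; nshift++)
			;

		bc->shift = nshift;
		bc->mask = ratio - 1;
		return (0);
	}

	int
	main(void)
	{
		bconv_t bc;

		/* e.g. a 4096-byte backend block: shift = 3, mask = 7. */
		if (compute_bconv(4096, &bc) == 0)
			printf("shift=%u mask=%u\n", bc.shift, bc.mask);
		return (0);
	}
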
8303 vdc_validate_geometry(vdc_t *vdc) in vdc_validate_geometry() argument
8314 ASSERT(vdc != NULL); in vdc_validate_geometry()
8315 ASSERT(vdc->vtoc != NULL && vdc->geom != NULL); in vdc_validate_geometry()
8316 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_validate_geometry()
8318 mutex_exit(&vdc->lock); in vdc_validate_geometry()
8323 (void) vdc_check_capacity(vdc); in vdc_validate_geometry()
8324 dev = makedevice(ddi_driver_major(vdc->dip), in vdc_validate_geometry()
8325 VD_MAKE_DEV(vdc->instance, 0)); in vdc_validate_geometry()
8340 if (vdc->vdisk_size == 0) { in vdc_validate_geometry()
8341 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8342 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8346 VDC_EFI_DEV_SET(edev, vdc, vd_process_efi_ioctl); in vdc_validate_geometry()
8351 DMSG(vdc, 0, "[%d] Failed to get EFI (err=%d)", in vdc_validate_geometry()
8352 vdc->instance, rv); in vdc_validate_geometry()
8353 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8354 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8358 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8359 vdc_store_label_efi(vdc, gpt, gpe); in vdc_validate_geometry()
8365 DMSG(vdc, 0, "[%d] Failed to get VTOC (err=%d)", in vdc_validate_geometry()
8366 vdc->instance, rv); in vdc_validate_geometry()
8367 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8368 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8377 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8378 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8399 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE) { in vdc_validate_geometry()
8400 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8402 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8405 vdc_store_label_vtoc(vdc, &geom, &vtoc); in vdc_validate_geometry()
8410 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8411 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8420 if (vdc->vdisk_media == VD_MEDIA_CD || in vdc_validate_geometry()
8421 vdc->vdisk_media == VD_MEDIA_DVD) { in vdc_validate_geometry()
8422 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8423 vdc_store_label_vtoc(vdc, &geom, &vtoc); in vdc_validate_geometry()
8430 label = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP); in vdc_validate_geometry()
8432 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)label, vdc->vdisk_bsize, in vdc_validate_geometry()
8437 DMSG(vdc, 1, "[%d] Got VTOC with invalid label\n", in vdc_validate_geometry()
8438 vdc->instance); in vdc_validate_geometry()
8439 kmem_free(label, vdc->vdisk_bsize); in vdc_validate_geometry()
8440 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8441 vdc_store_label_unk(vdc); in vdc_validate_geometry()
8445 kmem_free(label, vdc->vdisk_bsize); in vdc_validate_geometry()
8446 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8447 vdc_store_label_vtoc(vdc, &geom, &vtoc); in vdc_validate_geometry()
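
Read together, the vdc_validate_geometry() fragments suggest this probing order: refresh the capacity, ask for a geometry and VTOC, fall back to an EFI (GPT) read when the VTOC path is unsupported, and record the label as unknown when neither works; for a full disk the block-0 label magic is also verified before the VTOC is trusted. The sketch below captures only the ordering; refresh_capacity(), probe_vtoc(), probe_efi() and the store routines are hypothetical stand-ins, and using ENOTSUP as the "no VTOC support" indicator is an assumption.

	#include <errno.h>

	typedef struct disk disk_t;

	typedef enum { LABEL_UNK, LABEL_EFI, LABEL_VTOC } label_t;

	extern void refresh_capacity(disk_t *d);
	extern int  probe_vtoc(disk_t *d);	/* 0 on success, ENOTSUP if unsupported */
	extern int  probe_efi(disk_t *d);	/* 0 if a GPT was read */
	extern void store_efi(disk_t *d);
	extern void store_vtoc(disk_t *d);
	extern void store_unknown(disk_t *d);

	label_t
	validate_geometry(disk_t *d)
	{
		int rv;

		/* Sizes may have changed behind our back; refresh them first. */
		refresh_capacity(d);

		rv = probe_vtoc(d);

		if (rv == ENOTSUP) {
			/* No VTOC support at all: the disk may carry an EFI label. */
			if (probe_efi(d) == 0) {
				store_efi(d);
				return (LABEL_EFI);
			}
			store_unknown(d);
			return (LABEL_UNK);
		}

		if (rv != 0) {
			/* VTOC supported but unreadable or invalid: unlabeled. */
			store_unknown(d);
			return (LABEL_UNK);
		}

		/* A VTOC was read; the driver additionally checks the on-disk
		 * label magic for a full disk before trusting it. */
		store_vtoc(d);
		return (LABEL_VTOC);
	}
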
8466 vdc_validate(vdc_t *vdc) in vdc_validate() argument
8472 ASSERT(!MUTEX_HELD(&vdc->lock)); in vdc_validate()
8474 mutex_enter(&vdc->lock); in vdc_validate()
8477 old_label = vdc->vdisk_label; in vdc_validate()
8478 bcopy(vdc->slice, &old_slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_validate()
8481 (void) vdc_validate_geometry(vdc); in vdc_validate()
8484 if (vdc->vdisk_type == VD_DISK_TYPE_DISK && in vdc_validate()
8485 vdc->vdisk_label != old_label) { in vdc_validate()
8487 if (vdc->vdisk_label == VD_DISK_LABEL_EFI) in vdc_validate()
8488 rv = vdc_create_device_nodes_efi(vdc); in vdc_validate()
8490 rv = vdc_create_device_nodes_vtoc(vdc); in vdc_validate()
8493 DMSG(vdc, 0, "![%d] Failed to update device nodes", in vdc_validate()
8494 vdc->instance); in vdc_validate()
8498 mutex_exit(&vdc->lock); in vdc_validate()
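
vdc_validate() above remembers the previous label and slice table, re-probes the geometry, and recreates the minor device nodes only for a full disk whose label style actually changed. A small sketch of that comparison, with hypothetical helpers.

	#include <stdbool.h>

	typedef struct disk disk_t;

	typedef enum { LABEL_UNK, LABEL_EFI, LABEL_VTOC } label_t;

	extern label_t probe_label(disk_t *d);	/* e.g. the validate_geometry() sketch above */
	extern int create_nodes_efi(disk_t *d);
	extern int create_nodes_vtoc(disk_t *d);

	void
	revalidate(disk_t *d, bool full_disk, label_t *cached_label)
	{
		label_t old_label = *cached_label;

		*cached_label = probe_label(d);

		/* Only a full virtual disk owns its minor node layout, and only
		 * a change of label style requires rebuilding it. */
		if (full_disk && *cached_label != old_label) {
			if (*cached_label == LABEL_EFI)
				(void) create_nodes_efi(d);
			else
				(void) create_nodes_vtoc(d);
		}
	}
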
8504 vdc_t *vdc = (vdc_t *)arg; in vdc_validate_task() local
8506 vdc_validate(vdc); in vdc_validate_task()
8508 mutex_enter(&vdc->lock); in vdc_validate_task()
8509 ASSERT(vdc->validate_pending > 0); in vdc_validate_task()
8510 vdc->validate_pending--; in vdc_validate_task()
8511 mutex_exit(&vdc->lock); in vdc_validate_task()
8531 vdc_setup_devid(vdc_t *vdc) in vdc_setup_devid() argument
8552 rv = vdc_do_op(vdc, VD_OP_GET_DEVID, (caddr_t)vd_devid, in vdc_setup_devid()
8555 DMSG(vdc, 2, "do_op returned %d\n", rv); in vdc_setup_devid()
8573 rv = vdc_do_sync_op(vdc, VD_OP_GET_DEVID, (caddr_t)vd_devid, in vdc_setup_devid()
8593 DMSG(vdc, 2, ": devid length = %d\n", vd_devid->length); in vdc_setup_devid()
8596 if (ddi_devid_init(vdc->dip, DEVID_ENCAP, vd_devid->length, in vdc_setup_devid()
8598 		DMSG(vdc, 1, "[%d] Failed to create devid\n", vdc->instance); in vdc_setup_devid()
8609 if (vdc->devid != NULL) { in vdc_setup_devid()
8611 if (ddi_devid_compare(vdisk_devid, vdc->devid) == 0) { in vdc_setup_devid()
8617 vdc->instance); in vdc_setup_devid()
8619 devid_str = ddi_devid_str_encode(vdc->devid, NULL); in vdc_setup_devid()
8622 vdc->instance, in vdc_setup_devid()
8631 vdc->instance, in vdc_setup_devid()
8641 if (ddi_devid_register(vdc->dip, vdisk_devid) != DDI_SUCCESS) { in vdc_setup_devid()
8642 		DMSG(vdc, 1, "[%d] Failed to register devid\n", vdc->instance); in vdc_setup_devid()
8647 vdc->devid = vdisk_devid; in vdc_setup_devid()
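
vdc_setup_devid() above fetches the backend's device ID, compares it with any ID the instance already holds, and adopts and registers it only when it is new. The sketch below mirrors that flow in plain C; devid_t, fetch_devid() and the return conventions are assumptions, not the DDI interfaces the driver actually uses.

	#include <stdlib.h>
	#include <string.h>

	typedef struct devid {
		size_t		len;
		unsigned char	*data;
	} devid_t;

	/* Stand-in for the VD_OP_GET_DEVID round trip; fills *out on success. */
	extern int fetch_devid(void *disk, devid_t *out);

	/* Stand-in for ddi_devid_compare(): 0 when the two IDs match. */
	static int
	devid_compare(const devid_t *a, const devid_t *b)
	{
		if (a->len != b->len)
			return (-1);
		return (memcmp(a->data, b->data, a->len));
	}

	int
	setup_devid(void *disk, devid_t **current)
	{
		devid_t *id;

		if ((id = calloc(1, sizeof (*id))) == NULL)
			return (-1);

		if (fetch_devid(disk, id) != 0) {
			free(id->data);
			free(id);
			return (-1);
		}

		if (*current != NULL) {
			/* An ID already exists; the freshly fetched one must
			 * match it, otherwise the backend disk has changed. */
			int mismatch = devid_compare(id, *current);

			free(id->data);
			free(id);
			return (mismatch ? -1 : 0);
		}

		/* First time through: adopt (and, in the driver, register) the ID. */
		*current = id;
		return (0);
	}
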
8653 vdc_store_label_efi(vdc_t *vdc, efi_gpt_t *gpt, efi_gpe_t *gpe) in vdc_store_label_efi() argument
8657 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_efi()
8659 vdc->vdisk_label = VD_DISK_LABEL_EFI; in vdc_store_label_efi()
8660 bzero(vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_efi()
8661 bzero(vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_efi()
8662 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_efi()
8673 vdc->slice[i].start = gpe[i].efi_gpe_StartingLBA; in vdc_store_label_efi()
8674 vdc->slice[i].nblocks = gpe[i].efi_gpe_EndingLBA - in vdc_store_label_efi()
8678 ASSERT(vdc->vdisk_size != 0); in vdc_store_label_efi()
8679 vdc->slice[VD_EFI_WD_SLICE].start = 0; in vdc_store_label_efi()
8680 vdc->slice[VD_EFI_WD_SLICE].nblocks = vdc->vdisk_size; in vdc_store_label_efi()
8685 vdc_store_label_vtoc(vdc_t *vdc, struct dk_geom *geom, struct extvtoc *vtoc) in vdc_store_label_vtoc() argument
8689 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_vtoc()
8690 ASSERT(vdc->vdisk_bsize == vtoc->v_sectorsz); in vdc_store_label_vtoc()
8692 vdc->vdisk_label = VD_DISK_LABEL_VTOC; in vdc_store_label_vtoc()
8693 bcopy(vtoc, vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_vtoc()
8694 bcopy(geom, vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_vtoc()
8695 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_vtoc()
8698 vdc->slice[i].start = vtoc->v_part[i].p_start; in vdc_store_label_vtoc()
8699 vdc->slice[i].nblocks = vtoc->v_part[i].p_size; in vdc_store_label_vtoc()
8704 vdc_store_label_unk(vdc_t *vdc) in vdc_store_label_unk() argument
8706 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_unk()
8708 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_store_label_unk()
8709 bzero(vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_unk()
8710 bzero(vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_unk()
8711 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_unk()
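
The three store routines above reduce every label style to the same slice table of (start, nblocks) pairs: EFI entries come from the GPE array plus a synthesized whole-disk slice, VTOC partitions map directly, and an unknown label leaves the table empty. A standalone sketch of that bookkeeping; the structure names, the slice count and the whole-disk slice index below are illustrative assumptions.

	#include <stdint.h>
	#include <string.h>

	#define	NUM_SLICES	16	/* illustrative; the driver uses V_NUMPAR */
	#define	WD_SLICE	7	/* assumed whole-disk slice index (VD_EFI_WD_SLICE in the driver) */

	typedef struct slice {
		uint64_t start;		/* first block of the slice */
		uint64_t nblocks;	/* number of blocks in the slice */
	} slice_t;

	typedef struct gpt_entry {	/* one GPT partition entry */
		uint64_t first_lba;
		uint64_t last_lba;	/* inclusive, EFI style */
	} gpt_entry_t;

	/* EFI/GPT: slices come from the entry array, plus a synthetic whole-disk slice. */
	void
	store_label_efi(slice_t slices[NUM_SLICES], const gpt_entry_t *gpe, int nparts,
	    uint64_t disk_size)
	{
		memset(slices, 0, sizeof (slice_t) * NUM_SLICES);

		for (int i = 0; i < nparts && i < NUM_SLICES; i++) {
			slices[i].start = gpe[i].first_lba;
			/* The ending LBA is inclusive, hence the + 1. */
			slices[i].nblocks = gpe[i].last_lba - gpe[i].first_lba + 1;
		}

		/* EFI defines no slice spanning the whole disk, so synthesize one. */
		slices[WD_SLICE].start = 0;
		slices[WD_SLICE].nblocks = disk_size;
	}

	/* VTOC: partitions already carry (start, size) pairs in blocks. */
	void
	store_label_vtoc(slice_t slices[NUM_SLICES], const uint64_t *p_start,
	    const uint64_t *p_size, int nparts)
	{
		memset(slices, 0, sizeof (slice_t) * NUM_SLICES);
		for (int i = 0; i < nparts && i < NUM_SLICES; i++) {
			slices[i].start = p_start[i];
			slices[i].nblocks = p_size[i];
		}
	}

	/* Unknown label: no usable slices at all. */
	void
	store_label_unk(slice_t slices[NUM_SLICES])
	{
		memset(slices, 0, sizeof (slice_t) * NUM_SLICES);
	}
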