Lines Matching refs:req

1957 struct pci_nvme_ioreq *req; in nvme_opc_format_nvm() local
1960 req = pci_nvme_get_ioreq(sc); in nvme_opc_format_nvm()
1961 if (req == NULL) { in nvme_opc_format_nvm()
1967 req->nvme_sq = &sc->submit_queues[0]; in nvme_opc_format_nvm()
1968 req->sqid = 0; in nvme_opc_format_nvm()
1969 req->opc = command->opc; in nvme_opc_format_nvm()
1970 req->cid = command->cid; in nvme_opc_format_nvm()
1971 req->nsid = command->nsid; in nvme_opc_format_nvm()
1973 req->io_req.br_offset = 0; in nvme_opc_format_nvm()
1974 req->io_req.br_resid = sc->nvstore.size; in nvme_opc_format_nvm()
1975 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_format_nvm()
1977 err = blockif_delete(sc->nvstore.ctx, &req->io_req); in nvme_opc_format_nvm()
1981 pci_nvme_release_ioreq(sc, req); in nvme_opc_format_nvm()
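
The nvme_opc_format_nvm() hits above trace a borrow/fill/submit pattern: take an ioreq, describe the entire backing store (offset 0, resid set to the full nvstore size), queue an asynchronous delete, and hand the request back if queuing fails. Below is a minimal sketch of that flow with simplified stand-in types; struct fmt_req and submit_delete() are illustrations, not bhyve APIs.

    /* Sketch: describe the whole namespace, submit an async delete,
     * and recycle the request on submit failure. All names here are
     * hypothetical stand-ins for the bhyve structures above. */
    #include <stdint.h>
    #include <stdio.h>

    struct fmt_req {
        uint16_t cid;          /* command id echoed in the completion */
        uint64_t offset;       /* byte offset into the backing store */
        uint64_t resid;        /* bytes the delete still covers */
        void (*callback)(struct fmt_req *);
    };

    static void fmt_done(struct fmt_req *r)
    {
        printf("cid %u formatted\n", (unsigned)r->cid);
    }

    /* Pretend asynchronous delete; nonzero means it could not be queued. */
    static int submit_delete(struct fmt_req *r)
    {
        r->callback(r);
        return 0;
    }

    static void format_namespace(struct fmt_req *r, uint16_t cid, uint64_t nvstore_size)
    {
        r->cid = cid;
        r->offset = 0;               /* format covers the whole namespace */
        r->resid = nvstore_size;
        r->callback = fmt_done;
        if (submit_delete(r) != 0) {
            /* nothing in flight: the real code releases the ioreq here */
            printf("cid %u failed to queue\n", (unsigned)cid);
        }
    }

    int main(void)
    {
        struct fmt_req r;
        format_namespace(&r, 7, 1ULL << 30);
        return 0;
    }
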
2220 struct pci_nvme_ioreq *req, uint64_t gpaddr, size_t size, uint64_t offset) in pci_nvme_append_iov_req() argument
2225 if (req == NULL) in pci_nvme_append_iov_req()
2228 if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) { in pci_nvme_append_iov_req()
2237 if (req->io_req.br_iovcnt == 0) in pci_nvme_append_iov_req()
2240 range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr; in pci_nvme_append_iov_req()
2243 iovidx = req->io_req.br_iovcnt - 1; in pci_nvme_append_iov_req()
2245 req->io_req.br_iov[iovidx].iov_base = in pci_nvme_append_iov_req()
2246 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, in pci_nvme_append_iov_req()
2247 req->prev_gpaddr, size); in pci_nvme_append_iov_req()
2248 if (req->io_req.br_iov[iovidx].iov_base == NULL) in pci_nvme_append_iov_req()
2251 req->prev_size += size; in pci_nvme_append_iov_req()
2252 req->io_req.br_resid += size; in pci_nvme_append_iov_req()
2254 req->io_req.br_iov[iovidx].iov_len = req->prev_size; in pci_nvme_append_iov_req()
2256 iovidx = req->io_req.br_iovcnt; in pci_nvme_append_iov_req()
2258 req->io_req.br_offset = offset; in pci_nvme_append_iov_req()
2259 req->io_req.br_resid = 0; in pci_nvme_append_iov_req()
2260 req->io_req.br_param = req; in pci_nvme_append_iov_req()
2263 req->io_req.br_iov[iovidx].iov_base = in pci_nvme_append_iov_req()
2264 paddr_guest2host(req->sc->nsc_pi->pi_vmctx, in pci_nvme_append_iov_req()
2266 if (req->io_req.br_iov[iovidx].iov_base == NULL) in pci_nvme_append_iov_req()
2269 req->io_req.br_iov[iovidx].iov_len = size; in pci_nvme_append_iov_req()
2271 req->prev_gpaddr = gpaddr; in pci_nvme_append_iov_req()
2272 req->prev_size = size; in pci_nvme_append_iov_req()
2273 req->io_req.br_resid += size; in pci_nvme_append_iov_req()
2275 req->io_req.br_iovcnt++; in pci_nvme_append_iov_req()
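
The pci_nvme_append_iov_req() hits show the iovec-coalescing rule: a new guest range extends the previous iovec entry only when it starts exactly at prev_gpaddr + prev_size; otherwise it occupies a fresh slot and prev_gpaddr/prev_size are reset to track the new run. A minimal sketch of that bookkeeping, using a placeholder struct range instead of struct iovec and omitting the guest-to-host address translation:

    /* Sketch: coalesce contiguous guest-physical ranges into one entry,
     * mirroring the contiguity test at line 2240 above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_IOVEC 8

    struct range { uint64_t gpaddr; size_t len; };

    struct builder {
        struct range iov[MAX_IOVEC];
        int iovcnt;
        uint64_t prev_gpaddr;      /* start of the current merged run */
        size_t prev_size;          /* length of the current merged run */
    };

    static int append_range(struct builder *b, uint64_t gpaddr, size_t size)
    {
        if (b->iovcnt == MAX_IOVEC)
            return -1;

        /* Contiguous when the new range starts right after the previous run. */
        int contiguous = b->iovcnt != 0 &&
            b->prev_gpaddr + b->prev_size == gpaddr;

        if (contiguous) {
            /* Grow the last entry instead of consuming a new slot. */
            b->prev_size += size;
            b->iov[b->iovcnt - 1].len = b->prev_size;
        } else {
            b->iov[b->iovcnt].gpaddr = gpaddr;
            b->iov[b->iovcnt].len = size;
            b->prev_gpaddr = gpaddr;
            b->prev_size = size;
            b->iovcnt++;
        }
        return 0;
    }

    int main(void)
    {
        struct builder b;
        memset(&b, 0, sizeof(b));
        append_range(&b, 0x1000, 0x1000);   /* first page */
        append_range(&b, 0x2000, 0x1000);   /* contiguous: merged with the first */
        append_range(&b, 0x8000, 0x1000);   /* gap: new entry */
        for (int i = 0; i < b.iovcnt; i++)
            printf("iov[%d]: gpa 0x%llx len 0x%zx\n", i,
                (unsigned long long)b.iov[i].gpaddr, b.iov[i].len);
        return 0;
    }
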
2304 pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req) in pci_nvme_release_ioreq() argument
2306 req->sc = NULL; in pci_nvme_release_ioreq()
2307 req->nvme_sq = NULL; in pci_nvme_release_ioreq()
2308 req->sqid = 0; in pci_nvme_release_ioreq()
2312 STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link); in pci_nvme_release_ioreq()
2328 struct pci_nvme_ioreq *req = NULL; in pci_nvme_get_ioreq() local
2333 req = STAILQ_FIRST(&sc->ioreqs_free); in pci_nvme_get_ioreq()
2334 assert(req != NULL); in pci_nvme_get_ioreq()
2337 req->sc = sc; in pci_nvme_get_ioreq()
2343 req->io_req.br_iovcnt = 0; in pci_nvme_get_ioreq()
2344 req->io_req.br_offset = 0; in pci_nvme_get_ioreq()
2345 req->io_req.br_resid = 0; in pci_nvme_get_ioreq()
2346 req->io_req.br_param = req; in pci_nvme_get_ioreq()
2347 req->prev_gpaddr = 0; in pci_nvme_get_ioreq()
2348 req->prev_size = 0; in pci_nvme_get_ioreq()
2350 return req; in pci_nvme_get_ioreq()
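
The pci_nvme_get_ioreq()/pci_nvme_release_ioreq() hits show the request pool: ioreqs sit on a singly-linked tail queue, get popped and re-initialized on demand (br_param pointing back at the request so completions can recover it), and are scrubbed and pushed back when released. A compact sketch of that free-list pattern, assuming a BSD-style <sys/queue.h>; struct myreq is a stand-in, and the locking and in-flight accounting the real code performs around the list are omitted:

    #include <assert.h>
    #include <stddef.h>
    #include <sys/queue.h>

    #define NREQ 4

    struct myreq {
        int sqid;
        void *param;                       /* completion callbacks get this back */
        STAILQ_ENTRY(myreq) link;
    };

    static STAILQ_HEAD(, myreq) free_reqs = STAILQ_HEAD_INITIALIZER(free_reqs);
    static struct myreq reqs[NREQ];

    static void pool_init(void)
    {
        for (int i = 0; i < NREQ; i++)
            STAILQ_INSERT_TAIL(&free_reqs, &reqs[i], link);
    }

    static struct myreq *req_get(void)
    {
        struct myreq *r = STAILQ_FIRST(&free_reqs);

        assert(r != NULL);                 /* callers guard against exhaustion first */
        STAILQ_REMOVE_HEAD(&free_reqs, link);
        r->param = r;                      /* let the callback find the request */
        return r;
    }

    static void req_release(struct myreq *r)
    {
        r->sqid = 0;                       /* scrub state before recycling */
        STAILQ_INSERT_TAIL(&free_reqs, r, link);
    }

    int main(void)
    {
        pool_init();
        struct myreq *r = req_get();
        req_release(r);
        return 0;
    }
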
2356 struct pci_nvme_ioreq *req = br->br_param; in pci_nvme_io_done() local
2357 struct nvme_submission_queue *sq = req->nvme_sq; in pci_nvme_io_done()
2367 pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status); in pci_nvme_io_done()
2368 pci_nvme_stats_write_read_update(req->sc, req->opc, in pci_nvme_io_done()
2369 req->bytes, status); in pci_nvme_io_done()
2370 pci_nvme_release_ioreq(req->sc, req); in pci_nvme_io_done()
2385 struct pci_nvme_ioreq *req, in nvme_opc_flush() argument
2395 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_flush()
2397 err = blockif_flush(nvstore->ctx, &req->io_req); in nvme_opc_flush()
2443 struct pci_nvme_ioreq *req, in nvme_write_read_blockif() argument
2453 if (pci_nvme_append_iov_req(sc, req, prp1, size, offset)) { in nvme_write_read_blockif()
2465 if (pci_nvme_append_iov_req(sc, req, prp2, size, offset)) { in nvme_write_read_blockif()
2491 if (pci_nvme_append_iov_req(sc, req, *prp_list, size, in nvme_write_read_blockif()
2503 req->io_req.br_callback = pci_nvme_io_done; in nvme_write_read_blockif()
2505 err = blockif_write(nvstore->ctx, &req->io_req); in nvme_write_read_blockif()
2507 err = blockif_read(nvstore->ctx, &req->io_req); in nvme_write_read_blockif()
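
The nvme_write_read_blockif() hits append one guest range per PRP entry: PRP1 (which may start mid-page), then PRP2 either directly or as a pointer to a PRP list whose entries are walked one by one. The sketch below only models the chunking arithmetic that would drive those append calls, based on standard NVMe PRP semantics (first entry runs to the end of its page, later entries are page-sized, the last is whatever remains); it touches no guest memory and is not the bhyve code.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static void show_prp_chunks(uint64_t prp1, uint64_t total)
    {
        uint64_t size, off = 0;

        /* First chunk: from prp1 to the end of its page, capped at the total. */
        size = PAGE_SIZE - (prp1 % PAGE_SIZE);
        if (size > total)
            size = total;
        printf("entry 0: %llu bytes at offset %llu\n",
            (unsigned long long)size, (unsigned long long)off);
        off += size;

        /* Remaining entries: one page each, the last possibly short. */
        for (int i = 1; off < total; i++) {
            size = total - off;
            if (size > PAGE_SIZE)
                size = PAGE_SIZE;
            printf("entry %d: %llu bytes at offset %llu\n", i,
                (unsigned long long)size, (unsigned long long)off);
            off += size;
        }
    }

    int main(void)
    {
        show_prp_chunks(0x10f00, 3 * PAGE_SIZE);   /* unaligned start, ~3 pages */
        return 0;
    }
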
2519 struct pci_nvme_ioreq *req, in nvme_opc_write_read() argument
2545 req->bytes = bytes; in nvme_opc_write_read()
2546 req->io_req.br_offset = lba; in nvme_opc_write_read()
2556 *status = nvme_write_read_blockif(sc, nvstore, req, in nvme_opc_write_read()
2572 struct pci_nvme_ioreq *req = br->br_param; in pci_nvme_dealloc_sm() local
2573 struct pci_nvme_softc *sc = req->sc; in pci_nvme_dealloc_sm()
2580 } else if ((req->prev_gpaddr + 1) == (req->prev_size)) { in pci_nvme_dealloc_sm()
2583 struct iovec *iov = req->io_req.br_iov; in pci_nvme_dealloc_sm()
2585 req->prev_gpaddr++; in pci_nvme_dealloc_sm()
2586 iov += req->prev_gpaddr; in pci_nvme_dealloc_sm()
2589 req->io_req.br_offset = (off_t)iov->iov_base; in pci_nvme_dealloc_sm()
2590 req->io_req.br_resid = iov->iov_len; in pci_nvme_dealloc_sm()
2591 if (blockif_delete(sc->nvstore.ctx, &req->io_req)) { in pci_nvme_dealloc_sm()
2599 pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid, in pci_nvme_dealloc_sm()
2601 pci_nvme_release_ioreq(sc, req); in pci_nvme_dealloc_sm()
2609 struct pci_nvme_ioreq *req, in nvme_opc_dataset_mgmt() argument
2670 if (req == NULL) { in nvme_opc_dataset_mgmt()
2685 req->io_req.br_iovcnt = 0; in nvme_opc_dataset_mgmt()
2686 req->io_req.br_offset = offset; in nvme_opc_dataset_mgmt()
2687 req->io_req.br_resid = bytes; in nvme_opc_dataset_mgmt()
2690 req->io_req.br_callback = pci_nvme_io_done; in nvme_opc_dataset_mgmt()
2692 struct iovec *iov = req->io_req.br_iov; in nvme_opc_dataset_mgmt()
2709 req->io_req.br_callback = pci_nvme_dealloc_sm; in nvme_opc_dataset_mgmt()
2715 req->prev_gpaddr = 0; in nvme_opc_dataset_mgmt()
2716 req->prev_size = dr; in nvme_opc_dataset_mgmt()
2719 err = blockif_delete(nvstore->ctx, &req->io_req); in nvme_opc_dataset_mgmt()
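
The nvme_opc_dataset_mgmt()/pci_nvme_dealloc_sm() hits sketch a small state machine for deallocating multiple ranges: the per-range offsets and lengths are parked in the request, prev_gpaddr is reused as the index of the range in flight and prev_size as the range count, and each completion of blockif_delete() advances the index and issues the next delete until the last range finishes. The following sketch restates that chain with placeholder types; the asynchronous layer is simulated by invoking the callback directly, so none of these names are bhyve APIs.

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t off; uint64_t len; };

    struct dsm_req {
        struct range *ranges;
        unsigned idx;              /* mirrors the prev_gpaddr "current range" trick */
        unsigned count;            /* mirrors prev_size holding the range count */
        void (*callback)(struct dsm_req *, int error);
    };

    static void fake_async_delete(struct dsm_req *r, uint64_t off, uint64_t len)
    {
        printf("delete %llu bytes at %llu\n",
            (unsigned long long)len, (unsigned long long)off);
        r->callback(r, 0);         /* pretend the delete completed successfully */
    }

    static void dealloc_sm(struct dsm_req *r, int error)
    {
        if (error) {
            printf("completed with error\n");
            return;
        }
        if (r->idx + 1 == r->count) {
            printf("all ranges deallocated\n");
            return;
        }
        r->idx++;                  /* step to the next range and re-arm */
        fake_async_delete(r, r->ranges[r->idx].off, r->ranges[r->idx].len);
    }

    int main(void)
    {
        struct range ranges[] = { {0, 4096}, {8192, 4096}, {65536, 8192} };
        struct dsm_req r = { ranges, 0, 3, dealloc_sm };

        fake_async_delete(&r, ranges[0].off, ranges[0].len);   /* kick off range 0 */
        return 0;
    }
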
2748 struct pci_nvme_ioreq *req; in pci_nvme_handle_io_cmd() local
2753 req = NULL; in pci_nvme_handle_io_cmd()
2768 req = pci_nvme_get_ioreq(sc); in pci_nvme_handle_io_cmd()
2769 if (req == NULL) { in pci_nvme_handle_io_cmd()
2775 req->nvme_sq = sq; in pci_nvme_handle_io_cmd()
2776 req->sqid = idx; in pci_nvme_handle_io_cmd()
2777 req->opc = cmd->opc; in pci_nvme_handle_io_cmd()
2778 req->cid = cmd->cid; in pci_nvme_handle_io_cmd()
2779 req->nsid = cmd->nsid; in pci_nvme_handle_io_cmd()
2784 req, &status); in pci_nvme_handle_io_cmd()
2789 req, &status); in pci_nvme_handle_io_cmd()
2799 req, &status); in pci_nvme_handle_io_cmd()
2809 if (req != NULL) in pci_nvme_handle_io_cmd()
2810 pci_nvme_release_ioreq(sc, req); in pci_nvme_handle_io_cmd()
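
Finally, the pci_nvme_handle_io_cmd() hits show the dispatch shape: an ioreq is borrowed, tagged with the command's opcode, cid, and nsid, routed by opcode, and released again when the handler finished synchronously with no I/O left pending (the release at lines 2809-2810). A minimal sketch of that shape; the opcode constants and helper names below are illustrative stand-ins, not the bhyve interfaces.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { OPC_FLUSH = 0x00, OPC_WRITE = 0x01, OPC_READ = 0x02 };

    struct cmd   { uint8_t opc; uint16_t cid; uint32_t nsid; };
    struct ioreq { uint8_t opc; uint16_t cid; uint32_t nsid; };

    static struct ioreq slot;                    /* stand-in for the free list */
    static struct ioreq *get_req(void) { return &slot; }
    static void release_req(struct ioreq *r) { (void)r; }

    /* Handlers return true when they queued asynchronous I/O whose
     * completion path will release the request later. */
    static bool handle_flush(struct ioreq *r) { (void)r; return true; }
    static bool handle_rw(struct ioreq *r)    { (void)r; return true; }

    static void handle_io_cmd(const struct cmd *c)
    {
        struct ioreq *req = get_req();
        bool pending = false;

        if (req == NULL)
            return;                    /* would post a "no resources" completion */

        req->opc = c->opc;
        req->cid = c->cid;
        req->nsid = c->nsid;

        switch (c->opc) {
        case OPC_FLUSH:
            pending = handle_flush(req);
            break;
        case OPC_WRITE:
        case OPC_READ:
            pending = handle_rw(req);
            break;
        default:
            printf("unhandled opcode 0x%x\n", c->opc);
            break;
        }

        if (!pending && req != NULL)
            release_req(req);          /* nothing in flight, recycle now */
    }

    int main(void)
    {
        struct cmd c = { OPC_READ, 42, 1 };
        handle_io_cmd(&c);
        return 0;
    }
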