qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

hw/block/nvme: rename trace events to pci_nvme

Change the prefix of all nvme device related trace events to 'pci_nvme'
to not clash with trace events from the nvme block driver.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200609190333.59390-3-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

Authored by Klaus Jensen and committed by Kevin Wolf.
6f4ee2e9 f7e8c23f

+188 -190
+98 -100
hw/block/nvme.c
··· 125 125 { 126 126 if (cq->irq_enabled) { 127 127 if (msix_enabled(&(n->parent_obj))) { 128 - trace_nvme_irq_msix(cq->vector); 128 + trace_pci_nvme_irq_msix(cq->vector); 129 129 msix_notify(&(n->parent_obj), cq->vector); 130 130 } else { 131 - trace_nvme_irq_pin(); 131 + trace_pci_nvme_irq_pin(); 132 132 assert(cq->cqid < 64); 133 133 n->irq_status |= 1 << cq->cqid; 134 134 nvme_irq_check(n); 135 135 } 136 136 } else { 137 - trace_nvme_irq_masked(); 137 + trace_pci_nvme_irq_masked(); 138 138 } 139 139 } 140 140 ··· 159 159 int num_prps = (len >> n->page_bits) + 1; 160 160 161 161 if (unlikely(!prp1)) { 162 - trace_nvme_err_invalid_prp(); 162 + trace_pci_nvme_err_invalid_prp(); 163 163 return NVME_INVALID_FIELD | NVME_DNR; 164 164 } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr && 165 165 prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) { ··· 173 173 len -= trans_len; 174 174 if (len) { 175 175 if (unlikely(!prp2)) { 176 - trace_nvme_err_invalid_prp2_missing(); 176 + trace_pci_nvme_err_invalid_prp2_missing(); 177 177 goto unmap; 178 178 } 179 179 if (len > n->page_size) { ··· 189 189 190 190 if (i == n->max_prp_ents - 1 && len > n->page_size) { 191 191 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) { 192 - trace_nvme_err_invalid_prplist_ent(prp_ent); 192 + trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 193 193 goto unmap; 194 194 } 195 195 ··· 202 202 } 203 203 204 204 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) { 205 - trace_nvme_err_invalid_prplist_ent(prp_ent); 205 + trace_pci_nvme_err_invalid_prplist_ent(prp_ent); 206 206 goto unmap; 207 207 } 208 208 ··· 217 217 } 218 218 } else { 219 219 if (unlikely(prp2 & (n->page_size - 1))) { 220 - trace_nvme_err_invalid_prp2_align(prp2); 220 + trace_pci_nvme_err_invalid_prp2_align(prp2); 221 221 goto unmap; 222 222 } 223 223 if (qsg->nsg) { ··· 265 265 QEMUIOVector iov; 266 266 uint16_t status = NVME_SUCCESS; 267 267 268 - trace_nvme_dma_read(prp1, prp2); 268 + 
trace_pci_nvme_dma_read(prp1, prp2); 269 269 270 270 if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) { 271 271 return NVME_INVALID_FIELD | NVME_DNR; 272 272 } 273 273 if (qsg.nsg > 0) { 274 274 if (unlikely(dma_buf_read(ptr, len, &qsg))) { 275 - trace_nvme_err_invalid_dma(); 275 + trace_pci_nvme_err_invalid_dma(); 276 276 status = NVME_INVALID_FIELD | NVME_DNR; 277 277 } 278 278 qemu_sglist_destroy(&qsg); 279 279 } else { 280 280 if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) { 281 - trace_nvme_err_invalid_dma(); 281 + trace_pci_nvme_err_invalid_dma(); 282 282 status = NVME_INVALID_FIELD | NVME_DNR; 283 283 } 284 284 qemu_iovec_destroy(&iov); ··· 367 367 uint32_t count = nlb << data_shift; 368 368 369 369 if (unlikely(slba + nlb > ns->id_ns.nsze)) { 370 - trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze); 370 + trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze); 371 371 return NVME_LBA_RANGE | NVME_DNR; 372 372 } 373 373 ··· 395 395 int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0; 396 396 enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ; 397 397 398 - trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba); 398 + trace_pci_nvme_rw(is_write ? 
"write" : "read", nlb, data_size, slba); 399 399 400 400 if (unlikely((slba + nlb) > ns->id_ns.nsze)) { 401 401 block_acct_invalid(blk_get_stats(n->conf.blk), acct); 402 - trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze); 402 + trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze); 403 403 return NVME_LBA_RANGE | NVME_DNR; 404 404 } 405 405 ··· 434 434 uint32_t nsid = le32_to_cpu(cmd->nsid); 435 435 436 436 if (unlikely(nsid == 0 || nsid > n->num_namespaces)) { 437 - trace_nvme_err_invalid_ns(nsid, n->num_namespaces); 437 + trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces); 438 438 return NVME_INVALID_NSID | NVME_DNR; 439 439 } 440 440 ··· 448 448 case NVME_CMD_READ: 449 449 return nvme_rw(n, ns, cmd, req); 450 450 default: 451 - trace_nvme_err_invalid_opc(cmd->opcode); 451 + trace_pci_nvme_err_invalid_opc(cmd->opcode); 452 452 return NVME_INVALID_OPCODE | NVME_DNR; 453 453 } 454 454 } ··· 473 473 uint16_t qid = le16_to_cpu(c->qid); 474 474 475 475 if (unlikely(!qid || nvme_check_sqid(n, qid))) { 476 - trace_nvme_err_invalid_del_sq(qid); 476 + trace_pci_nvme_err_invalid_del_sq(qid); 477 477 return NVME_INVALID_QID | NVME_DNR; 478 478 } 479 479 480 - trace_nvme_del_sq(qid); 480 + trace_pci_nvme_del_sq(qid); 481 481 482 482 sq = n->sq[qid]; 483 483 while (!QTAILQ_EMPTY(&sq->out_req_list)) { ··· 541 541 uint16_t qflags = le16_to_cpu(c->sq_flags); 542 542 uint64_t prp1 = le64_to_cpu(c->prp1); 543 543 544 - trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); 544 + trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); 545 545 546 546 if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { 547 - trace_nvme_err_invalid_create_sq_cqid(cqid); 547 + trace_pci_nvme_err_invalid_create_sq_cqid(cqid); 548 548 return NVME_INVALID_CQID | NVME_DNR; 549 549 } 550 550 if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) { 551 - trace_nvme_err_invalid_create_sq_sqid(sqid); 551 + trace_pci_nvme_err_invalid_create_sq_sqid(sqid); 552 552 return NVME_INVALID_QID | 
NVME_DNR; 553 553 } 554 554 if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) { 555 - trace_nvme_err_invalid_create_sq_size(qsize); 555 + trace_pci_nvme_err_invalid_create_sq_size(qsize); 556 556 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 557 557 } 558 558 if (unlikely(!prp1 || prp1 & (n->page_size - 1))) { 559 - trace_nvme_err_invalid_create_sq_addr(prp1); 559 + trace_pci_nvme_err_invalid_create_sq_addr(prp1); 560 560 return NVME_INVALID_FIELD | NVME_DNR; 561 561 } 562 562 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) { 563 - trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); 563 + trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); 564 564 return NVME_INVALID_FIELD | NVME_DNR; 565 565 } 566 566 sq = g_malloc0(sizeof(*sq)); ··· 586 586 uint16_t qid = le16_to_cpu(c->qid); 587 587 588 588 if (unlikely(!qid || nvme_check_cqid(n, qid))) { 589 - trace_nvme_err_invalid_del_cq_cqid(qid); 589 + trace_pci_nvme_err_invalid_del_cq_cqid(qid); 590 590 return NVME_INVALID_CQID | NVME_DNR; 591 591 } 592 592 593 593 cq = n->cq[qid]; 594 594 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { 595 - trace_nvme_err_invalid_del_cq_notempty(qid); 595 + trace_pci_nvme_err_invalid_del_cq_notempty(qid); 596 596 return NVME_INVALID_QUEUE_DEL; 597 597 } 598 598 nvme_irq_deassert(n, cq); 599 - trace_nvme_del_cq(qid); 599 + trace_pci_nvme_del_cq(qid); 600 600 nvme_free_cq(cq, n); 601 601 return NVME_SUCCESS; 602 602 } ··· 629 629 uint16_t qflags = le16_to_cpu(c->cq_flags); 630 630 uint64_t prp1 = le64_to_cpu(c->prp1); 631 631 632 - trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags, 633 - NVME_CQ_FLAGS_IEN(qflags) != 0); 632 + trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, 633 + NVME_CQ_FLAGS_IEN(qflags) != 0); 634 634 635 635 if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) { 636 - trace_nvme_err_invalid_create_cq_cqid(cqid); 636 + trace_pci_nvme_err_invalid_create_cq_cqid(cqid); 637 637 return NVME_INVALID_CQID | NVME_DNR; 638 638 } 639 
639 if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) { 640 - trace_nvme_err_invalid_create_cq_size(qsize); 640 + trace_pci_nvme_err_invalid_create_cq_size(qsize); 641 641 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; 642 642 } 643 643 if (unlikely(!prp1)) { 644 - trace_nvme_err_invalid_create_cq_addr(prp1); 644 + trace_pci_nvme_err_invalid_create_cq_addr(prp1); 645 645 return NVME_INVALID_FIELD | NVME_DNR; 646 646 } 647 647 if (unlikely(vector > n->num_queues)) { 648 - trace_nvme_err_invalid_create_cq_vector(vector); 648 + trace_pci_nvme_err_invalid_create_cq_vector(vector); 649 649 return NVME_INVALID_IRQ_VECTOR | NVME_DNR; 650 650 } 651 651 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) { 652 - trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); 652 + trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); 653 653 return NVME_INVALID_FIELD | NVME_DNR; 654 654 } 655 655 ··· 664 664 uint64_t prp1 = le64_to_cpu(c->prp1); 665 665 uint64_t prp2 = le64_to_cpu(c->prp2); 666 666 667 - trace_nvme_identify_ctrl(); 667 + trace_pci_nvme_identify_ctrl(); 668 668 669 669 return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), 670 670 prp1, prp2); ··· 677 677 uint64_t prp1 = le64_to_cpu(c->prp1); 678 678 uint64_t prp2 = le64_to_cpu(c->prp2); 679 679 680 - trace_nvme_identify_ns(nsid); 680 + trace_pci_nvme_identify_ns(nsid); 681 681 682 682 if (unlikely(nsid == 0 || nsid > n->num_namespaces)) { 683 - trace_nvme_err_invalid_ns(nsid, n->num_namespaces); 683 + trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces); 684 684 return NVME_INVALID_NSID | NVME_DNR; 685 685 } 686 686 ··· 700 700 uint16_t ret; 701 701 int i, j = 0; 702 702 703 - trace_nvme_identify_nslist(min_nsid); 703 + trace_pci_nvme_identify_nslist(min_nsid); 704 704 705 705 list = g_malloc0(data_len); 706 706 for (i = 0; i < n->num_namespaces; i++) { ··· 729 729 case 0x02: 730 730 return nvme_identify_nslist(n, c); 731 731 default: 732 - 
trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); 732 + trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); 733 733 return NVME_INVALID_FIELD | NVME_DNR; 734 734 } 735 735 } 736 736 737 737 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) 738 738 { 739 - trace_nvme_setfeat_timestamp(ts); 739 + trace_pci_nvme_setfeat_timestamp(ts); 740 740 741 741 n->host_timestamp = le64_to_cpu(ts); 742 742 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); ··· 769 769 /* If the host timestamp is non-zero, set the timestamp origin */ 770 770 ts.origin = n->host_timestamp ? 0x01 : 0x00; 771 771 772 - trace_nvme_getfeat_timestamp(ts.all); 772 + trace_pci_nvme_getfeat_timestamp(ts.all); 773 773 774 774 return cpu_to_le64(ts.all); 775 775 } ··· 793 793 switch (dw10) { 794 794 case NVME_VOLATILE_WRITE_CACHE: 795 795 result = blk_enable_write_cache(n->conf.blk); 796 - trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled"); 796 + trace_pci_nvme_getfeat_vwcache(result ? 
"enabled" : "disabled"); 797 797 break; 798 798 case NVME_NUMBER_OF_QUEUES: 799 799 result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); 800 - trace_nvme_getfeat_numq(result); 800 + trace_pci_nvme_getfeat_numq(result); 801 801 break; 802 802 case NVME_TIMESTAMP: 803 803 return nvme_get_feature_timestamp(n, cmd); 804 804 break; 805 805 default: 806 - trace_nvme_err_invalid_getfeat(dw10); 806 + trace_pci_nvme_err_invalid_getfeat(dw10); 807 807 return NVME_INVALID_FIELD | NVME_DNR; 808 808 } 809 809 ··· 839 839 blk_set_enable_write_cache(n->conf.blk, dw11 & 1); 840 840 break; 841 841 case NVME_NUMBER_OF_QUEUES: 842 - trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1, 843 - ((dw11 >> 16) & 0xFFFF) + 1, 844 - n->num_queues - 1, n->num_queues - 1); 842 + trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1, 843 + ((dw11 >> 16) & 0xFFFF) + 1, 844 + n->num_queues - 1, n->num_queues - 1); 845 845 req->cqe.result = 846 846 cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); 847 847 break; ··· 851 851 break; 852 852 853 853 default: 854 - trace_nvme_err_invalid_setfeat(dw10); 854 + trace_pci_nvme_err_invalid_setfeat(dw10); 855 855 return NVME_INVALID_FIELD | NVME_DNR; 856 856 } 857 857 return NVME_SUCCESS; ··· 875 875 case NVME_ADM_CMD_GET_FEATURES: 876 876 return nvme_get_feature(n, cmd, req); 877 877 default: 878 - trace_nvme_err_invalid_admin_opc(cmd->opcode); 878 + trace_pci_nvme_err_invalid_admin_opc(cmd->opcode); 879 879 return NVME_INVALID_OPCODE | NVME_DNR; 880 880 } 881 881 } ··· 938 938 uint32_t page_size = 1 << page_bits; 939 939 940 940 if (unlikely(n->cq[0])) { 941 - trace_nvme_err_startfail_cq(); 941 + trace_pci_nvme_err_startfail_cq(); 942 942 return -1; 943 943 } 944 944 if (unlikely(n->sq[0])) { 945 - trace_nvme_err_startfail_sq(); 945 + trace_pci_nvme_err_startfail_sq(); 946 946 return -1; 947 947 } 948 948 if (unlikely(!n->bar.asq)) { 949 - trace_nvme_err_startfail_nbarasq(); 949 + trace_pci_nvme_err_startfail_nbarasq(); 950 950 return 
-1; 951 951 } 952 952 if (unlikely(!n->bar.acq)) { 953 - trace_nvme_err_startfail_nbaracq(); 953 + trace_pci_nvme_err_startfail_nbaracq(); 954 954 return -1; 955 955 } 956 956 if (unlikely(n->bar.asq & (page_size - 1))) { 957 - trace_nvme_err_startfail_asq_misaligned(n->bar.asq); 957 + trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq); 958 958 return -1; 959 959 } 960 960 if (unlikely(n->bar.acq & (page_size - 1))) { 961 - trace_nvme_err_startfail_acq_misaligned(n->bar.acq); 961 + trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq); 962 962 return -1; 963 963 } 964 964 if (unlikely(NVME_CC_MPS(n->bar.cc) < 965 965 NVME_CAP_MPSMIN(n->bar.cap))) { 966 - trace_nvme_err_startfail_page_too_small( 966 + trace_pci_nvme_err_startfail_page_too_small( 967 967 NVME_CC_MPS(n->bar.cc), 968 968 NVME_CAP_MPSMIN(n->bar.cap)); 969 969 return -1; 970 970 } 971 971 if (unlikely(NVME_CC_MPS(n->bar.cc) > 972 972 NVME_CAP_MPSMAX(n->bar.cap))) { 973 - trace_nvme_err_startfail_page_too_large( 973 + trace_pci_nvme_err_startfail_page_too_large( 974 974 NVME_CC_MPS(n->bar.cc), 975 975 NVME_CAP_MPSMAX(n->bar.cap)); 976 976 return -1; 977 977 } 978 978 if (unlikely(NVME_CC_IOCQES(n->bar.cc) < 979 979 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) { 980 - trace_nvme_err_startfail_cqent_too_small( 980 + trace_pci_nvme_err_startfail_cqent_too_small( 981 981 NVME_CC_IOCQES(n->bar.cc), 982 982 NVME_CTRL_CQES_MIN(n->bar.cap)); 983 983 return -1; 984 984 } 985 985 if (unlikely(NVME_CC_IOCQES(n->bar.cc) > 986 986 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) { 987 - trace_nvme_err_startfail_cqent_too_large( 987 + trace_pci_nvme_err_startfail_cqent_too_large( 988 988 NVME_CC_IOCQES(n->bar.cc), 989 989 NVME_CTRL_CQES_MAX(n->bar.cap)); 990 990 return -1; 991 991 } 992 992 if (unlikely(NVME_CC_IOSQES(n->bar.cc) < 993 993 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) { 994 - trace_nvme_err_startfail_sqent_too_small( 994 + trace_pci_nvme_err_startfail_sqent_too_small( 995 995 NVME_CC_IOSQES(n->bar.cc), 996 996 
NVME_CTRL_SQES_MIN(n->bar.cap)); 997 997 return -1; 998 998 } 999 999 if (unlikely(NVME_CC_IOSQES(n->bar.cc) > 1000 1000 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) { 1001 - trace_nvme_err_startfail_sqent_too_large( 1001 + trace_pci_nvme_err_startfail_sqent_too_large( 1002 1002 NVME_CC_IOSQES(n->bar.cc), 1003 1003 NVME_CTRL_SQES_MAX(n->bar.cap)); 1004 1004 return -1; 1005 1005 } 1006 1006 if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) { 1007 - trace_nvme_err_startfail_asqent_sz_zero(); 1007 + trace_pci_nvme_err_startfail_asqent_sz_zero(); 1008 1008 return -1; 1009 1009 } 1010 1010 if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) { 1011 - trace_nvme_err_startfail_acqent_sz_zero(); 1011 + trace_pci_nvme_err_startfail_acqent_sz_zero(); 1012 1012 return -1; 1013 1013 } 1014 1014 ··· 1031 1031 unsigned size) 1032 1032 { 1033 1033 if (unlikely(offset & (sizeof(uint32_t) - 1))) { 1034 - NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32, 1034 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, 1035 1035 "MMIO write not 32-bit aligned," 1036 1036 " offset=0x%"PRIx64"", offset); 1037 1037 /* should be ignored, fall through for now */ 1038 1038 } 1039 1039 1040 1040 if (unlikely(size < sizeof(uint32_t))) { 1041 - NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall, 1041 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, 1042 1042 "MMIO write smaller than 32-bits," 1043 1043 " offset=0x%"PRIx64", size=%u", 1044 1044 offset, size); ··· 1048 1048 switch (offset) { 1049 1049 case 0xc: /* INTMS */ 1050 1050 if (unlikely(msix_enabled(&(n->parent_obj)))) { 1051 - NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix, 1051 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 1052 1052 "undefined access to interrupt mask set" 1053 1053 " when MSI-X is enabled"); 1054 1054 /* should be ignored, fall through for now */ 1055 1055 } 1056 1056 n->bar.intms |= data & 0xffffffff; 1057 1057 n->bar.intmc = n->bar.intms; 1058 - trace_nvme_mmio_intm_set(data & 0xffffffff, 1059 - n->bar.intmc); 1058 + trace_pci_nvme_mmio_intm_set(data & 
0xffffffff, n->bar.intmc); 1060 1059 nvme_irq_check(n); 1061 1060 break; 1062 1061 case 0x10: /* INTMC */ 1063 1062 if (unlikely(msix_enabled(&(n->parent_obj)))) { 1064 - NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix, 1063 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, 1065 1064 "undefined access to interrupt mask clr" 1066 1065 " when MSI-X is enabled"); 1067 1066 /* should be ignored, fall through for now */ 1068 1067 } 1069 1068 n->bar.intms &= ~(data & 0xffffffff); 1070 1069 n->bar.intmc = n->bar.intms; 1071 - trace_nvme_mmio_intm_clr(data & 0xffffffff, 1072 - n->bar.intmc); 1070 + trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc); 1073 1071 nvme_irq_check(n); 1074 1072 break; 1075 1073 case 0x14: /* CC */ 1076 - trace_nvme_mmio_cfg(data & 0xffffffff); 1074 + trace_pci_nvme_mmio_cfg(data & 0xffffffff); 1077 1075 /* Windows first sends data, then sends enable bit */ 1078 1076 if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && 1079 1077 !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) ··· 1084 1082 if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { 1085 1083 n->bar.cc = data; 1086 1084 if (unlikely(nvme_start_ctrl(n))) { 1087 - trace_nvme_err_startfail(); 1085 + trace_pci_nvme_err_startfail(); 1088 1086 n->bar.csts = NVME_CSTS_FAILED; 1089 1087 } else { 1090 - trace_nvme_mmio_start_success(); 1088 + trace_pci_nvme_mmio_start_success(); 1091 1089 n->bar.csts = NVME_CSTS_READY; 1092 1090 } 1093 1091 } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { 1094 - trace_nvme_mmio_stopped(); 1092 + trace_pci_nvme_mmio_stopped(); 1095 1093 nvme_clear_ctrl(n); 1096 1094 n->bar.csts &= ~NVME_CSTS_READY; 1097 1095 } 1098 1096 if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { 1099 - trace_nvme_mmio_shutdown_set(); 1097 + trace_pci_nvme_mmio_shutdown_set(); 1100 1098 nvme_clear_ctrl(n); 1101 1099 n->bar.cc = data; 1102 1100 n->bar.csts |= NVME_CSTS_SHST_COMPLETE; 1103 1101 } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { 1104 - 
trace_nvme_mmio_shutdown_cleared(); 1102 + trace_pci_nvme_mmio_shutdown_cleared(); 1105 1103 n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE; 1106 1104 n->bar.cc = data; 1107 1105 } 1108 1106 break; 1109 1107 case 0x1C: /* CSTS */ 1110 1108 if (data & (1 << 4)) { 1111 - NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported, 1109 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, 1112 1110 "attempted to W1C CSTS.NSSRO" 1113 1111 " but CAP.NSSRS is zero (not supported)"); 1114 1112 } else if (data != 0) { 1115 - NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts, 1113 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, 1116 1114 "attempted to set a read only bit" 1117 1115 " of controller status"); 1118 1116 } 1119 1117 break; 1120 1118 case 0x20: /* NSSR */ 1121 1119 if (data == 0x4E564D65) { 1122 - trace_nvme_ub_mmiowr_ssreset_unsupported(); 1120 + trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); 1123 1121 } else { 1124 1122 /* The spec says that writes of other values have no effect */ 1125 1123 return; ··· 1127 1125 break; 1128 1126 case 0x24: /* AQA */ 1129 1127 n->bar.aqa = data & 0xffffffff; 1130 - trace_nvme_mmio_aqattr(data & 0xffffffff); 1128 + trace_pci_nvme_mmio_aqattr(data & 0xffffffff); 1131 1129 break; 1132 1130 case 0x28: /* ASQ */ 1133 1131 n->bar.asq = data; 1134 - trace_nvme_mmio_asqaddr(data); 1132 + trace_pci_nvme_mmio_asqaddr(data); 1135 1133 break; 1136 1134 case 0x2c: /* ASQ hi */ 1137 1135 n->bar.asq |= data << 32; 1138 - trace_nvme_mmio_asqaddr_hi(data, n->bar.asq); 1136 + trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq); 1139 1137 break; 1140 1138 case 0x30: /* ACQ */ 1141 - trace_nvme_mmio_acqaddr(data); 1139 + trace_pci_nvme_mmio_acqaddr(data); 1142 1140 n->bar.acq = data; 1143 1141 break; 1144 1142 case 0x34: /* ACQ hi */ 1145 1143 n->bar.acq |= data << 32; 1146 - trace_nvme_mmio_acqaddr_hi(data, n->bar.acq); 1144 + trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq); 1147 1145 break; 1148 1146 case 0x38: /* CMBLOC */ 1149 - 
NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved, 1147 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, 1150 1148 "invalid write to reserved CMBLOC" 1151 1149 " when CMBSZ is zero, ignored"); 1152 1150 return; 1153 1151 case 0x3C: /* CMBSZ */ 1154 - NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly, 1152 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, 1155 1153 "invalid write to read only CMBSZ, ignored"); 1156 1154 return; 1157 1155 case 0xE00: /* PMRCAP */ 1158 - NVME_GUEST_ERR(nvme_ub_mmiowr_pmrcap_readonly, 1156 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, 1159 1157 "invalid write to PMRCAP register, ignored"); 1160 1158 return; 1161 1159 case 0xE04: /* TODO PMRCTL */ 1162 1160 break; 1163 1161 case 0xE08: /* PMRSTS */ 1164 - NVME_GUEST_ERR(nvme_ub_mmiowr_pmrsts_readonly, 1162 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, 1165 1163 "invalid write to PMRSTS register, ignored"); 1166 1164 return; 1167 1165 case 0xE0C: /* PMREBS */ 1168 - NVME_GUEST_ERR(nvme_ub_mmiowr_pmrebs_readonly, 1166 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, 1169 1167 "invalid write to PMREBS register, ignored"); 1170 1168 return; 1171 1169 case 0xE10: /* PMRSWTP */ 1172 - NVME_GUEST_ERR(nvme_ub_mmiowr_pmrswtp_readonly, 1170 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, 1173 1171 "invalid write to PMRSWTP register, ignored"); 1174 1172 return; 1175 1173 case 0xE14: /* TODO PMRMSC */ 1176 1174 break; 1177 1175 default: 1178 - NVME_GUEST_ERR(nvme_ub_mmiowr_invalid, 1176 + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, 1179 1177 "invalid MMIO write," 1180 1178 " offset=0x%"PRIx64", data=%"PRIx64"", 1181 1179 offset, data); ··· 1190 1188 uint64_t val = 0; 1191 1189 1192 1190 if (unlikely(addr & (sizeof(uint32_t) - 1))) { 1193 - NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32, 1191 + NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32, 1194 1192 "MMIO read not 32-bit aligned," 1195 1193 " offset=0x%"PRIx64"", addr); 1196 1194 /* should RAZ, fall through for now */ 1197 1195 } 
else if (unlikely(size < sizeof(uint32_t))) { 1198 - NVME_GUEST_ERR(nvme_ub_mmiord_toosmall, 1196 + NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall, 1199 1197 "MMIO read smaller than 32-bits," 1200 1198 " offset=0x%"PRIx64"", addr); 1201 1199 /* should RAZ, fall through for now */ ··· 1213 1211 } 1214 1212 memcpy(&val, ptr + addr, size); 1215 1213 } else { 1216 - NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs, 1214 + NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs, 1217 1215 "MMIO read beyond last register," 1218 1216 " offset=0x%"PRIx64", returning 0", addr); 1219 1217 } ··· 1226 1224 uint32_t qid; 1227 1225 1228 1226 if (unlikely(addr & ((1 << 2) - 1))) { 1229 - NVME_GUEST_ERR(nvme_ub_db_wr_misaligned, 1227 + NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned, 1230 1228 "doorbell write not 32-bit aligned," 1231 1229 " offset=0x%"PRIx64", ignoring", addr); 1232 1230 return; ··· 1241 1239 1242 1240 qid = (addr - (0x1000 + (1 << 2))) >> 3; 1243 1241 if (unlikely(nvme_check_cqid(n, qid))) { 1244 - NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq, 1242 + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq, 1245 1243 "completion queue doorbell write" 1246 1244 " for nonexistent queue," 1247 1245 " sqid=%"PRIu32", ignoring", qid); ··· 1250 1248 1251 1249 cq = n->cq[qid]; 1252 1250 if (unlikely(new_head >= cq->size)) { 1253 - NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead, 1251 + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead, 1254 1252 "completion queue doorbell write value" 1255 1253 " beyond queue size, sqid=%"PRIu32"," 1256 1254 " new_head=%"PRIu16", ignoring", ··· 1279 1277 1280 1278 qid = (addr - 0x1000) >> 3; 1281 1279 if (unlikely(nvme_check_sqid(n, qid))) { 1282 - NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq, 1280 + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq, 1283 1281 "submission queue doorbell write" 1284 1282 " for nonexistent queue," 1285 1283 " sqid=%"PRIu32", ignoring", qid); ··· 1288 1286 1289 1287 sq = n->sq[qid]; 1290 1288 if (unlikely(new_tail >= sq->size)) { 1291 - 
NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail, 1289 + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail, 1292 1290 "submission queue doorbell write value" 1293 1291 " beyond queue size, sqid=%"PRIu32"," 1294 1292 " new_tail=%"PRIu16", ignoring",
+90 -90
hw/block/trace-events
··· 29 29 30 30 # nvme.c 31 31 # nvme traces for successful events 32 - nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" 33 - nvme_irq_pin(void) "pulsing IRQ pin" 34 - nvme_irq_masked(void) "IRQ is masked" 35 - nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64"" 36 - nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64"" 37 - nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16"" 38 - nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d" 39 - nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16"" 40 - nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16"" 41 - nvme_identify_ctrl(void) "identify controller" 42 - nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16"" 43 - nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16"" 44 - nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s" 45 - nvme_getfeat_numq(int result) "get feature number of queues, result=%d" 46 - nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d" 47 - nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64"" 48 - nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64"" 49 - nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64"" 50 - nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64"" 51 - 
nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64"" 52 - nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64"" 53 - nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64"" 54 - nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64"" 55 - nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" 56 - nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" 57 - nvme_mmio_start_success(void) "setting controller enable bit succeeded" 58 - nvme_mmio_stopped(void) "cleared controller enable bit" 59 - nvme_mmio_shutdown_set(void) "shutdown bit set" 60 - nvme_mmio_shutdown_cleared(void) "shutdown bit cleared" 32 + pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" 33 + pci_nvme_irq_pin(void) "pulsing IRQ pin" 34 + pci_nvme_irq_masked(void) "IRQ is masked" 35 + pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64"" 36 + pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64"" 37 + pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16"" 38 + pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d" 39 + pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16"" 40 + pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16"" 41 + pci_nvme_identify_ctrl(void) "identify controller" 42 + 
42 + pci_nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
43 + pci_nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
44 + pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
45 + pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
46 + pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
47 + pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
48 + pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
49 + pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
50 + pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
51 + pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
52 + pci_nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64""
53 + pci_nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64""
54 + pci_nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64""
55 + pci_nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
56 + pci_nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
57 + pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded"
58 + pci_nvme_mmio_stopped(void) "cleared controller enable bit"
59 + pci_nvme_mmio_shutdown_set(void) "shutdown bit set"
60 + pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
61 61
62 62 # nvme traces for error conditions
63 - nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
64 - nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
65 - nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
66 - nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
67 - nvme_err_invalid_prp(void) "invalid PRP"
68 - nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
69 - nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
70 - nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
71 - nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
72 - nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
73 - nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
74 - nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
75 - nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
76 - nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
77 - nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
78 - nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
79 - nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
80 - nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
81 - nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
82 - nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
83 - nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
84 - nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
85 - nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
86 - nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
87 - nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
88 - nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
89 - nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
90 - nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
91 - nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
92 - nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
93 - nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
94 - nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
95 - nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
96 - nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
97 - nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
98 - nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
99 - nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
100 - nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
101 - nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
102 - nvme_err_startfail(void) "setting controller enable bit failed"
63 + pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
64 + pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
65 + pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
66 + pci_nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
67 + pci_nvme_err_invalid_prp(void) "invalid PRP"
68 + pci_nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
69 + pci_nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
70 + pci_nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
71 + pci_nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
72 + pci_nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
73 + pci_nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
74 + pci_nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
75 + pci_nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
76 + pci_nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
77 + pci_nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
78 + pci_nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
79 + pci_nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
80 + pci_nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
81 + pci_nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
82 + pci_nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
83 + pci_nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
84 + pci_nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
85 + pci_nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
86 + pci_nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
87 + pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
88 + pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
89 + pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
90 + pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
91 + pci_nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
92 + pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
93 + pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
94 + pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
95 + pci_nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
96 + pci_nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
97 + pci_nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
98 + pci_nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
99 + pci_nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
100 + pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
101 + pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
102 + pci_nvme_err_startfail(void) "setting controller enable bit failed"
103 103
104 104 # Traces for undefined behavior
105 - nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
106 - nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
107 - nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
108 - nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
109 - nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
110 - nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
111 - nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
112 - nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
113 - nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored"
114 - nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored"
115 - nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored"
116 - nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored"
117 - nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
118 - nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
119 - nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
120 - nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
121 - nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
122 - nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
123 - nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
124 - nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
125 - nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring"
105 + pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
106 + pci_nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
107 + pci_nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
108 + pci_nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
109 + pci_nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
110 + pci_nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
111 + pci_nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
112 + pci_nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
113 + pci_nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored"
114 + pci_nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored"
115 + pci_nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored"
116 + pci_nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored"
117 + pci_nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
118 + pci_nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
119 + pci_nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
120 + pci_nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
121 + pci_nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
122 + pci_nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
123 + pci_nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
124 + pci_nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
125 + pci_nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring"
126 126
127 127 # xen-block.c
128 128 xen_block_realize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u"