qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

block: Move NVMe constants to a separate header

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20180116060901.17413-8-famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>

+702 -703
+1 -6
block/nvme.c
··· 22 22 #include "block/block_int.h" 23 23 #include "trace.h" 24 24 25 - /* TODO: Move nvme spec definitions from hw/block/nvme.h into a separate file 26 - * that doesn't depend on dma/pci headers. */ 27 - #include "sysemu/dma.h" 28 - #include "hw/pci/pci.h" 29 - #include "hw/block/block.h" 30 - #include "hw/block/nvme.h" 25 + #include "block/nvme.h" 31 26 32 27 #define NVME_SQ_ENTRY_BYTES 64 33 28 #define NVME_CQ_ENTRY_BYTES 16
+1 -697
hw/block/nvme.h
··· 1 1 #ifndef HW_NVME_H 2 2 #define HW_NVME_H 3 3 #include "qemu/cutils.h" 4 - 5 - typedef struct NvmeBar { 6 - uint64_t cap; 7 - uint32_t vs; 8 - uint32_t intms; 9 - uint32_t intmc; 10 - uint32_t cc; 11 - uint32_t rsvd1; 12 - uint32_t csts; 13 - uint32_t nssrc; 14 - uint32_t aqa; 15 - uint64_t asq; 16 - uint64_t acq; 17 - uint32_t cmbloc; 18 - uint32_t cmbsz; 19 - } NvmeBar; 20 - 21 - enum NvmeCapShift { 22 - CAP_MQES_SHIFT = 0, 23 - CAP_CQR_SHIFT = 16, 24 - CAP_AMS_SHIFT = 17, 25 - CAP_TO_SHIFT = 24, 26 - CAP_DSTRD_SHIFT = 32, 27 - CAP_NSSRS_SHIFT = 33, 28 - CAP_CSS_SHIFT = 37, 29 - CAP_MPSMIN_SHIFT = 48, 30 - CAP_MPSMAX_SHIFT = 52, 31 - }; 32 - 33 - enum NvmeCapMask { 34 - CAP_MQES_MASK = 0xffff, 35 - CAP_CQR_MASK = 0x1, 36 - CAP_AMS_MASK = 0x3, 37 - CAP_TO_MASK = 0xff, 38 - CAP_DSTRD_MASK = 0xf, 39 - CAP_NSSRS_MASK = 0x1, 40 - CAP_CSS_MASK = 0xff, 41 - CAP_MPSMIN_MASK = 0xf, 42 - CAP_MPSMAX_MASK = 0xf, 43 - }; 44 - 45 - #define NVME_CAP_MQES(cap) (((cap) >> CAP_MQES_SHIFT) & CAP_MQES_MASK) 46 - #define NVME_CAP_CQR(cap) (((cap) >> CAP_CQR_SHIFT) & CAP_CQR_MASK) 47 - #define NVME_CAP_AMS(cap) (((cap) >> CAP_AMS_SHIFT) & CAP_AMS_MASK) 48 - #define NVME_CAP_TO(cap) (((cap) >> CAP_TO_SHIFT) & CAP_TO_MASK) 49 - #define NVME_CAP_DSTRD(cap) (((cap) >> CAP_DSTRD_SHIFT) & CAP_DSTRD_MASK) 50 - #define NVME_CAP_NSSRS(cap) (((cap) >> CAP_NSSRS_SHIFT) & CAP_NSSRS_MASK) 51 - #define NVME_CAP_CSS(cap) (((cap) >> CAP_CSS_SHIFT) & CAP_CSS_MASK) 52 - #define NVME_CAP_MPSMIN(cap)(((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK) 53 - #define NVME_CAP_MPSMAX(cap)(((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK) 54 - 55 - #define NVME_CAP_SET_MQES(cap, val) (cap |= (uint64_t)(val & CAP_MQES_MASK) \ 56 - << CAP_MQES_SHIFT) 57 - #define NVME_CAP_SET_CQR(cap, val) (cap |= (uint64_t)(val & CAP_CQR_MASK) \ 58 - << CAP_CQR_SHIFT) 59 - #define NVME_CAP_SET_AMS(cap, val) (cap |= (uint64_t)(val & CAP_AMS_MASK) \ 60 - << CAP_AMS_SHIFT) 61 - #define NVME_CAP_SET_TO(cap, val) (cap |= 
(uint64_t)(val & CAP_TO_MASK) \ 62 - << CAP_TO_SHIFT) 63 - #define NVME_CAP_SET_DSTRD(cap, val) (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \ 64 - << CAP_DSTRD_SHIFT) 65 - #define NVME_CAP_SET_NSSRS(cap, val) (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \ 66 - << CAP_NSSRS_SHIFT) 67 - #define NVME_CAP_SET_CSS(cap, val) (cap |= (uint64_t)(val & CAP_CSS_MASK) \ 68 - << CAP_CSS_SHIFT) 69 - #define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\ 70 - << CAP_MPSMIN_SHIFT) 71 - #define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\ 72 - << CAP_MPSMAX_SHIFT) 73 - 74 - enum NvmeCcShift { 75 - CC_EN_SHIFT = 0, 76 - CC_CSS_SHIFT = 4, 77 - CC_MPS_SHIFT = 7, 78 - CC_AMS_SHIFT = 11, 79 - CC_SHN_SHIFT = 14, 80 - CC_IOSQES_SHIFT = 16, 81 - CC_IOCQES_SHIFT = 20, 82 - }; 83 - 84 - enum NvmeCcMask { 85 - CC_EN_MASK = 0x1, 86 - CC_CSS_MASK = 0x7, 87 - CC_MPS_MASK = 0xf, 88 - CC_AMS_MASK = 0x7, 89 - CC_SHN_MASK = 0x3, 90 - CC_IOSQES_MASK = 0xf, 91 - CC_IOCQES_MASK = 0xf, 92 - }; 93 - 94 - #define NVME_CC_EN(cc) ((cc >> CC_EN_SHIFT) & CC_EN_MASK) 95 - #define NVME_CC_CSS(cc) ((cc >> CC_CSS_SHIFT) & CC_CSS_MASK) 96 - #define NVME_CC_MPS(cc) ((cc >> CC_MPS_SHIFT) & CC_MPS_MASK) 97 - #define NVME_CC_AMS(cc) ((cc >> CC_AMS_SHIFT) & CC_AMS_MASK) 98 - #define NVME_CC_SHN(cc) ((cc >> CC_SHN_SHIFT) & CC_SHN_MASK) 99 - #define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK) 100 - #define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK) 101 - 102 - enum NvmeCstsShift { 103 - CSTS_RDY_SHIFT = 0, 104 - CSTS_CFS_SHIFT = 1, 105 - CSTS_SHST_SHIFT = 2, 106 - CSTS_NSSRO_SHIFT = 4, 107 - }; 108 - 109 - enum NvmeCstsMask { 110 - CSTS_RDY_MASK = 0x1, 111 - CSTS_CFS_MASK = 0x1, 112 - CSTS_SHST_MASK = 0x3, 113 - CSTS_NSSRO_MASK = 0x1, 114 - }; 115 - 116 - enum NvmeCsts { 117 - NVME_CSTS_READY = 1 << CSTS_RDY_SHIFT, 118 - NVME_CSTS_FAILED = 1 << CSTS_CFS_SHIFT, 119 - NVME_CSTS_SHST_NORMAL = 0 << CSTS_SHST_SHIFT, 120 - 
NVME_CSTS_SHST_PROGRESS = 1 << CSTS_SHST_SHIFT, 121 - NVME_CSTS_SHST_COMPLETE = 2 << CSTS_SHST_SHIFT, 122 - NVME_CSTS_NSSRO = 1 << CSTS_NSSRO_SHIFT, 123 - }; 124 - 125 - #define NVME_CSTS_RDY(csts) ((csts >> CSTS_RDY_SHIFT) & CSTS_RDY_MASK) 126 - #define NVME_CSTS_CFS(csts) ((csts >> CSTS_CFS_SHIFT) & CSTS_CFS_MASK) 127 - #define NVME_CSTS_SHST(csts) ((csts >> CSTS_SHST_SHIFT) & CSTS_SHST_MASK) 128 - #define NVME_CSTS_NSSRO(csts) ((csts >> CSTS_NSSRO_SHIFT) & CSTS_NSSRO_MASK) 129 - 130 - enum NvmeAqaShift { 131 - AQA_ASQS_SHIFT = 0, 132 - AQA_ACQS_SHIFT = 16, 133 - }; 134 - 135 - enum NvmeAqaMask { 136 - AQA_ASQS_MASK = 0xfff, 137 - AQA_ACQS_MASK = 0xfff, 138 - }; 139 - 140 - #define NVME_AQA_ASQS(aqa) ((aqa >> AQA_ASQS_SHIFT) & AQA_ASQS_MASK) 141 - #define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK) 142 - 143 - enum NvmeCmblocShift { 144 - CMBLOC_BIR_SHIFT = 0, 145 - CMBLOC_OFST_SHIFT = 12, 146 - }; 147 - 148 - enum NvmeCmblocMask { 149 - CMBLOC_BIR_MASK = 0x7, 150 - CMBLOC_OFST_MASK = 0xfffff, 151 - }; 152 - 153 - #define NVME_CMBLOC_BIR(cmbloc) ((cmbloc >> CMBLOC_BIR_SHIFT) & \ 154 - CMBLOC_BIR_MASK) 155 - #define NVME_CMBLOC_OFST(cmbloc)((cmbloc >> CMBLOC_OFST_SHIFT) & \ 156 - CMBLOC_OFST_MASK) 157 - 158 - #define NVME_CMBLOC_SET_BIR(cmbloc, val) \ 159 - (cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT) 160 - #define NVME_CMBLOC_SET_OFST(cmbloc, val) \ 161 - (cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT) 162 - 163 - enum NvmeCmbszShift { 164 - CMBSZ_SQS_SHIFT = 0, 165 - CMBSZ_CQS_SHIFT = 1, 166 - CMBSZ_LISTS_SHIFT = 2, 167 - CMBSZ_RDS_SHIFT = 3, 168 - CMBSZ_WDS_SHIFT = 4, 169 - CMBSZ_SZU_SHIFT = 8, 170 - CMBSZ_SZ_SHIFT = 12, 171 - }; 172 - 173 - enum NvmeCmbszMask { 174 - CMBSZ_SQS_MASK = 0x1, 175 - CMBSZ_CQS_MASK = 0x1, 176 - CMBSZ_LISTS_MASK = 0x1, 177 - CMBSZ_RDS_MASK = 0x1, 178 - CMBSZ_WDS_MASK = 0x1, 179 - CMBSZ_SZU_MASK = 0xf, 180 - CMBSZ_SZ_MASK = 0xfffff, 181 - }; 182 - 183 - #define 
NVME_CMBSZ_SQS(cmbsz) ((cmbsz >> CMBSZ_SQS_SHIFT) & CMBSZ_SQS_MASK) 184 - #define NVME_CMBSZ_CQS(cmbsz) ((cmbsz >> CMBSZ_CQS_SHIFT) & CMBSZ_CQS_MASK) 185 - #define NVME_CMBSZ_LISTS(cmbsz)((cmbsz >> CMBSZ_LISTS_SHIFT) & CMBSZ_LISTS_MASK) 186 - #define NVME_CMBSZ_RDS(cmbsz) ((cmbsz >> CMBSZ_RDS_SHIFT) & CMBSZ_RDS_MASK) 187 - #define NVME_CMBSZ_WDS(cmbsz) ((cmbsz >> CMBSZ_WDS_SHIFT) & CMBSZ_WDS_MASK) 188 - #define NVME_CMBSZ_SZU(cmbsz) ((cmbsz >> CMBSZ_SZU_SHIFT) & CMBSZ_SZU_MASK) 189 - #define NVME_CMBSZ_SZ(cmbsz) ((cmbsz >> CMBSZ_SZ_SHIFT) & CMBSZ_SZ_MASK) 190 - 191 - #define NVME_CMBSZ_SET_SQS(cmbsz, val) \ 192 - (cmbsz |= (uint64_t)(val & CMBSZ_SQS_MASK) << CMBSZ_SQS_SHIFT) 193 - #define NVME_CMBSZ_SET_CQS(cmbsz, val) \ 194 - (cmbsz |= (uint64_t)(val & CMBSZ_CQS_MASK) << CMBSZ_CQS_SHIFT) 195 - #define NVME_CMBSZ_SET_LISTS(cmbsz, val) \ 196 - (cmbsz |= (uint64_t)(val & CMBSZ_LISTS_MASK) << CMBSZ_LISTS_SHIFT) 197 - #define NVME_CMBSZ_SET_RDS(cmbsz, val) \ 198 - (cmbsz |= (uint64_t)(val & CMBSZ_RDS_MASK) << CMBSZ_RDS_SHIFT) 199 - #define NVME_CMBSZ_SET_WDS(cmbsz, val) \ 200 - (cmbsz |= (uint64_t)(val & CMBSZ_WDS_MASK) << CMBSZ_WDS_SHIFT) 201 - #define NVME_CMBSZ_SET_SZU(cmbsz, val) \ 202 - (cmbsz |= (uint64_t)(val & CMBSZ_SZU_MASK) << CMBSZ_SZU_SHIFT) 203 - #define NVME_CMBSZ_SET_SZ(cmbsz, val) \ 204 - (cmbsz |= (uint64_t)(val & CMBSZ_SZ_MASK) << CMBSZ_SZ_SHIFT) 205 - 206 - #define NVME_CMBSZ_GETSIZE(cmbsz) \ 207 - (NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz)))) 208 - 209 - typedef struct NvmeCmd { 210 - uint8_t opcode; 211 - uint8_t fuse; 212 - uint16_t cid; 213 - uint32_t nsid; 214 - uint64_t res1; 215 - uint64_t mptr; 216 - uint64_t prp1; 217 - uint64_t prp2; 218 - uint32_t cdw10; 219 - uint32_t cdw11; 220 - uint32_t cdw12; 221 - uint32_t cdw13; 222 - uint32_t cdw14; 223 - uint32_t cdw15; 224 - } NvmeCmd; 225 - 226 - enum NvmeAdminCommands { 227 - NVME_ADM_CMD_DELETE_SQ = 0x00, 228 - NVME_ADM_CMD_CREATE_SQ = 0x01, 229 - NVME_ADM_CMD_GET_LOG_PAGE = 
0x02, 230 - NVME_ADM_CMD_DELETE_CQ = 0x04, 231 - NVME_ADM_CMD_CREATE_CQ = 0x05, 232 - NVME_ADM_CMD_IDENTIFY = 0x06, 233 - NVME_ADM_CMD_ABORT = 0x08, 234 - NVME_ADM_CMD_SET_FEATURES = 0x09, 235 - NVME_ADM_CMD_GET_FEATURES = 0x0a, 236 - NVME_ADM_CMD_ASYNC_EV_REQ = 0x0c, 237 - NVME_ADM_CMD_ACTIVATE_FW = 0x10, 238 - NVME_ADM_CMD_DOWNLOAD_FW = 0x11, 239 - NVME_ADM_CMD_FORMAT_NVM = 0x80, 240 - NVME_ADM_CMD_SECURITY_SEND = 0x81, 241 - NVME_ADM_CMD_SECURITY_RECV = 0x82, 242 - }; 243 - 244 - enum NvmeIoCommands { 245 - NVME_CMD_FLUSH = 0x00, 246 - NVME_CMD_WRITE = 0x01, 247 - NVME_CMD_READ = 0x02, 248 - NVME_CMD_WRITE_UNCOR = 0x04, 249 - NVME_CMD_COMPARE = 0x05, 250 - NVME_CMD_WRITE_ZEROS = 0x08, 251 - NVME_CMD_DSM = 0x09, 252 - }; 253 - 254 - typedef struct NvmeDeleteQ { 255 - uint8_t opcode; 256 - uint8_t flags; 257 - uint16_t cid; 258 - uint32_t rsvd1[9]; 259 - uint16_t qid; 260 - uint16_t rsvd10; 261 - uint32_t rsvd11[5]; 262 - } NvmeDeleteQ; 263 - 264 - typedef struct NvmeCreateCq { 265 - uint8_t opcode; 266 - uint8_t flags; 267 - uint16_t cid; 268 - uint32_t rsvd1[5]; 269 - uint64_t prp1; 270 - uint64_t rsvd8; 271 - uint16_t cqid; 272 - uint16_t qsize; 273 - uint16_t cq_flags; 274 - uint16_t irq_vector; 275 - uint32_t rsvd12[4]; 276 - } NvmeCreateCq; 277 - 278 - #define NVME_CQ_FLAGS_PC(cq_flags) (cq_flags & 0x1) 279 - #define NVME_CQ_FLAGS_IEN(cq_flags) ((cq_flags >> 1) & 0x1) 280 - 281 - typedef struct NvmeCreateSq { 282 - uint8_t opcode; 283 - uint8_t flags; 284 - uint16_t cid; 285 - uint32_t rsvd1[5]; 286 - uint64_t prp1; 287 - uint64_t rsvd8; 288 - uint16_t sqid; 289 - uint16_t qsize; 290 - uint16_t sq_flags; 291 - uint16_t cqid; 292 - uint32_t rsvd12[4]; 293 - } NvmeCreateSq; 294 - 295 - #define NVME_SQ_FLAGS_PC(sq_flags) (sq_flags & 0x1) 296 - #define NVME_SQ_FLAGS_QPRIO(sq_flags) ((sq_flags >> 1) & 0x3) 297 - 298 - enum NvmeQueueFlags { 299 - NVME_Q_PC = 1, 300 - NVME_Q_PRIO_URGENT = 0, 301 - NVME_Q_PRIO_HIGH = 1, 302 - NVME_Q_PRIO_NORMAL = 2, 303 - 
NVME_Q_PRIO_LOW = 3, 304 - }; 305 - 306 - typedef struct NvmeIdentify { 307 - uint8_t opcode; 308 - uint8_t flags; 309 - uint16_t cid; 310 - uint32_t nsid; 311 - uint64_t rsvd2[2]; 312 - uint64_t prp1; 313 - uint64_t prp2; 314 - uint32_t cns; 315 - uint32_t rsvd11[5]; 316 - } NvmeIdentify; 317 - 318 - typedef struct NvmeRwCmd { 319 - uint8_t opcode; 320 - uint8_t flags; 321 - uint16_t cid; 322 - uint32_t nsid; 323 - uint64_t rsvd2; 324 - uint64_t mptr; 325 - uint64_t prp1; 326 - uint64_t prp2; 327 - uint64_t slba; 328 - uint16_t nlb; 329 - uint16_t control; 330 - uint32_t dsmgmt; 331 - uint32_t reftag; 332 - uint16_t apptag; 333 - uint16_t appmask; 334 - } NvmeRwCmd; 335 - 336 - enum { 337 - NVME_RW_LR = 1 << 15, 338 - NVME_RW_FUA = 1 << 14, 339 - NVME_RW_DSM_FREQ_UNSPEC = 0, 340 - NVME_RW_DSM_FREQ_TYPICAL = 1, 341 - NVME_RW_DSM_FREQ_RARE = 2, 342 - NVME_RW_DSM_FREQ_READS = 3, 343 - NVME_RW_DSM_FREQ_WRITES = 4, 344 - NVME_RW_DSM_FREQ_RW = 5, 345 - NVME_RW_DSM_FREQ_ONCE = 6, 346 - NVME_RW_DSM_FREQ_PREFETCH = 7, 347 - NVME_RW_DSM_FREQ_TEMP = 8, 348 - NVME_RW_DSM_LATENCY_NONE = 0 << 4, 349 - NVME_RW_DSM_LATENCY_IDLE = 1 << 4, 350 - NVME_RW_DSM_LATENCY_NORM = 2 << 4, 351 - NVME_RW_DSM_LATENCY_LOW = 3 << 4, 352 - NVME_RW_DSM_SEQ_REQ = 1 << 6, 353 - NVME_RW_DSM_COMPRESSED = 1 << 7, 354 - NVME_RW_PRINFO_PRACT = 1 << 13, 355 - NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, 356 - NVME_RW_PRINFO_PRCHK_APP = 1 << 11, 357 - NVME_RW_PRINFO_PRCHK_REF = 1 << 10, 358 - }; 359 - 360 - typedef struct NvmeDsmCmd { 361 - uint8_t opcode; 362 - uint8_t flags; 363 - uint16_t cid; 364 - uint32_t nsid; 365 - uint64_t rsvd2[2]; 366 - uint64_t prp1; 367 - uint64_t prp2; 368 - uint32_t nr; 369 - uint32_t attributes; 370 - uint32_t rsvd12[4]; 371 - } NvmeDsmCmd; 372 - 373 - enum { 374 - NVME_DSMGMT_IDR = 1 << 0, 375 - NVME_DSMGMT_IDW = 1 << 1, 376 - NVME_DSMGMT_AD = 1 << 2, 377 - }; 378 - 379 - typedef struct NvmeDsmRange { 380 - uint32_t cattr; 381 - uint32_t nlb; 382 - uint64_t slba; 383 - } 
NvmeDsmRange; 384 - 385 - enum NvmeAsyncEventRequest { 386 - NVME_AER_TYPE_ERROR = 0, 387 - NVME_AER_TYPE_SMART = 1, 388 - NVME_AER_TYPE_IO_SPECIFIC = 6, 389 - NVME_AER_TYPE_VENDOR_SPECIFIC = 7, 390 - NVME_AER_INFO_ERR_INVALID_SQ = 0, 391 - NVME_AER_INFO_ERR_INVALID_DB = 1, 392 - NVME_AER_INFO_ERR_DIAG_FAIL = 2, 393 - NVME_AER_INFO_ERR_PERS_INTERNAL_ERR = 3, 394 - NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR = 4, 395 - NVME_AER_INFO_ERR_FW_IMG_LOAD_ERR = 5, 396 - NVME_AER_INFO_SMART_RELIABILITY = 0, 397 - NVME_AER_INFO_SMART_TEMP_THRESH = 1, 398 - NVME_AER_INFO_SMART_SPARE_THRESH = 2, 399 - }; 400 - 401 - typedef struct NvmeAerResult { 402 - uint8_t event_type; 403 - uint8_t event_info; 404 - uint8_t log_page; 405 - uint8_t resv; 406 - } NvmeAerResult; 407 - 408 - typedef struct NvmeCqe { 409 - uint32_t result; 410 - uint32_t rsvd; 411 - uint16_t sq_head; 412 - uint16_t sq_id; 413 - uint16_t cid; 414 - uint16_t status; 415 - } NvmeCqe; 416 - 417 - enum NvmeStatusCodes { 418 - NVME_SUCCESS = 0x0000, 419 - NVME_INVALID_OPCODE = 0x0001, 420 - NVME_INVALID_FIELD = 0x0002, 421 - NVME_CID_CONFLICT = 0x0003, 422 - NVME_DATA_TRAS_ERROR = 0x0004, 423 - NVME_POWER_LOSS_ABORT = 0x0005, 424 - NVME_INTERNAL_DEV_ERROR = 0x0006, 425 - NVME_CMD_ABORT_REQ = 0x0007, 426 - NVME_CMD_ABORT_SQ_DEL = 0x0008, 427 - NVME_CMD_ABORT_FAILED_FUSE = 0x0009, 428 - NVME_CMD_ABORT_MISSING_FUSE = 0x000a, 429 - NVME_INVALID_NSID = 0x000b, 430 - NVME_CMD_SEQ_ERROR = 0x000c, 431 - NVME_LBA_RANGE = 0x0080, 432 - NVME_CAP_EXCEEDED = 0x0081, 433 - NVME_NS_NOT_READY = 0x0082, 434 - NVME_NS_RESV_CONFLICT = 0x0083, 435 - NVME_INVALID_CQID = 0x0100, 436 - NVME_INVALID_QID = 0x0101, 437 - NVME_MAX_QSIZE_EXCEEDED = 0x0102, 438 - NVME_ACL_EXCEEDED = 0x0103, 439 - NVME_RESERVED = 0x0104, 440 - NVME_AER_LIMIT_EXCEEDED = 0x0105, 441 - NVME_INVALID_FW_SLOT = 0x0106, 442 - NVME_INVALID_FW_IMAGE = 0x0107, 443 - NVME_INVALID_IRQ_VECTOR = 0x0108, 444 - NVME_INVALID_LOG_ID = 0x0109, 445 - NVME_INVALID_FORMAT = 0x010a, 446 - 
NVME_FW_REQ_RESET = 0x010b, 447 - NVME_INVALID_QUEUE_DEL = 0x010c, 448 - NVME_FID_NOT_SAVEABLE = 0x010d, 449 - NVME_FID_NOT_NSID_SPEC = 0x010f, 450 - NVME_FW_REQ_SUSYSTEM_RESET = 0x0110, 451 - NVME_CONFLICTING_ATTRS = 0x0180, 452 - NVME_INVALID_PROT_INFO = 0x0181, 453 - NVME_WRITE_TO_RO = 0x0182, 454 - NVME_WRITE_FAULT = 0x0280, 455 - NVME_UNRECOVERED_READ = 0x0281, 456 - NVME_E2E_GUARD_ERROR = 0x0282, 457 - NVME_E2E_APP_ERROR = 0x0283, 458 - NVME_E2E_REF_ERROR = 0x0284, 459 - NVME_CMP_FAILURE = 0x0285, 460 - NVME_ACCESS_DENIED = 0x0286, 461 - NVME_MORE = 0x2000, 462 - NVME_DNR = 0x4000, 463 - NVME_NO_COMPLETE = 0xffff, 464 - }; 465 - 466 - typedef struct NvmeFwSlotInfoLog { 467 - uint8_t afi; 468 - uint8_t reserved1[7]; 469 - uint8_t frs1[8]; 470 - uint8_t frs2[8]; 471 - uint8_t frs3[8]; 472 - uint8_t frs4[8]; 473 - uint8_t frs5[8]; 474 - uint8_t frs6[8]; 475 - uint8_t frs7[8]; 476 - uint8_t reserved2[448]; 477 - } NvmeFwSlotInfoLog; 478 - 479 - typedef struct NvmeErrorLog { 480 - uint64_t error_count; 481 - uint16_t sqid; 482 - uint16_t cid; 483 - uint16_t status_field; 484 - uint16_t param_error_location; 485 - uint64_t lba; 486 - uint32_t nsid; 487 - uint8_t vs; 488 - uint8_t resv[35]; 489 - } NvmeErrorLog; 490 - 491 - typedef struct NvmeSmartLog { 492 - uint8_t critical_warning; 493 - uint8_t temperature[2]; 494 - uint8_t available_spare; 495 - uint8_t available_spare_threshold; 496 - uint8_t percentage_used; 497 - uint8_t reserved1[26]; 498 - uint64_t data_units_read[2]; 499 - uint64_t data_units_written[2]; 500 - uint64_t host_read_commands[2]; 501 - uint64_t host_write_commands[2]; 502 - uint64_t controller_busy_time[2]; 503 - uint64_t power_cycles[2]; 504 - uint64_t power_on_hours[2]; 505 - uint64_t unsafe_shutdowns[2]; 506 - uint64_t media_errors[2]; 507 - uint64_t number_of_error_log_entries[2]; 508 - uint8_t reserved2[320]; 509 - } NvmeSmartLog; 510 - 511 - enum NvmeSmartWarn { 512 - NVME_SMART_SPARE = 1 << 0, 513 - NVME_SMART_TEMPERATURE = 1 << 1, 514 
- NVME_SMART_RELIABILITY = 1 << 2, 515 - NVME_SMART_MEDIA_READ_ONLY = 1 << 3, 516 - NVME_SMART_FAILED_VOLATILE_MEDIA = 1 << 4, 517 - }; 518 - 519 - enum LogIdentifier { 520 - NVME_LOG_ERROR_INFO = 0x01, 521 - NVME_LOG_SMART_INFO = 0x02, 522 - NVME_LOG_FW_SLOT_INFO = 0x03, 523 - }; 524 - 525 - typedef struct NvmePSD { 526 - uint16_t mp; 527 - uint16_t reserved; 528 - uint32_t enlat; 529 - uint32_t exlat; 530 - uint8_t rrt; 531 - uint8_t rrl; 532 - uint8_t rwt; 533 - uint8_t rwl; 534 - uint8_t resv[16]; 535 - } NvmePSD; 536 - 537 - typedef struct NvmeIdCtrl { 538 - uint16_t vid; 539 - uint16_t ssvid; 540 - uint8_t sn[20]; 541 - uint8_t mn[40]; 542 - uint8_t fr[8]; 543 - uint8_t rab; 544 - uint8_t ieee[3]; 545 - uint8_t cmic; 546 - uint8_t mdts; 547 - uint8_t rsvd255[178]; 548 - uint16_t oacs; 549 - uint8_t acl; 550 - uint8_t aerl; 551 - uint8_t frmw; 552 - uint8_t lpa; 553 - uint8_t elpe; 554 - uint8_t npss; 555 - uint8_t rsvd511[248]; 556 - uint8_t sqes; 557 - uint8_t cqes; 558 - uint16_t rsvd515; 559 - uint32_t nn; 560 - uint16_t oncs; 561 - uint16_t fuses; 562 - uint8_t fna; 563 - uint8_t vwc; 564 - uint16_t awun; 565 - uint16_t awupf; 566 - uint8_t rsvd703[174]; 567 - uint8_t rsvd2047[1344]; 568 - NvmePSD psd[32]; 569 - uint8_t vs[1024]; 570 - } NvmeIdCtrl; 571 - 572 - enum NvmeIdCtrlOacs { 573 - NVME_OACS_SECURITY = 1 << 0, 574 - NVME_OACS_FORMAT = 1 << 1, 575 - NVME_OACS_FW = 1 << 2, 576 - }; 577 - 578 - enum NvmeIdCtrlOncs { 579 - NVME_ONCS_COMPARE = 1 << 0, 580 - NVME_ONCS_WRITE_UNCORR = 1 << 1, 581 - NVME_ONCS_DSM = 1 << 2, 582 - NVME_ONCS_WRITE_ZEROS = 1 << 3, 583 - NVME_ONCS_FEATURES = 1 << 4, 584 - NVME_ONCS_RESRVATIONS = 1 << 5, 585 - }; 586 - 587 - #define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf) 588 - #define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf) 589 - #define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf) 590 - #define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf) 591 - 592 - typedef struct NvmeFeatureVal { 593 - uint32_t arbitration; 594 - uint32_t 
power_mgmt; 595 - uint32_t temp_thresh; 596 - uint32_t err_rec; 597 - uint32_t volatile_wc; 598 - uint32_t num_queues; 599 - uint32_t int_coalescing; 600 - uint32_t *int_vector_config; 601 - uint32_t write_atomicity; 602 - uint32_t async_config; 603 - uint32_t sw_prog_marker; 604 - } NvmeFeatureVal; 605 - 606 - #define NVME_ARB_AB(arb) (arb & 0x7) 607 - #define NVME_ARB_LPW(arb) ((arb >> 8) & 0xff) 608 - #define NVME_ARB_MPW(arb) ((arb >> 16) & 0xff) 609 - #define NVME_ARB_HPW(arb) ((arb >> 24) & 0xff) 610 - 611 - #define NVME_INTC_THR(intc) (intc & 0xff) 612 - #define NVME_INTC_TIME(intc) ((intc >> 8) & 0xff) 613 - 614 - enum NvmeFeatureIds { 615 - NVME_ARBITRATION = 0x1, 616 - NVME_POWER_MANAGEMENT = 0x2, 617 - NVME_LBA_RANGE_TYPE = 0x3, 618 - NVME_TEMPERATURE_THRESHOLD = 0x4, 619 - NVME_ERROR_RECOVERY = 0x5, 620 - NVME_VOLATILE_WRITE_CACHE = 0x6, 621 - NVME_NUMBER_OF_QUEUES = 0x7, 622 - NVME_INTERRUPT_COALESCING = 0x8, 623 - NVME_INTERRUPT_VECTOR_CONF = 0x9, 624 - NVME_WRITE_ATOMICITY = 0xa, 625 - NVME_ASYNCHRONOUS_EVENT_CONF = 0xb, 626 - NVME_SOFTWARE_PROGRESS_MARKER = 0x80 627 - }; 628 - 629 - typedef struct NvmeRangeType { 630 - uint8_t type; 631 - uint8_t attributes; 632 - uint8_t rsvd2[14]; 633 - uint64_t slba; 634 - uint64_t nlb; 635 - uint8_t guid[16]; 636 - uint8_t rsvd48[16]; 637 - } NvmeRangeType; 638 - 639 - typedef struct NvmeLBAF { 640 - uint16_t ms; 641 - uint8_t ds; 642 - uint8_t rp; 643 - } NvmeLBAF; 644 - 645 - typedef struct NvmeIdNs { 646 - uint64_t nsze; 647 - uint64_t ncap; 648 - uint64_t nuse; 649 - uint8_t nsfeat; 650 - uint8_t nlbaf; 651 - uint8_t flbas; 652 - uint8_t mc; 653 - uint8_t dpc; 654 - uint8_t dps; 655 - uint8_t res30[98]; 656 - NvmeLBAF lbaf[16]; 657 - uint8_t res192[192]; 658 - uint8_t vs[3712]; 659 - } NvmeIdNs; 660 - 661 - #define NVME_ID_NS_NSFEAT_THIN(nsfeat) ((nsfeat & 0x1)) 662 - #define NVME_ID_NS_FLBAS_EXTENDED(flbas) ((flbas >> 4) & 0x1) 663 - #define NVME_ID_NS_FLBAS_INDEX(flbas) ((flbas & 0xf)) 664 - #define 
NVME_ID_NS_MC_SEPARATE(mc) ((mc >> 1) & 0x1) 665 - #define NVME_ID_NS_MC_EXTENDED(mc) ((mc & 0x1)) 666 - #define NVME_ID_NS_DPC_LAST_EIGHT(dpc) ((dpc >> 4) & 0x1) 667 - #define NVME_ID_NS_DPC_FIRST_EIGHT(dpc) ((dpc >> 3) & 0x1) 668 - #define NVME_ID_NS_DPC_TYPE_3(dpc) ((dpc >> 2) & 0x1) 669 - #define NVME_ID_NS_DPC_TYPE_2(dpc) ((dpc >> 1) & 0x1) 670 - #define NVME_ID_NS_DPC_TYPE_1(dpc) ((dpc & 0x1)) 671 - #define NVME_ID_NS_DPC_TYPE_MASK 0x7 672 - 673 - enum NvmeIdNsDps { 674 - DPS_TYPE_NONE = 0, 675 - DPS_TYPE_1 = 1, 676 - DPS_TYPE_2 = 2, 677 - DPS_TYPE_3 = 3, 678 - DPS_TYPE_MASK = 0x7, 679 - DPS_FIRST_EIGHT = 8, 680 - }; 681 - 682 - static inline void _nvme_check_size(void) 683 - { 684 - QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4); 685 - QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16); 686 - QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16); 687 - QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64); 688 - QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64); 689 - QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64); 690 - QEMU_BUILD_BUG_ON(sizeof(NvmeCreateSq) != 64); 691 - QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64); 692 - QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64); 693 - QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64); 694 - QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64); 695 - QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64); 696 - QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512); 697 - QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512); 698 - QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096); 699 - QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096); 700 - } 4 + #include "block/nvme.h" 701 5 702 6 typedef struct NvmeAsyncEvent { 703 7 QSIMPLEQ_ENTRY(NvmeAsyncEvent) entry;
+700
include/block/nvme.h
··· 1 + #ifndef BLOCK_NVME_H 2 + #define BLOCK_NVME_H 3 + 4 + typedef struct NvmeBar { 5 + uint64_t cap; 6 + uint32_t vs; 7 + uint32_t intms; 8 + uint32_t intmc; 9 + uint32_t cc; 10 + uint32_t rsvd1; 11 + uint32_t csts; 12 + uint32_t nssrc; 13 + uint32_t aqa; 14 + uint64_t asq; 15 + uint64_t acq; 16 + uint32_t cmbloc; 17 + uint32_t cmbsz; 18 + } NvmeBar; 19 + 20 + enum NvmeCapShift { 21 + CAP_MQES_SHIFT = 0, 22 + CAP_CQR_SHIFT = 16, 23 + CAP_AMS_SHIFT = 17, 24 + CAP_TO_SHIFT = 24, 25 + CAP_DSTRD_SHIFT = 32, 26 + CAP_NSSRS_SHIFT = 33, 27 + CAP_CSS_SHIFT = 37, 28 + CAP_MPSMIN_SHIFT = 48, 29 + CAP_MPSMAX_SHIFT = 52, 30 + }; 31 + 32 + enum NvmeCapMask { 33 + CAP_MQES_MASK = 0xffff, 34 + CAP_CQR_MASK = 0x1, 35 + CAP_AMS_MASK = 0x3, 36 + CAP_TO_MASK = 0xff, 37 + CAP_DSTRD_MASK = 0xf, 38 + CAP_NSSRS_MASK = 0x1, 39 + CAP_CSS_MASK = 0xff, 40 + CAP_MPSMIN_MASK = 0xf, 41 + CAP_MPSMAX_MASK = 0xf, 42 + }; 43 + 44 + #define NVME_CAP_MQES(cap) (((cap) >> CAP_MQES_SHIFT) & CAP_MQES_MASK) 45 + #define NVME_CAP_CQR(cap) (((cap) >> CAP_CQR_SHIFT) & CAP_CQR_MASK) 46 + #define NVME_CAP_AMS(cap) (((cap) >> CAP_AMS_SHIFT) & CAP_AMS_MASK) 47 + #define NVME_CAP_TO(cap) (((cap) >> CAP_TO_SHIFT) & CAP_TO_MASK) 48 + #define NVME_CAP_DSTRD(cap) (((cap) >> CAP_DSTRD_SHIFT) & CAP_DSTRD_MASK) 49 + #define NVME_CAP_NSSRS(cap) (((cap) >> CAP_NSSRS_SHIFT) & CAP_NSSRS_MASK) 50 + #define NVME_CAP_CSS(cap) (((cap) >> CAP_CSS_SHIFT) & CAP_CSS_MASK) 51 + #define NVME_CAP_MPSMIN(cap)(((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK) 52 + #define NVME_CAP_MPSMAX(cap)(((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK) 53 + 54 + #define NVME_CAP_SET_MQES(cap, val) (cap |= (uint64_t)(val & CAP_MQES_MASK) \ 55 + << CAP_MQES_SHIFT) 56 + #define NVME_CAP_SET_CQR(cap, val) (cap |= (uint64_t)(val & CAP_CQR_MASK) \ 57 + << CAP_CQR_SHIFT) 58 + #define NVME_CAP_SET_AMS(cap, val) (cap |= (uint64_t)(val & CAP_AMS_MASK) \ 59 + << CAP_AMS_SHIFT) 60 + #define NVME_CAP_SET_TO(cap, val) (cap |= (uint64_t)(val & CAP_TO_MASK) \ 61 + 
<< CAP_TO_SHIFT) 62 + #define NVME_CAP_SET_DSTRD(cap, val) (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \ 63 + << CAP_DSTRD_SHIFT) 64 + #define NVME_CAP_SET_NSSRS(cap, val) (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \ 65 + << CAP_NSSRS_SHIFT) 66 + #define NVME_CAP_SET_CSS(cap, val) (cap |= (uint64_t)(val & CAP_CSS_MASK) \ 67 + << CAP_CSS_SHIFT) 68 + #define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\ 69 + << CAP_MPSMIN_SHIFT) 70 + #define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\ 71 + << CAP_MPSMAX_SHIFT) 72 + 73 + enum NvmeCcShift { 74 + CC_EN_SHIFT = 0, 75 + CC_CSS_SHIFT = 4, 76 + CC_MPS_SHIFT = 7, 77 + CC_AMS_SHIFT = 11, 78 + CC_SHN_SHIFT = 14, 79 + CC_IOSQES_SHIFT = 16, 80 + CC_IOCQES_SHIFT = 20, 81 + }; 82 + 83 + enum NvmeCcMask { 84 + CC_EN_MASK = 0x1, 85 + CC_CSS_MASK = 0x7, 86 + CC_MPS_MASK = 0xf, 87 + CC_AMS_MASK = 0x7, 88 + CC_SHN_MASK = 0x3, 89 + CC_IOSQES_MASK = 0xf, 90 + CC_IOCQES_MASK = 0xf, 91 + }; 92 + 93 + #define NVME_CC_EN(cc) ((cc >> CC_EN_SHIFT) & CC_EN_MASK) 94 + #define NVME_CC_CSS(cc) ((cc >> CC_CSS_SHIFT) & CC_CSS_MASK) 95 + #define NVME_CC_MPS(cc) ((cc >> CC_MPS_SHIFT) & CC_MPS_MASK) 96 + #define NVME_CC_AMS(cc) ((cc >> CC_AMS_SHIFT) & CC_AMS_MASK) 97 + #define NVME_CC_SHN(cc) ((cc >> CC_SHN_SHIFT) & CC_SHN_MASK) 98 + #define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK) 99 + #define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK) 100 + 101 + enum NvmeCstsShift { 102 + CSTS_RDY_SHIFT = 0, 103 + CSTS_CFS_SHIFT = 1, 104 + CSTS_SHST_SHIFT = 2, 105 + CSTS_NSSRO_SHIFT = 4, 106 + }; 107 + 108 + enum NvmeCstsMask { 109 + CSTS_RDY_MASK = 0x1, 110 + CSTS_CFS_MASK = 0x1, 111 + CSTS_SHST_MASK = 0x3, 112 + CSTS_NSSRO_MASK = 0x1, 113 + }; 114 + 115 + enum NvmeCsts { 116 + NVME_CSTS_READY = 1 << CSTS_RDY_SHIFT, 117 + NVME_CSTS_FAILED = 1 << CSTS_CFS_SHIFT, 118 + NVME_CSTS_SHST_NORMAL = 0 << CSTS_SHST_SHIFT, 119 + NVME_CSTS_SHST_PROGRESS = 1 << CSTS_SHST_SHIFT, 120 + 
NVME_CSTS_SHST_COMPLETE = 2 << CSTS_SHST_SHIFT, 121 + NVME_CSTS_NSSRO = 1 << CSTS_NSSRO_SHIFT, 122 + }; 123 + 124 + #define NVME_CSTS_RDY(csts) ((csts >> CSTS_RDY_SHIFT) & CSTS_RDY_MASK) 125 + #define NVME_CSTS_CFS(csts) ((csts >> CSTS_CFS_SHIFT) & CSTS_CFS_MASK) 126 + #define NVME_CSTS_SHST(csts) ((csts >> CSTS_SHST_SHIFT) & CSTS_SHST_MASK) 127 + #define NVME_CSTS_NSSRO(csts) ((csts >> CSTS_NSSRO_SHIFT) & CSTS_NSSRO_MASK) 128 + 129 + enum NvmeAqaShift { 130 + AQA_ASQS_SHIFT = 0, 131 + AQA_ACQS_SHIFT = 16, 132 + }; 133 + 134 + enum NvmeAqaMask { 135 + AQA_ASQS_MASK = 0xfff, 136 + AQA_ACQS_MASK = 0xfff, 137 + }; 138 + 139 + #define NVME_AQA_ASQS(aqa) ((aqa >> AQA_ASQS_SHIFT) & AQA_ASQS_MASK) 140 + #define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK) 141 + 142 + enum NvmeCmblocShift { 143 + CMBLOC_BIR_SHIFT = 0, 144 + CMBLOC_OFST_SHIFT = 12, 145 + }; 146 + 147 + enum NvmeCmblocMask { 148 + CMBLOC_BIR_MASK = 0x7, 149 + CMBLOC_OFST_MASK = 0xfffff, 150 + }; 151 + 152 + #define NVME_CMBLOC_BIR(cmbloc) ((cmbloc >> CMBLOC_BIR_SHIFT) & \ 153 + CMBLOC_BIR_MASK) 154 + #define NVME_CMBLOC_OFST(cmbloc)((cmbloc >> CMBLOC_OFST_SHIFT) & \ 155 + CMBLOC_OFST_MASK) 156 + 157 + #define NVME_CMBLOC_SET_BIR(cmbloc, val) \ 158 + (cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT) 159 + #define NVME_CMBLOC_SET_OFST(cmbloc, val) \ 160 + (cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT) 161 + 162 + enum NvmeCmbszShift { 163 + CMBSZ_SQS_SHIFT = 0, 164 + CMBSZ_CQS_SHIFT = 1, 165 + CMBSZ_LISTS_SHIFT = 2, 166 + CMBSZ_RDS_SHIFT = 3, 167 + CMBSZ_WDS_SHIFT = 4, 168 + CMBSZ_SZU_SHIFT = 8, 169 + CMBSZ_SZ_SHIFT = 12, 170 + }; 171 + 172 + enum NvmeCmbszMask { 173 + CMBSZ_SQS_MASK = 0x1, 174 + CMBSZ_CQS_MASK = 0x1, 175 + CMBSZ_LISTS_MASK = 0x1, 176 + CMBSZ_RDS_MASK = 0x1, 177 + CMBSZ_WDS_MASK = 0x1, 178 + CMBSZ_SZU_MASK = 0xf, 179 + CMBSZ_SZ_MASK = 0xfffff, 180 + }; 181 + 182 + #define NVME_CMBSZ_SQS(cmbsz) ((cmbsz >> CMBSZ_SQS_SHIFT) & CMBSZ_SQS_MASK) 
/*
 * NVMe specification constants shared by the NVMe device emulation and
 * the NVMe block driver: CMB register field accessors, submission /
 * completion queue entry layouts, opcodes, status codes, log pages and
 * Identify data structures.  Struct layouts and enum values follow the
 * NVMe specification and must not change; all function-like macros
 * parenthesize their arguments so callers may pass expressions
 * (operator-precedence safety, CERT PRE01-C).
 */

/* CMBSZ (Controller Memory Buffer Size register) field extractors. */
#define NVME_CMBSZ_CQS(cmbsz)   (((cmbsz) >> CMBSZ_CQS_SHIFT)   & CMBSZ_CQS_MASK)
#define NVME_CMBSZ_LISTS(cmbsz) (((cmbsz) >> CMBSZ_LISTS_SHIFT) & CMBSZ_LISTS_MASK)
#define NVME_CMBSZ_RDS(cmbsz)   (((cmbsz) >> CMBSZ_RDS_SHIFT)   & CMBSZ_RDS_MASK)
#define NVME_CMBSZ_WDS(cmbsz)   (((cmbsz) >> CMBSZ_WDS_SHIFT)   & CMBSZ_WDS_MASK)
#define NVME_CMBSZ_SZU(cmbsz)   (((cmbsz) >> CMBSZ_SZU_SHIFT)   & CMBSZ_SZU_MASK)
#define NVME_CMBSZ_SZ(cmbsz)    (((cmbsz) >> CMBSZ_SZ_SHIFT)    & CMBSZ_SZ_MASK)

/* CMBSZ field setters: OR the masked value into the register image. */
#define NVME_CMBSZ_SET_SQS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SQS_MASK) << CMBSZ_SQS_SHIFT)
#define NVME_CMBSZ_SET_CQS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_CQS_MASK) << CMBSZ_CQS_SHIFT)
#define NVME_CMBSZ_SET_LISTS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_LISTS_MASK) << CMBSZ_LISTS_SHIFT)
#define NVME_CMBSZ_SET_RDS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_RDS_MASK) << CMBSZ_RDS_SHIFT)
#define NVME_CMBSZ_SET_WDS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_WDS_MASK) << CMBSZ_WDS_SHIFT)
#define NVME_CMBSZ_SET_SZU(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SZU_MASK) << CMBSZ_SZU_SHIFT)
#define NVME_CMBSZ_SET_SZ(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SZ_MASK) << CMBSZ_SZ_SHIFT)

/* CMB size in bytes: SZ units of 4KB * 16^SZU. */
#define NVME_CMBSZ_GETSIZE(cmbsz) \
    (NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))

/* Generic 64-byte submission queue entry (common command format). */
typedef struct NvmeCmd {
    uint8_t     opcode;
    uint8_t     fuse;       /* fused-operation flags */
    uint16_t    cid;        /* command identifier */
    uint32_t    nsid;       /* namespace identifier */
    uint64_t    res1;
    uint64_t    mptr;       /* metadata pointer */
    uint64_t    prp1;       /* PRP entry 1 */
    uint64_t    prp2;       /* PRP entry 2 */
    uint32_t    cdw10;      /* command-specific dwords 10..15 */
    uint32_t    cdw11;
    uint32_t    cdw12;
    uint32_t    cdw13;
    uint32_t    cdw14;
    uint32_t    cdw15;
} NvmeCmd;

/* Admin command set opcodes. */
enum NvmeAdminCommands {
    NVME_ADM_CMD_DELETE_SQ      = 0x00,
    NVME_ADM_CMD_CREATE_SQ      = 0x01,
    NVME_ADM_CMD_GET_LOG_PAGE   = 0x02,
    NVME_ADM_CMD_DELETE_CQ      = 0x04,
    NVME_ADM_CMD_CREATE_CQ      = 0x05,
    NVME_ADM_CMD_IDENTIFY       = 0x06,
    NVME_ADM_CMD_ABORT          = 0x08,
    NVME_ADM_CMD_SET_FEATURES   = 0x09,
    NVME_ADM_CMD_GET_FEATURES   = 0x0a,
    NVME_ADM_CMD_ASYNC_EV_REQ   = 0x0c,
    NVME_ADM_CMD_ACTIVATE_FW    = 0x10,
    NVME_ADM_CMD_DOWNLOAD_FW    = 0x11,
    NVME_ADM_CMD_FORMAT_NVM     = 0x80,
    NVME_ADM_CMD_SECURITY_SEND  = 0x81,
    NVME_ADM_CMD_SECURITY_RECV  = 0x82,
};

/* NVM (I/O) command set opcodes. */
enum NvmeIoCommands {
    NVME_CMD_FLUSH              = 0x00,
    NVME_CMD_WRITE              = 0x01,
    NVME_CMD_READ               = 0x02,
    NVME_CMD_WRITE_UNCOR        = 0x04,
    NVME_CMD_COMPARE            = 0x05,
    NVME_CMD_WRITE_ZEROS        = 0x08,
    NVME_CMD_DSM                = 0x09,
};

/* Delete I/O Submission/Completion Queue command. */
typedef struct NvmeDeleteQ {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[9];
    uint16_t    qid;        /* queue identifier to delete */
    uint16_t    rsvd10;
    uint32_t    rsvd11[5];
} NvmeDeleteQ;

/* Create I/O Completion Queue command. */
typedef struct NvmeCreateCq {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[5];
    uint64_t    prp1;       /* queue base address */
    uint64_t    rsvd8;
    uint16_t    cqid;
    uint16_t    qsize;      /* queue size, 0's based */
    uint16_t    cq_flags;
    uint16_t    irq_vector;
    uint32_t    rsvd12[4];
} NvmeCreateCq;

#define NVME_CQ_FLAGS_PC(cq_flags)  ((cq_flags) & 0x1)          /* physically contiguous */
#define NVME_CQ_FLAGS_IEN(cq_flags) (((cq_flags) >> 1) & 0x1)   /* interrupts enabled */

/* Create I/O Submission Queue command. */
typedef struct NvmeCreateSq {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[5];
    uint64_t    prp1;       /* queue base address */
    uint64_t    rsvd8;
    uint16_t    sqid;
    uint16_t    qsize;      /* queue size, 0's based */
    uint16_t    sq_flags;
    uint16_t    cqid;       /* completion queue to post to */
    uint32_t    rsvd12[4];
} NvmeCreateSq;

#define NVME_SQ_FLAGS_PC(sq_flags)      ((sq_flags) & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags)   (((sq_flags) >> 1) & 0x3)

enum NvmeQueueFlags {
    NVME_Q_PC           = 1,
    NVME_Q_PRIO_URGENT  = 0,
    NVME_Q_PRIO_HIGH    = 1,
    NVME_Q_PRIO_NORMAL  = 2,
    NVME_Q_PRIO_LOW     = 3,
};

/* Identify command; cns selects controller vs namespace data. */
typedef struct NvmeIdentify {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2[2];
    uint64_t    prp1;
    uint64_t    prp2;
    uint32_t    cns;
    uint32_t    rsvd11[5];
} NvmeIdentify;

/* Read/Write command. */
typedef struct NvmeRwCmd {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2;
    uint64_t    mptr;
    uint64_t    prp1;
    uint64_t    prp2;
    uint64_t    slba;       /* starting LBA */
    uint16_t    nlb;        /* number of logical blocks, 0's based */
    uint16_t    control;
    uint32_t    dsmgmt;
    uint32_t    reftag;     /* end-to-end protection fields */
    uint16_t    apptag;
    uint16_t    appmask;
} NvmeRwCmd;

/* Bits of NvmeRwCmd.control and .dsmgmt. */
enum {
    NVME_RW_LR                  = 1 << 15,
    NVME_RW_FUA                 = 1 << 14,
    NVME_RW_DSM_FREQ_UNSPEC     = 0,
    NVME_RW_DSM_FREQ_TYPICAL    = 1,
    NVME_RW_DSM_FREQ_RARE       = 2,
    NVME_RW_DSM_FREQ_READS      = 3,
    NVME_RW_DSM_FREQ_WRITES     = 4,
    NVME_RW_DSM_FREQ_RW         = 5,
    NVME_RW_DSM_FREQ_ONCE       = 6,
    NVME_RW_DSM_FREQ_PREFETCH   = 7,
    NVME_RW_DSM_FREQ_TEMP       = 8,
    NVME_RW_DSM_LATENCY_NONE    = 0 << 4,
    NVME_RW_DSM_LATENCY_IDLE    = 1 << 4,
    NVME_RW_DSM_LATENCY_NORM    = 2 << 4,
    NVME_RW_DSM_LATENCY_LOW     = 3 << 4,
    NVME_RW_DSM_SEQ_REQ         = 1 << 6,
    NVME_RW_DSM_COMPRESSED      = 1 << 7,
    NVME_RW_PRINFO_PRACT        = 1 << 13,
    NVME_RW_PRINFO_PRCHK_GUARD  = 1 << 12,
    NVME_RW_PRINFO_PRCHK_APP    = 1 << 11,
    NVME_RW_PRINFO_PRCHK_REF    = 1 << 10,
};

/* Dataset Management command. */
typedef struct NvmeDsmCmd {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2[2];
    uint64_t    prp1;
    uint64_t    prp2;
    uint32_t    nr;         /* number of ranges, 0's based */
    uint32_t    attributes;
    uint32_t    rsvd12[4];
} NvmeDsmCmd;

/* NvmeDsmCmd.attributes bits. */
enum {
    NVME_DSMGMT_IDR = 1 << 0,
    NVME_DSMGMT_IDW = 1 << 1,
    NVME_DSMGMT_AD  = 1 << 2,   /* deallocate */
};

/* One entry of the Dataset Management range list. */
typedef struct NvmeDsmRange {
    uint32_t    cattr;
    uint32_t    nlb;
    uint64_t    slba;
} NvmeDsmRange;

/* Asynchronous event types and per-type information codes. */
enum NvmeAsyncEventRequest {
    NVME_AER_TYPE_ERROR                     = 0,
    NVME_AER_TYPE_SMART                     = 1,
    NVME_AER_TYPE_IO_SPECIFIC               = 6,
    NVME_AER_TYPE_VENDOR_SPECIFIC           = 7,
    NVME_AER_INFO_ERR_INVALID_SQ            = 0,
    NVME_AER_INFO_ERR_INVALID_DB            = 1,
    NVME_AER_INFO_ERR_DIAG_FAIL             = 2,
    NVME_AER_INFO_ERR_PERS_INTERNAL_ERR     = 3,
    NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR    = 4,
    NVME_AER_INFO_ERR_FW_IMG_LOAD_ERR       = 5,
    NVME_AER_INFO_SMART_RELIABILITY         = 0,
    NVME_AER_INFO_SMART_TEMP_THRESH         = 1,
    NVME_AER_INFO_SMART_SPARE_THRESH        = 2,
};

/* Asynchronous event completion dword 0 layout. */
typedef struct NvmeAerResult {
    uint8_t event_type;
    uint8_t event_info;
    uint8_t log_page;
    uint8_t resv;
} NvmeAerResult;

/* 16-byte completion queue entry. */
typedef struct NvmeCqe {
    uint32_t    result;     /* command-specific result (dword 0) */
    uint32_t    rsvd;
    uint16_t    sq_head;    /* SQ head pointer at completion time */
    uint16_t    sq_id;
    uint16_t    cid;        /* identifier of the completed command */
    uint16_t    status;     /* phase bit + status field */
} NvmeCqe;

/* Status field values; NVME_MORE/NVME_DNR are modifier bits. */
enum NvmeStatusCodes {
    NVME_SUCCESS                = 0x0000,
    NVME_INVALID_OPCODE         = 0x0001,
    NVME_INVALID_FIELD          = 0x0002,
    NVME_CID_CONFLICT           = 0x0003,
    NVME_DATA_TRAS_ERROR        = 0x0004,
    NVME_POWER_LOSS_ABORT       = 0x0005,
    NVME_INTERNAL_DEV_ERROR     = 0x0006,
    NVME_CMD_ABORT_REQ          = 0x0007,
    NVME_CMD_ABORT_SQ_DEL       = 0x0008,
    NVME_CMD_ABORT_FAILED_FUSE  = 0x0009,
    NVME_CMD_ABORT_MISSING_FUSE = 0x000a,
    NVME_INVALID_NSID           = 0x000b,
    NVME_CMD_SEQ_ERROR          = 0x000c,
    NVME_LBA_RANGE              = 0x0080,
    NVME_CAP_EXCEEDED           = 0x0081,
    NVME_NS_NOT_READY           = 0x0082,
    NVME_NS_RESV_CONFLICT       = 0x0083,
    NVME_INVALID_CQID           = 0x0100,
    NVME_INVALID_QID            = 0x0101,
    NVME_MAX_QSIZE_EXCEEDED     = 0x0102,
    NVME_ACL_EXCEEDED           = 0x0103,
    NVME_RESERVED               = 0x0104,
    NVME_AER_LIMIT_EXCEEDED     = 0x0105,
    NVME_INVALID_FW_SLOT        = 0x0106,
    NVME_INVALID_FW_IMAGE       = 0x0107,
    NVME_INVALID_IRQ_VECTOR     = 0x0108,
    NVME_INVALID_LOG_ID         = 0x0109,
    NVME_INVALID_FORMAT         = 0x010a,
    NVME_FW_REQ_RESET           = 0x010b,
    NVME_INVALID_QUEUE_DEL      = 0x010c,
    NVME_FID_NOT_SAVEABLE       = 0x010d,
    NVME_FID_NOT_NSID_SPEC      = 0x010f,
    NVME_FW_REQ_SUSYSTEM_RESET  = 0x0110,
    NVME_CONFLICTING_ATTRS      = 0x0180,
    NVME_INVALID_PROT_INFO      = 0x0181,
    NVME_WRITE_TO_RO            = 0x0182,
    NVME_WRITE_FAULT            = 0x0280,
    NVME_UNRECOVERED_READ       = 0x0281,
    NVME_E2E_GUARD_ERROR        = 0x0282,
    NVME_E2E_APP_ERROR          = 0x0283,
    NVME_E2E_REF_ERROR          = 0x0284,
    NVME_CMP_FAILURE            = 0x0285,
    NVME_ACCESS_DENIED          = 0x0286,
    NVME_MORE                   = 0x2000,   /* more status in error log */
    NVME_DNR                    = 0x4000,   /* do not retry */
    NVME_NO_COMPLETE            = 0xffff,   /* internal: suppress CQE */
};

/* Firmware Slot Information log page (512 bytes). */
typedef struct NvmeFwSlotInfoLog {
    uint8_t     afi;            /* active firmware info */
    uint8_t     reserved1[7];
    uint8_t     frs1[8];        /* firmware revision per slot */
    uint8_t     frs2[8];
    uint8_t     frs3[8];
    uint8_t     frs4[8];
    uint8_t     frs5[8];
    uint8_t     frs6[8];
    uint8_t     frs7[8];
    uint8_t     reserved2[448];
} NvmeFwSlotInfoLog;

/* Error Information log entry (64 bytes). */
typedef struct NvmeErrorLog {
    uint64_t    error_count;
    uint16_t    sqid;
    uint16_t    cid;
    uint16_t    status_field;
    uint16_t    param_error_location;
    uint64_t    lba;
    uint32_t    nsid;
    uint8_t     vs;
    uint8_t     resv[35];
} NvmeErrorLog;

/* SMART / Health Information log page (512 bytes); the [2] arrays are
 * 128-bit counters stored as two 64-bit halves. */
typedef struct NvmeSmartLog {
    uint8_t     critical_warning;
    uint8_t     temperature[2];
    uint8_t     available_spare;
    uint8_t     available_spare_threshold;
    uint8_t     percentage_used;
    uint8_t     reserved1[26];
    uint64_t    data_units_read[2];
    uint64_t    data_units_written[2];
    uint64_t    host_read_commands[2];
    uint64_t    host_write_commands[2];
    uint64_t    controller_busy_time[2];
    uint64_t    power_cycles[2];
    uint64_t    power_on_hours[2];
    uint64_t    unsafe_shutdowns[2];
    uint64_t    media_errors[2];
    uint64_t    number_of_error_log_entries[2];
    uint8_t     reserved2[320];
} NvmeSmartLog;

/* critical_warning bits of the SMART log. */
enum NvmeSmartWarn {
    NVME_SMART_SPARE                  = 1 << 0,
    NVME_SMART_TEMPERATURE            = 1 << 1,
    NVME_SMART_RELIABILITY            = 1 << 2,
    NVME_SMART_MEDIA_READ_ONLY        = 1 << 3,
    NVME_SMART_FAILED_VOLATILE_MEDIA  = 1 << 4,
};

/* Log page identifiers for Get Log Page. */
enum LogIdentifier {
    NVME_LOG_ERROR_INFO     = 0x01,
    NVME_LOG_SMART_INFO     = 0x02,
    NVME_LOG_FW_SLOT_INFO   = 0x03,
};

/* Power State Descriptor of the Identify Controller data (32 bytes). */
typedef struct NvmePSD {
    uint16_t    mp;         /* maximum power */
    uint16_t    reserved;
    uint32_t    enlat;      /* entry latency */
    uint32_t    exlat;      /* exit latency */
    uint8_t     rrt;        /* relative read throughput */
    uint8_t     rrl;        /* relative read latency */
    uint8_t     rwt;        /* relative write throughput */
    uint8_t     rwl;        /* relative write latency */
    uint8_t     resv[16];
} NvmePSD;

/* Identify Controller data structure (4096 bytes). */
typedef struct NvmeIdCtrl {
    uint16_t    vid;        /* PCI vendor id */
    uint16_t    ssvid;      /* PCI subsystem vendor id */
    uint8_t     sn[20];     /* serial number (ASCII) */
    uint8_t     mn[40];     /* model number (ASCII) */
    uint8_t     fr[8];      /* firmware revision (ASCII) */
    uint8_t     rab;
    uint8_t     ieee[3];
    uint8_t     cmic;
    uint8_t     mdts;       /* max data transfer size, 2^mdts pages */
    uint8_t     rsvd255[178];
    uint16_t    oacs;       /* optional admin command support */
    uint8_t     acl;
    uint8_t     aerl;
    uint8_t     frmw;
    uint8_t     lpa;
    uint8_t     elpe;
    uint8_t     npss;
    uint8_t     rsvd511[248];
    uint8_t     sqes;       /* SQ entry size, min/max nibbles */
    uint8_t     cqes;       /* CQ entry size, min/max nibbles */
    uint16_t    rsvd515;
    uint32_t    nn;         /* number of namespaces */
    uint16_t    oncs;       /* optional NVM command support */
    uint16_t    fuses;
    uint8_t     fna;
    uint8_t     vwc;        /* volatile write cache present */
    uint16_t    awun;
    uint16_t    awupf;
    uint8_t     rsvd703[174];
    uint8_t     rsvd2047[1344];
    NvmePSD     psd[32];
    uint8_t     vs[1024];   /* vendor specific */
} NvmeIdCtrl;

/* NvmeIdCtrl.oacs bits. */
enum NvmeIdCtrlOacs {
    NVME_OACS_SECURITY  = 1 << 0,
    NVME_OACS_FORMAT    = 1 << 1,
    NVME_OACS_FW        = 1 << 2,
};

/* NvmeIdCtrl.oncs bits. */
enum NvmeIdCtrlOncs {
    NVME_ONCS_COMPARE       = 1 << 0,
    NVME_ONCS_WRITE_UNCORR  = 1 << 1,
    NVME_ONCS_DSM           = 1 << 2,
    NVME_ONCS_WRITE_ZEROS   = 1 << 3,
    NVME_ONCS_FEATURES      = 1 << 4,
    NVME_ONCS_RESRVATIONS   = 1 << 5,
};

/* Queue entry sizes are encoded as power-of-two nibbles. */
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)

/* Current values of the settable features (Set/Get Features). */
typedef struct NvmeFeatureVal {
    uint32_t    arbitration;
    uint32_t    power_mgmt;
    uint32_t    temp_thresh;
    uint32_t    err_rec;
    uint32_t    volatile_wc;
    uint32_t    num_queues;
    uint32_t    int_coalescing;
    uint32_t    *int_vector_config;     /* per-vector configuration array */
    uint32_t    write_atomicity;
    uint32_t    async_config;
    uint32_t    sw_prog_marker;
} NvmeFeatureVal;

/* Arbitration feature fields. */
#define NVME_ARB_AB(arb)    ((arb) & 0x7)
#define NVME_ARB_LPW(arb)   (((arb) >> 8) & 0xff)
#define NVME_ARB_MPW(arb)   (((arb) >> 16) & 0xff)
#define NVME_ARB_HPW(arb)   (((arb) >> 24) & 0xff)

/* Interrupt coalescing feature fields. */
#define NVME_INTC_THR(intc)     ((intc) & 0xff)
#define NVME_INTC_TIME(intc)    (((intc) >> 8) & 0xff)

/* Feature identifiers for Set/Get Features. */
enum NvmeFeatureIds {
    NVME_ARBITRATION                = 0x1,
    NVME_POWER_MANAGEMENT           = 0x2,
    NVME_LBA_RANGE_TYPE             = 0x3,
    NVME_TEMPERATURE_THRESHOLD      = 0x4,
    NVME_ERROR_RECOVERY             = 0x5,
    NVME_VOLATILE_WRITE_CACHE       = 0x6,
    NVME_NUMBER_OF_QUEUES           = 0x7,
    NVME_INTERRUPT_COALESCING       = 0x8,
    NVME_INTERRUPT_VECTOR_CONF      = 0x9,
    NVME_WRITE_ATOMICITY            = 0xa,
    NVME_ASYNCHRONOUS_EVENT_CONF    = 0xb,
    NVME_SOFTWARE_PROGRESS_MARKER   = 0x80
};

/* One entry of the LBA Range Type feature data (64 bytes). */
typedef struct NvmeRangeType {
    uint8_t     type;
    uint8_t     attributes;
    uint8_t     rsvd2[14];
    uint64_t    slba;
    uint64_t    nlb;
    uint8_t     guid[16];
    uint8_t     rsvd48[16];
} NvmeRangeType;

/* LBA Format descriptor of the Identify Namespace data. */
typedef struct NvmeLBAF {
    uint16_t    ms;         /* metadata size */
    uint8_t     ds;         /* LBA data size, 2^ds bytes */
    uint8_t     rp;         /* relative performance */
} NvmeLBAF;

/* Identify Namespace data structure (4096 bytes). */
typedef struct NvmeIdNs {
    uint64_t    nsze;       /* namespace size, in logical blocks */
    uint64_t    ncap;       /* namespace capacity */
    uint64_t    nuse;       /* namespace utilization */
    uint8_t     nsfeat;
    uint8_t     nlbaf;      /* number of LBA formats, 0's based */
    uint8_t     flbas;      /* formatted LBA size (index into lbaf) */
    uint8_t     mc;         /* metadata capabilities */
    uint8_t     dpc;        /* end-to-end data protection capabilities */
    uint8_t     dps;        /* data protection type settings */
    uint8_t     res30[98];
    NvmeLBAF    lbaf[16];
    uint8_t     res192[192];
    uint8_t     vs[3712];   /* vendor specific */
} NvmeIdNs;

/* Identify Namespace field accessors. */
#define NVME_ID_NS_NSFEAT_THIN(nsfeat)      ((nsfeat) & 0x1)
#define NVME_ID_NS_FLBAS_EXTENDED(flbas)    (((flbas) >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas)       ((flbas) & 0xf)
#define NVME_ID_NS_MC_SEPARATE(mc)          (((mc) >> 1) & 0x1)
664 + #define NVME_ID_NS_MC_EXTENDED(mc) ((mc & 0x1)) 665 + #define NVME_ID_NS_DPC_LAST_EIGHT(dpc) ((dpc >> 4) & 0x1) 666 + #define NVME_ID_NS_DPC_FIRST_EIGHT(dpc) ((dpc >> 3) & 0x1) 667 + #define NVME_ID_NS_DPC_TYPE_3(dpc) ((dpc >> 2) & 0x1) 668 + #define NVME_ID_NS_DPC_TYPE_2(dpc) ((dpc >> 1) & 0x1) 669 + #define NVME_ID_NS_DPC_TYPE_1(dpc) ((dpc & 0x1)) 670 + #define NVME_ID_NS_DPC_TYPE_MASK 0x7 671 + 672 + enum NvmeIdNsDps { 673 + DPS_TYPE_NONE = 0, 674 + DPS_TYPE_1 = 1, 675 + DPS_TYPE_2 = 2, 676 + DPS_TYPE_3 = 3, 677 + DPS_TYPE_MASK = 0x7, 678 + DPS_FIRST_EIGHT = 8, 679 + }; 680 + 681 + static inline void _nvme_check_size(void) 682 + { 683 + QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4); 684 + QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16); 685 + QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16); 686 + QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64); 687 + QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64); 688 + QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64); 689 + QEMU_BUILD_BUG_ON(sizeof(NvmeCreateSq) != 64); 690 + QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64); 691 + QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64); 692 + QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64); 693 + QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64); 694 + QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64); 695 + QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512); 696 + QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512); 697 + QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096); 698 + QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096); 699 + } 700 + #endif