qemu with hacks to log DMA reads & writes — jcs.org/2018/11/12/vfio

block: Increase BB.in_flight for coroutine and sync interfaces

External callers of blk_co_*() and of the synchronous blk_*() functions
don't currently increase the BlockBackend.in_flight counter, but calls
from blk_aio_*() do, so there is an inconsistency in whether the counter
has been increased or not.

This patch moves the actual operations to static functions that can
later know they will always be called with in_flight increased exactly
once, even for external callers using the blk_co_*() coroutine
interfaces.

Where the public blk_co_*() interface of a converted function is unused,
it is removed.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20200407121259.21350-3-kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

+80 -24
+80 -23
block/block-backend.c
··· 1147 1147 } 1148 1148 } 1149 1149 1150 - int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, 1151 - unsigned int bytes, QEMUIOVector *qiov, 1152 - BdrvRequestFlags flags) 1150 + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1151 + static int coroutine_fn 1152 + blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes, 1153 + QEMUIOVector *qiov, BdrvRequestFlags flags) 1153 1154 { 1154 1155 int ret; 1155 1156 BlockDriverState *bs; ··· 1178 1179 return ret; 1179 1180 } 1180 1181 1181 - int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, 1182 - unsigned int bytes, 1183 - QEMUIOVector *qiov, size_t qiov_offset, 1184 - BdrvRequestFlags flags) 1182 + int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, 1183 + unsigned int bytes, QEMUIOVector *qiov, 1184 + BdrvRequestFlags flags) 1185 + { 1186 + int ret; 1187 + 1188 + blk_inc_in_flight(blk); 1189 + ret = blk_do_preadv(blk, offset, bytes, qiov, flags); 1190 + blk_dec_in_flight(blk); 1191 + 1192 + return ret; 1193 + } 1194 + 1195 + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1196 + static int coroutine_fn 1197 + blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes, 1198 + QEMUIOVector *qiov, size_t qiov_offset, 1199 + BdrvRequestFlags flags) 1185 1200 { 1186 1201 int ret; 1187 1202 BlockDriverState *bs; ··· 1214 1229 return ret; 1215 1230 } 1216 1231 1232 + int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, 1233 + unsigned int bytes, 1234 + QEMUIOVector *qiov, size_t qiov_offset, 1235 + BdrvRequestFlags flags) 1236 + { 1237 + int ret; 1238 + 1239 + blk_inc_in_flight(blk); 1240 + ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); 1241 + blk_dec_in_flight(blk); 1242 + 1243 + return ret; 1244 + } 1245 + 1217 1246 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, 1218 1247 unsigned int bytes, QEMUIOVector *qiov, 1219 1248 
BdrvRequestFlags flags) ··· 1234 1263 BlkRwCo *rwco = opaque; 1235 1264 QEMUIOVector *qiov = rwco->iobuf; 1236 1265 1237 - rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size, 1266 + rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size, 1238 1267 qiov, rwco->flags); 1239 1268 aio_wait_kick(); 1240 1269 } ··· 1244 1273 BlkRwCo *rwco = opaque; 1245 1274 QEMUIOVector *qiov = rwco->iobuf; 1246 1275 1247 - rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size, 1248 - qiov, rwco->flags); 1276 + rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size, 1277 + qiov, 0, rwco->flags); 1249 1278 aio_wait_kick(); 1250 1279 } 1251 1280 ··· 1262 1291 .ret = NOT_DONE, 1263 1292 }; 1264 1293 1294 + blk_inc_in_flight(blk); 1265 1295 if (qemu_in_coroutine()) { 1266 1296 /* Fast-path if already in coroutine context */ 1267 1297 co_entry(&rwco); ··· 1270 1300 bdrv_coroutine_enter(blk_bs(blk), co); 1271 1301 BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); 1272 1302 } 1303 + blk_dec_in_flight(blk); 1273 1304 1274 1305 return rwco.ret; 1275 1306 } ··· 1394 1425 } 1395 1426 1396 1427 assert(qiov->size == acb->bytes); 1397 - rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes, 1428 + rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes, 1398 1429 qiov, rwco->flags); 1399 1430 blk_aio_complete(acb); 1400 1431 } ··· 1412 1443 } 1413 1444 1414 1445 assert(!qiov || qiov->size == acb->bytes); 1415 - rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes, 1416 - qiov, rwco->flags); 1446 + rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, 1447 + qiov, 0, rwco->flags); 1417 1448 blk_aio_complete(acb); 1418 1449 } 1419 1450 ··· 1498 1529 bdrv_aio_cancel_async(acb); 1499 1530 } 1500 1531 1501 - int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf) 1532 + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1533 + static int coroutine_fn 1534 + blk_do_ioctl(BlockBackend *blk, 
unsigned long int req, void *buf) 1502 1535 { 1503 1536 blk_wait_while_drained(blk); 1504 1537 ··· 1514 1547 BlkRwCo *rwco = opaque; 1515 1548 QEMUIOVector *qiov = rwco->iobuf; 1516 1549 1517 - rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, 1518 - qiov->iov[0].iov_base); 1550 + rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base); 1519 1551 aio_wait_kick(); 1520 1552 } 1521 1553 ··· 1529 1561 BlkAioEmAIOCB *acb = opaque; 1530 1562 BlkRwCo *rwco = &acb->rwco; 1531 1563 1532 - rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf); 1564 + rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); 1533 1565 1534 1566 blk_aio_complete(acb); 1535 1567 } ··· 1540 1572 return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque); 1541 1573 } 1542 1574 1543 - int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1575 + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1576 + static int coroutine_fn 1577 + blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1544 1578 { 1545 1579 int ret; 1546 1580 ··· 1559 1593 BlkAioEmAIOCB *acb = opaque; 1560 1594 BlkRwCo *rwco = &acb->rwco; 1561 1595 1562 - rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes); 1596 + rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); 1563 1597 blk_aio_complete(acb); 1564 1598 } 1565 1599 ··· 1571 1605 cb, opaque); 1572 1606 } 1573 1607 1608 + int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) 1609 + { 1610 + int ret; 1611 + 1612 + blk_inc_in_flight(blk); 1613 + ret = blk_do_pdiscard(blk, offset, bytes); 1614 + blk_dec_in_flight(blk); 1615 + 1616 + return ret; 1617 + } 1618 + 1574 1619 static void blk_pdiscard_entry(void *opaque) 1575 1620 { 1576 1621 BlkRwCo *rwco = opaque; 1577 1622 QEMUIOVector *qiov = rwco->iobuf; 1578 1623 1579 - rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size); 1624 + rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, 
qiov->size); 1580 1625 aio_wait_kick(); 1581 1626 } 1582 1627 ··· 1585 1630 return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0); 1586 1631 } 1587 1632 1588 - int blk_co_flush(BlockBackend *blk) 1633 + /* To be called between exactly one pair of blk_inc/dec_in_flight() */ 1634 + static int coroutine_fn blk_do_flush(BlockBackend *blk) 1589 1635 { 1590 1636 blk_wait_while_drained(blk); 1591 1637 ··· 1601 1647 BlkAioEmAIOCB *acb = opaque; 1602 1648 BlkRwCo *rwco = &acb->rwco; 1603 1649 1604 - rwco->ret = blk_co_flush(rwco->blk); 1650 + rwco->ret = blk_do_flush(rwco->blk); 1605 1651 blk_aio_complete(acb); 1606 1652 } 1607 1653 ··· 1611 1657 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque); 1612 1658 } 1613 1659 1660 + int coroutine_fn blk_co_flush(BlockBackend *blk) 1661 + { 1662 + int ret; 1663 + 1664 + blk_inc_in_flight(blk); 1665 + ret = blk_do_flush(blk); 1666 + blk_dec_in_flight(blk); 1667 + 1668 + return ret; 1669 + } 1670 + 1614 1671 static void blk_flush_entry(void *opaque) 1615 1672 { 1616 1673 BlkRwCo *rwco = opaque; 1617 - rwco->ret = blk_co_flush(rwco->blk); 1674 + rwco->ret = blk_do_flush(rwco->blk); 1618 1675 aio_wait_kick(); 1619 1676 } 1620 1677
-1
include/sysemu/block-backend.h
··· 171 171 BlockCompletionFunc *cb, void *opaque); 172 172 void blk_aio_cancel(BlockAIOCB *acb); 173 173 void blk_aio_cancel_async(BlockAIOCB *acb); 174 - int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf); 175 174 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf); 176 175 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, 177 176 BlockCompletionFunc *cb, void *opaque);