qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

nbd: Minimal structured read for client

Minimal implementation: for a structured error reply, only error_report the
error message.

Note that test 83 is now more verbose, because the implementation
prints more warnings about unexpected communication errors; perhaps
future patches should tone things down by using trace messages
instead of error_report, but the common case of successful communication
is no noisier than before.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-Id: <20171027104037.8319-13-eblake@redhat.com>

authored by

Vladimir Sementsov-Ogievskiy and committed by
Eric Blake
f140e300 56dc682b

+498 -33
+458 -32
block/nbd-client.c
··· 93 93 if (i >= MAX_NBD_REQUESTS || 94 94 !s->requests[i].coroutine || 95 95 !s->requests[i].receiving || 96 - nbd_reply_is_structured(&s->reply)) 96 + (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply)) 97 97 { 98 98 break; 99 99 } ··· 141 141 assert(i < MAX_NBD_REQUESTS); 142 142 143 143 s->requests[i].coroutine = qemu_coroutine_self(); 144 + s->requests[i].offset = request->from; 144 145 s->requests[i].receiving = false; 145 146 146 147 request->handle = INDEX_TO_HANDLE(s, i); ··· 181 182 return rc; 182 183 } 183 184 184 - static int nbd_co_receive_reply(NBDClientSession *s, 185 - uint64_t handle, 186 - QEMUIOVector *qiov) 185 + static inline uint16_t payload_advance16(uint8_t **payload) 186 + { 187 + *payload += 2; 188 + return lduw_be_p(*payload - 2); 189 + } 190 + 191 + static inline uint32_t payload_advance32(uint8_t **payload) 192 + { 193 + *payload += 4; 194 + return ldl_be_p(*payload - 4); 195 + } 196 + 197 + static inline uint64_t payload_advance64(uint8_t **payload) 198 + { 199 + *payload += 8; 200 + return ldq_be_p(*payload - 8); 201 + } 202 + 203 + static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk, 204 + uint8_t *payload, uint64_t orig_offset, 205 + QEMUIOVector *qiov, Error **errp) 206 + { 207 + uint64_t offset; 208 + uint32_t hole_size; 209 + 210 + if (chunk->length != sizeof(offset) + sizeof(hole_size)) { 211 + error_setg(errp, "Protocol error: invalid payload for " 212 + "NBD_REPLY_TYPE_OFFSET_HOLE"); 213 + return -EINVAL; 214 + } 215 + 216 + offset = payload_advance64(&payload); 217 + hole_size = payload_advance32(&payload); 218 + 219 + if (offset < orig_offset || hole_size > qiov->size || 220 + offset > orig_offset + qiov->size - hole_size) { 221 + error_setg(errp, "Protocol error: server sent chunk exceeding requested" 222 + " region"); 223 + return -EINVAL; 224 + } 225 + 226 + qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size); 227 + 228 + return 0; 229 + } 230 + 231 + /* nbd_parse_error_payload 
232 + * on success @errp contains message describing nbd error reply 233 + */ 234 + static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk, 235 + uint8_t *payload, int *request_ret, 236 + Error **errp) 237 + { 238 + uint32_t error; 239 + uint16_t message_size; 240 + 241 + assert(chunk->type & (1 << 15)); 242 + 243 + if (chunk->length < sizeof(error) + sizeof(message_size)) { 244 + error_setg(errp, 245 + "Protocol error: invalid payload for structured error"); 246 + return -EINVAL; 247 + } 248 + 249 + error = nbd_errno_to_system_errno(payload_advance32(&payload)); 250 + if (error == 0) { 251 + error_setg(errp, "Protocol error: server sent structured error chunk" 252 + "with error = 0"); 253 + return -EINVAL; 254 + } 255 + 256 + *request_ret = -error; 257 + message_size = payload_advance16(&payload); 258 + 259 + if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) { 260 + error_setg(errp, "Protocol error: server sent structured error chunk" 261 + "with incorrect message size"); 262 + return -EINVAL; 263 + } 264 + 265 + /* TODO: Add a trace point to mention the server complaint */ 266 + 267 + /* TODO handle ERROR_OFFSET */ 268 + 269 + return 0; 270 + } 271 + 272 + static int nbd_co_receive_offset_data_payload(NBDClientSession *s, 273 + uint64_t orig_offset, 274 + QEMUIOVector *qiov, Error **errp) 275 + { 276 + QEMUIOVector sub_qiov; 277 + uint64_t offset; 278 + size_t data_size; 279 + int ret; 280 + NBDStructuredReplyChunk *chunk = &s->reply.structured; 281 + 282 + assert(nbd_reply_is_structured(&s->reply)); 283 + 284 + if (chunk->length < sizeof(offset)) { 285 + error_setg(errp, "Protocol error: invalid payload for " 286 + "NBD_REPLY_TYPE_OFFSET_DATA"); 287 + return -EINVAL; 288 + } 289 + 290 + if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) { 291 + return -EIO; 292 + } 293 + be64_to_cpus(&offset); 294 + 295 + data_size = chunk->length - sizeof(offset); 296 + if (offset < orig_offset || data_size > qiov->size || 297 + offset > 
orig_offset + qiov->size - data_size) { 298 + error_setg(errp, "Protocol error: server sent chunk exceeding requested" 299 + " region"); 300 + return -EINVAL; 301 + } 302 + 303 + qemu_iovec_init(&sub_qiov, qiov->niov); 304 + qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size); 305 + ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp); 306 + qemu_iovec_destroy(&sub_qiov); 307 + 308 + return ret < 0 ? -EIO : 0; 309 + } 310 + 311 + #define NBD_MAX_MALLOC_PAYLOAD 1000 312 + /* nbd_co_receive_structured_payload 313 + */ 314 + static coroutine_fn int nbd_co_receive_structured_payload( 315 + NBDClientSession *s, void **payload, Error **errp) 316 + { 317 + int ret; 318 + uint32_t len; 319 + 320 + assert(nbd_reply_is_structured(&s->reply)); 321 + 322 + len = s->reply.structured.length; 323 + 324 + if (len == 0) { 325 + return 0; 326 + } 327 + 328 + if (payload == NULL) { 329 + error_setg(errp, "Unexpected structured payload"); 330 + return -EINVAL; 331 + } 332 + 333 + if (len > NBD_MAX_MALLOC_PAYLOAD) { 334 + error_setg(errp, "Payload too large"); 335 + return -EINVAL; 336 + } 337 + 338 + *payload = g_new(char, len); 339 + ret = nbd_read(s->ioc, *payload, len, errp); 340 + if (ret < 0) { 341 + g_free(*payload); 342 + *payload = NULL; 343 + return ret; 344 + } 345 + 346 + return 0; 347 + } 348 + 349 + /* nbd_co_do_receive_one_chunk 350 + * for simple reply: 351 + * set request_ret to received reply error 352 + * if qiov is not NULL: read payload to @qiov 353 + * for structured reply chunk: 354 + * if error chunk: read payload, set @request_ret, do not set @payload 355 + * else if offset_data chunk: read payload data to @qiov, do not set @payload 356 + * else: read payload to @payload 357 + * 358 + * If function fails, @errp contains corresponding error message, and the 359 + * connection with the server is suspect. 
If it returns 0, then the 360 + * transaction succeeded (although @request_ret may be a negative errno 361 + * corresponding to the server's error reply), and errp is unchanged. 362 + */ 363 + static coroutine_fn int nbd_co_do_receive_one_chunk( 364 + NBDClientSession *s, uint64_t handle, bool only_structured, 365 + int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp) 187 366 { 188 367 int ret; 189 368 int i = HANDLE_TO_INDEX(s, handle); 369 + void *local_payload = NULL; 370 + NBDStructuredReplyChunk *chunk; 371 + 372 + if (payload) { 373 + *payload = NULL; 374 + } 375 + *request_ret = 0; 190 376 191 377 /* Wait until we're woken up by nbd_read_reply_entry. */ 192 378 s->requests[i].receiving = true; 193 379 qemu_coroutine_yield(); 194 380 s->requests[i].receiving = false; 195 381 if (!s->ioc || s->quit) { 196 - ret = -EIO; 197 - } else { 198 - assert(s->reply.handle == handle); 199 - ret = -nbd_errno_to_system_errno(s->reply.simple.error); 200 - if (qiov && ret == 0) { 201 - if (qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov, 202 - NULL) < 0) { 203 - ret = -EIO; 204 - s->quit = true; 205 - } 382 + error_setg(errp, "Connection closed"); 383 + return -EIO; 384 + } 385 + 386 + assert(s->reply.handle == handle); 387 + 388 + if (nbd_reply_is_simple(&s->reply)) { 389 + if (only_structured) { 390 + error_setg(errp, "Protocol error: simple reply when structured " 391 + "reply chunk was expected"); 392 + return -EINVAL; 393 + } 394 + 395 + *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error); 396 + if (*request_ret < 0 || !qiov) { 397 + return 0; 206 398 } 207 399 208 - /* Tell the read handler to read another header. */ 209 - s->reply.handle = 0; 400 + return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov, 401 + errp) < 0 ? 
-EIO : 0; 210 402 } 211 403 212 - s->requests[i].coroutine = NULL; 404 + /* handle structured reply chunk */ 405 + assert(s->info.structured_reply); 406 + chunk = &s->reply.structured; 213 407 214 - /* Kick the read_reply_co to get the next reply. */ 408 + if (chunk->type == NBD_REPLY_TYPE_NONE) { 409 + if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) { 410 + error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without" 411 + "NBD_REPLY_FLAG_DONE flag set"); 412 + return -EINVAL; 413 + } 414 + return 0; 415 + } 416 + 417 + if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) { 418 + if (!qiov) { 419 + error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk"); 420 + return -EINVAL; 421 + } 422 + 423 + return nbd_co_receive_offset_data_payload(s, s->requests[i].offset, 424 + qiov, errp); 425 + } 426 + 427 + if (nbd_reply_type_is_error(chunk->type)) { 428 + payload = &local_payload; 429 + } 430 + 431 + ret = nbd_co_receive_structured_payload(s, payload, errp); 432 + if (ret < 0) { 433 + return ret; 434 + } 435 + 436 + if (nbd_reply_type_is_error(chunk->type)) { 437 + ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp); 438 + g_free(local_payload); 439 + return ret; 440 + } 441 + 442 + return 0; 443 + } 444 + 445 + /* nbd_co_receive_one_chunk 446 + * Read reply, wake up read_reply_co and set s->quit if needed. 
447 + * Return value is a fatal error code or normal nbd reply error code 448 + */ 449 + static coroutine_fn int nbd_co_receive_one_chunk( 450 + NBDClientSession *s, uint64_t handle, bool only_structured, 451 + QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp) 452 + { 453 + int request_ret; 454 + int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured, 455 + &request_ret, qiov, payload, errp); 456 + 457 + if (ret < 0) { 458 + s->quit = true; 459 + } else { 460 + /* For assert at loop start in nbd_read_reply_entry */ 461 + if (reply) { 462 + *reply = s->reply; 463 + } 464 + s->reply.handle = 0; 465 + ret = request_ret; 466 + } 467 + 215 468 if (s->read_reply_co) { 216 469 aio_co_wake(s->read_reply_co); 217 470 } 218 471 472 + return ret; 473 + } 474 + 475 + typedef struct NBDReplyChunkIter { 476 + int ret; 477 + Error *err; 478 + bool done, only_structured; 479 + } NBDReplyChunkIter; 480 + 481 + static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal, 482 + int ret, Error **local_err) 483 + { 484 + assert(ret < 0); 485 + 486 + if (fatal || iter->ret == 0) { 487 + if (iter->ret != 0) { 488 + error_free(iter->err); 489 + iter->err = NULL; 490 + } 491 + iter->ret = ret; 492 + error_propagate(&iter->err, *local_err); 493 + } else { 494 + error_free(*local_err); 495 + } 496 + 497 + *local_err = NULL; 498 + } 499 + 500 + /* NBD_FOREACH_REPLY_CHUNK 501 + */ 502 + #define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \ 503 + qiov, reply, payload) \ 504 + for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \ 505 + nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);) 506 + 507 + /* nbd_reply_chunk_iter_receive 508 + */ 509 + static bool nbd_reply_chunk_iter_receive(NBDClientSession *s, 510 + NBDReplyChunkIter *iter, 511 + uint64_t handle, 512 + QEMUIOVector *qiov, NBDReply *reply, 513 + void **payload) 514 + { 515 + int ret; 516 + NBDReply local_reply; 517 + NBDStructuredReplyChunk *chunk; 518 + Error 
*local_err = NULL; 519 + if (s->quit) { 520 + error_setg(&local_err, "Connection closed"); 521 + nbd_iter_error(iter, true, -EIO, &local_err); 522 + goto break_loop; 523 + } 524 + 525 + if (iter->done) { 526 + /* Previous iteration was last. */ 527 + goto break_loop; 528 + } 529 + 530 + if (reply == NULL) { 531 + reply = &local_reply; 532 + } 533 + 534 + ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured, 535 + qiov, reply, payload, &local_err); 536 + if (ret < 0) { 537 + /* If it is a fatal error s->quit is set by nbd_co_receive_one_chunk */ 538 + nbd_iter_error(iter, s->quit, ret, &local_err); 539 + } 540 + 541 + /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */ 542 + if (nbd_reply_is_simple(&s->reply) || s->quit) { 543 + goto break_loop; 544 + } 545 + 546 + chunk = &reply->structured; 547 + iter->only_structured = true; 548 + 549 + if (chunk->type == NBD_REPLY_TYPE_NONE) { 550 + /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */ 551 + assert(chunk->flags & NBD_REPLY_FLAG_DONE); 552 + goto break_loop; 553 + } 554 + 555 + if (chunk->flags & NBD_REPLY_FLAG_DONE) { 556 + /* This iteration is last. 
*/ 557 + iter->done = true; 558 + } 559 + 560 + /* Execute the loop body */ 561 + return true; 562 + 563 + break_loop: 564 + s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL; 565 + 219 566 qemu_co_mutex_lock(&s->send_mutex); 220 567 s->in_flight--; 221 568 qemu_co_queue_next(&s->free_sema); 222 569 qemu_co_mutex_unlock(&s->send_mutex); 223 570 224 - return ret; 571 + return false; 572 + } 573 + 574 + static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle, 575 + Error **errp) 576 + { 577 + NBDReplyChunkIter iter; 578 + 579 + NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) { 580 + /* nbd_reply_chunk_iter_receive does all the work */ 581 + } 582 + 583 + error_propagate(errp, iter.err); 584 + return iter.ret; 225 585 } 226 586 227 - static int nbd_co_request(BlockDriverState *bs, 228 - NBDRequest *request, 229 - QEMUIOVector *qiov) 587 + static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle, 588 + uint64_t offset, QEMUIOVector *qiov, 589 + Error **errp) 230 590 { 231 - NBDClientSession *client = nbd_get_client_session(bs); 591 + NBDReplyChunkIter iter; 592 + NBDReply reply; 593 + void *payload = NULL; 594 + Error *local_err = NULL; 595 + 596 + NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply, 597 + qiov, &reply, &payload) 598 + { 599 + int ret; 600 + NBDStructuredReplyChunk *chunk = &reply.structured; 601 + 602 + assert(nbd_reply_is_structured(&reply)); 603 + 604 + switch (chunk->type) { 605 + case NBD_REPLY_TYPE_OFFSET_DATA: 606 + /* special cased in nbd_co_receive_one_chunk, data is already 607 + * in qiov */ 608 + break; 609 + case NBD_REPLY_TYPE_OFFSET_HOLE: 610 + ret = nbd_parse_offset_hole_payload(&reply.structured, payload, 611 + offset, qiov, &local_err); 612 + if (ret < 0) { 613 + s->quit = true; 614 + nbd_iter_error(&iter, true, ret, &local_err); 615 + } 616 + break; 617 + default: 618 + if (!nbd_reply_type_is_error(chunk->type)) { 619 + /* not allowed reply type */ 620 + 
s->quit = true; 621 + error_setg(&local_err, 622 + "Unexpected reply type: %d (%s) for CMD_READ", 623 + chunk->type, nbd_reply_type_lookup(chunk->type)); 624 + nbd_iter_error(&iter, true, -EINVAL, &local_err); 625 + } 626 + } 627 + 628 + g_free(payload); 629 + payload = NULL; 630 + } 631 + 632 + error_propagate(errp, iter.err); 633 + return iter.ret; 634 + } 635 + 636 + static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, 637 + QEMUIOVector *write_qiov) 638 + { 232 639 int ret; 640 + Error *local_err = NULL; 641 + NBDClientSession *client = nbd_get_client_session(bs); 233 642 234 - if (qiov) { 235 - assert(request->type == NBD_CMD_WRITE || request->type == NBD_CMD_READ); 236 - assert(request->len == iov_size(qiov->iov, qiov->niov)); 643 + assert(request->type != NBD_CMD_READ); 644 + if (write_qiov) { 645 + assert(request->type == NBD_CMD_WRITE); 646 + assert(request->len == iov_size(write_qiov->iov, write_qiov->niov)); 237 647 } else { 238 - assert(request->type != NBD_CMD_WRITE && request->type != NBD_CMD_READ); 648 + assert(request->type != NBD_CMD_WRITE); 239 649 } 240 - ret = nbd_co_send_request(bs, request, 241 - request->type == NBD_CMD_WRITE ? qiov : NULL); 650 + ret = nbd_co_send_request(bs, request, write_qiov); 242 651 if (ret < 0) { 243 652 return ret; 244 653 } 245 654 246 - return nbd_co_receive_reply(client, request->handle, 247 - request->type == NBD_CMD_READ ? 
qiov : NULL); 655 + ret = nbd_co_receive_return_code(client, request->handle, &local_err); 656 + if (local_err) { 657 + error_report_err(local_err); 658 + } 659 + return ret; 248 660 } 249 661 250 662 int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, 251 663 uint64_t bytes, QEMUIOVector *qiov, int flags) 252 664 { 665 + int ret; 666 + Error *local_err = NULL; 667 + NBDClientSession *client = nbd_get_client_session(bs); 253 668 NBDRequest request = { 254 669 .type = NBD_CMD_READ, 255 670 .from = offset, ··· 259 674 assert(bytes <= NBD_MAX_BUFFER_SIZE); 260 675 assert(!flags); 261 676 262 - return nbd_co_request(bs, &request, qiov); 677 + ret = nbd_co_send_request(bs, &request, NULL); 678 + if (ret < 0) { 679 + return ret; 680 + } 681 + 682 + ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov, 683 + &local_err); 684 + if (ret < 0) { 685 + error_report_err(local_err); 686 + } 687 + return ret; 263 688 } 264 689 265 690 int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, ··· 381 806 qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL); 382 807 383 808 client->info.request_sizes = true; 809 + client->info.structured_reply = true; 384 810 ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export, 385 811 tlscreds, hostname, 386 812 &client->ioc, &client->info, errp);
+1
block/nbd-client.h
··· 19 19 20 20 typedef struct { 21 21 Coroutine *coroutine; 22 + uint64_t offset; /* original offset of the request */ 22 23 bool receiving; /* waiting for read_reply_co? */ 23 24 } NBDClientRequest; 24 25
+12
include/block/nbd.h
··· 197 197 #define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1) 198 198 #define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2) 199 199 200 + static inline bool nbd_reply_type_is_error(int type) 201 + { 202 + return type & (1 << 15); 203 + } 204 + 200 205 /* NBD errors are based on errno numbers, so there is a 1:1 mapping, 201 206 * but only a limited set of errno values is specified in the protocol. 202 207 * Everything else is squashed to EINVAL. ··· 214 219 struct NBDExportInfo { 215 220 /* Set by client before nbd_receive_negotiate() */ 216 221 bool request_sizes; 222 + 223 + /* In-out fields, set by client before nbd_receive_negotiate() and 224 + * updated by server results during nbd_receive_negotiate() */ 225 + bool structured_reply; 226 + 217 227 /* Set by server results during nbd_receive_negotiate() */ 218 228 uint64_t size; 219 229 uint16_t flags; ··· 283 293 { 284 294 return reply->magic == NBD_STRUCTURED_REPLY_MAGIC; 285 295 } 296 + 297 + const char *nbd_reply_type_lookup(uint16_t type); 286 298 287 299 #endif
+12
nbd/client.c
··· 602 602 uint64_t magic; 603 603 int rc; 604 604 bool zeroes = true; 605 + bool structured_reply = info->structured_reply; 605 606 606 607 trace_nbd_receive_negotiate(tlscreds, hostname ? hostname : "<null>"); 607 608 609 + info->structured_reply = false; 608 610 rc = -EINVAL; 609 611 610 612 if (outioc) { ··· 684 686 } 685 687 if (fixedNewStyle) { 686 688 int result; 689 + 690 + if (structured_reply) { 691 + result = nbd_request_simple_option(ioc, 692 + NBD_OPT_STRUCTURED_REPLY, 693 + errp); 694 + if (result < 0) { 695 + goto fail; 696 + } 697 + info->structured_reply = result == 1; 698 + } 687 699 688 700 /* Try NBD_OPT_GO first - if it works, we are done (it 689 701 * also gives us a good message if the server requires
-1
nbd/nbd-internal.h
··· 104 104 const char *nbd_rep_lookup(uint32_t rep); 105 105 const char *nbd_info_lookup(uint16_t info); 106 106 const char *nbd_cmd_lookup(uint16_t info); 107 - const char *nbd_reply_type_lookup(uint16_t type); 108 107 const char *nbd_err_lookup(int err); 109 108 110 109 int nbd_drop(QIOChannel *ioc, size_t size, Error **errp);
+15
tests/qemu-iotests/083.out
··· 41 41 42 42 === Check disconnect after neg2 === 43 43 44 + Connection closed 44 45 read failed: Input/output error 45 46 46 47 === Check disconnect 8 neg2 === ··· 53 54 54 55 === Check disconnect before request === 55 56 57 + Connection closed 56 58 read failed: Input/output error 57 59 58 60 === Check disconnect after request === 59 61 62 + Connection closed 60 63 read failed: Input/output error 61 64 62 65 === Check disconnect before reply === 63 66 67 + Connection closed 64 68 read failed: Input/output error 65 69 66 70 === Check disconnect after reply === 67 71 72 + Unexpected end-of-file before all bytes were read 68 73 read failed: Input/output error 69 74 70 75 === Check disconnect 4 reply === 71 76 72 77 Unexpected end-of-file before all bytes were read 78 + Connection closed 73 79 read failed: Input/output error 74 80 75 81 === Check disconnect 8 reply === 76 82 77 83 Unexpected end-of-file before all bytes were read 84 + Connection closed 78 85 read failed: Input/output error 79 86 80 87 === Check disconnect before data === 81 88 89 + Unexpected end-of-file before all bytes were read 82 90 read failed: Input/output error 83 91 84 92 === Check disconnect after data === ··· 108 116 109 117 === Check disconnect after neg-classic === 110 118 119 + Connection closed 111 120 read failed: Input/output error 112 121 113 122 === Check disconnect before neg1 === ··· 168 177 169 178 === Check disconnect after request === 170 179 180 + Connection closed 171 181 read failed: Input/output error 172 182 173 183 === Check disconnect before reply === 174 184 185 + Connection closed 175 186 read failed: Input/output error 176 187 177 188 === Check disconnect after reply === 178 189 190 + Unexpected end-of-file before all bytes were read 179 191 read failed: Input/output error 180 192 181 193 === Check disconnect 4 reply === 182 194 183 195 Unexpected end-of-file before all bytes were read 196 + Connection closed 184 197 read failed: Input/output error 185 198 186 199 
=== Check disconnect 8 reply === 187 200 188 201 Unexpected end-of-file before all bytes were read 202 + Connection closed 189 203 read failed: Input/output error 190 204 191 205 === Check disconnect before data === 192 206 207 + Unexpected end-of-file before all bytes were read 193 208 read failed: Input/output error 194 209 195 210 === Check disconnect after data ===