地圖 (Jido) is a lightweight Unix TUI file explorer designed for speed and simplicity.

feat: add keybind to extract archives

+453 -30
+2 -1
.gila/todo/intelligent_dino_17y/intelligent_dino_17y.md .gila/done/intelligent_dino_17y/intelligent_dino_17y.md
··· 1 1 --- 2 2 title: feat: add keybind to extract archives 3 - status: todo 3 + status: done 4 4 priority_value: 50 5 5 priority: low 6 6 owner: brookjeynes 7 7 created: 2026-01-11T21:52:59Z 8 + completed: 2026-01-21T07:05:03Z 8 9 --- 9 10 Allow users to extract archives via a keybind
+16 -11
README.md
··· 57 57 v :Verbose mode. Provides more information about selected entry. 58 58 y :Yank selected item. 59 59 p :Paste yanked item. 60 + x :Extract archive to `<name>/`. 60 61 61 62 Input mode: 62 63 <Esc> :Cancel input. ··· 70 71 :trash :Navigate to trash directory if it exists. 71 72 :empty_trash :Empty trash if it exists. This action cannot be undone. 72 73 :cd <path> :Change directory via path. Will enter input mode. 74 + :extract :Extract archive under cursor. 73 75 ``` 74 76 75 77 ## Configuration ··· 87 89 Config = struct { 88 90 .show_hidden: bool = true, 89 91 .sort_dirs: bool = true, 90 - .show_images: bool = true, -- Images are only supported in a terminal 91 - supporting the `kitty image protocol`. 92 + .show_images: bool = true, -- Images are only supported in a terminal 93 + supporting the `kitty image protocol`. 92 94 .preview_file: bool = true, 93 - .empty_trash_on_exit: bool = false, -- Emptying the trash permanently deletes 94 - all files within the trash. These 95 - files are not recoverable past this 96 - point. 97 - .true_dir_size: bool = false, -- Display size of directory including 98 - all its children. This can and will 99 - cause lag on deeply nested directories. 100 - .archive_traversal_limit: usize = 100, -- How many files to be traversed when reading 101 - an archive (zip, tar, etc.). 95 + .empty_trash_on_exit: bool = false, -- Emptying the trash permanently deletes 96 + all files within the trash. These 97 + files are not recoverable past this 98 + point. 99 + .true_dir_size: bool = false, -- Display size of directory including 100 + all its children. This can and will 101 + cause lag on deeply nested directories. 102 + .archive_traversal_limit: usize = 100, -- How many files to be traversed when reading 103 + an archive (zip, tar, etc.). 104 + .keep_partial_extraction: bool = false, -- If extraction fails, keep the partially 105 + extracted directory instead of cleaning up. 
102 106 .keybinds: Keybinds, 103 107 .styles: Styles 104 108 } ··· 119 123 not recoverable 120 124 .yank: ?Char = 'y' 121 125 .paste: ?Char = 'p' 126 + .extract_archive: ?Char = 'x' 122 127 } 123 128 124 129 NotificationStyles = struct {
+2
src/app.zig
··· 42 42 "v :Verbose mode. Provides more information about selected entry. ", 43 43 "y :Yank selected item.", 44 44 "p :Past yanked item.", 45 + "x :Extract archive to `<name>/`", 45 46 "", 46 47 "Input mode:", 47 48 "<Esc> :Cancel input.", ··· 55 56 ":trash :Navigate to trash directory if it exists.", 56 57 ":empty_trash :Empty trash if it exists. This action cannot be undone.", 57 58 ":cd <path> :Change directory via path. Will enter input mode.", 59 + ":extract :Extract archive under cursor.", 58 60 }; 59 61 60 62 pub const State = enum {
+330 -15
src/archive.zig
··· 1 1 const std = @import("std"); 2 2 const ascii = @import("std").ascii; 3 + const FileLogger = @import("./file_logger.zig"); 3 4 4 5 const archive_buf_size = 8192; 5 6 ··· 33 34 } 34 35 }; 35 36 37 + pub const ExtractionResult = struct { 38 + files_extracted: usize, 39 + dirs_created: usize, 40 + files_skipped: usize, 41 + }; 42 + 43 + pub const PathValidationError = error{ 44 + PathContainsTraversal, 45 + PathTooLong, 46 + PathEmpty, 47 + }; 48 + 49 + pub const SkipReason = enum { 50 + path_contains_traversal, 51 + path_too_long, 52 + path_empty, 53 + }; 54 + 55 + const Operation = enum { list, extract }; 56 + 57 + const OperationArgs = union(Operation) { 58 + list: struct { 59 + traversal_limit: usize, 60 + }, 61 + extract: struct { 62 + dest_dir: std.fs.Dir, 63 + file_logger: ?FileLogger, 64 + }, 65 + }; 66 + 67 + const OperationResult = union(Operation) { 68 + list: ArchiveContents, 69 + extract: ExtractionResult, 70 + }; 71 + 36 72 pub fn listArchiveContents( 37 73 alloc: std.mem.Allocator, 38 74 file: std.fs.File, ··· 42 78 var buffer: [archive_buf_size]u8 = undefined; 43 79 var reader = file.reader(&buffer); 44 80 81 + const list_args = OperationArgs{ .list = .{ 82 + .traversal_limit = traversal_limit, 83 + } }; 84 + 45 85 const contents = switch (archive_type) { 46 86 .tar => try listTar(alloc, &reader.interface, traversal_limit), 47 - .@"tar.gz" => try listTarGz(alloc, &reader.interface, traversal_limit), 48 - .@"tar.xz" => try listTarXz(alloc, &reader.interface, traversal_limit), 49 - .@"tar.zst" => try listTarZst(alloc, &reader.interface, traversal_limit), 87 + .@"tar.gz" => (try processTarGz(alloc, &reader.interface, list_args)).list, 88 + .@"tar.xz" => (try processTarXz(alloc, &reader.interface, list_args)).list, 89 + .@"tar.zst" => (try processTarZst(alloc, &reader.interface, list_args)).list, 50 90 .zip => try listZip(alloc, file, traversal_limit), 51 91 }; 52 92 53 93 return contents; 54 94 } 55 95 96 + pub fn extractArchive( 97 + alloc: 
std.mem.Allocator, 98 + file: std.fs.File, 99 + archive_type: ArchiveType, 100 + dest_dir: std.fs.Dir, 101 + file_logger: ?FileLogger, 102 + ) !ExtractionResult { 103 + var buffer: [archive_buf_size]u8 = undefined; 104 + var reader = file.reader(&buffer); 105 + 106 + const extract_args = OperationArgs{ .extract = .{ 107 + .dest_dir = dest_dir, 108 + .file_logger = file_logger, 109 + } }; 110 + 111 + return switch (archive_type) { 112 + .tar => try extractTarImpl(alloc, &reader.interface, dest_dir, file_logger), 113 + .@"tar.gz" => (try processTarGz(alloc, &reader.interface, extract_args)).extract, 114 + .@"tar.xz" => (try processTarXz(alloc, &reader.interface, extract_args)).extract, 115 + .@"tar.zst" => (try processTarZst(alloc, &reader.interface, extract_args)).extract, 116 + .zip => try extractZipImpl(alloc, file, dest_dir, file_logger), 117 + }; 118 + } 119 + 120 + pub fn getExtractDirName(archive_path: []const u8) []const u8 { 121 + const basename = std.fs.path.basename(archive_path); 122 + 123 + return if (ascii.endsWithIgnoreCase(basename, ".tar.gz")) 124 + basename[0 .. basename.len - 7] 125 + else if (ascii.endsWithIgnoreCase(basename, ".tar.xz")) 126 + basename[0 .. basename.len - 7] 127 + else if (ascii.endsWithIgnoreCase(basename, ".tar.zst")) 128 + basename[0 .. basename.len - 8] 129 + else if (ascii.endsWithIgnoreCase(basename, ".tgz")) 130 + basename[0 .. basename.len - 4] 131 + else if (ascii.endsWithIgnoreCase(basename, ".txz")) 132 + basename[0 .. basename.len - 4] 133 + else if (ascii.endsWithIgnoreCase(basename, ".tzst")) 134 + basename[0 .. basename.len - 5] 135 + else if (ascii.endsWithIgnoreCase(basename, ".tar")) 136 + basename[0 .. basename.len - 4] 137 + else if (ascii.endsWithIgnoreCase(basename, ".zip")) 138 + basename[0 .. basename.len - 4] 139 + else if (ascii.endsWithIgnoreCase(basename, ".jar")) 140 + basename[0 .. 
basename.len - 4] 141 + else 142 + basename; 143 + } 144 + 145 + fn validateAndCleanPath( 146 + alloc: std.mem.Allocator, 147 + path: []const u8, 148 + ) (PathValidationError || error{OutOfMemory})![]const u8 { 149 + // Strip leading slashes (handles /, //, ///, etc.) 150 + var clean_path = path; 151 + while (std.mem.startsWith(u8, clean_path, "/")) { 152 + clean_path = clean_path[1..]; 153 + } 154 + 155 + if (clean_path.len == 0) return error.PathEmpty; 156 + if (clean_path.len >= std.fs.max_path_bytes) return error.PathTooLong; 157 + 158 + // Check for directory traversal by tracking depth 159 + var depth: i32 = 0; 160 + var iter = std.mem.splitScalar(u8, clean_path, '/'); 161 + while (iter.next()) |component| { 162 + if (component.len == 0) continue; 163 + 164 + if (std.mem.eql(u8, component, "..")) { 165 + depth -= 1; 166 + if (depth < 0) { 167 + return error.PathContainsTraversal; 168 + } 169 + } else if (!std.mem.eql(u8, component, ".")) { 170 + depth += 1; 171 + } 172 + } 173 + 174 + return try alloc.dupe(u8, clean_path); 175 + } 176 + 56 177 fn extractTopLevelEntry( 57 178 alloc: std.mem.Allocator, 58 179 full_path: []const u8, ··· 121 242 }; 122 243 } 123 244 124 - fn listTarGz( 245 + fn processTarGz( 125 246 alloc: std.mem.Allocator, 126 247 reader: anytype, 127 - limit: usize, 128 - ) !ArchiveContents { 248 + args: OperationArgs, 249 + ) !OperationResult { 129 250 var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined; 130 251 var decompress = std.compress.flate.Decompress.init(reader, .gzip, &flate_buffer); 131 - return try listTar(alloc, &decompress.reader, limit); 252 + 253 + return switch (args) { 254 + .list => |list_args| .{ 255 + .list = try listTar(alloc, &decompress.reader, list_args.traversal_limit), 256 + }, 257 + .extract => |extract_args| .{ 258 + .extract = try extractTarImpl(alloc, &decompress.reader, extract_args.dest_dir, extract_args.file_logger), 259 + }, 260 + }; 132 261 } 133 262 134 - fn listTarXz( 263 + fn 
processTarXz( 135 264 alloc: std.mem.Allocator, 136 265 reader: anytype, 137 - limit: usize, 138 - ) !ArchiveContents { 266 + args: OperationArgs, 267 + ) !OperationResult { 139 268 var dcp = try std.compress.xz.decompress(alloc, reader.adaptToOldInterface()); 140 269 defer dcp.deinit(); 141 270 var adapter_buffer: [1024]u8 = undefined; 142 271 var adapter = dcp.reader().adaptToNewApi(&adapter_buffer); 143 - return try listTar(alloc, &adapter.new_interface, limit); 272 + 273 + return switch (args) { 274 + .list => |list_args| .{ 275 + .list = try listTar(alloc, &adapter.new_interface, list_args.traversal_limit), 276 + }, 277 + .extract => |extract_args| .{ 278 + .extract = try extractTarImpl(alloc, &adapter.new_interface, extract_args.dest_dir, extract_args.file_logger), 279 + }, 280 + }; 144 281 } 145 282 146 - fn listTarZst( 283 + fn processTarZst( 147 284 alloc: std.mem.Allocator, 148 285 reader: anytype, 149 - limit: usize, 150 - ) !ArchiveContents { 286 + args: OperationArgs, 287 + ) !OperationResult { 151 288 const window_len = std.compress.zstd.default_window_len; 152 289 const window_buffer = try alloc.alloc(u8, window_len + std.compress.zstd.block_size_max); 290 + defer alloc.free(window_buffer); 153 291 var decompress: std.compress.zstd.Decompress = .init(reader, window_buffer, .{ 154 292 .verify_checksum = false, 155 293 .window_len = window_len, 156 294 }); 157 - return try listTar(alloc, &decompress.reader, limit); 295 + 296 + return switch (args) { 297 + .list => |list_args| .{ 298 + .list = try listTar(alloc, &decompress.reader, list_args.traversal_limit), 299 + }, 300 + .extract => |extract_args| .{ 301 + .extract = try extractTarImpl(alloc, &decompress.reader, extract_args.dest_dir, extract_args.file_logger), 302 + }, 303 + }; 158 304 } 159 305 160 306 fn listZip( ··· 204 350 .entries = entries, 205 351 }; 206 352 } 353 + 354 + fn extractTarImpl( 355 + alloc: std.mem.Allocator, 356 + reader: anytype, 357 + dest_dir: std.fs.Dir, 358 + file_logger: 
?FileLogger, 359 + ) !ExtractionResult { 360 + var files_extracted: usize = 0; 361 + var dirs_created: usize = 0; 362 + var files_skipped: usize = 0; 363 + 364 + var diagnostics: std.tar.Diagnostics = .{ .allocator = alloc }; 365 + defer diagnostics.deinit(); 366 + 367 + var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined; 368 + var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined; 369 + var iter = std.tar.Iterator.init(reader, .{ 370 + .file_name_buffer = &file_name_buffer, 371 + .link_name_buffer = &link_name_buffer, 372 + }); 373 + iter.diagnostics = &diagnostics; 374 + 375 + while (try iter.next()) |tar_file| { 376 + const safe_path = validateAndCleanPath(alloc, tar_file.name) catch |err| { 377 + if (err == error.OutOfMemory) return err; 378 + 379 + files_skipped += 1; 380 + if (file_logger) |logger| { 381 + const reason: SkipReason = switch (err) { 382 + error.PathContainsTraversal => .path_contains_traversal, 383 + error.PathTooLong => .path_too_long, 384 + error.PathEmpty => .path_empty, 385 + error.OutOfMemory => unreachable, 386 + }; 387 + 388 + const message = try std.fmt.allocPrint(alloc, "Failed to extract file '{s}': {any}", .{ tar_file.name, reason }); 389 + defer alloc.free(message); 390 + logger.write(message, .err) catch {}; 391 + } 392 + continue; 393 + }; 394 + defer alloc.free(safe_path); 395 + 396 + if (tar_file.kind == .directory) { 397 + try dest_dir.makePath(safe_path); 398 + dirs_created += 1; 399 + } else if (tar_file.kind == .file or tar_file.kind == .sym_link) { 400 + if (std.fs.path.dirname(safe_path)) |parent| { 401 + try dest_dir.makePath(parent); 402 + } 403 + 404 + // TODO: Investigate preserving file permissions from archive 405 + const out_file = try dest_dir.createFile(safe_path, .{ .exclusive = true }); 406 + defer out_file.close(); 407 + 408 + var file_writer_buffer: [archive_buf_size]u8 = undefined; 409 + var file_writer = out_file.writer(&file_writer_buffer); 410 + try iter.streamRemaining(tar_file, 
&file_writer.interface); 411 + 412 + files_extracted += 1; 413 + } 414 + } 415 + 416 + return ExtractionResult{ 417 + .files_extracted = files_extracted, 418 + .dirs_created = dirs_created, 419 + .files_skipped = files_skipped, 420 + }; 421 + } 422 + 423 + fn extractZipImpl( 424 + alloc: std.mem.Allocator, 425 + file: std.fs.File, 426 + dest_dir: std.fs.Dir, 427 + file_logger: ?FileLogger, 428 + ) !ExtractionResult { 429 + var files_extracted: usize = 0; 430 + var dirs_created: usize = 0; 431 + var files_skipped: usize = 0; 432 + 433 + var buffer: [archive_buf_size]u8 = undefined; 434 + var file_reader = file.reader(&buffer); 435 + 436 + var iter = try std.zip.Iterator.init(&file_reader); 437 + var file_name_buf: [std.fs.max_path_bytes]u8 = undefined; 438 + 439 + while (try iter.next()) |entry| { 440 + const file_name_len = @min(entry.filename_len, file_name_buf.len); 441 + 442 + try file_reader.seekTo(entry.header_zip_offset + @sizeOf(std.zip.CentralDirectoryFileHeader)); 443 + const file_name = file_name_buf[0..file_name_len]; 444 + try file_reader.interface.readSliceAll(file_name); 445 + 446 + const safe_path = validateAndCleanPath(alloc, file_name) catch |err| { 447 + if (err == error.OutOfMemory) return err; 448 + 449 + files_skipped += 1; 450 + if (file_logger) |logger| { 451 + const reason: SkipReason = switch (err) { 452 + error.PathContainsTraversal => .path_contains_traversal, 453 + error.PathTooLong => .path_too_long, 454 + error.PathEmpty => .path_empty, 455 + error.OutOfMemory => unreachable, 456 + }; 457 + 458 + const message = try std.fmt.allocPrint(alloc, "Failed to extract file '{s}': {any}", .{ file_name, reason }); 459 + defer alloc.free(message); 460 + logger.write(message, .err) catch {}; 461 + } 462 + continue; 463 + }; 464 + defer alloc.free(safe_path); 465 + 466 + if (std.mem.endsWith(u8, file_name, "/")) { 467 + try dest_dir.makePath(safe_path); 468 + dirs_created += 1; 469 + } else { 470 + if (std.fs.path.dirname(safe_path)) |parent| { 471 
+ try dest_dir.makePath(parent); 472 + } 473 + 474 + // TODO: Investigate preserving file permissions from archive 475 + const out_file = try dest_dir.createFile(safe_path, .{ .exclusive = true }); 476 + defer out_file.close(); 477 + 478 + // Seek to local file header and read it to get to compressed data 479 + try file_reader.seekTo(entry.file_offset); 480 + const local_header = try file_reader.interface.takeStruct(std.zip.LocalFileHeader, .little); 481 + 482 + // Skip filename and extra field to get to compressed data 483 + _ = try file_reader.interface.discard(@enumFromInt(local_header.filename_len)); 484 + _ = try file_reader.interface.discard(@enumFromInt(local_header.extra_len)); 485 + 486 + var copy_buffer: [archive_buf_size]u8 = undefined; 487 + 488 + if (entry.compression_method == .store) { 489 + var total_read: usize = 0; 490 + while (total_read < entry.uncompressed_size) { 491 + const to_read = @min(copy_buffer.len, entry.uncompressed_size - total_read); 492 + const n = try file_reader.interface.readSliceShort(copy_buffer[0..to_read]); 493 + if (n == 0) break; 494 + try out_file.writeAll(copy_buffer[0..n]); 495 + total_read += n; 496 + } 497 + } else if (entry.compression_method == .deflate) { 498 + var limited_buffer: [archive_buf_size]u8 = undefined; 499 + var limited_reader = file_reader.interface.limited(@enumFromInt(entry.compressed_size), &limited_buffer); 500 + var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined; 501 + var decompress = std.compress.flate.Decompress.init(&limited_reader.interface, .raw, &flate_buffer); 502 + 503 + while (true) { 504 + const n = try decompress.reader.readSliceShort(&copy_buffer); 505 + if (n == 0) break; 506 + try out_file.writeAll(copy_buffer[0..n]); 507 + } 508 + } else { 509 + return error.UnsupportedCompressionMethod; 510 + } 511 + 512 + files_extracted += 1; 513 + } 514 + } 515 + 516 + return ExtractionResult{ 517 + .files_extracted = files_extracted, 518 + .dirs_created = dirs_created, 519 + 
.files_skipped = files_skipped, 520 + }; 521 + }
+2
src/config.zig
··· 20 20 true_dir_size: bool = false, 21 21 entry_dir: ?[]const u8 = null, 22 22 archive_traversal_limit: usize = 100, 23 + keep_partial_extraction: bool = false, 23 24 styles: Styles = .{}, 24 25 keybinds: Keybinds = .{}, 25 26 ··· 212 213 force_delete: ?Char = null, 213 214 paste: ?Char = @enumFromInt('p'), 214 215 yank: ?Char = @enumFromInt('y'), 216 + extract_archive: ?Char = @enumFromInt('x'), 215 217 }; 216 218 217 219 const Styles = struct {
+6
src/event_handlers.zig
··· 120 120 .force_delete => try events.forceDelete(app), 121 121 .yank => try events.yank(app), 122 122 .paste => try events.paste(app), 123 + .extract_archive => try events.extractArchive(app), 123 124 } 124 125 } else { 125 126 switch (key.codepoint) { ··· 207 208 208 209 if (std.mem.eql(u8, command, ":h")) { 209 210 app.state = .help_menu; 211 + break :supported; 212 + } 213 + 214 + if (std.mem.eql(u8, command, ":extract")) { 215 + try events.extractArchive(app); 210 216 break :supported; 211 217 } 212 218
+95 -3
src/events.zig
··· 1 1 const std = @import("std"); 2 - const App = @import("./app.zig"); 3 - const config = &@import("./config.zig").config; 2 + 3 + const vaxis = @import("vaxis"); 4 4 const zuid = @import("zuid"); 5 + 6 + const App = @import("./app.zig"); 7 + const Archive = @import("./archive.zig"); 5 8 const environment = @import("./environment.zig"); 6 - const vaxis = @import("vaxis"); 9 + 10 + const config = &@import("./config.zig").config; 7 11 8 12 pub fn delete(app: *App) error{OutOfMemory}!void { 9 13 var message: ?[]const u8 = null; ··· 563 567 564 568 app.directories.entries.selected = selected; 565 569 } 570 + 571 + pub fn extractArchive(app: *App) error{OutOfMemory}!void { 572 + var message: ?[]const u8 = null; 573 + defer if (message) |msg| app.alloc.free(msg); 574 + 575 + const entry = (app.directories.getSelected() catch { 576 + app.notification.write("Can not extract - no item selected.", .warn) catch {}; 577 + return; 578 + }) orelse return; 579 + 580 + const archive_type = Archive.ArchiveType.fromPath(entry.name) orelse { 581 + app.notification.write("Not an archive file.", .warn) catch {}; 582 + return; 583 + }; 584 + 585 + const extract_dir_name = Archive.getExtractDirName(entry.name); 586 + 587 + if (environment.fileExists(app.directories.dir, extract_dir_name)) { 588 + message = try std.fmt.allocPrint(app.alloc, "Can not extract file(s) - '{s}' already exists.", .{extract_dir_name}); 589 + app.notification.write(message.?, .warn) catch {}; 590 + return; 591 + } 592 + 593 + var dest_dir = app.directories.dir.makeOpenPath(extract_dir_name, .{}) catch |err| { 594 + message = try std.fmt.allocPrint(app.alloc, "Failed to extract archive '{s}' - {}.", .{ extract_dir_name, err }); 595 + app.notification.write(message.?, .err) catch {}; 596 + if (app.file_logger) |file_logger| file_logger.write(message.?, .err) catch {}; 597 + return; 598 + }; 599 + defer dest_dir.close(); 600 + 601 + const archive_file = app.directories.dir.openFile(entry.name, .{}) catch |err| { 
602 + message = try std.fmt.allocPrint( 603 + app.alloc, 604 + "Failed to open archive '{s}' - {}.", 605 + .{ entry.name, err }, 606 + ); 607 + app.notification.write(message.?, .err) catch {}; 608 + if (app.file_logger) |file_logger| file_logger.write(message.?, .err) catch {}; 609 + 610 + if (!config.keep_partial_extraction) { 611 + app.directories.dir.deleteTree(extract_dir_name) catch {}; 612 + } 613 + return; 614 + }; 615 + defer archive_file.close(); 616 + 617 + const result = Archive.extractArchive( 618 + app.alloc, 619 + archive_file, 620 + archive_type, 621 + dest_dir, 622 + app.file_logger, 623 + ) catch |err| { 624 + message = try std.fmt.allocPrint( 625 + app.alloc, 626 + "Failed to extract '{s}' - {s}.", 627 + .{ entry.name, @errorName(err) }, 628 + ); 629 + app.notification.write(message.?, .err) catch {}; 630 + if (app.file_logger) |file_logger| file_logger.write(message.?, .err) catch {}; 631 + 632 + if (!config.keep_partial_extraction) { 633 + app.directories.dir.deleteTree(extract_dir_name) catch {}; 634 + } 635 + return; 636 + }; 637 + 638 + if (result.files_skipped > 0) { 639 + message = try std.fmt.allocPrint( 640 + app.alloc, 641 + "Extracted {d} files, {d} directories to './{s}{s}'. 
Failed to extract {d} files, check the log file for more details.", 642 + .{ result.files_extracted, result.dirs_created, std.fs.path.sep_str, extract_dir_name, result.files_skipped }, 643 + ); 644 + app.notification.write(message.?, .err) catch {}; 645 + if (app.file_logger) |file_logger| file_logger.write(message.?, .err) catch {}; 646 + } else { 647 + message = try std.fmt.allocPrint( 648 + app.alloc, 649 + "Extracted {d} files, {d} directories to './{s}{s}'.", 650 + .{ result.files_extracted, result.dirs_created, std.fs.path.sep_str, extract_dir_name }, 651 + ); 652 + app.notification.write(message.?, .info) catch {}; 653 + if (app.file_logger) |file_logger| file_logger.write(message.?, .info) catch {}; 654 + } 655 + 656 + try app.repopulateDirectory(""); 657 + }