struct Iterator [src]

Fields

input: *File.Reader
cd_record_count: u64
cd_zip_offset: u64
cd_size: u64
cd_record_index: u64 = 0
cd_record_offset: u64 = 0

Members

Source

/// Iterates over the entries of a ZIP archive by walking its central directory.
///
/// Construct with `init` (which locates and validates the end-of-central-directory
/// record, including the ZIP64 variant), then call `next` repeatedly until it
/// returns `null`. Multi-disk archives are rejected.
pub const Iterator = struct {
    input: *File.Reader,

    /// Total number of central directory records, per the (possibly ZIP64) end record.
    cd_record_count: u64,
    /// Offset of the central directory from the start of the stream.
    cd_zip_offset: u64,
    /// Size in bytes of the central directory.
    cd_size: u64,

    /// Index of the next record to be returned by `next`.
    cd_record_index: u64 = 0,
    /// Byte offset into the central directory of the next record.
    cd_record_offset: u64 = 0,

    /// Locates the end-of-central-directory record and validates it, consulting
    /// the ZIP64 end record/locator when the classic record's fields are saturated.
    /// Rejects multi-disk archives and inconsistent ZIP64/classic field pairs.
    pub fn init(input: *File.Reader) !Iterator {
        const end_record = try EndRecord.findFile(input);

        if (!isMaxInt(end_record.record_count_disk) and
            end_record.record_count_disk > end_record.record_count_total)
            return error.ZipDiskRecordCountTooLarge;

        if (end_record.disk_number != 0 or end_record.central_directory_disk_number != 0)
            return error.ZipMultiDiskUnsupported;

        {
            // 0xFFFF in either count means "see ZIP64 record"; only compare when
            // both counts are actually meaningful.
            const counts_valid = !isMaxInt(end_record.record_count_disk) and
                !isMaxInt(end_record.record_count_total);
            if (counts_valid and end_record.record_count_disk != end_record.record_count_total)
                return error.ZipMultiDiskUnsupported;
        }

        var result: Iterator = .{
            .input = input,
            .cd_record_count = end_record.record_count_total,
            .cd_zip_offset = end_record.central_directory_offset,
            .cd_size = end_record.central_directory_size,
        };
        if (!end_record.need_zip64()) return result;

        // The ZIP64 locator sits immediately before the classic end record
        // (which is itself followed by the comment).
        const locator_end_offset: u64 = @as(u64, end_record.comment_len) +
            @sizeOf(EndRecord) + @sizeOf(EndLocator64);
        const stream_len = try input.getSize();
        if (locator_end_offset > stream_len)
            return error.ZipTruncated;
        try input.seekTo(stream_len - locator_end_offset);
        const locator = input.interface.takeStruct(EndLocator64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
        if (!std.mem.eql(u8, &locator.signature, &end_locator64_sig))
            return error.ZipBadLocatorSig;
        if (locator.zip64_disk_count != 0)
            return error.ZipUnsupportedZip64DiskCount;
        if (locator.total_disk_count != 1)
            return error.ZipMultiDiskUnsupported;

        try input.seekTo(locator.record_file_offset);
        const record64 = input.interface.takeStruct(EndRecord64, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };

        if (!std.mem.eql(u8, &record64.signature, &end_record64_sig))
            return error.ZipBadEndRecord64Sig;
        // end_record_size counts the record minus its 12-byte preamble
        // (signature + the size field itself).
        if (record64.end_record_size < @sizeOf(EndRecord64) - 12)
            return error.ZipEndRecord64SizeTooSmall;
        if (record64.end_record_size > @sizeOf(EndRecord64) - 12)
            return error.ZipEndRecord64UnhandledExtraData;

        if (record64.version_needed_to_extract > 45)
            return error.ZipUnsupportedVersion;

        {
            const is_multidisk = record64.disk_number != 0 or
                record64.central_directory_disk_number != 0 or
                record64.record_count_disk != record64.record_count_total;
            if (is_multidisk)
                return error.ZipMultiDiskUnsupported;
        }

        // For each classic field: if saturated, take the ZIP64 value; otherwise
        // the two records must agree.
        if (isMaxInt(end_record.record_count_total)) {
            result.cd_record_count = record64.record_count_total;
        } else if (end_record.record_count_total != record64.record_count_total)
            return error.Zip64RecordCountTotalMismatch;

        if (isMaxInt(end_record.central_directory_offset)) {
            result.cd_zip_offset = record64.central_directory_offset;
        } else if (end_record.central_directory_offset != record64.central_directory_offset)
            return error.Zip64CentralDirectoryOffsetMismatch;

        if (isMaxInt(end_record.central_directory_size)) {
            result.cd_size = record64.central_directory_size;
        } else if (end_record.central_directory_size != record64.central_directory_size)
            return error.Zip64CentralDirectorySizeMismatch;

        return result;
    }

    /// Returns the next central directory entry, or `null` once all
    /// `cd_record_count` records have been consumed. When iteration finishes,
    /// verifies that the bytes consumed match the declared directory size.
    pub fn next(self: *Iterator) !?Entry {
        if (self.cd_record_index == self.cd_record_count) {
            if (self.cd_record_offset != self.cd_size)
                return if (self.cd_size > self.cd_record_offset)
                    error.ZipCdOversized
                else
                    error.ZipCdUndersized;

            return null;
        }

        const header_zip_offset = self.cd_zip_offset + self.cd_record_offset;
        const input = self.input;
        try input.seekTo(header_zip_offset);
        const header = input.interface.takeStruct(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
            error.ReadFailed => return input.err.?,
            error.EndOfStream => return error.EndOfStream,
        };
        if (!std.mem.eql(u8, &header.signature, &central_file_header_sig))
            return error.ZipBadCdOffset;

        self.cd_record_index += 1;
        self.cd_record_offset += @sizeOf(CentralDirectoryFileHeader) +
            header.filename_len + header.extra_len + header.comment_len;

        // Note: checking the version_needed_to_extract doesn't seem to be helpful, i.e. the zip file
        // at https://github.com/ninja-build/ninja/releases/download/v1.12.0/ninja-linux.zip
        // has an undocumented version 788 but extracts just fine.
        if (header.flags.encrypted)
            return error.ZipEncryptionUnsupported;
        // TODO: check/verify more flags
        if (header.disk_number != 0)
            return error.ZipMultiDiskUnsupported;

        var extents: FileExtents = .{
            .uncompressed_size = header.uncompressed_size,
            .compressed_size = header.compressed_size,
            .local_file_header_offset = header.local_file_header_offset,
        };

        if (header.extra_len > 0) {
            var extra_buf: [std.math.maxInt(u16)]u8 = undefined;
            const extra = extra_buf[0..header.extra_len];

            try input.seekTo(header_zip_offset + @sizeOf(CentralDirectoryFileHeader) + header.filename_len);
            input.interface.readSliceAll(extra) catch |err| switch (err) {
                error.ReadFailed => return input.err.?,
                error.EndOfStream => return error.EndOfStream,
            };

            // Walk the extra-field TLV list: u16 id, u16 size, then payload.
            var extra_offset: usize = 0;
            while (extra_offset + 4 <= extra.len) {
                const header_id = std.mem.readInt(u16, extra[extra_offset..][0..2], .little);
                const data_size = std.mem.readInt(u16, extra[extra_offset..][2..4], .little);
                const end = extra_offset + 4 + data_size;
                if (end > extra.len)
                    return error.ZipBadExtraFieldSize;
                const data = extra[extra_offset + 4 .. end];
                switch (@as(ExtraHeader, @enumFromInt(header_id))) {
                    .zip64_info => try readZip64FileExtents(CentralDirectoryFileHeader, header, &extents, data),
                    else => {}, // ignore
                }
                extra_offset = end;
            }
        }

        return .{
            .version_needed_to_extract = header.version_needed_to_extract,
            .flags = header.flags,
            .compression_method = header.compression_method,
            .last_modification_time = header.last_modification_time,
            .last_modification_date = header.last_modification_date,
            .header_zip_offset = header_zip_offset,
            .crc32 = header.crc32,
            .filename_len = header.filename_len,
            .compressed_size = extents.compressed_size,
            .uncompressed_size = extents.uncompressed_size,
            .file_offset = extents.local_file_header_offset,
        };
    }

    /// A single central directory entry, with ZIP64 extents already resolved.
    pub const Entry = struct {
        version_needed_to_extract: u16,
        flags: GeneralPurposeFlags,
        compression_method: CompressionMethod,
        last_modification_time: u16,
        last_modification_date: u16,
        header_zip_offset: u64,
        crc32: u32,
        filename_len: u32,
        compressed_size: u64,
        uncompressed_size: u64,
        file_offset: u64,

        /// Extracts this entry into `dest`, creating parent directories as
        /// needed. `filename_buf` must be at least `filename_len` bytes; the
        /// entry's filename is read into it. Validates that the local file
        /// header agrees with the central directory record before writing.
        /// Only `store` and `deflate` compression methods are supported.
        pub fn extract(
            self: Entry,
            stream: *File.Reader,
            options: ExtractOptions,
            filename_buf: []u8,
            dest: std.fs.Dir,
        ) !void {
            if (filename_buf.len < self.filename_len)
                return error.ZipInsufficientBuffer;

            switch (self.compression_method) {
                .store, .deflate => {},
                else => return error.UnsupportedCompressionMethod,
            }

            const filename = filename_buf[0..self.filename_len];
            {
                // The filename immediately follows the central directory header.
                try stream.seekTo(self.header_zip_offset + @sizeOf(CentralDirectoryFileHeader));
                try stream.interface.readSliceAll(filename);
            }

            const local_data_header_offset: u64 = local_data_header_offset: {
                const local_header = blk: {
                    try stream.seekTo(self.file_offset);
                    break :blk try stream.interface.takeStruct(LocalFileHeader, .little);
                };
                if (!std.mem.eql(u8, &local_header.signature, &local_file_header_sig))
                    return error.ZipBadFileOffset;
                if (local_header.version_needed_to_extract != self.version_needed_to_extract)
                    return error.ZipMismatchVersionNeeded;
                if (local_header.last_modification_time != self.last_modification_time)
                    return error.ZipMismatchModTime;
                if (local_header.last_modification_date != self.last_modification_date)
                    return error.ZipMismatchModDate;

                if (@as(u16, @bitCast(local_header.flags)) != @as(u16, @bitCast(self.flags)))
                    return error.ZipMismatchFlags;
                // A zero CRC in the local header usually means it is deferred
                // to the data descriptor, so only mismatched nonzero values fail.
                if (local_header.crc32 != 0 and local_header.crc32 != self.crc32)
                    return error.ZipMismatchCrc32;
                var extents: FileExtents = .{
                    .uncompressed_size = local_header.uncompressed_size,
                    .compressed_size = local_header.compressed_size,
                    .local_file_header_offset = 0,
                };
                if (local_header.extra_len > 0) {
                    var extra_buf: [std.math.maxInt(u16)]u8 = undefined;
                    const extra = extra_buf[0..local_header.extra_len];

                    {
                        try stream.seekTo(self.file_offset + @sizeOf(LocalFileHeader) + local_header.filename_len);
                        try stream.interface.readSliceAll(extra);
                    }

                    var extra_offset: usize = 0;
                    while (extra_offset + 4 <= local_header.extra_len) {
                        const header_id = std.mem.readInt(u16, extra[extra_offset..][0..2], .little);
                        const data_size = std.mem.readInt(u16, extra[extra_offset..][2..4], .little);
                        const end = extra_offset + 4 + data_size;
                        if (end > local_header.extra_len)
                            return error.ZipBadExtraFieldSize;
                        const data = extra[extra_offset + 4 .. end];
                        switch (@as(ExtraHeader, @enumFromInt(header_id))) {
                            .zip64_info => try readZip64FileExtents(LocalFileHeader, local_header, &extents, data),
                            else => {}, // ignore
                        }
                        extra_offset = end;
                    }
                }

                if (extents.compressed_size != 0 and
                    extents.compressed_size != self.compressed_size)
                    return error.ZipMismatchCompLen;
                if (extents.uncompressed_size != 0 and
                    extents.uncompressed_size != self.uncompressed_size)
                    return error.ZipMismatchUncompLen;

                if (local_header.filename_len != self.filename_len)
                    return error.ZipMismatchFilenameLen;

                break :local_data_header_offset @as(u64, local_header.filename_len) +
                    @as(u64, local_header.extra_len);
            };

            if (options.allow_backslashes) {
                std.mem.replaceScalar(u8, filename, '\\', '/');
            } else {
                if (std.mem.indexOfScalar(u8, filename, '\\')) |_|
                    return error.ZipFilenameHasBackslash;
            }

            if (isBadFilename(filename))
                return error.ZipBadFilename;

            // All entries that end in '/' are directories
            if (filename[filename.len - 1] == '/') {
                if (self.uncompressed_size != 0)
                    return error.ZipBadDirectorySize;
                try dest.makePath(filename[0 .. filename.len - 1]);
                return;
            }

            const out_file = blk: {
                if (std.fs.path.dirname(filename)) |dirname| {
                    var parent_dir = try dest.makeOpenPath(dirname, .{});
                    defer parent_dir.close();

                    const basename = std.fs.path.basename(filename);
                    break :blk try parent_dir.createFile(basename, .{ .exclusive = true });
                }
                break :blk try dest.createFile(filename, .{ .exclusive = true });
            };
            defer out_file.close();
            var out_file_buffer: [1024]u8 = undefined;
            var file_writer = out_file.writer(&out_file_buffer);

            const local_data_file_offset: u64 =
                @as(u64, self.file_offset) +
                @as(u64, @sizeOf(LocalFileHeader)) +
                local_data_header_offset;
            try stream.seekTo(local_data_file_offset);

            // TODO limit based on self.compressed_size
            switch (self.compression_method) {
                .store => {
                    stream.interface.streamExact64(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
                        error.ReadFailed => return stream.err.?,
                        error.WriteFailed => return file_writer.err.?,
                        error.EndOfStream => return error.ZipDecompressTruncated,
                    };
                },
                .deflate => {
                    var flate_buffer: [flate.max_window_len]u8 = undefined;
                    var decompress: flate.Decompress = .init(&stream.interface, .raw, &flate_buffer);
                    decompress.reader.streamExact64(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
                        error.ReadFailed => return stream.err.?,
                        error.WriteFailed => return file_writer.err orelse decompress.err.?,
                        error.EndOfStream => return error.ZipDecompressTruncated,
                    };
                },
                else => return error.UnsupportedCompressionMethod,
            }
            try file_writer.end();
        }
    };
};