struct Decompress

Fields

gpa: Allocator
input: *Reader
reader: Reader
buffer: Decode.CircularBuffer
range_decoder: RangeDecoder
decode: Decode
err: ?Error
unpacked_size: ?u64
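
The `err` field exists because the generic `std.Io.Reader` vtable can only report `error.ReadFailed`; before failing, the decoder records the specific cause here (see `readIndirect` in the source below). A caller that sees `ReadFailed` can inspect `err` for the underlying `Error`. A minimal sketch of that pattern, using a hypothetical helper name:

    // Hypothetical helper, not part of this API: map a generic read
    // failure back to the decompressor's recorded cause.
    fn explainFailure(d: *Decompress, read_err: anyerror) anyerror {
        return switch (read_err) {
            // `readIndirect` stores the real cause in `d.err` before
            // returning `error.ReadFailed` through the Reader interface.
            error.ReadFailed => d.err orelse read_err,
            else => read_err,
        };
    }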

Members

Error
initParams
initOptions
takeBuffer
deinit

Source

pub const Decompress = struct {
    gpa: Allocator,
    input: *Reader,
    reader: Reader,
    buffer: Decode.CircularBuffer,
    range_decoder: RangeDecoder,
    decode: Decode,
    err: ?Error,
    unpacked_size: ?u64,

    pub const Error = error{
        OutOfMemory,
        ReadFailed,
        CorruptInput,
        DecompressedSizeMismatch,
        EndOfStream,
        Overflow,
    };

    /// Takes ownership of `buffer` which may be resized with `gpa`.
    ///
    /// LZMA was explicitly designed to take advantage of large heap memory
    /// being available, with a dictionary size anywhere from 4K to 4G. Thus,
    /// this API dynamically allocates the dictionary as-needed.
    pub fn initParams(
        input: *Reader,
        gpa: Allocator,
        buffer: []u8,
        params: Decode.Params,
        mem_limit: usize,
    ) !Decompress {
        return .{
            .gpa = gpa,
            .input = input,
            .buffer = Decode.CircularBuffer.init(params.dict_size, mem_limit),
            .range_decoder = try RangeDecoder.init(input),
            .decode = try Decode.init(gpa, params.properties),
            .reader = .{
                .buffer = buffer,
                .vtable = &.{
                    .readVec = readVec,
                    .stream = stream,
                    .discard = discard,
                },
                .seek = 0,
                .end = 0,
            },
            .err = null,
            .unpacked_size = params.unpacked_size,
        };
    }

    /// Takes ownership of `buffer` which may be resized with `gpa`.
    ///
    /// LZMA was explicitly designed to take advantage of large heap memory
    /// being available, with a dictionary size anywhere from 4K to 4G. Thus,
    /// this API dynamically allocates the dictionary as-needed.
    pub fn initOptions(
        input: *Reader,
        gpa: Allocator,
        buffer: []u8,
        options: Decode.Options,
        mem_limit: usize,
    ) !Decompress {
        const params = try Decode.Params.readHeader(input, options);
        return initParams(input, gpa, buffer, params, mem_limit);
    }

    /// Reclaim ownership of the buffer passed to `init`.
    pub fn takeBuffer(d: *Decompress) []u8 {
        const buffer = d.reader.buffer;
        d.reader.buffer = &.{};
        return buffer;
    }

    pub fn deinit(d: *Decompress) void {
        const gpa = d.gpa;
        gpa.free(d.reader.buffer);
        d.buffer.deinit(gpa);
        d.decode.deinit(gpa);
        d.* = undefined;
    }

    fn readVec(r: *Reader, data: [][]u8) Reader.Error!usize {
        _ = data;
        return readIndirect(r);
    }

    fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
        _ = w;
        _ = limit;
        return readIndirect(r);
    }

    fn discard(r: *Reader, limit: std.Io.Limit) Reader.Error!usize {
        const d: *Decompress = @alignCast(@fieldParentPtr("reader", r));
        _ = d;
        _ = limit;
        @panic("TODO");
    }

    fn readIndirect(r: *Reader) Reader.Error!usize {
        const d: *Decompress = @alignCast(@fieldParentPtr("reader", r));
        const gpa = d.gpa;
        var allocating = Writer.Allocating.initOwnedSlice(gpa, r.buffer);
        allocating.writer.end = r.end;
        defer {
            r.buffer = allocating.writer.buffer;
            r.end = allocating.writer.end;
        }
        // A decode state of maxInt(usize) is the sentinel set below once
        // the stream has been fully decoded.
        if (d.decode.state == math.maxInt(usize)) return error.EndOfStream;
        process_next: {
            if (d.unpacked_size) |unpacked_size| {
                if (d.buffer.len >= unpacked_size) break :process_next;
            } else if (d.range_decoder.isFinished()) {
                break :process_next;
            }
            var n_read: u64 = 0;
            switch (d.decode.process(
                d.input,
                &allocating,
                &d.buffer,
                &d.range_decoder,
                &n_read,
            ) catch |err| switch (err) {
                error.WriteFailed => {
                    d.err = error.OutOfMemory;
                    return error.ReadFailed;
                },
                error.EndOfStream => {
                    d.err = error.EndOfStream;
                    return error.ReadFailed;
                },
                else => |e| {
                    d.err = e;
                    return error.ReadFailed;
                },
            }) {
                .more => return 0,
                .finished => break :process_next,
            }
        }
        if (d.unpacked_size) |unpacked_size| {
            if (d.buffer.len != unpacked_size) {
                d.err = error.DecompressedSizeMismatch;
                return error.ReadFailed;
            }
        }
        d.buffer.finish(&allocating.writer) catch |err| switch (err) {
            error.WriteFailed => {
                d.err = error.OutOfMemory;
                return error.ReadFailed;
            },
        };
        // Mark the stream as fully decoded so subsequent reads report
        // end of stream.
        d.decode.state = math.maxInt(usize);
        return 0;
    }
};
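
A minimal usage sketch follows. It assumes this struct is reachable as `std.compress.lzma.Decompress`, that `Decode.Options` is exposed alongside it, and that `std.Io.Reader.allocRemaining` is available; the empty initial buffer and the 64 MiB `mem_limit` are illustrative choices, not requirements.

    const std = @import("std");
    const lzma = std.compress.lzma;

    /// Decompress an entire LZMA stream into a heap-allocated slice.
    /// `input` must be positioned at the LZMA header.
    fn decompressAll(
        gpa: std.mem.Allocator,
        input: *std.Io.Reader,
        options: lzma.Decode.Options,
    ) ![]u8 {
        // Pass an empty buffer; `Decompress` takes ownership and grows
        // it with `gpa` as needed. `mem_limit` caps the dictionary size.
        var d = try lzma.Decompress.initOptions(input, gpa, &.{}, options, 64 * 1024 * 1024);
        defer d.deinit();
        // Drain decompressed bytes through the embedded Reader interface.
        return d.reader.allocRemaining(gpa, .unlimited);
    }

When the stream parameters are known out-of-band (raw LZMA data with no header), `initParams` can be called directly instead of `initOptions`; `takeBuffer` reclaims the internal buffer before `deinit` frees it.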