struct BodyWriter [src]
Request or response body.
Fields
http_protocol_output: *Writer
Until the lifetime of BodyWriter ends, it is illegal to modify the
state of this other than via methods of BodyWriter.
state: State
writer: Writer
Members
- chunkedDrain (Function)
- chunkedSendFile (Function)
- contentLengthDrain (Function)
- contentLengthSendFile (Function)
- elidingDrain (Function)
- elidingSendFile (Function)
- end (Function)
- endChunked (Function)
- EndChunkedOptions (struct)
- endChunkedUnflushed (Function)
- endUnflushed (Function)
- Error (Error Set)
- flush (Function)
- isEliding (Function)
- noneDrain (Function)
- noneSendFile (Function)
- State (union)
Source
pub const BodyWriter = struct {
    /// Until the lifetime of `BodyWriter` ends, it is illegal to modify the
    /// state of this other than via methods of `BodyWriter`.
    http_protocol_output: *Writer,
    /// Tracks the body framing in use (none / content-length / chunked) and
    /// how much body data remains before the next framing boundary.
    state: State,
    /// The `Writer` interface handed to users. Its vtable dispatches to one
    /// of the `*Drain` / `*SendFile` implementations below; `isEliding` is
    /// derived from which drain hook is installed.
    writer: Writer,

    pub const Error = Writer.Error;

    /// How many zeroes to reserve for hex-encoded chunk length.
    const chunk_len_digits = 8;
    const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
    /// Placeholder chunk header ("00000000\r\n"); the zero digits are
    /// overwritten in place by `writeHex` once the chunk length is known.
    const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";

    comptime {
        // 8 hex digits must exactly cover the u32 range.
        assert(max_chunk_len == std.math.maxInt(u32));
    }

    pub const State = union(enum) {
        /// End of connection signals the end of the stream.
        none,
        /// As a debugging utility, counts down to zero as bytes are written.
        content_length: u64,
        /// Each chunk is wrapped in a header and trailer.
        /// This length is the number of bytes to be written before the
        /// next header. This includes +2 for the `\r\n` trailer and is zero
        /// for the beginning of the stream.
        chunk_len: usize,
        /// Cleanly finished stream; connection can be reused.
        end,

        pub const init_chunked: State = .{ .chunk_len = 0 };
    };

    /// Returns whether body bytes are being discarded rather than sent
    /// (e.g. when only headers are wanted). Detected by checking whether the
    /// eliding drain hook is installed in the writer's vtable.
    pub fn isEliding(w: *const BodyWriter) bool {
        return w.writer.vtable.drain == elidingDrain;
    }

    /// Sends all buffered data across `BodyWriter.http_protocol_output`.
    pub fn flush(w: *BodyWriter) Error!void {
        const out = w.http_protocol_output;
        switch (w.state) {
            // Exhaustive on purpose: flushing is legal in every state.
            .end, .none, .content_length, .chunk_len => return out.flush(),
        }
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header, then flushes `http_protocol_output`.
    ///
    /// When using transfer-encoding: chunked, writes the end-of-stream message
    /// with empty trailers, then flushes the stream to the system. Asserts any
    /// started chunk has been completely finished.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endUnflushed`
    /// * `endChunked`
    pub fn end(w: *BodyWriter) Error!void {
        try endUnflushed(w);
        try w.http_protocol_output.flush();
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header.
    ///
    /// Otherwise, transfer-encoding: chunked is being used, and it writes the
    /// end-of-stream message with empty trailers.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// Does not flush `http_protocol_output`, but does flush `writer`.
    ///
    /// See also:
    /// * `end`
    /// * `endChunked`
    pub fn endUnflushed(w: *BodyWriter) Error!void {
        // Push user-buffered bytes through the body framing first.
        try w.writer.flush();
        switch (w.state) {
            .end => unreachable,
            .content_length => |len| {
                assert(len == 0); // Trips when end() called before all bytes written.
                w.state = .end;
            },
            .none => {},
            .chunk_len => return endChunkedUnflushed(w, .{}),
        }
    }

    pub const EndChunkedOptions = struct {
        /// Optional trailer fields emitted after the terminating zero-length
        /// chunk.
        trailers: []const Header = &.{},
    };

    /// Writes the end-of-stream message and any optional trailers, flushing
    /// the underlying stream.
    ///
    /// Asserts that the BodyWriter is using transfer-encoding: chunked.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endChunkedUnflushed`
    /// * `end`
    pub fn endChunked(w: *BodyWriter, options: EndChunkedOptions) Error!void {
        try endChunkedUnflushed(w, options);
        try w.http_protocol_output.flush();
    }

    /// Writes the end-of-stream message and any optional trailers.
    ///
    /// Does not flush.
    ///
    /// Asserts that the BodyWriter is using transfer-encoding: chunked.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endChunked`
    /// * `endUnflushed`
    /// * `endsUnflushed` is spelled `endUnflushed`
    /// * `end`
    pub fn endChunkedUnflushed(w: *BodyWriter, options: EndChunkedOptions) Error!void {
        if (w.isEliding()) {
            // Body was discarded; no chunk framing was emitted, so just mark done.
            w.state = .end;
            return;
        }
        const bw = w.http_protocol_output;
        switch (w.state.chunk_len) {
            0 => {}, // Between chunks; nothing open to close.
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => try bw.writeAll("\r\n"), // Only the chunk's CRLF trailer remains.
            else => unreachable, // An earlier write call indicated more data would follow.
        }
        // Zero-length chunk terminates the body (RFC 9112 chunked coding).
        try bw.writeAll("0\r\n");
        for (options.trailers) |trailer| {
            try bw.writeAll(trailer.name);
            try bw.writeAll(": ");
            try bw.writeAll(trailer.value);
            try bw.writeAll("\r\n");
        }
        try bw.writeAll("\r\n");
        w.state = .end;
    }

    /// `Writer.VTable.drain` implementation for content-length bodies:
    /// forwards buffered plus supplied data to `http_protocol_output` and
    /// counts the forwarded bytes down from `state.content_length`.
    pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.writeSplatHeader(w.buffered(), data, splat);
        bw.state.content_length -= n;
        return w.consume(n);
    }

    /// `Writer.VTable.drain` implementation for unframed bodies (connection
    /// close delimits the stream): pass-through to `http_protocol_output`.
    pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.writeSplatHeader(w.buffered(), data, splat);
        return w.consume(n);
    }

    /// `Writer.VTable.drain` implementation that discards the body while
    /// still doing content-length accounting, so `end` can assert the
    /// advertised length was honored.
    pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        // Last element of `data` is a pattern repeated `splat` times.
        const slice = data[0 .. data.len - 1];
        const pattern = data[slice.len];
        var written: usize = pattern.len * splat;
        for (slice) |bytes| written += bytes.len;
        switch (bw.state) {
            // Also account for bytes sitting in the writer's own buffer.
            .content_length => |*len| len.* -= written + w.end,
            else => {},
        }
        w.end = 0;
        return written;
    }

    /// `Writer.VTable.sendFile` implementation that discards the body:
    /// advances the file reader (by seeking) without transferring bytes,
    /// while keeping content-length accounting consistent.
    pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        // No real file handles on this target; let caller fall back to reads.
        if (File.Handle == void) return error.Unimplemented;
        if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
        switch (bw.state) {
            // Discard any bytes buffered in the writer before counting the file.
            .content_length => |*len| len.* -= w.end,
            else => {},
        }
        w.end = 0;
        if (limit == .nothing) return 0;
        if (file_reader.getSize()) |size| {
            const n = limit.minInt64(size - file_reader.pos);
            if (n == 0) return error.EndOfStream;
            // Skip the data instead of copying it anywhere.
            file_reader.seekBy(@intCast(n)) catch return error.Unimplemented;
            switch (bw.state) {
                .content_length => |*len| len.* -= n,
                else => {},
            }
            return n;
        } else |_| {
            // Error is observable on `file_reader` instance, and it is better to
            // treat the file as a pipe.
            return error.Unimplemented;
        }
    }

    /// `Writer.VTable.sendFile` implementation for unframed bodies:
    /// forwards buffered data plus file contents to `http_protocol_output`.
    pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
        return w.consume(n);
    }

    /// `Writer.VTable.sendFile` implementation for content-length bodies:
    /// like `noneSendFile`, but counts transferred bytes down from
    /// `state.content_length`.
    pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
        bw.state.content_length -= n;
        return w.consume(n);
    }

    /// `Writer.VTable.sendFile` implementation for chunked bodies. Requires
    /// a known file size up front, since the chunk header must be written
    /// before the chunk data. State machine over `state.chunk_len`:
    /// 0 = start a new chunk, 2 = close the current chunk, else = send
    /// remaining chunk payload.
    pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
            // If the file size is unknown, we cannot lower to a `sendFile` since we would
            // have to flush the chunk header before knowing the chunk length.
            return error.Unimplemented;
        };
        const out = bw.http_protocol_output;
        l: switch (bw.state.chunk_len) {
            0 => {
                // Emit a fresh chunk header with the hex-encoded length.
                const header_buf = try out.writableArray(chunk_header_template.len);
                @memcpy(header_buf, chunk_header_template);
                writeHex(header_buf[0..chunk_len_digits], data_len);
                bw.state.chunk_len = data_len + 2; // +2 for the CRLF trailer.
                continue :l bw.state.chunk_len;
            },
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => {
                // Chunk payload complete; write the CRLF trailer.
                try out.writeAll("\r\n");
                bw.state.chunk_len = 0;
                assert(file_reader.atEnd());
                return error.EndOfStream;
            },
            else => {
                // Cap the transfer at the open chunk's remaining payload.
                const chunk_limit: std.Io.Limit = .limited(bw.state.chunk_len - 2);
                const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
                    try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
                else
                    try out.write(chunk_limit.slice(w.buffered()));
                bw.state.chunk_len -= n;
                const ret = w.consume(n);
                return ret;
            },
        }
    }

    /// `Writer.VTable.drain` implementation for chunked bodies. Same state
    /// machine as `chunkedSendFile`, driven by in-memory data: opens a chunk
    /// sized to all currently known data, streams into it, and closes it
    /// with a CRLF trailer when only the +2 remains.
    pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        // Buffered bytes plus everything being passed in this call.
        const data_len = w.end + Writer.countSplat(data, splat);
        l: switch (bw.state.chunk_len) {
            0 => {
                // Start a chunk covering all data known at this moment.
                const header_buf = try out.writableArray(chunk_header_template.len);
                @memcpy(header_buf, chunk_header_template);
                writeHex(header_buf[0..chunk_len_digits], data_len);
                bw.state.chunk_len = data_len + 2; // +2 for the CRLF trailer.
                continue :l bw.state.chunk_len;
            },
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => {
                // Close the finished chunk, then open a new one for the rest.
                try out.writeAll("\r\n");
                bw.state.chunk_len = 0;
                continue :l 0;
            },
            else => {
                const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(bw.state.chunk_len - 2));
                bw.state.chunk_len -= n;
                return w.consume(n);
            },
        }
    }

    /// Writes an integer as base 16 to `buf`, right-aligned, assuming the
    /// buffer has already been filled with zeroes.
    fn writeHex(buf: []u8, x: usize) void {
        assert(std.mem.allEqual(u8, buf, '0'));
        const base = 16;
        var index: usize = buf.len;
        var a = x;
        while (a > 0) {
            const digit = a % base;
            index -= 1;
            buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
            a /= base;
        }
    }
};