struct SubmissionQueue
Fields
head: *u32
tail: *u32
mask: u32
flags: *u32
dropped: *u32
array: []u32
sqes: []linux.io_uring_sqe
mmap: []align(page_size_min) u8
mmap_sqes: []align(page_size_min) u8
sqe_head: u32 = 0
sqe_tail: u32 = 0
Members
init
deinit
Source
pub const SubmissionQueue = struct {
    head: *u32,
    tail: *u32,
    mask: u32,
    flags: *u32,
    dropped: *u32,
    array: []u32,
    sqes: []linux.io_uring_sqe,
    mmap: []align(page_size_min) u8,
    mmap_sqes: []align(page_size_min) u8,
    // We use `sqe_head` and `sqe_tail` in the same way as liburing:
    // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
    // We then set `tail` to `sqe_tail` once, only when these events are actually submitted.
    // This allows us to amortize the cost of the @atomicStore to `tail` across multiple SQEs.
    sqe_head: u32 = 0,
    sqe_tail: u32 = 0,

    pub fn init(fd: posix.fd_t, p: linux.io_uring_params) !SubmissionQueue {
        assert(fd >= 0);
        assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
        const size = @max(
            p.sq_off.array + p.sq_entries * @sizeOf(u32),
            p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe),
        );
        const mmap = try posix.mmap(
            null,
            size,
            posix.PROT.READ | posix.PROT.WRITE,
            .{ .TYPE = .SHARED, .POPULATE = true },
            fd,
            linux.IORING_OFF_SQ_RING,
        );
        errdefer posix.munmap(mmap);
        assert(mmap.len == size);

        // The motivation for the `sqes` and `array` indirection is to make it possible for the
        // application to preallocate static linux.io_uring_sqe entries and then replay them when needed.
        const size_sqes = p.sq_entries * @sizeOf(linux.io_uring_sqe);
        const mmap_sqes = try posix.mmap(
            null,
            size_sqes,
            posix.PROT.READ | posix.PROT.WRITE,
            .{ .TYPE = .SHARED, .POPULATE = true },
            fd,
            linux.IORING_OFF_SQES,
        );
        errdefer posix.munmap(mmap_sqes);
        assert(mmap_sqes.len == size_sqes);

        const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array]));
        const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0]));
        // We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
        // see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
        assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*);
        return SubmissionQueue{
            .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])),
            .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])),
            .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*,
            .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])),
            .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])),
            .array = array[0..p.sq_entries],
            .sqes = sqes[0..p.sq_entries],
            .mmap = mmap,
            .mmap_sqes = mmap_sqes,
        };
    }

    pub fn deinit(self: *SubmissionQueue) void {
        posix.munmap(self.mmap_sqes);
        posix.munmap(self.mmap);
    }
};
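Example
The comments on `sqe_head` and `sqe_tail` in the source above describe how submissions are batched before being handed to the kernel. The sketch below is illustrative only and is not the std library API: the helper names getSqe and flushSqes are hypothetical, and the snippet assumes the SubmissionQueue struct defined above is in scope. It shows how several SQEs can be reserved locally and then published to the kernel with a single @atomicStore to `tail`, and how the `array` indirection maps ring slots to indices into `sqes`.

const std = @import("std");
const linux = std.os.linux;

/// Hypothetical helper (not part of std): reserve the next free SQE, bumping only the local `sqe_tail`.
fn getSqe(sq: *SubmissionQueue) ?*linux.io_uring_sqe {
    // `head` is advanced by the kernel as it consumes entries, so load it atomically.
    const head = @atomicLoad(u32, sq.head, .acquire);
    // The ring is full when every slot between `head` and `sqe_tail` is already reserved.
    // Head and tail wrap around, so use wrapping arithmetic.
    if (sq.sqe_tail -% head >= @as(u32, @intCast(sq.sqes.len))) return null;
    const sqe = &sq.sqes[sq.sqe_tail & sq.mask];
    sq.sqe_tail +%= 1;
    return sqe;
}

/// Hypothetical helper (not part of std): publish every reserved SQE with one atomic store to `tail`.
fn flushSqes(sq: *SubmissionQueue) u32 {
    var tail = sq.tail.*;
    while (sq.sqe_head != sq.sqe_tail) : (sq.sqe_head +%= 1) {
        // The `array` indirection: ring slot -> index into `sqes`.
        sq.array[tail & sq.mask] = sq.sqe_head & sq.mask;
        tail +%= 1;
    }
    // A single release store makes all filled SQEs visible to the kernel at once.
    @atomicStore(u32, sq.tail, tail, .release);
    // Number of entries the kernel has yet to consume.
    return tail -% @atomicLoad(u32, sq.head, .acquire);
}

Reserving an SQE is a purely local operation here; only flushSqes touches the shared `tail`, which is what the source comment means by amortizing the cost of the @atomicStore across multiple SQEs.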