Type Function ArrayListAlignedUnmanaged
Alias for std.array_list.ArrayListAlignedUnmanaged
A contiguous, growable list of arbitrarily aligned items in memory.
This is a wrapper around an array of T values aligned to alignment-byte
addresses. If the specified alignment is null, then @alignOf(T) is used.
Functions that potentially allocate memory accept an Allocator parameter.
Initialize directly or with initCapacity, and deinitialize with deinit
or use toOwnedSlice.
Default initialization of this struct is deprecated; use .empty instead.
Prototype
pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type
Parameters
T: type
alignment: ?u29
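Example
A minimal usage sketch (assumes `gpa` is any std.mem.Allocator; the element type u32 and the 16-byte alignment below are arbitrary choices, not requirements):
const std = @import("std");

fn demo(gpa: std.mem.Allocator) !void {
    // A list of u32 values whose backing allocation is 16-byte aligned.
    var list: std.ArrayListAlignedUnmanaged(u32, 16) = .empty;
    defer list.deinit(gpa);

    try list.append(gpa, 1);
    try list.appendSlice(gpa, &.{ 2, 3, 4 });

    // `items` is a []align(16) u32 of length 4 at this point.
    std.debug.print("{any}\n", .{list.items});
}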
Source
pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return ArrayListAlignedUnmanaged(T, null);
}
}
return struct {
const Self = @This();
/// Contents of the list. This field is intended to be accessed
/// directly.
///
/// Pointers to elements in this slice are invalidated by various
/// functions of this ArrayList in accordance with the respective
/// documentation. In all cases, "invalidated" means that the memory
/// has been passed to an allocator's resize or free function.
items: Slice = &[_]T{},
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize = 0,
/// An ArrayList containing no elements.
pub const empty: Self = .{
.items = &.{},
.capacity = 0,
};
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Initialize with capacity to hold `num` elements.
/// The resulting capacity will equal `num` exactly.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(gpa: Allocator, num: usize) Allocator.Error!Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(gpa, num);
return self;
}
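// Usage sketch (illustrative; `gpa` stands for any std.mem.Allocator and the
// element type u32 is an arbitrary choice):
//
//     var list = try std.ArrayListUnmanaged(u32).initCapacity(gpa, 8);
//     defer list.deinit(gpa);
//     list.appendAssumeCapacity(42); // stays within the reserved capacity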
/// Initialize with externally-managed memory. The buffer determines the
/// capacity, and the length is set to zero.
/// When initialized this way, all functions that accept an Allocator
/// argument cause illegal behavior.
pub fn initBuffer(buffer: Slice) Self {
return .{
.items = buffer[0..0],
.capacity = buffer.len,
};
}
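// Usage sketch (illustrative): backing the list with a stack buffer; only the
// *AssumeCapacity and fixed-writer operations may be used on such a list.
//
//     var buf: [16]u8 = undefined;
//     var list = std.ArrayListUnmanaged(u8).initBuffer(&buf);
//     list.appendSliceAssumeCapacity("hi");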
/// Release all allocated memory.
pub fn deinit(self: *Self, gpa: Allocator) void {
gpa.free(self.allocatedSlice());
self.* = undefined;
}
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
pub fn toManaged(self: *Self, gpa: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = gpa };
}
/// ArrayListUnmanaged takes ownership of the passed in slice.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
};
}
/// ArrayListUnmanaged takes ownership of the passed in slice.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSliceSentinel(comptime sentinel: T, slice: [:sentinel]T) Self {
return Self{
.items = slice,
.capacity = slice.len + 1,
};
}
/// The caller owns the returned memory. Empties this ArrayList.
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self, gpa: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
if (gpa.remap(old_memory, self.items.len)) |new_items| {
self.* = .empty;
return new_items;
}
const new_memory = try gpa.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
self.clearAndFree(gpa);
return new_memory;
}
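// Usage sketch (illustrative; `gpa` is any std.mem.Allocator): the returned
// slice is owned by the caller and must be freed with the same allocator.
//
//     try list.appendSlice(gpa, "abc");
//     const owned = try list.toOwnedSlice(gpa); // `list` is now empty
//     defer gpa.free(owned);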
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSliceSentinel(self: *Self, gpa: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
// This addition can never overflow because `self.items` can never occupy the whole address space
try self.ensureTotalCapacityPrecise(gpa, self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice(gpa);
return result[0 .. result.len - 1 :sentinel];
}
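// Usage sketch (illustrative; `gpa` is any std.mem.Allocator, for a
// std.ArrayListUnmanaged(u8)):
//
//     try list.appendSlice(gpa, "hello");
//     const str: [:0]u8 = try list.toOwnedSliceSentinel(gpa, 0);
//     defer gpa.free(str);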
/// Creates a copy of this ArrayList.
pub fn clone(self: Self, gpa: Allocator) Allocator.Error!Self {
var cloned = try Self.initCapacity(gpa, self.capacity);
cloned.appendSliceAssumeCapacity(self.items);
return cloned;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is equivalent to append.
/// This operation is O(N).
/// Invalidates element pointers if additional memory is needed.
/// Asserts that the index is in bounds or equal to the length.
pub fn insert(self: *Self, gpa: Allocator, i: usize, item: T) Allocator.Error!void {
const dst = try self.addManyAt(gpa, i, 1);
dst[0] = item;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is equivalent to append.
/// This operation is O(N).
/// Asserts that the list has capacity for one additional item.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertAssumeCapacity(self: *Self, i: usize, item: T) void {
assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
self.items[i] = item;
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAt(
self: *Self,
gpa: Allocator,
index: usize,
count: usize,
) Allocator.Error![]T {
var managed = self.toManaged(gpa);
defer self.* = managed.moveToUnmanaged();
return managed.addManyAt(index, count);
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Invalidates pre-existing pointers to elements at and after `index`, but
/// does not invalidate any before that.
/// Asserts that the list has capacity for the additional items.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAtAssumeCapacity(self: *Self, index: usize, count: usize) []T {
const new_len = self.items.len + count;
assert(self.capacity >= new_len);
const to_move = self.items[index..];
self.items.len = new_len;
mem.copyBackwards(T, self.items[index + count ..], to_move);
const result = self.items[index..][0..count];
@memset(result, undefined);
return result;
}
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertSlice(
self: *Self,
gpa: Allocator,
index: usize,
items: []const T,
) Allocator.Error!void {
const dst = try self.addManyAt(
gpa,
index,
items.len,
);
@memcpy(dst, items);
}
/// Grows or shrinks the list as necessary.
/// Invalidates element pointers if additional capacity is allocated.
/// Asserts that the range is in bounds.
pub fn replaceRange(
self: *Self,
gpa: Allocator,
start: usize,
len: usize,
new_items: []const T,
) Allocator.Error!void {
const after_range = start + len;
const range = self.items[start..after_range];
if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
@memcpy(range[0..first.len], first);
try self.insertSlice(gpa, after_range, rest);
} else {
self.replaceRangeAssumeCapacity(start, len, new_items);
}
}
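// Usage sketch (illustrative; `gpa` is any std.mem.Allocator, for a
// std.ArrayListUnmanaged(u8)):
//
//     // list.items is "abcdef" beforehand
//     try list.replaceRange(gpa, 1, 2, "XYZ"); // list.items is now "aXYZdef"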
/// Grows or shrinks the list as necessary.
/// Never invalidates element pointers.
/// Asserts the capacity is enough for additional items.
pub fn replaceRangeAssumeCapacity(self: *Self, start: usize, len: usize, new_items: []const T) void {
const after_range = start + len;
const range = self.items[start..after_range];
if (range.len == new_items.len)
@memcpy(range[0..new_items.len], new_items)
else if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
@memcpy(range[0..first.len], first);
const dst = self.addManyAtAssumeCapacity(after_range, rest.len);
@memcpy(dst, rest);
} else {
const extra = range.len - new_items.len;
@memcpy(range[0..new_items.len], new_items);
std.mem.copyForwards(
T,
self.items[after_range - extra ..],
self.items[after_range..],
);
@memset(self.items[self.items.len - extra ..], undefined);
self.items.len -= extra;
}
}
/// Extend the list by 1 element. Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn append(self: *Self, gpa: Allocator, item: T) Allocator.Error!void {
const new_item_ptr = try self.addOne(gpa);
new_item_ptr.* = item;
}
/// Extend the list by 1 element.
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
self.addOneAssumeCapacity().* = item;
}
/// Remove the element at index `i` from the list and return its value.
/// Invalidates pointers to the last element.
/// This operation is O(N).
/// Asserts that the list is not empty.
/// Asserts that the index is in bounds.
pub fn orderedRemove(self: *Self, i: usize) T {
const old_item = self.items[i];
self.replaceRangeAssumeCapacity(i, 1, &.{});
return old_item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// Invalidates pointers to the last element.
/// This operation is O(1).
/// Asserts that the list is not empty.
/// Asserts that the index is in bounds.
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop().?;
const old_item = self.items[i];
self.items[i] = self.pop().?;
return old_item;
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn appendSlice(self: *Self, gpa: Allocator, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(gpa, items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list.
/// Asserts that the list can hold the additional items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary. Only call this function if a call to `appendSlice` instead would
/// be a compile error.
/// Invalidates element pointers if additional memory is needed.
pub fn appendUnalignedSlice(self: *Self, gpa: Allocator, items: []align(1) const T) Allocator.Error!void {
try self.ensureUnusedCapacity(gpa, items.len);
self.appendUnalignedSliceAssumeCapacity(items);
}
/// Append an unaligned slice of items to the list.
/// Only call this function if a call to `appendSliceAssumeCapacity`
/// instead would be a compile error.
/// Asserts that the list can hold the additional items.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
pub const WriterContext = struct {
self: *Self,
allocator: Allocator,
};
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(WriterContext, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self, gpa: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = gpa } };
}
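// Usage sketch (illustrative, only available when T == u8; `gpa` is any
// std.mem.Allocator):
//
//     var out: std.ArrayListUnmanaged(u8) = .empty;
//     defer out.deinit(gpa);
//     try out.writer(gpa).print("x = {d}\n", .{42});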
/// Same as `append` except it returns the number of bytes written,
/// which is always the same as `m.len`. The purpose of this function
/// existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
try context.self.appendSlice(context.allocator, m);
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Initializes a Writer which will append to the list but will return
/// `error.OutOfMemory` rather than increasing capacity.
pub fn fixedWriter(self: *Self) FixedWriter {
return .{ .context = self };
}
/// The purpose of this function existing is to match `std.io.Writer` API.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
const available_capacity = self.capacity - self.items.len;
if (m.len > available_capacity)
return error.OutOfMemory;
self.appendSliceAssumeCapacity(m);
return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
/// The function is inline so that a comptime-known `value` parameter will
/// have a more optimal memset codegen in case it has a repeated byte pattern.
pub inline fn appendNTimes(self: *Self, gpa: Allocator, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(gpa, try addOrOom(old_len, n));
@memset(self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// Never invalidates element pointers.
/// The function is inline so that a comptime-known `value` parameter will
/// have better memset codegen in case it has a repeated byte pattern.
/// Asserts that the list can hold the additional items.
pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
@memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list length to `new_len`.
/// Additional elements contain the value `undefined`.
/// Invalidates element pointers if additional memory is needed.
pub fn resize(self: *Self, gpa: Allocator, new_len: usize) Allocator.Error!void {
try self.ensureTotalCapacity(gpa, new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);
if (@sizeOf(T) == 0) {
self.items.len = new_len;
return;
}
const old_memory = self.allocatedSlice();
if (gpa.remap(old_memory, new_len)) |new_items| {
self.capacity = new_items.len;
self.items = new_items;
return;
}
const new_memory = gpa.alignedAlloc(T, alignment, new_len) catch |e| switch (e) {
error.OutOfMemory => {
// No problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
@memcpy(new_memory, self.items[0..new_len]);
gpa.free(old_memory);
self.items = new_memory;
self.capacity = new_memory.len;
}
/// Reduce length to `new_len`.
/// Invalidates pointers to elements `items[new_len..]`.
/// Keeps capacity the same.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
self.items.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.items.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self, gpa: Allocator) void {
gpa.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
}
/// Modify the array so that it can hold at least `new_capacity` items.
/// Implements super-linear growth to achieve amortized O(1) append operations.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
if (self.capacity >= new_capacity) return;
return self.ensureTotalCapacityPrecise(gpa, growCapacity(self.capacity, new_capacity));
}
/// If the current capacity is less than `new_capacity`, this function will
/// modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (gpa.remap(old_memory, new_capacity)) |new_memory| {
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
} else {
const new_memory = try gpa.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
gpa.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
}
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
self: *Self,
gpa: Allocator,
additional_count: usize,
) Allocator.Error!void {
return self.ensureTotalCapacity(gpa, try addOrOom(self.items.len, additional_count));
}
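// Usage sketch (illustrative; `gpa` is any std.mem.Allocator, `new_items` is
// a hypothetical slice of T): reserve once, then append infallibly.
//
//     try list.ensureUnusedCapacity(gpa, new_items.len);
//     for (new_items) |item| list.appendAssumeCapacity(item);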
/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values.
/// Never invalidates element pointers.
pub fn expandToCapacity(self: *Self) void {
self.items.len = self.capacity;
}
/// Increase length by 1, returning pointer to the new item.
/// The returned element pointer becomes invalid when the list is resized.
pub fn addOne(self: *Self, gpa: Allocator) Allocator.Error!*T {
// This can never overflow because `self.items` can never occupy the whole address space
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(gpa, newlen);
return self.addOneAssumeCapacity();
}
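// Usage sketch (illustrative; `gpa` is any std.mem.Allocator, `item` is a
// hypothetical value of type T):
//
//     (try list.addOne(gpa)).* = item; // same effect as `try list.append(gpa, item)`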
/// Increase length by 1, returning pointer to the new item.
/// Never invalidates element pointers.
/// The returned element pointer becomes invalid when the list is resized.
/// Asserts that the list can hold one additional item.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
self.items.len += 1;
return &self.items[self.items.len - 1];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsArray(self: *Self, gpa: Allocator, comptime n: usize) Allocator.Error!*[n]T {
const prev_len = self.items.len;
try self.resize(gpa, try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsSlice(self: *Self, gpa: Allocator, n: usize) Allocator.Error![]T {
const prev_len = self.items.len;
try self.resize(gpa, try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// If the list is empty, returns `null`.
/// Invalidates pointers to the last element.
pub fn pop(self: *Self) ?T {
if (self.items.len == 0) return null;
const val = self.items[self.items.len - 1];
self.items.len -= 1;
return val;
}
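// Usage sketch (illustrative; `handle` is a hypothetical consumer of T):
// drain the list from the back until it is empty.
//
//     while (list.pop()) |item| {
//         handle(item);
//     }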
/// Returns a slice of all the items plus the extra capacity, whose memory
/// contents are `undefined`.
pub fn allocatedSlice(self: Self) Slice {
return self.items.ptr[0..self.capacity];
}
/// Returns a slice of only the extra capacity after items.
/// This can be useful for writing directly into an ArrayList.
/// Note that such an operation must be followed up with a direct
/// modification of `self.items.len`.
pub fn unusedCapacitySlice(self: Self) []T {
return self.allocatedSlice()[self.items.len..];
}
/// Return the last element from the list.
/// Asserts that the list is not empty.
pub fn getLast(self: Self) T {
const val = self.items[self.items.len - 1];
return val;
}
/// Return the last element from the list, or
/// return `null` if list is empty.
pub fn getLastOrNull(self: Self) ?T {
if (self.items.len == 0) return null;
return self.getLast();
}
const init_capacity = @as(comptime_int, @max(1, std.atomic.cache_line / @sizeOf(T)));
/// Called when memory growth is necessary. Returns a capacity larger than
/// minimum that grows super-linearly.
fn growCapacity(current: usize, minimum: usize) usize {
var new = current;
while (true) {
new +|= new / 2 + init_capacity;
if (new >= minimum)
return new;
}
}
};
}