Type Function: Value

Prototype

pub fn Value(comptime T: type) type

Parameters

T: type

Example

// Example: a minimal atomic reference count built on `Value(usize)`.
// Demonstrates the monotonic increment / release decrement / acquire load
// pattern for safely dropping a shared resource.
test Value {
    const RefCount = struct {
        count: Value(usize),
        dropFn: *const fn (*RefCount) void,

        const RefCount = @This();

        fn ref(rc: *RefCount) void {
            // no synchronization necessary; just updating a counter.
            _ = rc.count.fetchAdd(1, .monotonic);
        }

        fn unref(rc: *RefCount) void {
            // release ensures code before unref() happens-before the
            // count is decremented as dropFn could be called by then.
            if (rc.count.fetchSub(1, .release) == 1) {
                // seeing 1 in the counter means that other unref()s have happened,
                // but it doesn't mean that uses before each unref() are visible.
                // The load acquires the release-sequence created by previous unref()s
                // in order to ensure visibility of uses before dropping.
                _ = rc.count.load(.acquire);
                (rc.dropFn)(rc);
            }
        }

        // Placeholder drop function for the example; intentionally does nothing.
        fn noop(rc: *RefCount) void {
            _ = rc;
        }
    };

    var ref_count: RefCount = .{
        .count = Value(usize).init(0),
        .dropFn = RefCount.noop,
    };
    ref_count.ref();
    ref_count.unref();
}

Source

/// A wrapper around a value of type `T` that is only accessed through atomic
/// builtins, each taking an explicit `AtomicOrder`. Declared `extern struct`
/// so the wrapper has a well-defined memory layout containing exactly the
/// single `raw: T` field.
pub fn Value(comptime T: type) type {
    return extern struct {
        /// Care must be taken to avoid data races when interacting with this field directly.
        raw: T,

        const Self = @This();

        /// Initializes the wrapper with `value`. Performs no synchronization;
        /// safe only before the value is shared between threads.
        pub fn init(value: T) Self {
            return .{ .raw = value };
        }

        // Intentionally a compile error: referencing `fence` reports that
        // `@fence` is deprecated rather than silently doing nothing.
        pub const fence = @compileError("@fence is deprecated, use other atomics to establish ordering");

        /// Atomically reads the value with the given memory ordering.
        pub inline fn load(self: *const Self, comptime order: AtomicOrder) T {
            return @atomicLoad(T, &self.raw, order);
        }

        /// Atomically writes `value` with the given memory ordering.
        pub inline fn store(self: *Self, value: T, comptime order: AtomicOrder) void {
            @atomicStore(T, &self.raw, value, order);
        }

        /// Atomically replaces the value with `operand` and returns the
        /// previous value.
        pub inline fn swap(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Xchg, operand, order);
        }

        /// Compare-and-swap that is allowed to fail spuriously (suitable for
        /// retry loops). Returns `null` on success, otherwise the value
        /// observed at the time of the attempt.
        pub inline fn cmpxchgWeak(
            self: *Self,
            expected_value: T,
            new_value: T,
            comptime success_order: AtomicOrder,
            comptime fail_order: AtomicOrder,
        ) ?T {
            return @cmpxchgWeak(T, &self.raw, expected_value, new_value, success_order, fail_order);
        }

        /// Compare-and-swap that only fails when the stored value does not
        /// equal `expected_value`. Returns `null` on success, otherwise the
        /// observed value.
        pub inline fn cmpxchgStrong(
            self: *Self,
            expected_value: T,
            new_value: T,
            comptime success_order: AtomicOrder,
            comptime fail_order: AtomicOrder,
        ) ?T {
            return @cmpxchgStrong(T, &self.raw, expected_value, new_value, success_order, fail_order);
        }

        /// Atomic add; returns the value held before the operation.
        pub inline fn fetchAdd(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Add, operand, order);
        }

        /// Atomic subtract; returns the value held before the operation.
        pub inline fn fetchSub(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Sub, operand, order);
        }

        /// Atomic minimum; returns the value held before the operation.
        pub inline fn fetchMin(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Min, operand, order);
        }

        /// Atomic maximum; returns the value held before the operation.
        pub inline fn fetchMax(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Max, operand, order);
        }

        /// Atomic bitwise AND; returns the value held before the operation.
        pub inline fn fetchAnd(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .And, operand, order);
        }

        /// Atomic bitwise NAND; returns the value held before the operation.
        pub inline fn fetchNand(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Nand, operand, order);
        }

        /// Atomic bitwise XOR; returns the value held before the operation.
        pub inline fn fetchXor(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Xor, operand, order);
        }

        /// Atomic bitwise OR; returns the value held before the operation.
        pub inline fn fetchOr(self: *Self, operand: T, comptime order: AtomicOrder) T {
            return @atomicRmw(T, &self.raw, .Or, operand, order);
        }

        /// Generic atomic read-modify-write with a caller-chosen `op`;
        /// returns the value held before the operation.
        pub inline fn rmw(
            self: *Self,
            comptime op: std.builtin.AtomicRmwOp,
            operand: T,
            comptime order: AtomicOrder,
        ) T {
            return @atomicRmw(T, &self.raw, op, operand, order);
        }

        // Smallest integer type able to index every bit of `T`.
        const Bit = std.math.Log2Int(T);

        /// Atomically sets the bit at index `bit` (via `fetchOr`) and returns
        /// the bit's previous state (1 if it was already set, else 0).
        /// Marked `inline` so that if `bit` is comptime-known, the instruction
        /// can be lowered to a more efficient machine code instruction if
        /// possible.
        pub inline fn bitSet(self: *Self, bit: Bit, comptime order: AtomicOrder) u1 {
            const mask = @as(T, 1) << bit;
            const value = self.fetchOr(mask, order);
            return @intFromBool(value & mask != 0);
        }

        /// Atomically clears the bit at index `bit` (via `fetchAnd` with the
        /// inverted mask) and returns the bit's previous state.
        /// Marked `inline` so that if `bit` is comptime-known, the instruction
        /// can be lowered to a more efficient machine code instruction if
        /// possible.
        pub inline fn bitReset(self: *Self, bit: Bit, comptime order: AtomicOrder) u1 {
            const mask = @as(T, 1) << bit;
            const value = self.fetchAnd(~mask, order);
            return @intFromBool(value & mask != 0);
        }

        /// Atomically flips the bit at index `bit` (via `fetchXor`) and
        /// returns the bit's previous state.
        /// Marked `inline` so that if `bit` is comptime-known, the instruction
        /// can be lowered to a more efficient machine code instruction if
        /// possible.
        pub inline fn bitToggle(self: *Self, bit: Bit, comptime order: AtomicOrder) u1 {
            const mask = @as(T, 1) << bit;
            const value = self.fetchXor(mask, order);
            return @intFromBool(value & mask != 0);
        }
    };
}