Commit 2593156068
Changed files (58)
doc/docgen.zig
@@ -276,7 +276,7 @@ fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, arg
}
}
{
- const caret_count = std.math.min(token.end, loc.line_end) - token.start;
+ const caret_count = @min(token.end, loc.line_end) - token.start;
var i: usize = 0;
while (i < caret_count) : (i += 1) {
print("~", .{});
lib/compiler_rt/divc3.zig
@@ -3,7 +3,6 @@ const isNan = std.math.isNan;
const isInf = std.math.isInf;
const scalbn = std.math.scalbn;
const ilogb = std.math.ilogb;
-const max = std.math.max;
const fabs = std.math.fabs;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
@@ -17,7 +16,7 @@ pub inline fn divc3(comptime T: type, a: T, b: T, c_in: T, d_in: T) Complex(T) {
var d = d_in;
// logbw used to prevent under/over-flow
- const logbw = ilogb(max(fabs(c), fabs(d)));
+ const logbw = ilogb(@max(fabs(c), fabs(d)));
const logbw_finite = logbw != maxInt(i32) and logbw != minInt(i32);
const ilogbw = if (logbw_finite) b: {
c = scalbn(c, -logbw);
lib/compiler_rt/emutls.zig
@@ -49,7 +49,7 @@ const simple_allocator = struct {
/// Allocate a memory chunk.
pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
- const minimal_alignment = std.math.max(@alignOf(usize), alignment);
+ const minimal_alignment = @max(@alignOf(usize), alignment);
var aligned_ptr: ?*anyopaque = undefined;
if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
@@ -170,7 +170,7 @@ const current_thread_storage = struct {
// make it to contains at least 16 objects (to avoid too much
// reallocation at startup).
- const size = std.math.max(16, index);
+ const size = @max(16, index);
// create a new array and store it.
var array: *ObjectArray = ObjectArray.init(size);
lib/std/Build/Cache/DepTokenizer.zig
@@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void {
try printDecValue(out, offset, 8);
try out.writeAll(":");
try out.writeAll(" ");
- var end1 = std.math.min(offset + n, offset + 8);
+ var end1 = @min(offset + n, offset + 8);
for (bytes[offset..end1]) |b| {
try out.writeAll(" ");
try printHexValue(out, b, 2);
lib/std/compress/lzma/decode.zig
@@ -59,7 +59,7 @@ pub const Params = struct {
const pb = @intCast(u3, props);
const dict_size_provided = try reader.readIntLittle(u32);
- const dict_size = math.max(0x1000, dict_size_provided);
+ const dict_size = @max(0x1000, dict_size_provided);
const unpacked_size = switch (options.unpacked_size) {
.read_from_header => blk: {
lib/std/crypto/blake3.zig
@@ -20,7 +20,7 @@ const ChunkIterator = struct {
}
fn next(self: *ChunkIterator) ?[]u8 {
- const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)];
+ const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)];
self.slice = self.slice[next_chunk.len..];
return if (next_chunk.len > 0) next_chunk else null;
}
@@ -283,7 +283,7 @@ const ChunkState = struct {
fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 {
const want = BLOCK_LEN - self.block_len;
- const take = math.min(want, input.len);
+ const take = @min(want, input.len);
@memcpy(self.block[self.block_len..][0..take], input[0..take]);
self.block_len += @truncate(u8, take);
return input[take..];
@@ -450,7 +450,7 @@ pub const Blake3 = struct {
// Compress input bytes into the current chunk state.
const want = CHUNK_LEN - self.chunk_state.len();
- const take = math.min(want, input.len);
+ const take = @min(want, input.len);
self.chunk_state.update(input[0..take]);
input = input[take..];
}
@@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
// Write repeating input pattern to hasher
var input_counter = input_len;
while (input_counter > 0) {
- const update_len = math.min(input_counter, input_pattern.len);
+ const update_len = @min(input_counter, input_pattern.len);
hasher.update(input_pattern[0..update_len]);
input_counter -= update_len;
}
lib/std/crypto/ff.zig
@@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var out = self.zero;
var i = x.limbs_count() - 1;
if (self.limbs_count() >= 2) {
- const start = math.min(i, self.limbs_count() - 2);
+ const start = @min(i, self.limbs_count() - 2);
var j = start;
while (true) : (j -= 1) {
out.v.limbs.set(j, x.limbs.get(i));
lib/std/crypto/ghash_polyval.zig
@@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
var mb = m;
if (st.leftover > 0) {
- const want = math.min(block_length - st.leftover, mb.len);
+ const want = @min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
lib/std/crypto/keccak_p.zig
@@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
pub fn absorb(self: *Self, bytes_: []const u8) void {
var bytes = bytes_;
if (self.offset > 0) {
- const left = math.min(rate - self.offset, bytes.len);
+ const left = @min(rate - self.offset, bytes.len);
@memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
self.offset += left;
if (self.offset == rate) {
@@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
pub fn squeeze(self: *Self, out: []u8) void {
var i: usize = 0;
while (i < out.len) : (i += rate) {
- const left = math.min(rate, out.len - i);
+ const left = @min(rate, out.len - i);
self.st.extractBytes(out[i..][0..left]);
self.st.permuteR(rounds);
}
lib/std/crypto/poly1305.zig
@@ -112,7 +112,7 @@ pub const Poly1305 = struct {
// handle leftover
if (st.leftover > 0) {
- const want = std.math.min(block_length - st.leftover, mb.len);
+ const want = @min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
lib/std/crypto/salsa20.zig
@@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct {
debug.assert(c.len == m.len);
const extended = extend(rounds, k, npub);
var block0 = [_]u8{0} ** 64;
- const mlen0 = math.min(32, c.len);
+ const mlen0 = @min(32, c.len);
@memcpy(block0[32..][0..mlen0], c[0..mlen0]);
Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
var mac = Poly1305.init(block0[0..32]);
lib/std/crypto/scrypt.zig
@@ -143,7 +143,7 @@ pub const Params = struct {
/// Create parameters from ops and mem limits, where mem_limit given in bytes
pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self {
- const ops = math.max(32768, ops_limit);
+ const ops = @max(32768, ops_limit);
const r: u30 = 8;
if (ops < mem_limit / 32) {
const max_n = ops / (r * 4);
@@ -151,7 +151,7 @@ pub const Params = struct {
} else {
const max_n = mem_limit / (@intCast(usize, r) * 128);
const ln = @intCast(u6, math.log2(max_n));
- const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
+ const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln };
}
}
lib/std/crypto/sha3.zig
@@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds:
if (self.offset > 0) {
const left = self.buf.len - self.offset;
if (left > 0) {
- const n = math.min(left, out.len);
+ const n = @min(left, out.len);
@memcpy(out[0..n], self.buf[self.offset..][0..n]);
out = out[n..];
self.offset += n;
lib/std/crypto/siphash.zig
@@ -433,7 +433,7 @@ test "iterative non-divisible update" {
var siphash = Siphash.init(key);
var i: usize = 0;
while (i < end) : (i += 7) {
- siphash.update(buf[i..std.math.min(i + 7, end)]);
+ siphash.update(buf[i..@min(i + 7, end)]);
}
const iterative_hash = siphash.finalInt();
lib/std/event/loop.zig
@@ -179,7 +179,7 @@ pub const Loop = struct {
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
- const resume_node_count = std.math.max(extra_thread_count, 1);
+ const resume_node_count = @max(extra_thread_count, 1);
self.eventfd_resume_nodes = try self.arena.allocator().alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
lib/std/hash/wyhash.zig
@@ -252,7 +252,7 @@ test "iterative non-divisible update" {
var wy = Wyhash.init(seed);
var i: usize = 0;
while (i < end) : (i += 33) {
- wy.update(buf[i..std.math.min(i + 33, end)]);
+ wy.update(buf[i..@min(i + 33, end)]);
}
const iterative_hash = wy.final();
lib/std/heap/arena_allocator.zig
@@ -110,7 +110,7 @@ pub const ArenaAllocator = struct {
// value.
const requested_capacity = switch (mode) {
.retain_capacity => self.queryCapacity(),
- .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()),
+ .retain_with_limit => |limit| @min(limit, self.queryCapacity()),
.free_all => 0,
};
if (requested_capacity == 0) {
lib/std/heap/memory_pool.zig
@@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
/// Size of the memory pool items. This is not necessarily the same
/// as `@sizeOf(Item)` as the pool also uses the items for internal means.
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item));
+ pub const item_size = @max(@sizeOf(Node), @sizeOf(Item));
/// Alignment of the memory pool items. This is not necessarily the same
/// as `@alignOf(Item)` as the pool also uses the items for internal means.
- pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0);
+ pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0);
const Node = struct {
next: ?*@This(),
lib/std/http/protocol.zig
@@ -82,7 +82,7 @@ pub const HeadersParser = struct {
/// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the
/// first byte of content is located at `bytes[result]`.
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
- const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8);
+ const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8);
const len = @intCast(u32, bytes.len);
var index: u32 = 0;
lib/std/io/fixed_buffer_stream.zig
@@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
- self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len;
+ self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len;
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
@@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
} else {
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
- self.pos = std.math.min(self.buffer.len, new_pos);
+ self.pos = @min(self.buffer.len, new_pos);
}
}
lib/std/io/limited_reader.zig
@@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type {
const Self = @This();
pub fn read(self: *Self, dest: []u8) Error!usize {
- const max_read = std.math.min(self.bytes_left, dest.len);
+ const max_read = @min(self.bytes_left, dest.len);
const n = try self.inner_reader.read(dest[0..max_read]);
self.bytes_left -= n;
return n;
lib/std/io/reader.zig
@@ -325,7 +325,7 @@ pub fn Reader(
var remaining = num_bytes;
while (remaining > 0) {
- const amt = std.math.min(remaining, options.buf_size);
+ const amt = @min(remaining, options.buf_size);
try self.readNoEof(buf[0..amt]);
remaining -= amt;
}
lib/std/io/writer.zig
@@ -39,7 +39,7 @@ pub fn Writer(
var remaining: usize = n;
while (remaining > 0) {
- const to_write = std.math.min(remaining, bytes.len);
+ const to_write = @min(remaining, bytes.len);
try self.writeAll(bytes[0..to_write]);
remaining -= to_write;
}
lib/std/math/big/int.zig
@@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize {
}
pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize {
- return aliases * math.max(a_len, b_len);
+ return aliases * @max(a_len, b_len);
}
pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize {
const req_limbs = calcTwosCompLimbCount(bit_count);
- return aliases * math.min(req_limbs, math.max(a_len, b_len));
+ return aliases * @min(req_limbs, @max(a_len, b_len));
}
pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
@@ -396,7 +396,7 @@ pub const Mutable = struct {
/// scalar is a primitive integer type.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`.
+ /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`.
pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void {
// Normally we could just determine the number of limbs needed with calcLimbLen,
// but that is not comptime-known when scalar is not a comptime_int. Instead, we
@@ -414,11 +414,11 @@ pub const Mutable = struct {
return add(r, a, operand);
}
- /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b,
+ /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b,
/// and returns whether any overflow occurred.
/// r, a and b may be aliases.
///
- /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`.
+ /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`.
fn addCarry(r: *Mutable, a: Const, b: Const) bool {
if (a.eqZero()) {
r.copy(b);
@@ -452,12 +452,12 @@ pub const Mutable = struct {
/// r, a and b may be aliases.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// r is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn add(r: *Mutable, a: Const, b: Const) void {
if (r.addCarry(a, b)) {
// Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len,
// so we need to set the length here.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
// `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here.
// Note, the fact that it normalized means that the intermediary limbs are zero here.
r.len = msl + 1;
@@ -477,12 +477,12 @@ pub const Mutable = struct {
// if an overflow occurred.
const x = Const{
.positive = a.positive,
- .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)],
+ .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)],
};
const y = Const{
.positive = b.positive,
- .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)],
+ .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)],
};
var carry_truncated = false;
@@ -492,7 +492,7 @@ pub const Mutable = struct {
// truncate anyway.
// - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled.
// Note: after this we still might need to wrap.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
if (msl < req_limbs) {
r.limbs[msl] = 1;
r.len = req_limbs;
@@ -522,12 +522,12 @@ pub const Mutable = struct {
// if an overflow occurred.
const x = Const{
.positive = a.positive,
- .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)],
+ .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)],
};
const y = Const{
.positive = b.positive,
- .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)],
+ .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)],
};
if (r.addCarry(x, y)) {
@@ -535,7 +535,7 @@ pub const Mutable = struct {
// - We overflowed req_limbs, in which case we need to saturate.
// - a and b had less elements than req_limbs, and those were overflowed.
// Note: In this case, might _also_ need to saturate.
- const msl = math.max(a.limbs.len, b.limbs.len);
+ const msl = @max(a.limbs.len, b.limbs.len);
if (msl < req_limbs) {
r.limbs[msl] = 1;
r.len = req_limbs;
@@ -550,11 +550,11 @@ pub const Mutable = struct {
r.saturate(r.toConst(), signedness, bit_count);
}
- /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b,
+ /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b,
/// and returns whether any overflow occurred.
/// r, a and b may be aliases.
///
- /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`.
+ /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`.
fn subCarry(r: *Mutable, a: Const, b: Const) bool {
if (a.eqZero()) {
r.copy(b);
@@ -607,7 +607,7 @@ pub const Mutable = struct {
/// r, a and b may be aliases.
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
- /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive.
+ /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive.
pub fn sub(r: *Mutable, a: Const, b: Const) void {
r.add(a, b.negate());
}
@@ -714,7 +714,7 @@ pub const Mutable = struct {
const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
const start = buf_index;
- const a_len = math.min(req_limbs, a.limbs.len);
+ const a_len = @min(req_limbs, a.limbs.len);
@memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]);
buf_index += a_len;
break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst();
@@ -722,7 +722,7 @@ pub const Mutable = struct {
const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: {
const start = buf_index;
- const b_len = math.min(req_limbs, b.limbs.len);
+ const b_len = @min(req_limbs, b.limbs.len);
@memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]);
buf_index += b_len;
break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst();
@@ -755,13 +755,13 @@ pub const Mutable = struct {
const req_limbs = calcTwosCompLimbCount(bit_count);
// We can ignore the upper bits here, those results will be discarded anyway.
- const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)];
- const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)];
+ const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)];
+ const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)];
@memset(rma.limbs[0..req_limbs], 0);
llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs);
- rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len));
+ rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len));
rma.positive = (a.positive == b.positive);
rma.truncate(rma.toConst(), signedness, bit_count);
}
@@ -1211,7 +1211,7 @@ pub const Mutable = struct {
///
/// a and b are zero-extended to the longer of a or b.
///
- /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
+ /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`.
pub fn bitOr(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, llsignedor does not support zero.
if (a.eqZero()) {
@@ -1235,8 +1235,8 @@ pub const Mutable = struct {
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result.
- /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`.
- /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`.
+ /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitAnd(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, llsignedand does not support zero.
if (a.eqZero()) {
@@ -1260,8 +1260,8 @@ pub const Mutable = struct {
/// r may alias with a or b.
///
/// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the
- /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
- /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
+ /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
+ /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitXor(r: *Mutable, a: Const, b: Const) void {
// Trivial cases, because llsignedxor does not support negative zero.
if (a.eqZero()) {
@@ -1284,7 +1284,7 @@ pub const Mutable = struct {
/// rma may alias x or y.
/// x and y may alias each other.
/// Asserts that `rma` has enough limbs to store the result. Upper bound is
- /// `math.min(x.limbs.len, y.limbs.len)`.
+ /// `@min(x.limbs.len, y.limbs.len)`.
///
/// `limbs_buffer` is used for temporary storage during the operation. When this function returns,
/// it will have the same length as it had when the function was called.
@@ -1546,7 +1546,7 @@ pub const Mutable = struct {
if (yi != 0) break i;
} else unreachable;
- const xy_trailing = math.min(x_trailing, y_trailing);
+ const xy_trailing = @min(x_trailing, y_trailing);
if (y.len - xy_trailing == 1) {
const divisor = y.limbs[y.len - 1];
@@ -2589,7 +2589,7 @@ pub const Managed = struct {
.allocator = allocator,
.metadata = 1,
.limbs = block: {
- const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity));
+ const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity));
limbs[0] = 0;
break :block limbs;
},
@@ -2918,7 +2918,7 @@ pub const Managed = struct {
///
/// Returns an error if memory could not be allocated.
pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void {
- try r.ensureCapacity(math.max(a.len(), b.len()) + 1);
+ try r.ensureCapacity(@max(a.len(), b.len()) + 1);
var m = r.toMutable();
m.sub(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -3025,11 +3025,11 @@ pub const Managed = struct {
}
pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void {
- try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1);
+ try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1);
}
pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void {
- try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1);
+ try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1);
}
pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void {
@@ -3123,7 +3123,7 @@ pub const Managed = struct {
///
/// a and b are zero-extended to the longer of a or b.
pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void {
- try r.ensureCapacity(math.max(a.len(), b.len()));
+ try r.ensureCapacity(@max(a.len(), b.len()));
var m = r.toMutable();
m.bitOr(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -3132,9 +3132,9 @@ pub const Managed = struct {
/// r = a & b
pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void {
const cap = if (a.isPositive() or b.isPositive())
- math.min(a.len(), b.len())
+ @min(a.len(), b.len())
else
- math.max(a.len(), b.len()) + 1;
+ @max(a.len(), b.len()) + 1;
try r.ensureCapacity(cap);
var m = r.toMutable();
m.bitAnd(a.toConst(), b.toConst());
@@ -3143,7 +3143,7 @@ pub const Managed = struct {
/// r = a ^ b
pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void {
- var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
+ var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
try r.ensureCapacity(cap);
var m = r.toMutable();
@@ -3156,7 +3156,7 @@ pub const Managed = struct {
///
/// rma's allocator is used for temporary storage to boost multiplication performance.
pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void {
- try rma.ensureCapacity(math.min(x.len(), y.len()));
+ try rma.ensureCapacity(@min(x.len(), y.len()));
var m = rma.toMutable();
var limbs_buffer = std.ArrayList(Limb).init(rma.allocator);
defer limbs_buffer.deinit();
@@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba(
// For a1 and b1 we only need `limbs_after_split` limbs.
const a1 = blk: {
var a1 = a[split..];
- a1.len = math.min(llnormalize(a1), limbs_after_split);
+ a1.len = @min(llnormalize(a1), limbs_after_split);
break :blk a1;
};
const b1 = blk: {
var b1 = b[split..];
- b1.len = math.min(llnormalize(b1), limbs_after_split);
+ b1.len = @min(llnormalize(b1), limbs_after_split);
break :blk b1;
};
@@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba(
// Compute p2.
// Note, we don't need to compute all of p2, just enough limbs to satisfy r.
- const p2_limbs = math.min(limbs_after_split, a1.len + b1.len);
+ const p2_limbs = @min(limbs_after_split, a1.len + b1.len);
@memset(tmp[0..p2_limbs], 0);
- llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]);
+ llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]);
const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])];
// Add p2 * B to the result.
@@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba(
// Add p2 * B^2 to the result if required.
if (limbs_after_split2 > 0) {
- llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]);
+ llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]);
}
// Compute p0.
@@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba(
llaccum(op, r, p0);
// Add p0 * B to the result. In this case, we may not need all of it.
- llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]);
+ llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]);
// Finally, compute and add p1.
// From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the
// following computation will be added * B.
- const a0x = a0[0..std.math.min(a0.len, limbs_after_split)];
- const b0x = b0[0..std.math.min(b0.len, limbs_after_split)];
+ const a0x = a0[0..@min(a0.len, limbs_after_split)];
+ const b0x = b0[0..@min(b0.len, limbs_after_split)];
const j0_sign = llcmp(a0x, a1);
const j1_sign = llcmp(b1, b0x);
@@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool {
return false;
}
- const split = std.math.min(y.len, acc.len);
+ const split = @min(y.len, acc.len);
var a_lo = acc[0..split];
var a_hi = acc[split..];
@@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_
// r may alias.
// a and b must not be -0.
// Returns `true` when the result is positive.
-// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required.
-// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs.
+// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs are required.
+// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs.
fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(a.len != 0 and b.len != 0);
lib/std/math/ldexp.zig
@@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0
// Result underflowed, we need to shift and round
- const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1));
+ const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1));
const exact_tie: bool = @ctz(repr) == shift - 1;
var result = repr & mantissa_mask;
lib/std/os/linux/io_uring.zig
@@ -277,7 +277,7 @@ pub const IO_Uring = struct {
fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 {
_ = wait_nr;
const ready = self.cq_ready();
- const count = std.math.min(cqes.len, ready);
+ const count = @min(cqes.len, ready);
var head = self.cq.head.*;
var tail = head +% count;
// TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop.
@@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct {
pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
- const size = std.math.max(
+ const size = @max(
p.sq_off.array + p.sq_entries * @sizeOf(u32),
p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe),
);
lib/std/os/linux.zig
@@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
.getdents,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
- std.math.min(len, maxInt(c_int)),
+ @min(len, maxInt(c_int)),
);
}
@@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize {
.getdents64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
- std.math.min(len, maxInt(c_int)),
+ @min(len, maxInt(c_int)),
);
}
lib/std/os/windows.zig
@@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void {
const max_read_size: ULONG = maxInt(ULONG);
while (total_read < output.len) {
- const to_read: ULONG = math.min(buff.len, max_read_size);
+ const to_read: ULONG = @min(buff.len, max_read_size);
if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) {
return unexpectedError(kernel32.GetLastError());
@@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
return @as(usize, bytes_transferred);
} else {
while (true) {
- const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len));
+ const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len);
var amt_read: DWORD = undefined;
var overlapped_data: OVERLAPPED = undefined;
const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
lib/std/sort/block.zig
@@ -590,7 +590,7 @@ pub fn block(
// whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
var lastA = firstA;
var lastB = Range.init(0, 0);
- var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
+ var blockB = Range.init(B.start, B.start + @min(block_size, B.length()));
blockA.start += firstA.length();
indexA = buffer1.start;
@@ -849,7 +849,7 @@ fn findFirstForward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (lessThan(context, items[index - 1], value)) : (index += skip) {
@@ -871,7 +871,7 @@ fn findFirstBackward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
@@ -893,7 +893,7 @@ fn findLastForward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (!lessThan(context, value, items[index - 1])) : (index += skip) {
@@ -915,7 +915,7 @@ fn findLastBackward(
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length() / unique, @as(usize, 1));
+ const skip = @max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
lib/std/zig/system/NativeTargetInfo.zig
@@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version {
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
- const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len);
+ const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum);
@@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile(
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
- const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len);
+ const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
@@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile(
const rpoff_file = ds.offset + rpoff_usize;
const rp_max_size = ds.size - rpoff_usize;
- const strtab_len = std.math.min(rp_max_size, strtab_buf.len);
+ const strtab_len = @min(rp_max_size, strtab_buf.len);
const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len);
const strtab = strtab_buf[0..strtab_read_len];
lib/std/zig/render.zig
@@ -1960,7 +1960,7 @@ fn renderArrayInit(
if (!this_contains_newline) {
const column = column_counter % row_size;
- column_widths[column] = std.math.max(column_widths[column], width);
+ column_widths[column] = @max(column_widths[column], width);
const expr_last_token = tree.lastToken(expr) + 1;
const next_expr = section_exprs[i + 1];
@@ -1980,7 +1980,7 @@ fn renderArrayInit(
if (!contains_newline) {
const column = column_counter % row_size;
- column_widths[column] = std.math.max(column_widths[column], width);
+ column_widths[column] = @max(column_widths[column], width);
}
}
}
lib/std/array_hash_map.zig
@@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged(
/// no longer guaranteed that no allocations will be performed.
pub fn capacity(self: Self) usize {
const entry_cap = self.entries.capacity;
- const header = self.index_header orelse return math.min(linear_scan_max, entry_cap);
+ const header = self.index_header orelse return @min(linear_scan_max, entry_cap);
const indexes_cap = header.capacity();
- return math.min(entry_cap, indexes_cap);
+ return @min(entry_cap, indexes_cap);
}
/// Clobbers any existing data. To detect if a put would clobber
@@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type {
/// length * the size of an Index(u32). The index is 8 bytes (3 bits repr)
/// and max_usize + 1 is not representable, so we need to subtract out 4 bits.
const max_representable_index_len = @bitSizeOf(usize) - 4;
-const max_bit_index = math.min(32, max_representable_index_len);
+const max_bit_index = @min(32, max_representable_index_len);
const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
lib/std/ascii.zig
@@ -422,7 +422,7 @@ test "indexOfIgnoreCase" {
/// Returns the lexicographical order of two slices. O(n).
pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order {
- const n = std.math.min(lhs.len, rhs.len);
+ const n = @min(lhs.len, rhs.len);
var i: usize = 0;
while (i < n) : (i += 1) {
switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) {
lib/std/debug.zig
@@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
stack_trace.index = 0;
return;
};
- const end_index = math.min(first_index + addrs.len, n);
+ const end_index = @min(first_index + addrs.len, n);
const slice = addr_buf[first_index..end_index];
// We use a for loop here because slice and addrs may alias.
for (slice, 0..) |addr, i| {
@@ -380,7 +380,7 @@ pub fn writeStackTrace(
_ = allocator;
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
- var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
+ var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len);
while (frames_left != 0) : ({
frames_left -= 1;
lib/std/dynamic_library.zig
@@ -8,7 +8,6 @@ const elf = std.elf;
const windows = std.os.windows;
const system = std.os.system;
const maxInt = std.math.maxInt;
-const max = std.math.max;
pub const DynLib = switch (builtin.os.tag) {
.linux => if (builtin.link_libc) DlDynlib else ElfDynLib,
@@ -152,7 +151,7 @@ pub const ElfDynLib = struct {
}) {
const ph = @intToPtr(*elf.Phdr, ph_addr);
switch (ph.p_type) {
- elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
+ elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset),
else => {},
}
lib/std/fifo.zig
@@ -150,7 +150,7 @@ pub fn LinearFifo(
start -= self.buf.len;
return self.buf[start .. start + (self.count - offset)];
} else {
- const end = math.min(self.head + self.count, self.buf.len);
+ const end = @min(self.head + self.count, self.buf.len);
return self.buf[start..end];
}
}
lib/std/fmt.zig
@@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type {
const log2 = math.log2(value);
const magnitude = switch (base) {
- 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1),
- 1024 => math.min(log2 / 10, mags_iec.len - 1),
+ 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1),
+ 1024 => @min(log2 / 10, mags_iec.len - 1),
else => unreachable,
};
const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude));
@@ -1103,7 +1103,7 @@ pub fn formatFloatScientific(
var printed: usize = 0;
if (float_decimal.digits.len > 1) {
- const num_digits = math.min(float_decimal.digits.len, precision + 1);
+ const num_digits = @min(float_decimal.digits.len, precision + 1);
try writer.writeAll(float_decimal.digits[1..num_digits]);
printed += num_digits - 1;
}
@@ -1116,7 +1116,7 @@ pub fn formatFloatScientific(
try writer.writeAll(float_decimal.digits[0..1]);
try writer.writeAll(".");
if (float_decimal.digits.len > 1) {
- const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
+ const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
try writer.writeAll(float_decimal.digits[1..num_digits]);
} else {
@@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal(
var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
- var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+ var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
@@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal(
// Zero-fill until we reach significant digits or run out of precision.
if (float_decimal.exp <= 0) {
const zero_digit_count = @intCast(usize, -float_decimal.exp);
- const zeros_to_print = math.min(zero_digit_count, precision);
+ const zeros_to_print = @min(zero_digit_count, precision);
var i: usize = 0;
while (i < zeros_to_print) : (i += 1) {
@@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal(
var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
- var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+ var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
@@ -1410,12 +1410,12 @@ pub fn formatInt(
// The type must have the same size as `base` or be wider in order for the
// division to work
- const min_int_bits = comptime math.max(value_info.bits, 8);
+ const min_int_bits = comptime @max(value_info.bits, 8);
const MinInt = std.meta.Int(.unsigned, min_int_bits);
const abs_value = math.absCast(int_value);
// The worst case in terms of space needed is base 2, plus 1 for the sign
- var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined;
+ var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined;
var a: MinInt = abs_value;
var index: usize = buf.len;
lib/std/hash_map.zig
@@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged(
fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void {
@setCold(true);
- const new_cap = std.math.max(new_capacity, minimal_capacity);
+ const new_cap = @max(new_capacity, minimal_capacity);
assert(new_cap > self.capacity());
assert(std.math.isPowerOfTwo(new_cap));
@@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged(
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
- const max_align = comptime math.max3(header_align, key_align, val_align);
+ const max_align = comptime @max(header_align, key_align, val_align);
const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1);
@@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged(
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
- const max_align = comptime math.max3(header_align, key_align, val_align);
+ const max_align = comptime @max(header_align, key_align, val_align);
const cap = self.capacity();
const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
lib/std/math.zig
@@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool {
if (isNan(x) or isNan(y))
return false;
- return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance;
+ return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance;
}
test "approxEqAbs and approxEqRel" {
@@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type {
return @TypeOf(@as(A, 0) + @as(B, 0));
}
-/// Returns the smaller number. When one parameter's type's full range
-/// fits in the other, the return type is the smaller type.
-pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) {
- const Result = Min(@TypeOf(x), @TypeOf(y));
- if (x < y) {
- // TODO Zig should allow this as an implicit cast because x is
- // immutable and in this scope it is known to fit in the
- // return type.
- switch (@typeInfo(Result)) {
- .Int => return @intCast(Result, x),
- else => return x,
- }
- } else {
- // TODO Zig should allow this as an implicit cast because y is
- // immutable and in this scope it is known to fit in the
- // return type.
- switch (@typeInfo(Result)) {
- .Int => return @intCast(Result, y),
- else => return y,
- }
- }
-}
-
-test "min" {
- try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1);
- {
- var a: u16 = 999;
- var b: u32 = 10;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == u16);
- try testing.expect(result == 10);
- }
- {
- var a: f64 = 10.34;
- var b: f32 = 999.12;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == f64);
- try testing.expect(result == 10.34);
- }
- {
- var a: i8 = -127;
- var b: i16 = -200;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == i16);
- try testing.expect(result == -200);
- }
- {
- const a = 10.34;
- var b: f32 = 999.12;
- var result = min(a, b);
- try testing.expect(@TypeOf(result) == f32);
- try testing.expect(result == 10.34);
- }
-}
-
-/// Finds the minimum of three numbers.
-pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
- return min(x, min(y, z));
-}
-
-test "min3" {
- try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0);
- try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0);
- try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0);
- try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0);
- try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0);
- try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0);
-}
-
-/// Returns the maximum of two numbers. Return type is the one with the
-/// larger range.
-pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
- return if (x > y) x else y;
-}
-
-test "max" {
- try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
- try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2);
-}
-
-/// Finds the maximum of three numbers.
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
- return max(x, max(y, z));
-}
-
-test "max3" {
- try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2);
- try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2);
- try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2);
- try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2);
- try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2);
- try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2);
-}
+pub const min = @compileError("deprecated; use @min instead");
+pub const max = @compileError("deprecated; use @max instead");
+pub const min3 = @compileError("deprecated; use @min instead");
+pub const max3 = @compileError("deprecated; use @max instead");
/// Limit val to the inclusive range [lower, upper].
pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
assert(lower <= upper);
- return max(lower, min(val, upper));
+ return @max(lower, @min(val, upper));
}
test "clamp" {
// Within range
@@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t
return u0;
}
const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned;
- const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement
+ const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement
const base = log2(largest_positive_integer);
const upper = (1 << base) - 1;
var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1;
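A minimal migration sketch for call sites affected by the deprecations above. The function names and test below are illustrative only, not taken from the changed files; it assumes a compiler from this commit's era, where `@min`/`@max` accept two or more arguments, as the `max3` replacements elsewhere in this diff do.

const std = @import("std");

// Before this commit (now a compile error via the deprecation stubs):
//   const lo = std.math.min(a, b);
//   const hi = std.math.max3(x, y, z);
// After: use the builtins directly.

// Hypothetical helper: clamp a length to a limit with the @min builtin.
fn clampLen(len: usize, limit: usize) usize {
    return @min(len, limit);
}

// Hypothetical helper: three-argument @max replaces std.math.max3.
fn largest3(x: i32, y: i32, z: i32) i32 {
    return @max(x, y, z);
}

test "builtin min/max migration" {
    try std.testing.expect(clampLen(100, 64) == 64);
    try std.testing.expect(largest3(1, 7, 3) == 7);
}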
lib/std/mem.zig
@@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void {
/// Compares two slices of numbers lexicographically. O(n).
pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order {
- const n = math.min(lhs.len, rhs.len);
+ const n = @min(lhs.len, rhs.len);
var i: usize = 0;
while (i < n) : (i += 1) {
switch (math.order(lhs[i], rhs[i])) {
@@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
/// Compares two slices and returns the index of the first inequality.
/// Returns null if the slices are equal.
pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize {
- const shortest = math.min(a.len, b.len);
+ const shortest = @min(a.len, b.len);
if (a.ptr == b.ptr)
return if (a.len == b.len) null else shortest;
var index: usize = 0;
@@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T {
assert(slice.len > 0);
var best = slice[0];
for (slice[1..]) |item| {
- best = math.min(best, item);
+ best = @min(best, item);
}
return best;
}
@@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T {
assert(slice.len > 0);
var best = slice[0];
for (slice[1..]) |item| {
- best = math.max(best, item);
+ best = @max(best, item);
}
return best;
}
@@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } {
var minVal = slice[0];
var maxVal = slice[0];
for (slice[1..]) |item| {
- minVal = math.min(minVal, item);
- maxVal = math.max(maxVal, item);
+ minVal = @min(minVal, item);
+ maxVal = @max(maxVal, item);
}
return .{ .min = minVal, .max = maxVal };
}
lib/std/net.zig
@@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
error.InvalidCharacter => continue,
};
if (mem.eql(u8, name, "ndots")) {
- rc.ndots = std.math.min(value, 15);
+ rc.ndots = @min(value, 15);
} else if (mem.eql(u8, name, "attempts")) {
- rc.attempts = std.math.min(value, 10);
+ rc.attempts = @min(value, 10);
} else if (mem.eql(u8, name, "timeout")) {
- rc.timeout = std.math.min(value, 60);
+ rc.timeout = @min(value, 60);
}
}
} else if (mem.eql(u8, token, "nameserver")) {
@@ -1615,7 +1615,7 @@ fn resMSendRc(
}
// Wait for a response, or until time to retry
- const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
+ const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
const nevents = os.poll(&pfd, clamped_timeout) catch 0;
if (nevents == 0) continue;
lib/std/pdb.zig
@@ -1049,7 +1049,7 @@ const MsfStream = struct {
var size: usize = 0;
var rem_buffer = buffer;
while (size < buffer.len) {
- const size_to_read = math.min(self.block_size - offset, rem_buffer.len);
+ const size_to_read = @min(self.block_size - offset, rem_buffer.len);
size += try in.read(rem_buffer[0..size_to_read]);
rem_buffer = buffer[size..];
offset += size_to_read;
lib/std/rand.zig
@@ -410,7 +410,7 @@ pub const Random = struct {
r.uintLessThan(T, sum)
else if (comptime std.meta.trait.isFloat(T))
// take care that imprecision doesn't lead to a value slightly greater than sum
- std.math.min(r.float(T) * sum, sum - std.math.floatEps(T))
+ @min(r.float(T) * sum, sum - std.math.floatEps(T))
else
@compileError("weightedIndex does not support proportions of type " ++ @typeName(T));
lib/std/Thread.zig
@@ -541,7 +541,7 @@ const WindowsThreadImpl = struct {
// Going lower makes it default to that specified in the executable (~1mb).
// Its also fine if the limit here is incorrect as stack size is only a hint.
var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32);
- stack_size = std.math.max(64 * 1024, stack_size);
+ stack_size = @max(64 * 1024, stack_size);
instance.thread.thread_handle = windows.kernel32.CreateThread(
null,
@@ -690,7 +690,7 @@ const PosixThreadImpl = struct {
defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS);
// Use the same set of parameters used by the libc-less impl.
- const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN);
+ const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN);
assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS);
assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS);
@@ -930,7 +930,7 @@ const LinuxThreadImpl = struct {
var bytes: usize = page_size;
guard_offset = bytes;
- bytes += std.math.max(page_size, config.stack_size);
+ bytes += @max(page_size, config.stack_size);
bytes = std.mem.alignForward(bytes, page_size);
stack_offset = bytes;
lib/std/Uri.zig
@@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri {
if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
if (index >= end_of_host) { // if not part of the V6 address field
- end_of_host = std.math.min(end_of_host, index);
+ end_of_host = @min(end_of_host, index);
uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
}
}
} else if (std.mem.lastIndexOf(u8, authority, ":")) |index| {
if (index >= start_of_host) { // if not part of the userinfo field
- end_of_host = std.math.min(end_of_host, index);
+ end_of_host = @min(end_of_host, index);
uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort;
}
}
src/arch/x86_64/CodeGen.zig
@@ -2907,7 +2907,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const dst_info = dst_ty.intInfo(mod);
const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
else => unreachable,
- .mul, .mulwrap => math.max3(
+ .mul, .mulwrap => @max(
self.activeIntBits(bin_op.lhs),
self.activeIntBits(bin_op.rhs),
dst_info.bits / 2,
@@ -3349,7 +3349,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs_active_bits = self.activeIntBits(bin_op.lhs);
const rhs_active_bits = self.activeIntBits(bin_op.rhs);
- const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
+ const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
const src_ty = try mod.intType(dst_info.signedness, src_bits);
const lhs = try self.resolveInst(bin_op.lhs);
src/link/MachO/CodeSignature.zig
@@ -99,7 +99,7 @@ const CodeDirectory = struct {
fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void {
assert(index > 0);
- self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index);
+ self.inner.nSpecialSlots = @max(self.inner.nSpecialSlots, index);
self.special_slots[index - 1] = hash;
}
@@ -426,11 +426,11 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
var n_special_slots: u32 = 0;
if (self.requirements) |req| {
ssize += @sizeOf(macho.BlobIndex) + req.size();
- n_special_slots = std.math.max(n_special_slots, req.slotType());
+ n_special_slots = @max(n_special_slots, req.slotType());
}
if (self.entitlements) |ent| {
ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size;
- n_special_slots = std.math.max(n_special_slots, ent.slotType());
+ n_special_slots = @max(n_special_slots, ent.slotType());
}
if (self.signature) |sig| {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
src/link/MachO/Object.zig
@@ -530,7 +530,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
sect.addr + sect.size - addr;
const atom_align = if (addr > 0)
- math.min(@ctz(addr), sect.@"align")
+ @min(@ctz(addr), sect.@"align")
else
sect.@"align";
src/link/Wasm/Object.zig
@@ -979,7 +979,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned
- segment.alignment = std.math.max(segment.alignment, atom.alignment);
+ segment.alignment = @max(segment.alignment, atom.alignment);
}
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
src/link/Elf.zig
@@ -2326,7 +2326,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
self.debug_aranges_section_dirty = true;
}
}
- shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
+ shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
src/link/Wasm.zig
@@ -2027,7 +2027,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
};
const segment: *Segment = &wasm.segments.items[final_index];
- segment.alignment = std.math.max(segment.alignment, atom.alignment);
+ segment.alignment = @max(segment.alignment, atom.alignment);
try wasm.appendAtomAtIndex(final_index, atom_index);
}
src/translate_c/ast.zig
@@ -1824,7 +1824,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.switch_prong => {
const payload = node.castTag(.switch_prong).?.data;
- var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
+ var items = try c.gpa.alloc(NodeIndex, @max(payload.cases.len, 1));
defer c.gpa.free(items);
items[0] = 0;
for (payload.cases, 0..) |item, i| {
@@ -1973,7 +1973,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.tuple).?.data;
_ = try c.addToken(.period, ".");
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
@@ -2007,7 +2007,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.container_init_dot).?.data;
_ = try c.addToken(.period, ".");
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2));
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
@@ -2046,7 +2046,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const lhs = try renderNode(c, payload.lhs);
const l_brace = try c.addToken(.l_brace, "{");
- var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
+ var inits = try c.gpa.alloc(NodeIndex, @max(payload.inits.len, 1));
defer c.gpa.free(inits);
inits[0] = 0;
for (payload.inits, 0..) |init, i| {
@@ -2102,7 +2102,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
const num_vars = payload.variables.len;
const num_funcs = payload.functions.len;
const total_members = payload.fields.len + num_vars + num_funcs;
- const members = try c.gpa.alloc(NodeIndex, std.math.max(total_members, 2));
+ const members = try c.gpa.alloc(NodeIndex, @max(total_members, 2));
defer c.gpa.free(members);
members[0] = 0;
members[1] = 0;
@@ -2195,7 +2195,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI
fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex {
const l_brace = try c.addToken(.l_brace, "{");
- var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
+ var rendered = try c.gpa.alloc(NodeIndex, @max(inits.len, 1));
defer c.gpa.free(rendered);
rendered[0] = 0;
for (inits, 0..) |init, i| {
@@ -2904,7 +2904,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {
fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) {
_ = try c.addToken(.l_paren, "(");
- var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
+ var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, @max(params.len, 1));
errdefer rendered.deinit();
for (params, 0..) |param, i| {
src/main.zig
@@ -5391,7 +5391,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void {
// setrlimit() now returns with errno set to EINVAL in places that historically succeeded.
// It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE.
// Use "rlim_cur = min(OPEN_MAX, rlim_max)".
- lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max);
+ lim.max = @min(std.os.darwin.OPEN_MAX, lim.max);
}
if (lim.cur == lim.max) return;
src/Sema.zig
@@ -22367,9 +22367,9 @@ fn analyzeShuffle(
// to it up to the length of the longer vector. This recursion terminates
// in 1 call because these calls to analyzeShuffle guarantee a_len == b_len.
if (a_len != b_len) {
- const min_len = std.math.min(a_len, b_len);
+ const min_len = @min(a_len, b_len);
const max_src = if (a_len > b_len) a_src else b_src;
- const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
+ const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| {
@@ -31301,7 +31301,7 @@ fn cmpNumeric(
}
const dest_ty = if (dest_float_type) |ft| ft else blk: {
- const max_bits = std.math.max(lhs_bits, rhs_bits);
+ const max_bits = @max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
break :blk try mod.intType(signedness, casted_bits);
@@ -35828,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.add(lhs_bigint, rhs_bigint);
@@ -35918,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.sub(lhs_bigint, rhs_bigint);
src/translate_c.zig
@@ -2400,7 +2400,7 @@ fn transStringLiteralInitializer(
if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type);
- const num_inits = math.min(str_length, array_size);
+ const num_inits = @min(str_length, array_size);
const init_node = if (num_inits > 0) blk: {
if (is_narrow) {
// "string literal".* or string literal"[0..num_inits].*
src/type.zig
@@ -1633,7 +1633,7 @@ pub const Type = struct {
const len = array_type.len + @boolToInt(array_type.sentinel != .none);
if (len == 0) return 0;
const elem_ty = array_type.child.toType();
- const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+ const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
if (elem_size == 0) return 0;
const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
return (len - 1) * 8 * elem_size + elem_bit_size;
src/TypedValue.zig
@@ -111,7 +111,7 @@ pub fn print(
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen(mod);
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, mod);
@@ -130,7 +130,7 @@ pub fn print(
const len = payload.len.toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
- const max_len = @intCast(usize, std.math.min(len, max_string_len));
+ const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
@@ -149,7 +149,7 @@ pub fn print(
try writer.writeAll(".{ ");
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
@@ -455,7 +455,7 @@ fn printAggregate(
const len = ty.arrayLen(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
- const max_len = @intCast(usize, std.math.min(len, max_string_len));
+ const max_len: usize = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
@@ -471,7 +471,7 @@ fn printAggregate(
try writer.writeAll(".{ ");
- const max_len = std.math.min(len, max_aggregate_items);
+ const max_len = @min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
src/value.zig
@@ -2458,7 +2458,7 @@ pub const Value = struct {
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(
+ @max(
// For the saturate
std.math.big.int.calcTwosCompLimbCount(info.bits),
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2572,7 +2572,7 @@ pub const Value = struct {
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
@@ -2638,7 +2638,7 @@ pub const Value = struct {
const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitOr(lhs_bigint, rhs_bigint);
@@ -2677,7 +2677,7 @@ pub const Value = struct {
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);