Commit ca27055cda
Changed files (33)
doc/langref.html.in
@@ -8977,15 +8977,15 @@ test "integer cast panic" {
</p>
{#header_close#}
- {#header_open|@maximum#}
- <pre>{#syntax#}@maximum(a: T, b: T) T{#endsyntax#}</pre>
+ {#header_open|@max#}
+ <pre>{#syntax#}@max(a: T, b: T) T{#endsyntax#}</pre>
<p>
Returns the maximum value of {#syntax#}a{#endsyntax#} and {#syntax#}b{#endsyntax#}. This builtin accepts integers, floats, and vectors of either. In the latter case, the operation is performed element-wise.
</p>
<p>
NaNs are handled as follows: if one of the operands of a (pairwise) operation is NaN, the other operand is returned. If both operands are NaN, NaN is returned.
</p>
- {#see_also|@minimum|Vectors#}
+ {#see_also|@min|Vectors#}
{#header_close#}
{#header_open|@memcpy#}
@@ -9025,15 +9025,15 @@ mem.copy(u8, dest[0..byte_count], source[0..byte_count]);{#endsyntax#}</pre>
mem.set(u8, dest, c);{#endsyntax#}</pre>
{#header_close#}
- {#header_open|@minimum#}
- <pre>{#syntax#}@minimum(a: T, b: T) T{#endsyntax#}</pre>
+ {#header_open|@min#}
+ <pre>{#syntax#}@min(a: T, b: T) T{#endsyntax#}</pre>
<p>
Returns the minimum value of {#syntax#}a{#endsyntax#} and {#syntax#}b{#endsyntax#}. This builtin accepts integers, floats, and vectors of either. In the latter case, the operation is performed element-wise.
</p>
<p>
NaNs are handled as follows: if one of the operands of a (pairwise) operation is NaN, the other operand is returned. If both operands are NaN, NaN is returned.
</p>
- {#see_also|@maximum|Vectors#}
+ {#see_also|@max|Vectors#}
{#header_close#}
{#header_open|@wasmMemorySize#}
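
For reference, a minimal usage sketch of the renamed builtins (not part of this commit's diff); it exercises the scalar, element-wise vector, and NaN behavior the paragraphs above describe, mirroring the cases in test/behavior/maximum_minimum.zig:

    const std = @import("std");
    const expect = std.testing.expect;

    test "@max/@min: scalars, vectors, NaN" {
        // Scalars: plain maximum/minimum.
        try expect(@max(@as(i32, -3), @as(i32, 10)) == 10);
        try expect(@as(f32, 0.68) == @min(@as(f32, 3.2), @as(f32, 0.68)));

        // Vectors: the operation is performed element-wise.
        const a: @Vector(2, i32) = [2]i32{ 1, 40 };
        const b: @Vector(2, i32) = [2]i32{ 30, 4 };
        const m = @max(a, b);
        try expect(m[0] == 30 and m[1] == 40);

        // NaN: if one operand is NaN, the other operand is returned.
        try expect(@max(std.math.qnan_f32, @as(f32, 1.0)) == 1.0);
    }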
lib/compiler_rt/float_to_int.zig
@@ -29,9 +29,9 @@ pub inline fn floatToInt(comptime I: type, a: anytype) I {
switch (@typeInfo(I).Int.signedness) {
.unsigned => {
if (negative) return 0;
- if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
+ if (@intCast(c_uint, exponent) >= @min(int_bits, max_exp)) return math.maxInt(I);
},
- .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
+ .signed => if (@intCast(c_uint, exponent) >= @min(int_bits - 1, max_exp)) {
return if (negative) math.minInt(I) else math.maxInt(I);
},
}
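
The @min here caps the saturation test at whichever bound is hit first: the target integer's bit width or the float type's maximum exponent. A hedged sketch of that boundary rule (saturates and the hard-coded f32 max_exp are illustrative stand-ins, not compiler_rt's real helpers):

    const std = @import("std");

    // Illustrative only: `exponent` is the unbiased binary exponent of
    // the input float. Once it reaches @min(int_bits, max_exp), the
    // value no longer fits and floatToInt saturates to maxInt/minInt.
    fn saturates(comptime I: type, exponent: u16) bool {
        const int_bits = @typeInfo(I).Int.bits;
        const max_exp: u16 = 128; // assumption: f32's maximum exponent
        return exponent >= @min(int_bits, max_exp);
    }

    test "u8 saturation boundary" {
        try std.testing.expect(saturates(u8, 8)); // 256.0 and above -> 255
        try std.testing.expect(!saturates(u8, 7)); // 128.0..255.x still fit
    }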
lib/docs/main.js
@@ -456,9 +456,9 @@ var zigAnalysis;
let lastIsDecl = isDecl(last);
let lastIsType = isType(last);
let lastIsContainerType = isContainerType(last);
-
- if (lastIsDecl){
- renderDocTest(last);
+
+ if (lastIsDecl) {
+ renderDocTest(last);
}
if (lastIsContainerType) {
@@ -487,9 +487,9 @@ var zigAnalysis;
}
return renderValue(last);
}
-
+
}
-
+
function renderDocTest(decl) {
if (!decl.decltest) return;
const astNode = getAstNode(decl.decltest);
@@ -1172,12 +1172,12 @@ var zigAnalysis;
payloadHtml += "intToError";
break;
}
- case "maximum": {
- payloadHtml += "maximum";
+ case "max": {
+ payloadHtml += "max";
break;
}
- case "minimum": {
- payloadHtml += "minimum";
+ case "min": {
+ payloadHtml += "min";
break;
}
case "bit_not": {
@@ -2069,7 +2069,7 @@ var zigAnalysis;
if (
rootIsStd &&
typeObj ===
- getType(zigAnalysis.packages[zigAnalysis.rootPkg].main)
+ getType(zigAnalysis.packages[zigAnalysis.rootPkg].main)
) {
name = "std";
} else {
@@ -2328,7 +2328,7 @@ var zigAnalysis;
function renderSourceFileLink(decl) {
let srcNode = getAstNode(decl.src);
- return "<a style=\"float: right;\" href=\"" +
+ return "<a style=\"float: right;\" href=\"" +
sourceFileUrlTemplate.replace("{{file}}",
zigAnalysis.files[srcNode.file]).replace("{{line}}", srcNode.line + 1) + "\">[src]</a>";
}
@@ -2888,12 +2888,12 @@ var zigAnalysis;
let cut = false;
if (index < 0 || index > 80) {
- if (trimmed_docs.length > 80) {
- index = 80;
- cut = true;
- } else {
- index = trimmed_docs.length;
- }
+ if (trimmed_docs.length > 80) {
+ index = 80;
+ cut = true;
+ } else {
+ index = trimmed_docs.length;
+ }
}
let slice = trimmed_docs.slice(0, index);
@@ -3527,7 +3527,7 @@ var zigAnalysis;
decltest: decl[4],
};
}
-
+
function getAstNode(idx) {
const ast = zigAnalysis.astNodes[idx];
return {
@@ -3541,141 +3541,141 @@ var zigAnalysis;
comptime: ast[7],
};
}
-
- function getType(idx){
+
+ function getType(idx) {
const ty = zigAnalysis.types[idx];
- switch(ty[0]) {
- default:
- throw "unhandled type kind!";
- case 0: // Unanalyzed
- throw "unanalyzed type!";
- case 1: // Type
- case 2: // Void
- case 3: // Bool
- case 4: // NoReturn
- case 5: // Int
- case 6: // Float
- return { kind: ty[0], name: ty[1]};
- case 7: // Pointer
- return {
- kind: ty[0],
- size: ty[1],
- child: ty[2],
- sentinel: ty[3],
- align: ty[4],
- address_space: ty[5],
- bit_start: ty[6],
- host_size: ty[7],
- is_ref: ty[8],
- is_allowzero: ty[9],
- is_mutable: ty[10],
- is_volatile: ty[11],
- has_sentinel: ty[12],
- has_align: ty[13],
- has_addrspace: ty[14],
- has_bit_range: ty[15],
- };
- case 8: // Array
- return {
- kind: ty[0],
- len: ty[1],
- child: ty[2],
- sentinel: ty[3],
- };
- case 9: // Struct
- return {
- kind: ty[0],
- name: ty[1],
- src: ty[2],
- privDecls: ty[3],
- pubDecls: ty[4],
- fields: ty[5],
- line_number: ty[6],
- outer_decl: ty[7],
- };
- case 10: // ComptimeExpr
- case 11: // ComptimeFloat
- case 12: // ComptimeInt
- case 13: // Undefined
- case 14: // Null
- return { kind: ty[0], name: ty[1] };
- case 15: // Optional
- return {
- kind: ty[0],
- name: ty[1],
- child: ty[2],
- };
- case 16: // ErrorUnion
- return {
- kind: ty[0],
- lhs: ty[1],
- rhs: ty[2],
- };
- case 17: // InferredErrorUnion
- return {
- kind: ty[0],
- payload: ty[1],
- };
- case 18: // ErrorSet
- return {
- kind: ty[0],
- name: ty[1],
- fields: ty[2],
- };
- case 19: // Enum
- return {
- kind: ty[0],
- name: ty[1],
- src: ty[2],
- privDecls: ty[3],
- pubDecls: ty[4],
- };
- case 20: // Union
- return {
- kind: ty[0],
- name: ty[1],
- src: ty[2],
- privDecls: ty[3],
- pubDecls: ty[4],
- fields: ty[5],
- };
- case 21: // Fn
- return {
- kind: ty[0],
- name: ty[1],
- src: ty[2],
- ret: ty[3],
- generic_ret: ty[4],
- params: ty[5],
- lib_name: ty[6],
- is_var_args: ty[7],
- is_inferred_error: ty[8],
- has_lib_name: ty[9],
- has_cc: ty[10],
- cc: ty[11],
- align: ty[12],
- has_align: ty[13],
- is_test: ty[14],
- is_extern: ty[15],
- };
- case 22: // BoundFn
- return { kind: ty[0], name: ty[1] };
- case 23: // Opaque
- return {
- kind: ty[0],
- name: ty[1],
- src: ty[2],
- privDecls: ty[3],
- pubDecls: ty[4],
- };
- case 24: // Frame
- case 25: // AnyFrame
- case 26: // Vector
- case 27: // EnumLiteral
- return { kind: ty[0], name: ty[1] };
+ switch (ty[0]) {
+ default:
+ throw "unhandled type kind!";
+ case 0: // Unanalyzed
+ throw "unanalyzed type!";
+ case 1: // Type
+ case 2: // Void
+ case 3: // Bool
+ case 4: // NoReturn
+ case 5: // Int
+ case 6: // Float
+ return { kind: ty[0], name: ty[1] };
+ case 7: // Pointer
+ return {
+ kind: ty[0],
+ size: ty[1],
+ child: ty[2],
+ sentinel: ty[3],
+ align: ty[4],
+ address_space: ty[5],
+ bit_start: ty[6],
+ host_size: ty[7],
+ is_ref: ty[8],
+ is_allowzero: ty[9],
+ is_mutable: ty[10],
+ is_volatile: ty[11],
+ has_sentinel: ty[12],
+ has_align: ty[13],
+ has_addrspace: ty[14],
+ has_bit_range: ty[15],
+ };
+ case 8: // Array
+ return {
+ kind: ty[0],
+ len: ty[1],
+ child: ty[2],
+ sentinel: ty[3],
+ };
+ case 9: // Struct
+ return {
+ kind: ty[0],
+ name: ty[1],
+ src: ty[2],
+ privDecls: ty[3],
+ pubDecls: ty[4],
+ fields: ty[5],
+ line_number: ty[6],
+ outer_decl: ty[7],
+ };
+ case 10: // ComptimeExpr
+ case 11: // ComptimeFloat
+ case 12: // ComptimeInt
+ case 13: // Undefined
+ case 14: // Null
+ return { kind: ty[0], name: ty[1] };
+ case 15: // Optional
+ return {
+ kind: ty[0],
+ name: ty[1],
+ child: ty[2],
+ };
+ case 16: // ErrorUnion
+ return {
+ kind: ty[0],
+ lhs: ty[1],
+ rhs: ty[2],
+ };
+ case 17: // InferredErrorUnion
+ return {
+ kind: ty[0],
+ payload: ty[1],
+ };
+ case 18: // ErrorSet
+ return {
+ kind: ty[0],
+ name: ty[1],
+ fields: ty[2],
+ };
+ case 19: // Enum
+ return {
+ kind: ty[0],
+ name: ty[1],
+ src: ty[2],
+ privDecls: ty[3],
+ pubDecls: ty[4],
+ };
+ case 20: // Union
+ return {
+ kind: ty[0],
+ name: ty[1],
+ src: ty[2],
+ privDecls: ty[3],
+ pubDecls: ty[4],
+ fields: ty[5],
+ };
+ case 21: // Fn
+ return {
+ kind: ty[0],
+ name: ty[1],
+ src: ty[2],
+ ret: ty[3],
+ generic_ret: ty[4],
+ params: ty[5],
+ lib_name: ty[6],
+ is_var_args: ty[7],
+ is_inferred_error: ty[8],
+ has_lib_name: ty[9],
+ has_cc: ty[10],
+ cc: ty[11],
+ align: ty[12],
+ has_align: ty[13],
+ is_test: ty[14],
+ is_extern: ty[15],
+ };
+ case 22: // BoundFn
+ return { kind: ty[0], name: ty[1] };
+ case 23: // Opaque
+ return {
+ kind: ty[0],
+ name: ty[1],
+ src: ty[2],
+ privDecls: ty[3],
+ pubDecls: ty[4],
+ };
+ case 24: // Frame
+ case 25: // AnyFrame
+ case 26: // Vector
+ case 27: // EnumLiteral
+ return { kind: ty[0], name: ty[1] };
}
}
-
+
})();
lib/std/build/InstallRawStep.zig
@@ -301,7 +301,7 @@ const HexWriter = struct {
const row_address = @intCast(u32, segment.physicalAddress + bytes_read);
const remaining = segment.fileSize - bytes_read;
- const to_read = @minimum(remaining, MAX_PAYLOAD_LEN);
+ const to_read = @min(remaining, MAX_PAYLOAD_LEN);
const did_read = try elf_file.preadAll(buf[0..to_read], segment.elfOffset + bytes_read);
if (did_read < to_read) return error.UnexpectedEOF;
lib/std/compress/deflate/decompressor.zig
@@ -99,8 +99,8 @@ const HuffmanDecoder = struct {
if (min == 0) {
min = n;
}
- min = @minimum(n, min);
- max = @maximum(n, max);
+ min = @min(n, min);
+ max = @max(n, max);
count[n] += 1;
}
lib/std/compress/deflate/huffman_code.zig
@@ -134,7 +134,7 @@ pub const HuffmanEncoder = struct {
// The tree can't have greater depth than n - 1, no matter what. This
// saves a little bit of work in some small cases
- max_bits = @minimum(max_bits, n - 1);
+ max_bits = @min(max_bits, n - 1);
// Create information about each of the levels.
// A bogus "Level 0" whose sole purpose is so that
lib/std/debug.zig
@@ -2087,7 +2087,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
const tty_config = detectTTYConfig();
const stderr = io.getStdErr().writer();
- const end = @minimum(t.index, size);
+ const end = @min(t.index, size);
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print(
"Unable to dump stack trace: Unable to open debug info: {s}\n",
lib/std/heap.zig
@@ -315,7 +315,7 @@ const PageAllocator = struct {
}
}
- const max_drop_len = alignment - @minimum(alignment, mem.page_size);
+ const max_drop_len = alignment - @min(alignment, mem.page_size);
const alloc_len = if (max_drop_len <= aligned_len - n)
aligned_len
else
@@ -554,7 +554,7 @@ const WasmPageAllocator = struct {
fn freePages(start: usize, end: usize) void {
if (start < extendedOffset()) {
- conventional.recycle(start, @minimum(extendedOffset(), end) - start);
+ conventional.recycle(start, @min(extendedOffset(), end) - start);
}
if (end > extendedOffset()) {
var new_end = end;
lib/std/os.zig
@@ -642,7 +642,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
- const adjusted_len = @minimum(max_count, buf.len);
+ const adjusted_len = @min(max_count, buf.len);
while (true) {
const rc = system.read(fd, buf.ptr, adjusted_len);
@@ -771,7 +771,7 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
- const adjusted_len = @minimum(max_count, buf.len);
+ const adjusted_len = @min(max_count, buf.len);
const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc)
system.pread64
@@ -1027,7 +1027,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
- const adjusted_len = @minimum(max_count, bytes.len);
+ const adjusted_len = @min(max_count, bytes.len);
while (true) {
const rc = system.write(fd, bytes.ptr, adjusted_len);
@@ -1183,7 +1183,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
- const adjusted_len = @minimum(max_count, bytes.len);
+ const adjusted_len = @min(max_count, bytes.len);
const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc)
system.pwrite64
@@ -6006,8 +6006,8 @@ pub fn sendfile(
}
// Here we match BSD behavior, making a zero count value send as many bytes as possible.
- const adjusted_count_tmp = if (in_len == 0) max_count else @minimum(in_len, @as(size_t, max_count));
- // TODO we should not need this cast; improve return type of @minimum
+ const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count));
+ // TODO we should not need this cast; improve return type of @min
const adjusted_count = @intCast(usize, adjusted_count_tmp);
const sendfile_sym = if (builtin.link_libc)
@@ -6091,7 +6091,7 @@ pub fn sendfile(
hdtr = &hdtr_data;
}
- const adjusted_count = @minimum(in_len, max_count);
+ const adjusted_count = @min(in_len, max_count);
while (true) {
var sbytes: off_t = undefined;
@@ -6170,8 +6170,8 @@ pub fn sendfile(
hdtr = &hdtr_data;
}
- const adjusted_count_temporary = @minimum(in_len, @as(u63, max_count));
- // TODO we should not need this int cast; improve the return type of `@minimum`
+ const adjusted_count_temporary = @min(in_len, @as(u63, max_count));
+ // TODO we should not need this int cast; improve the return type of `@min`
const adjusted_count = @intCast(u63, adjusted_count_temporary);
while (true) {
@@ -6226,8 +6226,8 @@ pub fn sendfile(
rw: {
var buf: [8 * 4096]u8 = undefined;
// Here we match BSD behavior, making a zero count value send as many bytes as possible.
- const adjusted_count_tmp = if (in_len == 0) buf.len else @minimum(buf.len, in_len);
- // TODO we should not need this cast; improve return type of @minimum
+ const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len);
+ // TODO we should not need this cast; improve return type of @min
const adjusted_count = @intCast(usize, adjusted_count_tmp);
const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset);
if (amt_read == 0) {
@@ -6329,7 +6329,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
}
var buf: [8 * 4096]u8 = undefined;
- const adjusted_count = @minimum(buf.len, len);
+ const adjusted_count = @min(buf.len, len);
const amt_read = try pread(fd_in, buf[0..adjusted_count], off_in);
// TODO without @as the line below fails to compile for wasm32-wasi:
// error: integer value 0 cannot be coerced to type 'os.PWriteError!usize'
@@ -6493,7 +6493,7 @@ pub fn dn_expand(
const end = msg.ptr + msg.len;
if (p == end or exp_dn.len == 0) return error.InvalidDnsPacket;
var dest = exp_dn.ptr;
- const dend = dest + @minimum(exp_dn.len, 254);
+ const dend = dest + @min(exp_dn.len, 254);
// detect reference loop using an iteration counter
var i: usize = 0;
while (i < msg.len) : (i += 2) {
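
All of these os.zig call sites share one pattern: clamp the requested length to the platform's syscall limit and let the caller observe a short read or write. A minimal sketch of that clamp (adjustedLen and the 32-bit cap are assumptions for illustration, standing in for the per-OS max_count selected above):

    const std = @import("std");

    fn adjustedLen(buf_len: usize) usize {
        // Stand-in for the per-OS max_count limit.
        const max_count: usize = std.math.maxInt(i32);
        return @min(max_count, buf_len);
    }

    test "oversized requests are clamped to the syscall limit" {
        try std.testing.expect(adjustedLen(10) == 10);
        try std.testing.expect(adjustedLen(std.math.maxInt(usize)) == std.math.maxInt(i32));
    }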
lib/std/Progress.zig
@@ -357,7 +357,7 @@ fn refreshWithHeldLock(self: *Progress) void {
// we possibly wrote previously don't affect whether we truncate the line in `bufWrite`.
const unprintables = end;
end = 0;
- self.output_buffer_slice = self.output_buffer[unprintables..@minimum(self.output_buffer.len, unprintables + self.max_width.?)];
+ self.output_buffer_slice = self.output_buffer[unprintables..@min(self.output_buffer.len, unprintables + self.max_width.?)];
if (!self.done) {
var need_ellipsis = false;
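
The slice bound here is the usual clamp-the-window idiom: the end index may not run past the buffer even when start plus width would. Sketched with a plain string rather than Progress's output buffer (hypothetical values):

    const std = @import("std");

    test "clamped slice window" {
        const buf = "hello world";
        const start: usize = 6;
        const max_width: usize = 100;
        const window = buf[start..@min(buf.len, start + max_width)];
        try std.testing.expect(std.mem.eql(u8, window, "world"));
    }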
lib/std/simd.zig
@@ -9,7 +9,7 @@ const builtin = @import("builtin");
pub fn suggestVectorSizeForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?usize {
// This is guesswork; if you have better suggestions, add them or edit the current values here.
// This can run in comptime only, but stage1 fails at it; stage2 can understand it.
- const element_bit_size = @maximum(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(T)) catch unreachable);
+ const element_bit_size = @max(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(T)) catch unreachable);
const vector_bit_size: u16 = blk: {
if (cpu.arch.isX86()) {
if (T == bool and std.Target.x86.featureSetHas(.prefer_mask_registers)) return 64;
@@ -405,8 +405,8 @@ pub fn prefixScan(comptime op: std.builtin.ReduceOp, comptime hop: isize, vec: a
.Xor => a ^ b,
.Add => a + b,
.Mul => a * b,
- .Min => @minimum(a, b),
- .Max => @maximum(a, b),
+ .Min => @min(a, b),
+ .Max => @max(a, b),
};
}
};
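
suggestVectorSizeForCpu first normalizes the element size: round the bit count up to a power of two, with a floor of 8 bits. A worked check of that one line (types chosen purely for illustration):

    const std = @import("std");

    test "element bit size rounding" {
        // u3: 3 bits -> ceilPowerOfTwo -> 4 -> floored to 8 by @max.
        const a = @max(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(u3)) catch unreachable);
        try std.testing.expect(a == 8);
        // u20: 20 bits -> rounds up to 32; the floor no longer matters.
        const b = @max(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(u20)) catch unreachable);
        try std.testing.expect(b == 32);
    }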
src/arch/aarch64/CodeGen.zig
@@ -418,7 +418,7 @@ fn gen(self: *Self) !void {
const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, ptr_bytes) + ptr_bytes;
self.next_stack_offset = stack_offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = ret_ptr_reg });
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -885,7 +885,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
// TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
try self.stack.putNoClobber(self.gpa, offset, .{
.inst = inst,
.size = abi_size,
@@ -3643,7 +3643,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
const tmp_mcv = MCValue{ .stack_offset = offset };
try self.load(tmp_mcv, ptr, ptr_ty);
src/arch/arm/CodeGen.zig
@@ -895,7 +895,7 @@ fn allocMem(
// TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
self.next_stack_offset = offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
if (maybe_inst) |inst| {
try self.stack.putNoClobber(self.gpa, offset, .{
src/arch/x86_64/CodeGen.zig
@@ -418,7 +418,7 @@ fn gen(self: *Self) InnerError!void {
// spill it to stack immediately.
const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset + 8, 8);
self.next_stack_offset = stack_offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
const ret_reg = abi.getCAbiIntParamRegs(self.target.*)[0];
try self.genSetStack(Type.usize, @intCast(i32, stack_offset), MCValue{ .register = ret_reg }, .{});
@@ -884,7 +884,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
// TODO find a free slot instead of always appending
const offset = mem.alignForwardGeneric(u32, self.next_stack_offset + abi_size, abi_align);
self.next_stack_offset = offset;
- self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+ self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
try self.stack.putNoClobber(self.gpa, offset, .{
.inst = inst,
.size = abi_size,
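
The max_end_stack updates in all three backends implement a high-water mark: each allocation advances next_stack_offset, and the largest value ever reached becomes the frame size. Reduced to its core (hypothetical sizes; the real code also aligns each offset):

    const std = @import("std");

    test "stack frame high-water mark" {
        var next_stack_offset: u32 = 0;
        var max_end_stack: u32 = 0;
        for ([_]u32{ 8, 16, 4 }) |abi_size| {
            next_stack_offset += abi_size;
            max_end_stack = @max(max_end_stack, next_stack_offset);
        }
        try std.testing.expect(max_end_stack == 28);
    }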
src/codegen/llvm.zig
@@ -996,7 +996,7 @@ pub const Object = struct {
const abi_size = @intCast(c_uint, param_ty.abiSize(target));
const int_llvm_ty = dg.context.intType(abi_size * 8);
const int_ptr_llvm_ty = int_llvm_ty.pointerType(0);
- const alignment = @maximum(
+ const alignment = @max(
param_ty.abiAlignment(target),
dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
@@ -1032,7 +1032,7 @@ pub const Object = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @maximum(ptr_info.pointee_type.abiAlignment(target), 1);
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
}
const ptr_param = llvm_func.getParam(llvm_arg_i);
@@ -2869,7 +2869,7 @@ pub const DeclGen = struct {
if (field_val.tag() != .unreachable_value) continue;
const field_align = field_ty.abiAlignment(target);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -2935,7 +2935,7 @@ pub const DeclGen = struct {
const field_ty_align = field.ty.abiAlignment(target);
any_underaligned_fields = any_underaligned_fields or
field_align < field_ty_align;
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3557,7 +3557,7 @@ pub const DeclGen = struct {
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_align = field_ty.abiAlignment(target);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3652,7 +3652,7 @@ pub const DeclGen = struct {
if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
const field_align = field.alignment(target, struct_obj.layout);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -4244,7 +4244,7 @@ pub const DeclGen = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @maximum(
+ const elem_align = @max(
ptr_info.pointee_type.abiAlignment(target),
1,
);
@@ -4710,7 +4710,7 @@ pub const FuncGen = struct {
} else {
// LLVM does not allow bitcasting structs so we must allocate
// a local, bitcast its pointer, store, and then load.
- const alignment = @maximum(
+ const alignment = @max(
param_ty.abiAlignment(target),
self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
@@ -7778,7 +7778,7 @@ pub const FuncGen = struct {
if (result_is_ref) {
// Bitcast the result pointer, then store.
- const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
@@ -7791,7 +7791,7 @@ pub const FuncGen = struct {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values.
// Therefore, we store operand to bitcasted alloca, then load for result.
- const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
@@ -9444,7 +9444,7 @@ pub const FuncGen = struct {
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
if (isByRef(info.pointee_type)) {
const result_align = info.pointee_type.abiAlignment(target);
- const max_align = @maximum(result_align, ptr_alignment);
+ const max_align = @max(result_align, ptr_alignment);
const result_ptr = self.buildAlloca(elem_llvm_ty, max_align);
const llvm_ptr_u8 = self.context.intType(8).pointerType(0);
const llvm_usize = self.context.intType(Type.usize.intInfo(target).bits);
@@ -9995,7 +9995,7 @@ fn llvmFieldIndex(
if (tuple.values[i].tag() != .unreachable_value) continue;
const field_align = field_ty.abiAlignment(target);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -10028,7 +10028,7 @@ fn llvmFieldIndex(
if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
const field_align = field.alignment(target, layout);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
src/link/MachO/dead_strip.zig
@@ -284,7 +284,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
const aligned_end_addr = mem.alignForwardGeneric(u64, section.header.size, atom_alignment);
const padding = aligned_end_addr - section.header.size;
section.header.size += padding + atom.size;
- section.header.@"align" = @maximum(section.header.@"align", atom.alignment);
+ section.header.@"align" = @max(section.header.@"align", atom.alignment);
if (atom.next) |next| {
atom = next;
src/link/Coff.zig
@@ -609,7 +609,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
self.markRelocsDirtyByAddress(header.virtual_address + needed_size);
}
- header.virtual_size = @maximum(header.virtual_size, needed_size);
+ header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
maybe_last_atom.* = atom;
}
@@ -1657,7 +1657,7 @@ fn writeBaseRelocations(self: *Coff) !void {
try self.growSectionVM(self.reloc_section_index.?, needed_size);
}
}
- header.virtual_size = @maximum(header.virtual_size, needed_size);
+ header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
@@ -1937,7 +1937,7 @@ pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
}
fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 {
- const headers_size = @maximum(self.getSizeOfHeaders(), self.page_size);
+ const headers_size = @max(self.getSizeOfHeaders(), self.page_size);
if (start < headers_size)
return headers_size;
src/link/Dwarf.zig
@@ -1898,7 +1898,7 @@ fn writeDbgInfoNopsToArrayList(
next_padding_size: usize,
trailing_zero: bool,
) Allocator.Error!void {
- try buffer.resize(gpa, @maximum(
+ try buffer.resize(gpa, @max(
buffer.items.len,
offset + content.len + next_padding_size + 1,
));
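
Taking @max with the current length makes this a grow-only resize: the buffer is extended to cover the write but never truncated. The same idiom on a plain ArrayList (assumed values, not Dwarf's actual offsets):

    const std = @import("std");

    test "grow-only resize" {
        var buffer = std.ArrayList(u8).init(std.testing.allocator);
        defer buffer.deinit();
        try buffer.appendSlice("abcdef");
        try buffer.resize(@max(buffer.items.len, 4)); // 6 >= 4: no-op
        try std.testing.expect(buffer.items.len == 6);
        try buffer.resize(@max(buffer.items.len, 10)); // grows to 10
        try std.testing.expect(buffer.items.len == 10);
    }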
src/link/MachO.zig
@@ -3419,7 +3419,7 @@ pub fn populateMissingMetadata(self: *MachO) !void {
if (self.header_segment_cmd_index == null) {
// The first __TEXT segment is immovable and covers MachO header and load commands.
self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
- const ideal_size = @maximum(self.base.options.headerpad_size orelse 0, default_headerpad_size);
+ const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size);
log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size });
@@ -3647,7 +3647,7 @@ pub fn calcMinHeaderPad(self: *MachO) !u64 {
log.debug("headerpad_max_install_names minimum headerpad size 0x{x}", .{
min_headerpad_size + @sizeOf(macho.mach_header_64),
});
- padding = @maximum(padding, min_headerpad_size);
+ padding = @max(padding, min_headerpad_size);
}
const offset = @sizeOf(macho.mach_header_64) + padding;
log.debug("actual headerpad size 0x{x}", .{offset});
@@ -3980,7 +3980,7 @@ pub fn addAtomToSection(self: *MachO, atom: *Atom) !void {
const aligned_end_addr = mem.alignForwardGeneric(u64, section.header.size, atom_alignment);
const padding = aligned_end_addr - section.header.size;
section.header.size += padding + atom.size;
- section.header.@"align" = @maximum(section.header.@"align", atom.alignment);
+ section.header.@"align" = @max(section.header.@"align", atom.alignment);
self.sections.set(sect_id, section);
}
src/stage1/all_types.hpp
@@ -2996,8 +2996,8 @@ enum IrBinOp {
IrBinOpRemMod,
IrBinOpArrayCat,
IrBinOpArrayMult,
- IrBinOpMaximum,
- IrBinOpMinimum,
+ IrBinOpMax,
+ IrBinOpMin,
IrBinOpAddSat,
IrBinOpSubSat,
IrBinOpMultSat,
src/stage1/astgen.cpp
@@ -4758,7 +4758,7 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
if (arg1_value == ag->codegen->invalid_inst_src)
return arg1_value;
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMaximum, arg0_value, arg1_value, true);
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMax, arg0_value, arg1_value, true);
return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
}
case BuiltinFnIdMemcpy:
@@ -4813,7 +4813,7 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
if (arg1_value == ag->codegen->invalid_inst_src)
return arg1_value;
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMinimum, arg0_value, arg1_value, true);
+ Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMin, arg0_value, arg1_value, true);
return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
}
case BuiltinFnIdWasmMemorySize:
src/stage1/codegen.cpp
@@ -3590,10 +3590,10 @@ static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LL
snprintf(fn_name, sizeof(fn_name), "__ge%sf2", compiler_rt_type_abbrev);
res_icmp = EQ_ONE;
break;
- case IrBinOpMaximum:
+ case IrBinOpMax:
snprintf(fn_name, sizeof(fn_name), "%sfmax%s", math_float_prefix, math_float_suffix);
break;
- case IrBinOpMinimum:
+ case IrBinOpMin:
snprintf(fn_name, sizeof(fn_name), "%sfmin%s", math_float_prefix, math_float_suffix);
break;
case IrBinOpMult:
@@ -3864,7 +3864,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
case IrBinOpRemMod:
return gen_rem(g, want_runtime_safety, ir_want_fast_math(g, &bin_op_instruction->base),
op1_value, op2_value, operand_type, RemKindMod);
- case IrBinOpMaximum:
+ case IrBinOpMax:
if (scalar_type->id == ZigTypeIdFloat) {
return ZigLLVMBuildMaxNum(g->builder, op1_value, op2_value, "");
} else if (scalar_type->id == ZigTypeIdInt) {
@@ -3876,7 +3876,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
- case IrBinOpMinimum:
+ case IrBinOpMin:
if (scalar_type->id == ZigTypeIdFloat) {
return ZigLLVMBuildMinNum(g->builder, op1_value, op2_value, "");
} else if (scalar_type->id == ZigTypeIdInt) {
@@ -10182,8 +10182,8 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdWasmMemoryGrow, "wasmMemoryGrow", 2);
create_builtin_fn(g, BuiltinFnIdSrc, "src", 0);
create_builtin_fn(g, BuiltinFnIdReduce, "reduce", 2);
- create_builtin_fn(g, BuiltinFnIdMaximum, "maximum", 2);
- create_builtin_fn(g, BuiltinFnIdMinimum, "minimum", 2);
+ create_builtin_fn(g, BuiltinFnIdMaximum, "max", 2);
+ create_builtin_fn(g, BuiltinFnIdMinimum, "min", 2);
create_builtin_fn(g, BuiltinFnIdPrefetch, "prefetch", 2);
create_builtin_fn(g, BuiltinFnIdAddrSpaceCast, "addrSpaceCast", 2);
}
src/stage1/ir.cpp
@@ -10072,14 +10072,14 @@ static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, Scope *scope, AstNode *s
float_mod(out_val, op1_val, op2_val);
}
break;
- case IrBinOpMaximum:
+ case IrBinOpMax:
if (is_int) {
bigint_max(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
} else {
float_max(out_val, op1_val, op2_val);
}
break;
- case IrBinOpMinimum:
+ case IrBinOpMin:
if (is_int) {
bigint_min(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
} else {
@@ -10440,8 +10440,8 @@ static bool ok_float_op(IrBinOp op) {
case IrBinOpRemRem:
case IrBinOpRemMod:
case IrBinOpRemUnspecified:
- case IrBinOpMaximum:
- case IrBinOpMinimum:
+ case IrBinOpMax:
+ case IrBinOpMin:
return true;
case IrBinOpBoolOr:
@@ -11451,8 +11451,8 @@ static Stage1AirInst *ir_analyze_instruction_bin_op(IrAnalyze *ira, Stage1ZirIns
case IrBinOpRemUnspecified:
case IrBinOpRemRem:
case IrBinOpRemMod:
- case IrBinOpMaximum:
- case IrBinOpMinimum:
+ case IrBinOpMax:
+ case IrBinOpMin:
case IrBinOpAddSat:
case IrBinOpSubSat:
case IrBinOpMultSat:
src/stage1/ir_print.cpp
@@ -739,10 +739,10 @@ static const char *ir_bin_op_id_str(IrBinOp op_id) {
return "++";
case IrBinOpArrayMult:
return "**";
- case IrBinOpMaximum:
- return "@maximum";
- case IrBinOpMinimum:
- return "@minimum";
+ case IrBinOpMax:
+ return "@max";
+ case IrBinOpMin:
+ return "@min";
case IrBinOpAddSat:
return "@addWithSaturation";
case IrBinOpSubSat:
src/AstGen.zig
@@ -2451,8 +2451,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.atomic_rmw,
.mul_add,
.field_parent_ptr,
- .maximum,
- .minimum,
+ .max,
+ .min,
.c_import,
.@"resume",
.@"await",
@@ -7875,19 +7875,19 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
- .maximum => {
+ .max => {
const a = try expr(gz, scope, .none, params[0]);
const b = try expr(gz, scope, .none, params[1]);
- const result = try gz.addPlNode(.maximum, node, Zir.Inst.Bin{
+ const result = try gz.addPlNode(.max, node, Zir.Inst.Bin{
.lhs = a,
.rhs = b,
});
return rvalue(gz, rl, result, node);
},
- .minimum => {
+ .min => {
const a = try expr(gz, scope, .none, params[0]);
const b = try expr(gz, scope, .none, params[1]);
- const result = try gz.addPlNode(.minimum, node, Zir.Inst.Bin{
+ const result = try gz.addPlNode(.min, node, Zir.Inst.Bin{
.lhs = a,
.rhs = b,
});
src/Autodoc.zig
@@ -1349,8 +1349,8 @@ fn walkInstruction(
.frame_type,
.frame_size,
.ptr_to_int,
- .minimum,
- .maximum,
+ .min,
+ .max,
.bit_not,
// @check
.clz,
src/BuiltinFn.zig
@@ -58,10 +58,10 @@ pub const Tag = enum {
int_to_error,
int_to_float,
int_to_ptr,
- maximum,
+ max,
memcpy,
memset,
- minimum,
+ min,
wasm_memory_size,
wasm_memory_grow,
mod,
@@ -556,9 +556,9 @@ pub const list = list: {
},
},
.{
- "@maximum",
+ "@max",
.{
- .tag = .maximum,
+ .tag = .max,
.param_count = 2,
},
},
@@ -577,9 +577,9 @@ pub const list = list: {
},
},
.{
- "@minimum",
+ "@min",
.{
- .tag = .minimum,
+ .tag = .min,
.param_count = 2,
},
},
src/Module.zig
@@ -992,7 +992,7 @@ pub const Struct = struct {
if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
- return @maximum(ty_abi_align, 16);
+ return @max(ty_abi_align, 16);
}
return ty_abi_align;
@@ -1326,7 +1326,7 @@ pub const Union = struct {
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
- max_align = @maximum(max_align, field_align);
+ max_align = @max(max_align, field_align);
}
return max_align;
}
@@ -1391,7 +1391,7 @@ pub const Union = struct {
most_aligned_field_size = field_size;
}
}
- payload_align = @maximum(payload_align, 1);
+ payload_align = @max(payload_align, 1);
if (!have_tag or !u.tag_ty.hasRuntimeBits()) {
return .{
.abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
@@ -1409,7 +1409,7 @@ pub const Union = struct {
// Put the tag before or after the payload depending on which one's
// alignment is greater.
const tag_size = u.tag_ty.abiSize(target);
- const tag_align = @maximum(1, u.tag_ty.abiAlignment(target));
+ const tag_align = @max(1, u.tag_ty.abiAlignment(target));
var size: u64 = 0;
var padding: u32 = undefined;
if (tag_align >= payload_align) {
@@ -1431,7 +1431,7 @@ pub const Union = struct {
}
return .{
.abi_size = size,
- .abi_align = @maximum(tag_align, payload_align),
+ .abi_align = @max(tag_align, payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = biggest_field,
@@ -5492,7 +5492,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
.func = func,
.fn_ret_ty = decl.ty.fnReturnType(),
.owner_func = func,
- .branch_quota = @maximum(func.branch_quota, Sema.default_branch_quota),
+ .branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
};
defer sema.deinit();
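
The union layout code above puts the tag before or after the payload depending on whose alignment is larger, and the union's overall alignment is @max of the two. A simplified sketch of just that placement rule (assumed sizes and alignments; the real computation also tracks padding and the most-aligned field):

    const std = @import("std");

    fn unionAbiSize(payload_size: u64, payload_align_in: u64, tag_size: u64, tag_align_in: u64) u64 {
        const payload_align = @max(payload_align_in, 1);
        const tag_align = @max(tag_align_in, 1);
        var size: u64 = 0;
        if (tag_align >= payload_align) {
            // Tag first, then payload at its own alignment.
            size = std.mem.alignForwardGeneric(u64, tag_size, payload_align);
            size += payload_size;
        } else {
            // Payload first, then tag at its own alignment.
            size = std.mem.alignForwardGeneric(u64, payload_size, tag_align);
            size += tag_size;
        }
        return std.mem.alignForwardGeneric(u64, size, @max(tag_align, payload_align));
    }

    test "u32 tag next to a u8 payload occupies 8 bytes" {
        try std.testing.expect(unionAbiSize(1, 1, 4, 4) == 8);
    }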
src/print_zir.zig
@@ -342,8 +342,8 @@ const Writer = struct {
.reduce,
.bitcast,
.vector_type,
- .maximum,
- .minimum,
+ .max,
+ .min,
.elem_ptr_node,
.elem_val_node,
.elem_ptr,
src/Sema.zig
@@ -913,8 +913,8 @@ fn analyzeBodyInner(
.mod => try sema.zirMod(block, inst),
.rem => try sema.zirRem(block, inst),
- .maximum => try sema.zirMinMax(block, inst, .max),
- .minimum => try sema.zirMinMax(block, inst, .min),
+ .max => try sema.zirMinMax(block, inst, .max),
+ .min => try sema.zirMinMax(block, inst, .min),
.shl => try sema.zirShl(block, inst, .shl),
.shl_exact => try sema.zirShl(block, inst, .shl_exact),
@@ -3886,9 +3886,9 @@ fn validateUnionInit(
if (block_index > 0 and
field_ptr_air_inst == block.instructions.items[block_index - 1])
{
- first_block_index = @minimum(first_block_index, block_index - 1);
+ first_block_index = @min(first_block_index, block_index - 1);
} else {
- first_block_index = @minimum(first_block_index, block_index);
+ first_block_index = @min(first_block_index, block_index);
}
init_val = try sema.resolveMaybeUndefValAllowVariables(block, init_src, bin_op.rhs);
break;
@@ -4097,9 +4097,9 @@ fn validateStructInit(
if (block_index > 0 and
field_ptr_air_inst == block.instructions.items[block_index - 1])
{
- first_block_index = @minimum(first_block_index, block_index - 1);
+ first_block_index = @min(first_block_index, block_index - 1);
} else {
- first_block_index = @minimum(first_block_index, block_index);
+ first_block_index = @min(first_block_index, block_index);
}
if (try sema.resolveMaybeUndefValAllowVariables(block, field_src, bin_op.rhs)) |val| {
field_values[i] = val;
@@ -4278,7 +4278,7 @@ fn zirValidateArrayInit(
}
block_index -= 1;
}
- first_block_index = @minimum(first_block_index, block_index);
+ first_block_index = @min(first_block_index, block_index);
// If the next instruction is a store with a comptime operand, this element
// is comptime.
@@ -4606,7 +4606,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const quota = @intCast(u32, try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known"));
- sema.branch_quota = @maximum(sema.branch_quota, quota);
+ sema.branch_quota = @max(sema.branch_quota, quota);
}
fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -6887,7 +6887,7 @@ fn instantiateGenericCall(
break :callee new_func;
} else gop.key_ptr.*;
- callee.branch_quota = @maximum(callee.branch_quota, sema.branch_quota);
+ callee.branch_quota = @max(callee.branch_quota, sema.branch_quota);
const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);
@@ -20382,7 +20382,7 @@ fn analyzeMinMax(
) CompileError!Air.Inst.Ref {
const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
- // TODO @maximum(max_int, undefined) should return max_int
+ // TODO @max(max_int, undefined) should return max_int
const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty);
src/type.zig
@@ -2998,7 +2998,7 @@ pub const Type = extern union {
return child_type.abiAlignmentAdvanced(target, strat);
},
.lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) {
- .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @maximum(x, 1) },
+ .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
.val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
},
}
@@ -3014,7 +3014,7 @@ pub const Type = extern union {
if (!(try data.payload.hasRuntimeBitsAdvanced(false, sema_kit))) {
return AbiAlignmentAdvanced{ .scalar = code_align };
}
- return AbiAlignmentAdvanced{ .scalar = @maximum(
+ return AbiAlignmentAdvanced{ .scalar = @max(
code_align,
(try data.payload.abiAlignmentAdvanced(target, strat)).scalar,
) };
@@ -3023,7 +3023,7 @@ pub const Type = extern union {
switch (try data.payload.abiAlignmentAdvanced(target, strat)) {
.scalar => |payload_align| {
return AbiAlignmentAdvanced{
- .scalar = @maximum(code_align, payload_align),
+ .scalar = @max(code_align, payload_align),
};
},
.val => {},
@@ -3077,14 +3077,14 @@ pub const Type = extern union {
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
},
};
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
// This logic is duplicated in Module.Struct.Field.alignment.
if (struct_obj.layout == .Extern or target.ofmt == .c) {
if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
- big_align = @maximum(big_align, 16);
+ big_align = @max(big_align, 16);
}
}
}
@@ -3099,7 +3099,7 @@ pub const Type = extern union {
if (val.tag() != .unreachable_value) continue; // comptime field
switch (try field_ty.abiAlignmentAdvanced(target, strat)) {
- .scalar => |field_align| big_align = @maximum(big_align, field_align),
+ .scalar => |field_align| big_align = @max(big_align, field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema_kit => unreachable, // passed to abiAlignmentAdvanced above
@@ -3194,7 +3194,7 @@ pub const Type = extern union {
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
},
};
- max_align = @maximum(max_align, field_align);
+ max_align = @max(max_align, field_align);
}
return AbiAlignmentAdvanced{ .scalar = max_align };
}
@@ -3565,7 +3565,7 @@ pub const Type = extern union {
}
fn intAbiAlignment(bits: u16, target: Target) u32 {
- return @minimum(
+ return @min(
std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8),
target.maxIntAlignment(),
);
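
intAbiAlignment rounds the integer's byte size up to a power of two and caps it at the target's maximum integer alignment; a worked check (the 16-byte cap is an assumption matching x86_64, not a queried target value):

    const std = @import("std");

    test "integer ABI alignment rule" {
        // u24: 3 bytes -> next power of two is 4.
        const a = @min(std.math.ceilPowerOfTwoPromote(u16, (24 + 7) / 8), 16);
        try std.testing.expect(a == 4);
        // u256: 32 bytes -> capped at the assumed 16-byte target maximum.
        const b = @min(std.math.ceilPowerOfTwoPromote(u16, (256 + 7) / 8), 16);
        try std.testing.expect(b == 16);
    }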
@@ -3648,7 +3648,7 @@ pub const Type = extern union {
var size: u64 = 0;
for (union_obj.fields.values()) |field| {
- size = @maximum(size, try bitSizeAdvanced(field.ty, target, sema_kit));
+ size = @max(size, try bitSizeAdvanced(field.ty, target, sema_kit));
}
return size;
},
@@ -5760,7 +5760,7 @@ pub const Type = extern union {
}
const field_align = field.alignment(it.target, it.struct_obj.layout);
- it.big_align = @maximum(it.big_align, field_align);
+ it.big_align = @max(it.big_align, field_align);
const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
it.offset = field_offset + field.ty.abiSize(it.target);
return FieldOffset{ .field = i, .offset = field_offset };
@@ -5789,7 +5789,7 @@ pub const Type = extern union {
return field_offset.offset;
}
- return std.mem.alignForwardGeneric(u64, it.offset, @maximum(it.big_align, 1));
+ return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
},
.tuple, .anon_struct => {
@@ -5807,12 +5807,12 @@ pub const Type = extern union {
}
const field_align = field_ty.abiAlignment(target);
- big_align = @maximum(big_align, field_align);
+ big_align = @max(big_align, field_align);
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
if (i == index) return offset;
offset += field_ty.abiSize(target);
}
- offset = std.mem.alignForwardGeneric(u64, offset, @maximum(big_align, 1));
+ offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
return offset;
},
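
The big_align bookkeeping that recurs through type.zig (and llvm.zig above) is the standard struct-layout loop: track the largest field alignment while advancing the offset, then round the final size up to it. A condensed sketch with assumed field sizes and alignments, not the compiler's real field data:

    const std = @import("std");

    fn structSize(sizes: []const u64, aligns: []const u64) u64 {
        var big_align: u64 = 0;
        var offset: u64 = 0;
        for (sizes) |size, i| {
            big_align = @max(big_align, aligns[i]);
            offset = std.mem.alignForwardGeneric(u64, offset, aligns[i]);
            offset += size;
        }
        return std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
    }

    test "a u8 followed by a u32 pads to 8 bytes" {
        try std.testing.expect(structSize(&[_]u64{ 1, 4 }, &[_]u64{ 1, 4 }) == 8);
    }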
src/Zir.zig
@@ -924,12 +924,12 @@ pub const Inst = struct {
/// Implements the `@memset` builtin.
/// Uses the `pl_node` union field with payload `Memset`.
memset,
- /// Implements the `@minimum` builtin.
+ /// Implements the `@min` builtin.
/// Uses the `pl_node` union field with payload `Bin`
- minimum,
- /// Implements the `@maximum` builtin.
+ min,
+ /// Implements the `@max` builtin.
/// Uses the `pl_node` union field with payload `Bin`
- maximum,
+ max,
/// Implements the `@cImport` builtin.
/// Uses the `pl_node` union field with payload `Block`.
c_import,
@@ -1217,10 +1217,10 @@ pub const Inst = struct {
.mul_add,
.builtin_call,
.field_parent_ptr,
- .maximum,
+ .max,
.memcpy,
.memset,
- .minimum,
+ .min,
.c_import,
.@"resume",
.@"await",
@@ -1502,8 +1502,8 @@ pub const Inst = struct {
.mul_add,
.builtin_call,
.field_parent_ptr,
- .maximum,
- .minimum,
+ .max,
+ .min,
.c_import,
.@"resume",
.@"await",
@@ -1785,10 +1785,10 @@ pub const Inst = struct {
.mul_add = .pl_node,
.builtin_call = .pl_node,
.field_parent_ptr = .pl_node,
- .maximum = .pl_node,
+ .max = .pl_node,
.memcpy = .pl_node,
.memset = .pl_node,
- .minimum = .pl_node,
+ .min = .pl_node,
.c_import = .pl_node,
.alloc = .un_node,
test/behavior/maximum_minimum.zig
@@ -4,7 +4,7 @@ const mem = std.mem;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
-test "@maximum" {
+test "@max" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -14,15 +14,15 @@ test "@maximum" {
fn doTheTest() !void {
var x: i32 = 10;
var y: f32 = 0.68;
- try expect(@as(i32, 10) == @maximum(@as(i32, -3), x));
- try expect(@as(f32, 3.2) == @maximum(@as(f32, 3.2), y));
+ try expect(@as(i32, 10) == @max(@as(i32, -3), x));
+ try expect(@as(f32, 3.2) == @max(@as(f32, 3.2), y));
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
-test "@maximum on vectors" {
+test "@max on vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -33,17 +33,17 @@ test "@maximum on vectors" {
fn doTheTest() !void {
var a: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 };
var b: @Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 };
- var x = @maximum(a, b);
+ var x = @max(a, b);
try expect(mem.eql(i32, &@as([4]i32, x), &[4]i32{ 2147483647, 2147483647, 30, 40 }));
var c: @Vector(4, f32) = [4]f32{ 0, 0.4, -2.4, 7.8 };
var d: @Vector(4, f32) = [4]f32{ -0.23, 0.42, -0.64, 0.9 };
- var y = @maximum(c, d);
+ var y = @max(c, d);
try expect(mem.eql(f32, &@as([4]f32, y), &[4]f32{ 0, 0.42, -0.64, 7.8 }));
var e: @Vector(2, f32) = [2]f32{ 0, std.math.qnan_f32 };
var f: @Vector(2, f32) = [2]f32{ std.math.qnan_f32, 0 };
- var z = @maximum(e, f);
+ var z = @max(e, f);
try expect(mem.eql(f32, &@as([2]f32, z), &[2]f32{ 0, 0 }));
}
};
@@ -51,7 +51,7 @@ test "@maximum on vectors" {
comptime try S.doTheTest();
}
-test "@minimum" {
+test "@min" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -61,15 +61,15 @@ test "@minimum" {
fn doTheTest() !void {
var x: i32 = 10;
var y: f32 = 0.68;
- try expect(@as(i32, -3) == @minimum(@as(i32, -3), x));
- try expect(@as(f32, 0.68) == @minimum(@as(f32, 3.2), y));
+ try expect(@as(i32, -3) == @min(@as(i32, -3), x));
+ try expect(@as(f32, 0.68) == @min(@as(f32, 3.2), y));
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
-test "@minimum for vectors" {
+test "@min for vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -80,17 +80,17 @@ test "@minimum for vectors" {
fn doTheTest() !void {
var a: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 };
var b: @Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 };
- var x = @minimum(a, b);
+ var x = @min(a, b);
try expect(mem.eql(i32, &@as([4]i32, x), &[4]i32{ 1, -2, 3, 4 }));
var c: @Vector(4, f32) = [4]f32{ 0, 0.4, -2.4, 7.8 };
var d: @Vector(4, f32) = [4]f32{ -0.23, 0.42, -0.64, 0.9 };
- var y = @minimum(c, d);
+ var y = @min(c, d);
try expect(mem.eql(f32, &@as([4]f32, y), &[4]f32{ -0.23, 0.4, -2.4, 0.9 }));
var e: @Vector(2, f32) = [2]f32{ 0, std.math.qnan_f32 };
var f: @Vector(2, f32) = [2]f32{ std.math.qnan_f32, 0 };
- var z = @maximum(e, f);
+ var z = @max(e, f);
try expect(mem.eql(f32, &@as([2]f32, z), &[2]f32{ 0, 0 }));
}
};