Commit 0282c2a924
Changed files (6)
lib/std/os/linux/seccomp.zig
@@ -29,8 +29,8 @@
//! which is dependant on the ABI. Since BPF programs execute in a 32-bit
//! machine, validation of 64-bit arguments necessitates two load-and-compare
//! instructions for the upper and lower words.
-//! 3. A further wrinkle to the above is endianess. Unlike network packets,
-//! syscall data shares the endianess of the target machine. A filter
+//! 3. A further wrinkle to the above is endianness. Unlike network packets,
+//! syscall data shares the endianness of the target machine. A filter
//! compiled on a little-endian machine will not work on a big-endian one,
//! and vice-versa. For example: Checking the upper 32-bits of `data.arg1`
//! requires a load at `@offsetOf(data, "arg1") + 4` on big-endian systems
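Aside, not part of the diff: the comment above concerns 64-bit syscall arguments needing two 32-bit loads, at offsets whose meaning depends on the target's byte order. A minimal sketch of that fact, assuming a Zig toolchain roughly contemporary with this commit (std.mem.readIntNative, the .Little/.Big endian tags, builtin.cpu):

    const std = @import("std");
    const builtin = @import("builtin");

    test "a u64 argument is two 32-bit words, placed by native byte order" {
        const arg: u64 = 0xAAAAAAAA_BBBBBBBB; // upper word 0xAAAAAAAA, lower word 0xBBBBBBBB
        const bytes = std.mem.toBytes(arg); // the eight bytes as laid out in memory

        // A 32-bit load at offset 0 is what a filter reads at `@offsetOf(data, "argN")`;
        // a load at offset 4 reads `@offsetOf(data, "argN") + 4`.
        const word_at_0 = std.mem.readIntNative(u32, bytes[0..4]);
        const word_at_4 = std.mem.readIntNative(u32, bytes[4..8]);

        switch (builtin.cpu.arch.endian()) {
            .Little => {
                // Little-endian machines store the lower half first.
                try std.testing.expectEqual(@as(u32, 0xBBBBBBBB), word_at_0);
                try std.testing.expectEqual(@as(u32, 0xAAAAAAAA), word_at_4);
            },
            .Big => {
                // Big-endian machines store the upper half first.
                try std.testing.expectEqual(@as(u32, 0xAAAAAAAA), word_at_0);
                try std.testing.expectEqual(@as(u32, 0xBBBBBBBB), word_at_4);
            },
        }
    }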
lib/std/array_hash_map.zig
@@ -54,7 +54,7 @@ pub fn hashString(s: []const u8) u32 {
/// Insertion order is preserved.
/// Deletions perform a "swap removal" on the entries list.
-/// Modifying the hash map while iterating is allowed, however one must understand
+/// Modifying the hash map while iterating is allowed, however, one must understand
/// the (well defined) behavior when mixing insertions and deletions with iteration.
/// For a hash map that can be initialized directly that does not store an Allocator
/// field, see `ArrayHashMapUnmanaged`.
@@ -448,7 +448,7 @@ pub fn ArrayHashMap(
/// General purpose hash table.
/// Insertion order is preserved.
/// Deletions perform a "swap removal" on the entries list.
-/// Modifying the hash map while iterating is allowed, however one must understand
+/// Modifying the hash map while iterating is allowed, however, one must understand
/// the (well defined) behavior when mixing insertions and deletions with iteration.
/// This type does not store an Allocator field - the Allocator must be passed in
/// with each function call that requires it. See `ArrayHashMap` for a type that stores
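Aside, not part of the diff: "swap removal" means the last entry is moved into the slot of the removed one, so insertion order is preserved only up to removals. A minimal sketch, assuming the AutoArrayHashMap API of this era (put, keys, swapRemove):

    const std = @import("std");

    test "swap removal moves the last entry into the freed slot" {
        var map = std.AutoArrayHashMap(u32, []const u8).init(std.testing.allocator);
        defer map.deinit();

        try map.put(1, "one");
        try map.put(2, "two");
        try map.put(3, "three");
        // Insertion order is preserved: keys() is {1, 2, 3}.

        _ = map.swapRemove(1);
        // The last entry (3) now occupies the removed slot: keys() is {3, 2}.
        try std.testing.expectEqual(@as(u32, 3), map.keys()[0]);
        try std.testing.expectEqual(@as(u32, 2), map.keys()[1]);
    }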
lib/std/array_list.zig
@@ -31,7 +31,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return struct {
const Self = @This();
/// Contents of the list. Pointers to elements in this slice are
- /// **invalid after resizing operations** on the ArrayList, unless the
+ /// **invalid after resizing operations** on the ArrayList unless the
/// operation explicitly either: (1) states otherwise or (2) lists the
/// invalidated pointers.
///
@@ -527,7 +527,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return struct {
const Self = @This();
/// Contents of the list. Pointers to elements in this slice are
- /// **invalid after resizing operations** on the ArrayList, unless the
+ /// **invalid after resizing operations** on the ArrayList unless the
/// operation explicitly either: (1) states otherwise or (2) lists the
/// invalidated pointers.
///
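Aside, not part of the diff: the invalidation rule above exists because the backing allocation can move when the list grows. A minimal sketch of the safe pattern, assuming the managed ArrayList API of this era (init, append, appendNTimes, items):

    const std = @import("std");

    test "re-derive element pointers after a resizing operation" {
        var list = std.ArrayList(u32).init(std.testing.allocator);
        defer list.deinit();

        try list.append(10);
        const before = &list.items[0]; // may dangle after the next call
        _ = before;

        // Growing the list may reallocate and move the elements,
        // so `before` must not be dereferenced past this point.
        try list.appendNTimes(0, 1000);

        // Safe pattern: keep an index and re-index into `items` after resizing.
        try std.testing.expectEqual(@as(u32, 10), list.items[0]);
    }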
lib/std/buf_map.zig
@@ -4,7 +4,7 @@ const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;
-/// BufMap copies keys and values before they go into the map, and
+/// BufMap copies keys and values before they go into the map and
/// frees them when they get removed.
pub const BufMap = struct {
hash_map: BufMapHashMap,
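Aside, not part of the diff: because BufMap copies on insertion, callers may hand it short-lived buffers. A minimal sketch, assuming the BufMap put/get API of this era:

    const std = @import("std");

    test "BufMap owns copies of its keys and values" {
        var map = std.BufMap.init(std.testing.allocator);
        defer map.deinit();

        var value_buf = "hello".*; // a local, mutable buffer
        try map.put("greeting", &value_buf);

        // Mutating the caller's buffer does not affect the stored copy.
        value_buf[0] = 'H';
        try std.testing.expectEqualStrings("hello", map.get("greeting").?);
    }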
lib/std/hash_map.zig
@@ -350,7 +350,7 @@ pub fn verifyContext(
/// General purpose hash table.
/// No order is guaranteed and any modification invalidates live iterators.
/// It provides fast operations (lookup, insertion, deletion) with quite high
-/// load factors (up to 80% by default) for a low memory usage.
+/// load factors (up to 80% by default) for low memory usage.
/// For a hash map that can be initialized directly that does not store an Allocator
/// field, see `HashMapUnmanaged`.
/// If iterating over the table entries is a strong usecase and needs to be fast,
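Aside, not part of the diff: the iterator-invalidation rule above means removals have to be deferred until iteration is over. A minimal sketch, assuming the AutoHashMap API of this era (iterator entries exposing key_ptr, remove discarding whether the key was present):

    const std = @import("std");

    test "defer removals until iteration is finished" {
        var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
        defer map.deinit();

        var i: u32 = 0;
        while (i < 10) : (i += 1) try map.put(i, i * i);

        // Removing inside the loop would invalidate `it`; gather keys first.
        var to_remove = std.ArrayList(u32).init(std.testing.allocator);
        defer to_remove.deinit();

        var it = map.iterator();
        while (it.next()) |entry| {
            if (entry.key_ptr.* % 2 == 0) try to_remove.append(entry.key_ptr.*);
        }
        for (to_remove.items) |key| _ = map.remove(key);

        try std.testing.expectEqual(@as(u32, 5), map.count());
    }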
lib/std/packed_int_array.zig
@@ -182,7 +182,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
/// Creates a bit-packed array of `Int`. Non-byte-multiple integers
/// will take up less memory in PackedIntArray than in a normal array.
-/// Elements are packed using native endianess and without storing any
+/// Elements are packed using native endianness and without storing any
/// meta data. PackedArray(i3, 8) will occupy exactly 3 bytes
/// of memory.
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
@@ -261,7 +261,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
}
/// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type
- /// and `new_endian` as the new endianess. `NewInt`'s bit width must fit evenly
+ /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
/// within the array's `Int`'s total bits.
pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
@@ -336,7 +336,7 @@ pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
}
/// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type
- /// and `new_endian` as the new endianess. `NewInt`'s bit width must fit evenly
+ /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
/// within the slice's `Int`'s total bits.
pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len);
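Aside, not part of the diff: the "exactly 3 bytes" figure in the PackedIntArray doc comment is just 8 elements times 3 bits = 24 bits, versus one byte per element for a plain [8]i3. A minimal sketch, assuming the PackedIntArray init/get/set API of this era:

    const std = @import("std");

    test "eight 3-bit integers round-trip through a packed array" {
        // 8 * 3 bits = 24 bits = 3 bytes of element storage, with no metadata.
        var arr = std.PackedIntArray(i3, 8).init([_]i3{ 0, 0, 0, 0, 0, 0, 0, 0 });

        arr.set(0, -4); // i3 holds -4 through 3
        arr.set(7, 3);

        try std.testing.expectEqual(@as(i3, -4), arr.get(0));
        try std.testing.expectEqual(@as(i3, 3), arr.get(7));
        try std.testing.expectEqual(@as(i3, 0), arr.get(1));
    }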