Commit c15a6fa9d0
std/os/linux/index.zig
@@ -1197,6 +1197,10 @@ pub fn fremovexattr(fd: usize, name: [*]const u8) usize {
return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
}
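+/// Wrapper around the sched_getaffinity(2) syscall. On success the return
+/// value is the number of bytes the kernel wrote into `set`; otherwise it
+/// encodes an errno, which callers can decode with getErrno.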
+pub fn sched_getaffinity(pid: i32, set: []usize) usize {
+ return syscall3(SYS_sched_getaffinity, @bitCast(usize, isize(pid)), set.len * @sizeOf(usize), @ptrToInt(set.ptr));
+}
+
pub const epoll_data = packed union {
ptr: usize,
fd: i32,
std/os/index.zig
@@ -2748,3 +2748,42 @@ pub fn posixFStat(fd: i32) !posix.Stat {
return stat;
}
+
+pub const CpuCountError = error{
+ OutOfMemory,
+ PermissionDenied,
+ Unexpected,
+};
+
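+/// Counts the CPUs available to this process by counting the set bits of
+/// its scheduler affinity mask (Linux only). The allocator is used only as
+/// a fallback when the mask does not fit in a small stack buffer.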
+pub fn cpuCount(fallback_allocator: *mem.Allocator) CpuCountError!usize {
+ const usize_count = 16;
+    // The StackFallbackAllocator must outlive the Allocator interface it
+    // hands out, so keep it in a named local instead of calling get() on a
+    // temporary, whose stack buffer would die at the end of the statement.
+    var stack_fallback = std.heap.stackFallback(usize_count * @sizeOf(usize), fallback_allocator);
+    const allocator = stack_fallback.get();
+
+ var set = try allocator.alloc(usize, usize_count);
+ defer allocator.free(set);
+
+ while (true) {
+ const rc = posix.sched_getaffinity(0, set);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => {
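+                // rc is the number of bytes the kernel wrote into the mask.
+                // If the buffer was filled exactly, the mask may have been
+                // truncated, so double the buffer and retry until the mask
+                // fits with room to spare.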
+ if (rc < set.len * @sizeOf(usize)) {
+ const result = set[0 .. rc / @sizeOf(usize)];
+ var sum: usize = 0;
+ for (result) |x| {
+ sum += @popCount(x);
+ }
+ return sum;
+ } else {
+ set = try allocator.realloc(usize, set, set.len * 2);
+ continue;
+ }
+ },
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.EPERM => return CpuCountError.PermissionDenied,
+ posix.ESRCH => unreachable,
+ else => return os.unexpectedErrorPosix(err),
+ }
+ }
+}
std/os/test.zig
@@ -58,3 +58,8 @@ fn start2(ctx: *i32) u8 {
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
return 0;
}
+
+test "cpu count" {
+ const cpu_count = try std.os.cpuCount(a);
+ assert(cpu_count >= 1);
+}
std/event.zig
@@ -150,8 +150,8 @@ pub const Loop = struct {
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
- // TODO check the actual cpu core count
- return self.initInternal(allocator, 4);
+ const core_count = try std.os.cpuCount(allocator);
+ return self.initInternal(allocator, core_count);
}
/// Thread count is the total thread count. The thread pool size will be
std/heap.zig
@@ -361,6 +361,73 @@ pub const ThreadSafeFixedBufferAllocator = struct {
fn free(allocator: *Allocator, bytes: []u8) void {}
};
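+/// Returns an allocator that serves requests out of a fixed-size stack
+/// buffer first and falls back to `fallback_allocator` once the buffer is
+/// exhausted. Store the result in a variable (so the buffer stays alive)
+/// and call `get()` on it to obtain the *Allocator interface.
+///
+/// A minimal usage sketch; the names `sfa` and `fallback` are illustrative:
+///
+///     var sfa = stackFallback(256, fallback);
+///     const allocator = sfa.get();
+///     const buf = try allocator.alloc(u8, 100); // fits in the stack buffer
+///     defer allocator.free(buf);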
+pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+ return StackFallbackAllocator(size){
+ .buffer = undefined,
+ .fallback_allocator = fallback_allocator,
+ .fixed_buffer_allocator = undefined,
+ .allocator = Allocator{
+ .allocFn = StackFallbackAllocator(size).alloc,
+ .reallocFn = StackFallbackAllocator(size).realloc,
+ .freeFn = StackFallbackAllocator(size).free,
+ },
+ };
+}
+
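+/// An allocator that attempts to allocate from a fixed-size stack buffer,
+/// deferring to a fallback allocator when the buffer cannot satisfy a
+/// request. Construct it with `stackFallback` above.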
+pub fn StackFallbackAllocator(comptime size: usize) type {
+ return struct {
+ const Self = this;
+
+ buffer: [size]u8,
+ allocator: Allocator,
+ fallback_allocator: *Allocator,
+ fixed_buffer_allocator: FixedBufferAllocator,
+
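+        /// Obtains the *Allocator interface, (re)initializing the fixed
+        /// buffer allocator over the stack buffer. Call it once per use;
+        /// calling it again resets the buffer and invalidates any
+        /// allocations still outstanding.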
+ pub fn get(self: *Self) *Allocator {
+ self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
+ return &self.allocator;
+ }
+
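+        // Serve the request from the fixed buffer when possible; on
+        // OutOfMemory, fall through to the fallback allocator.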
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, n, alignment) catch
+ self.fallback_allocator.allocFn(self.fallback_allocator, n, alignment);
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
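+            // If old_mem lives in the stack buffer, try to resize it there.
+            // FixedBufferAllocator resizes shrinking requests in place, so
+            // the catch path below only runs when growing; copying old_mem
+            // into the (larger) fallback allocation cannot overflow.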
+ const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+ if (in_buffer) {
+ return FixedBufferAllocator.realloc(
+ &self.fixed_buffer_allocator.allocator,
+ old_mem,
+ new_size,
+ alignment,
+ ) catch {
+ const result = try self.fallback_allocator.allocFn(
+ self.fallback_allocator,
+ new_size,
+ alignment,
+ );
+ mem.copy(u8, result, old_mem);
+ return result;
+ };
+ }
+ return self.fallback_allocator.reallocFn(self.fallback_allocator, old_mem, new_size, alignment);
+ }
+
+ fn free(allocator: *Allocator, bytes: []u8) void {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ const in_buffer = @ptrToInt(bytes.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(bytes.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
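+            // Bytes in the stack buffer are reclaimed all at once when the
+            // allocator goes out of scope, so only fallback allocations need
+            // an explicit free.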
+ if (!in_buffer) {
+ return self.fallback_allocator.freeFn(self.fallback_allocator, bytes);
+ }
+ }
+ };
+}
+
test "c_allocator" {
if (builtin.link_libc) {
var slice = c_allocator.alloc(u8, 50) catch return;