//! Contains only the definition of `io_uring_sqe`.
//! Split into its own file to compartmentalize the initialization methods.

const std = @import("../../std.zig");
const linux = std.os.linux;

pub const io_uring_sqe = extern struct {
    opcode: linux.IORING_OP,
    flags: u8,
    ioprio: u16,
    fd: i32,
    off: u64,
    addr: u64,
    len: u32,
    rw_flags: u32,
    user_data: u64,
    buf_index: u16,
    personality: u16,
    splice_fd_in: i32,
    addr3: u64,
    resv: u64,

    pub fn prep_nop(sqe: *linux.io_uring_sqe) void {
        sqe.* = .{
            .opcode = .NOP,
            .flags = 0,
            .ioprio = 0,
            .fd = 0,
            .off = 0,
            .addr = 0,
            .len = 0,
            .rw_flags = 0,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

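    /// Prepares an fsync(2) on `fd`. Setting `linux.IORING_FSYNC_DATASYNC` in
    /// `flags` requests fdatasync(2) semantics instead.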
    pub fn prep_fsync(sqe: *linux.io_uring_sqe, fd: linux.fd_t, flags: u32) void {
        sqe.* = .{
            .opcode = .FSYNC,
            .flags = 0,
            .ioprio = 0,
            .fd = fd,
            .off = 0,
            .addr = 0,
            .len = 0,
            .rw_flags = flags,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

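    /// Generic initializer used by most of the `prep_*` helpers below: it
    /// fills every field of the SQE, mapping `addr`, `len` and `offset` onto
    /// the kernel's overloaded submission-queue-entry layout.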
    pub fn prep_rw(
        sqe: *linux.io_uring_sqe,
        op: linux.IORING_OP,
        fd: linux.fd_t,
        addr: u64,
        len: usize,
        offset: u64,
    ) void {
        sqe.* = .{
            .opcode = op,
            .flags = 0,
            .ioprio = 0,
            .fd = fd,
            .off = offset,
            .addr = addr,
            .len = @intCast(len),
            .rw_flags = 0,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

    pub fn prep_read(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []u8, offset: u64) void {
        sqe.prep_rw(.READ, fd, @intFromPtr(buffer.ptr), buffer.len, offset);
    }

    pub fn prep_write(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []const u8, offset: u64) void {
        sqe.prep_rw(.WRITE, fd, @intFromPtr(buffer.ptr), buffer.len, offset);
    }

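    /// Prepares a splice(2), moving `len` bytes from `fd_in` to `fd_out`.
    /// Per io_uring_prep_splice(3), at least one of the two descriptors must
    /// refer to a pipe, and a pipe's offset must be passed as -1
    /// (`std.math.maxInt(u64)`).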
    pub fn prep_splice(sqe: *linux.io_uring_sqe, fd_in: linux.fd_t, off_in: u64, fd_out: linux.fd_t, off_out: u64, len: usize) void {
        sqe.prep_rw(.SPLICE, fd_out, undefined, len, off_out);
        sqe.addr = off_in;
        sqe.splice_fd_in = fd_in;
    }

    pub fn prep_readv(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        iovecs: []const std.posix.iovec,
        offset: u64,
    ) void {
        sqe.prep_rw(.READV, fd, @intFromPtr(iovecs.ptr), iovecs.len, offset);
    }

    pub fn prep_writev(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        iovecs: []const std.posix.iovec_const,
        offset: u64,
    ) void {
        sqe.prep_rw(.WRITEV, fd, @intFromPtr(iovecs.ptr), iovecs.len, offset);
    }

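    /// Like `prep_read`, but reads into a buffer that was previously
    /// registered with the kernel via `io_uring_register`; `buffer_index`
    /// selects which registered buffer to use.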
    pub fn prep_read_fixed(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: *std.posix.iovec, offset: u64, buffer_index: u16) void {
        sqe.prep_rw(.READ_FIXED, fd, @intFromPtr(buffer.base), buffer.len, offset);
        sqe.buf_index = buffer_index;
    }

    pub fn prep_write_fixed(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: *std.posix.iovec, offset: u64, buffer_index: u16) void {
        sqe.prep_rw(.WRITE_FIXED, fd, @intFromPtr(buffer.base), buffer.len, offset);
        sqe.buf_index = buffer_index;
    }

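    /// Prepares an accept, as accept4(2). `addr` and `addrlen` may be null
    /// when the peer address is not needed.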
    pub fn prep_accept(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: ?*linux.sockaddr,
        addrlen: ?*linux.socklen_t,
        flags: u32,
    ) void {
        // `addr` holds a pointer to `sockaddr`, and `addr2` holds a pointer to `socklen_t`.
        // `addr2` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
        sqe.prep_rw(.ACCEPT, fd, @intFromPtr(addr), 0, @intFromPtr(addrlen));
        sqe.rw_flags = flags;
    }

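    /// Like `prep_accept`, but instead of producing a normal file descriptor,
    /// installs the accepted socket into the ring's registered file table at
    /// `file_index`. Pass `linux.IORING_FILE_INDEX_ALLOC` to let the kernel
    /// pick a free slot.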
    pub fn prep_accept_direct(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: ?*linux.sockaddr,
        addrlen: ?*linux.socklen_t,
        flags: u32,
        file_index: u32,
    ) void {
        prep_accept(sqe, fd, addr, addrlen, flags);
        __io_uring_set_target_fixed_file(sqe, file_index);
    }

    pub fn prep_multishot_accept_direct(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: ?*linux.sockaddr,
        addrlen: ?*linux.socklen_t,
        flags: u32,
    ) void {
        prep_multishot_accept(sqe, fd, addr, addrlen, flags);
        __io_uring_set_target_fixed_file(sqe, linux.IORING_FILE_INDEX_ALLOC);
    }

    fn __io_uring_set_target_fixed_file(sqe: *linux.io_uring_sqe, file_index: u32) void {
        const sqe_file_index: u32 = if (file_index == linux.IORING_FILE_INDEX_ALLOC)
            linux.IORING_FILE_INDEX_ALLOC
        else
            // 0 means no fixed files, indexes should be encoded as "index + 1"
            file_index + 1;
        // This field is overloaded in liburing:
        //   splice_fd_in: i32
        //   sqe_file_index: u32
        sqe.splice_fd_in = @bitCast(sqe_file_index);
    }

    pub fn prep_connect(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: *const linux.sockaddr,
        addrlen: linux.socklen_t,
    ) void {
        // `addrlen` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
        sqe.prep_rw(.CONNECT, fd, @intFromPtr(addr), 0, addrlen);
    }

    pub fn prep_epoll_ctl(
        sqe: *linux.io_uring_sqe,
        epfd: linux.fd_t,
        fd: linux.fd_t,
        op: u32,
        ev: ?*linux.epoll_event,
    ) void {
        sqe.prep_rw(.EPOLL_CTL, epfd, @intFromPtr(ev), op, @intCast(fd));
    }

    pub fn prep_recv(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []u8, flags: u32) void {
        sqe.prep_rw(.RECV, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
        sqe.rw_flags = flags;
    }

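    /// Multishot variant of `prep_recv`: the request stays armed and posts a
    /// CQE for every message received until it is cancelled or fails. It is
    /// normally combined with provided buffer groups (`IOSQE_BUFFER_SELECT`),
    /// since one fixed buffer cannot back an arbitrary number of completions.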
    pub fn prep_recv_multishot(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        buffer: []u8,
        flags: u32,
    ) void {
        sqe.prep_recv(fd, buffer, flags);
        sqe.ioprio |= linux.IORING_RECV_MULTISHOT;
    }

    pub fn prep_recvmsg(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        msg: *linux.msghdr,
        flags: u32,
    ) void {
        sqe.prep_rw(.RECVMSG, fd, @intFromPtr(msg), 1, 0);
        sqe.rw_flags = flags;
    }

    pub fn prep_recvmsg_multishot(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        msg: *linux.msghdr,
        flags: u32,
    ) void {
        sqe.prep_recvmsg(fd, msg, flags);
        sqe.ioprio |= linux.IORING_RECV_MULTISHOT;
    }

    pub fn prep_send(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: u32) void {
        sqe.prep_rw(.SEND, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
        sqe.rw_flags = flags;
    }

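    /// Prepares a zero-copy send. A zero-copy send produces two CQEs: the
    /// usual completion, followed by a notification carrying the kernel's
    /// `IORING_CQE_F_NOTIF` flag once the buffer is no longer in use; the
    /// buffer must remain valid until that notification arrives.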
    pub fn prep_send_zc(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: u32, zc_flags: u16) void {
        sqe.prep_rw(.SEND_ZC, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
        sqe.rw_flags = flags;
        sqe.ioprio = zc_flags;
    }

    pub fn prep_send_zc_fixed(sqe: *linux.io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: u32, zc_flags: u16, buf_index: u16) void {
        prep_send_zc(sqe, fd, buffer, flags, zc_flags);
        sqe.ioprio |= linux.IORING_RECVSEND_FIXED_BUF;
        sqe.buf_index = buf_index;
    }

    pub fn prep_sendmsg_zc(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        msg: *const linux.msghdr_const,
        flags: u32,
    ) void {
        prep_sendmsg(sqe, fd, msg, flags);
        sqe.opcode = .SENDMSG_ZC;
    }

    pub fn prep_sendmsg(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        msg: *const linux.msghdr_const,
        flags: u32,
    ) void {
        sqe.prep_rw(.SENDMSG, fd, @intFromPtr(msg), 1, 0);
        sqe.rw_flags = flags;
    }

    pub fn prep_openat(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        path: [*:0]const u8,
        flags: linux.O,
        mode: linux.mode_t,
    ) void {
        sqe.prep_rw(.OPENAT, fd, @intFromPtr(path), mode, 0);
        sqe.rw_flags = @bitCast(flags);
    }

    pub fn prep_openat_direct(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        path: [*:0]const u8,
        flags: linux.O,
        mode: linux.mode_t,
        file_index: u32,
    ) void {
        prep_openat(sqe, fd, path, flags, mode);
        __io_uring_set_target_fixed_file(sqe, file_index);
    }

    pub fn prep_close(sqe: *linux.io_uring_sqe, fd: linux.fd_t) void {
        sqe.* = .{
            .opcode = .CLOSE,
            .flags = 0,
            .ioprio = 0,
            .fd = fd,
            .off = 0,
            .addr = 0,
            .len = 0,
            .rw_flags = 0,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

    pub fn prep_close_direct(sqe: *linux.io_uring_sqe, file_index: u32) void {
        prep_close(sqe, 0);
        __io_uring_set_target_fixed_file(sqe, file_index);
    }

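    /// Prepares a timeout: the request completes once `ts` has elapsed, or
    /// earlier if `count` other completions are posted first. `flags` may
    /// include `linux.IORING_TIMEOUT_ABS` to treat `ts` as an absolute time.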
    pub fn prep_timeout(
        sqe: *linux.io_uring_sqe,
        ts: *const linux.kernel_timespec,
        count: u32,
        flags: u32,
    ) void {
        sqe.prep_rw(.TIMEOUT, -1, @intFromPtr(ts), 1, count);
        sqe.rw_flags = flags;
    }

    pub fn prep_timeout_remove(sqe: *linux.io_uring_sqe, timeout_user_data: u64, flags: u32) void {
        sqe.* = .{
            .opcode = .TIMEOUT_REMOVE,
            .flags = 0,
            .ioprio = 0,
            .fd = -1,
            .off = 0,
            .addr = timeout_user_data,
            .len = 0,
            .rw_flags = flags,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

    pub fn prep_link_timeout(
        sqe: *linux.io_uring_sqe,
        ts: *const linux.kernel_timespec,
        flags: u32,
    ) void {
        sqe.prep_rw(.LINK_TIMEOUT, -1, @intFromPtr(ts), 1, 0);
        sqe.rw_flags = flags;
    }

    pub fn prep_poll_add(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        poll_mask: u32,
    ) void {
        sqe.prep_rw(.POLL_ADD, fd, @intFromPtr(@as(?*anyopaque, null)), 0, 0);
        // Poll masks previously comprised 16 bits in the flags union of an
        // SQE, but were extended to 32 bits in order to make room for
        // additional option flags. To ensure that the correct bits of poll
        // masks are consistently and properly read across multiple kernel
        // versions, poll masks are enforced to be little-endian.
        // https://www.spinics.net/lists/io-uring/msg02848.html
        sqe.rw_flags = std.mem.nativeToLittle(u32, poll_mask);
    }

    pub fn prep_poll_remove(
        sqe: *linux.io_uring_sqe,
        target_user_data: u64,
    ) void {
        sqe.prep_rw(.POLL_REMOVE, -1, target_user_data, 0, 0);
    }

    pub fn prep_poll_update(
        sqe: *linux.io_uring_sqe,
        old_user_data: u64,
        new_user_data: u64,
        poll_mask: u32,
        flags: u32,
    ) void {
        sqe.prep_rw(.POLL_REMOVE, -1, old_user_data, flags, new_user_data);
        // Poll masks previously comprised 16 bits in the flags union of an
        // SQE, but were extended to 32 bits in order to make room for
        // additional option flags. To ensure that the correct bits of poll
        // masks are consistently and properly read across multiple kernel
        // versions, poll masks are enforced to be little-endian.
        // https://www.spinics.net/lists/io-uring/msg02848.html
        sqe.rw_flags = std.mem.nativeToLittle(u32, poll_mask);
    }

    pub fn prep_fallocate(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        mode: i32,
        offset: u64,
        len: u64,
    ) void {
        sqe.* = .{
            .opcode = .FALLOCATE,
            .flags = 0,
            .ioprio = 0,
            .fd = fd,
            .off = offset,
            .addr = len,
            .len = @intCast(mode),
            .rw_flags = 0,
            .user_data = 0,
            .buf_index = 0,
            .personality = 0,
            .splice_fd_in = 0,
            .addr3 = 0,
            .resv = 0,
        };
    }

    pub fn prep_statx(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        path: [*:0]const u8,
        flags: u32,
        mask: u32,
        buf: *linux.Statx,
    ) void {
        sqe.prep_rw(.STATX, fd, @intFromPtr(path), mask, @intFromPtr(buf));
        sqe.rw_flags = flags;
    }

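    /// Attempts to cancel a previously submitted operation whose `user_data`
    /// matches `cancel_user_data`.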
    pub fn prep_cancel(
        sqe: *linux.io_uring_sqe,
        cancel_user_data: u64,
        flags: u32,
    ) void {
        sqe.prep_rw(.ASYNC_CANCEL, -1, cancel_user_data, 0, 0);
        sqe.rw_flags = flags;
    }

    pub fn prep_cancel_fd(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        flags: u32,
    ) void {
        sqe.prep_rw(.ASYNC_CANCEL, fd, 0, 0, 0);
        sqe.rw_flags = flags | linux.IORING_ASYNC_CANCEL_FD;
    }

    pub fn prep_shutdown(
        sqe: *linux.io_uring_sqe,
        sockfd: linux.socket_t,
        how: u32,
    ) void {
        sqe.prep_rw(.SHUTDOWN, sockfd, 0, how, 0);
    }

    pub fn prep_renameat(
        sqe: *linux.io_uring_sqe,
        old_dir_fd: linux.fd_t,
        old_path: [*:0]const u8,
        new_dir_fd: linux.fd_t,
        new_path: [*:0]const u8,
        flags: u32,
    ) void {
        sqe.prep_rw(
            .RENAMEAT,
            old_dir_fd,
            @intFromPtr(old_path),
            0,
            @intFromPtr(new_path),
        );
        sqe.len = @bitCast(new_dir_fd);
        sqe.rw_flags = flags;
    }

    pub fn prep_unlinkat(
        sqe: *linux.io_uring_sqe,
        dir_fd: linux.fd_t,
        path: [*:0]const u8,
        flags: u32,
    ) void {
        sqe.prep_rw(.UNLINKAT, dir_fd, @intFromPtr(path), 0, 0);
        sqe.rw_flags = flags;
    }

    pub fn prep_mkdirat(
        sqe: *linux.io_uring_sqe,
        dir_fd: linux.fd_t,
        path: [*:0]const u8,
        mode: linux.mode_t,
    ) void {
        sqe.prep_rw(.MKDIRAT, dir_fd, @intFromPtr(path), mode, 0);
    }

    pub fn prep_symlinkat(
        sqe: *linux.io_uring_sqe,
        target: [*:0]const u8,
        new_dir_fd: linux.fd_t,
        link_path: [*:0]const u8,
    ) void {
        sqe.prep_rw(
            .SYMLINKAT,
            new_dir_fd,
            @intFromPtr(target),
            0,
            @intFromPtr(link_path),
        );
    }

    pub fn prep_linkat(
        sqe: *linux.io_uring_sqe,
        old_dir_fd: linux.fd_t,
        old_path: [*:0]const u8,
        new_dir_fd: linux.fd_t,
        new_path: [*:0]const u8,
        flags: u32,
    ) void {
        sqe.prep_rw(
            .LINKAT,
            old_dir_fd,
            @intFromPtr(old_path),
            0,
            @intFromPtr(new_path),
        );
        sqe.len = @bitCast(new_dir_fd);
        sqe.rw_flags = flags;
    }

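    /// Updates entries of the ring's registered file table, starting at
    /// `offset`, with the descriptors in `fds`.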
    pub fn prep_files_update(
        sqe: *linux.io_uring_sqe,
        fds: []const linux.fd_t,
        offset: u32,
    ) void {
        sqe.prep_rw(.FILES_UPDATE, -1, @intFromPtr(fds.ptr), fds.len, @intCast(offset));
    }

    pub fn prep_files_update_alloc(
        sqe: *linux.io_uring_sqe,
        fds: []linux.fd_t,
    ) void {
        sqe.prep_rw(.FILES_UPDATE, -1, @intFromPtr(fds.ptr), fds.len, linux.IORING_FILE_INDEX_ALLOC);
    }

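    /// Hands `num` buffers of `buffer_len` bytes each over to the kernel as
    /// buffer group `group_id`, with buffer ids starting at `buffer_id`.
    /// Requests that set `IOSQE_BUFFER_SELECT` can then have the kernel pick
    /// a buffer from this group.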
    pub fn prep_provide_buffers(
        sqe: *linux.io_uring_sqe,
        buffers: [*]u8,
        buffer_len: usize,
        num: usize,
        group_id: usize,
        buffer_id: usize,
    ) void {
        const ptr = @intFromPtr(buffers);
        sqe.prep_rw(.PROVIDE_BUFFERS, @intCast(num), ptr, buffer_len, buffer_id);
        sqe.buf_index = @intCast(group_id);
    }

    pub fn prep_remove_buffers(
        sqe: *linux.io_uring_sqe,
        num: usize,
        group_id: usize,
    ) void {
        sqe.prep_rw(.REMOVE_BUFFERS, @intCast(num), 0, 0, 0);
        sqe.buf_index = @intCast(group_id);
    }

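    /// Multishot variant of `prep_accept`: the request stays armed and posts
    /// a CQE for every incoming connection until it is cancelled or fails.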
    pub fn prep_multishot_accept(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: ?*linux.sockaddr,
        addrlen: ?*linux.socklen_t,
        flags: u32,
    ) void {
        prep_accept(sqe, fd, addr, addrlen, flags);
        sqe.ioprio |= linux.IORING_ACCEPT_MULTISHOT;
    }

    pub fn prep_socket(
        sqe: *linux.io_uring_sqe,
        domain: u32,
        socket_type: u32,
        protocol: u32,
        flags: u32,
    ) void {
        sqe.prep_rw(.SOCKET, @intCast(domain), 0, protocol, socket_type);
        sqe.rw_flags = flags;
    }

    pub fn prep_socket_direct(
        sqe: *linux.io_uring_sqe,
        domain: u32,
        socket_type: u32,
        protocol: u32,
        flags: u32,
        file_index: u32,
    ) void {
        prep_socket(sqe, domain, socket_type, protocol, flags);
        __io_uring_set_target_fixed_file(sqe, file_index);
    }

    pub fn prep_socket_direct_alloc(
        sqe: *linux.io_uring_sqe,
        domain: u32,
        socket_type: u32,
        protocol: u32,
        flags: u32,
    ) void {
        prep_socket(sqe, domain, socket_type, protocol, flags);
        __io_uring_set_target_fixed_file(sqe, linux.IORING_FILE_INDEX_ALLOC);
    }

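    /// Prepares a waitid(2): waits for a state change in the child or process
    /// group selected by `id_type` and `id`, storing the result in `infop`.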
    pub fn prep_waitid(
        sqe: *linux.io_uring_sqe,
        id_type: linux.P,
        id: i32,
        infop: *linux.siginfo_t,
        options: u32,
        flags: u32,
    ) void {
        sqe.prep_rw(.WAITID, id, 0, @intFromEnum(id_type), @intFromPtr(infop));
        sqe.rw_flags = flags;
        sqe.splice_fd_in = @bitCast(options);
    }

    pub fn prep_bind(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        addr: *const linux.sockaddr,
        addrlen: linux.socklen_t,
        flags: u32,
    ) void {
        sqe.prep_rw(.BIND, fd, @intFromPtr(addr), 0, addrlen);
        sqe.rw_flags = flags;
    }

    pub fn prep_listen(
        sqe: *linux.io_uring_sqe,
        fd: linux.fd_t,
        backlog: usize,
        flags: u32,
    ) void {
        sqe.prep_rw(.LISTEN, fd, 0, backlog, 0);
        sqe.rw_flags = flags;
    }

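    /// Prepares a socket command (a getsockopt/setsockopt-style operation)
    /// issued via the `.URING_CMD` opcode. The option arguments are packed
    /// into the SQE's overloaded fields, as noted inline below.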
    pub fn prep_cmd_sock(
        sqe: *linux.io_uring_sqe,
        cmd_op: linux.IO_URING_SOCKET_OP,
        fd: linux.fd_t,
        level: u32,
        optname: u32,
        optval: u64,
        optlen: u32,
    ) void {
        sqe.prep_rw(.URING_CMD, fd, 0, 0, 0);
        // off is overloaded with cmd_op, https://github.com/axboe/liburing/blob/e1003e496e66f9b0ae06674869795edf772d5500/src/include/liburing/io_uring.h#L39
        sqe.off = @intFromEnum(cmd_op);
        // addr is overloaded, https://github.com/axboe/liburing/blob/e1003e496e66f9b0ae06674869795edf772d5500/src/include/liburing/io_uring.h#L46
        sqe.addr = @bitCast(packed struct {
            level: u32,
            optname: u32,
        }{
            .level = level,
            .optname = optname,
        });
        // splice_fd_in is overloaded: u32 -> i32
        sqe.splice_fd_in = @bitCast(optlen);
        // addr3 is overloaded, https://github.com/axboe/liburing/blob/e1003e496e66f9b0ae06674869795edf772d5500/src/include/liburing/io_uring.h#L102
        sqe.addr3 = optval;
    }

    pub fn set_flags(sqe: *linux.io_uring_sqe, flags: u8) void {
        sqe.flags |= flags;
    }

    /// This SQE forms a link with the next SQE in the submission ring: the
    /// next SQE will not be started before this one completes. This forms a
    /// chain of SQEs.
    pub fn link_next(sqe: *linux.io_uring_sqe) void {
        sqe.flags |= linux.IOSQE_IO_LINK;
    }
};
679};