/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to trial and error to know which
 *   flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
 * different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)
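
/*
 * Example (illustrative only, not part of the UAPI): mapping the flush-ID
 * page and reading LATEST_FLUSH_ID from userspace. This is a minimal sketch
 * assuming an already-open Panthor DRM file descriptor `fd`, a 64-bit build
 * (or _FILE_OFFSET_BITS=64 so the large offset fits in off_t), and that the
 * read-only LATEST_FLUSH_ID register sits at the start of the mapped page;
 * error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static uint32_t read_latest_flush_id(int fd)
 *	{
 *		long page_size = sysconf(_SC_PAGESIZE);
 *		void *map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *				 DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *		uint32_t flush_id = *(volatile const uint32_t *)map;
 *
 *		munmap(map, page_size);
 *		return flush_id;
 *	}
 */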

/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,

	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
	DRM_PANTHOR_BO_SET_LABEL,

	/**
	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
	 *
	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
	 * type seen by the process that manipulates the FD, such that a 32-bit process can
	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
	 * code. In that case, the kernel thinks it's a 64-bit process and assumes
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
	 * pgoff_t size.
	 */
	DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to changes in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
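
/*
 * Example (illustrative only): passing two drm_panthor_sync_op elements
 * (defined just below) through a drm_panthor_obj_array using the helper
 * above. `in_syncobj` and `out_timeline` are hypothetical syncobj handles
 * owned by the caller.
 *
 *	struct drm_panthor_sync_op ops[2] = {
 *		{
 *			.flags = DRM_PANTHOR_SYNC_OP_WAIT |
 *				 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *			.handle = in_syncobj,
 *		},
 *		{
 *			.flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *				 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ,
 *			.handle = out_timeline,
 *			.timeline_value = 42,
 *		},
 *	};
 *	struct drm_panthor_obj_array syncs = DRM_PANTHOR_OBJ_ARRAY(2, ops);
 */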

/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};

/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
	__u32 as_present;

	/** @pad0: MBZ. */
	__u32 pad0;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the timestamp timer. */
	__u64 timestamp_offset;
};

/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};
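
/*
 * Example (illustrative only): the usual two-step query. A first call with
 * @pointer set to NULL returns the size the kernel knows about, the second
 * call copies the data. The sketch assumes an open Panthor DRM fd and
 * libdrm's drmIoctl() helper; error handling is omitted.
 *
 *	struct drm_panthor_gpu_info gpu_info = {0};
 *	struct drm_panthor_dev_query query = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *	// query.size now holds the size known by the kernel.
 *
 *	query.size = sizeof(gpu_info);
 *	query.pointer = (__u64)(uintptr_t)&gpu_info;
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *
 *	unsigned int arch_major = DRM_PANTHOR_ARCH_MAJOR(gpu_info.gpu_id);
 *	unsigned int va_bits = DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features);
 */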

/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};
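
/*
 * Example (illustrative only): creating a VM. The 3/4 user/kernel split shown
 * here is just one plausible choice derived from mmu_features; passing zero
 * would let the kernel pick the split instead. Assumes `fd` and the
 * `gpu_info` from the query example above; error handling is omitted.
 *
 *	__u64 va_bits = DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features);
 *	struct drm_panthor_vm_create vm_create = {
 *		.user_va_range = (1ull << va_bits) - (1ull << (va_bits - 2)),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_create);
 *	// vm_create.id now holds the VM ID to pass to the other ioctls.
 */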

/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};
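
/*
 * Example (illustrative only): synchronously mapping a whole buffer object
 * at a fixed GPU virtual address. `bo_handle`, `bo_size` and `gpu_va` are
 * hypothetical values owned by the caller, and `vm_create` comes from the
 * VM_CREATE example above; error handling is omitted.
 *
 *	struct drm_panthor_vm_bind_op bind_op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP |
 *			 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *		.bo_handle = bo_handle,
 *		.bo_offset = 0,
 *		.va = gpu_va,
 *		.size = bo_size,
 *		// .syncs left empty, as required for synchronous binds.
 *	};
 *	struct drm_panthor_vm_bind bind = {
 *		.vm_id = vm_create.id,
 *		.flags = 0,	// synchronous: no DRM_PANTHOR_VM_BIND_ASYNC
 *		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &bind_op),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
 */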

/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};
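
/*
 * Example (illustrative only): allocating a CPU-visible buffer object and
 * mapping it into the CPU address space through the fake mmap offset (the
 * GPU-side mapping goes through VM_BIND as shown above). Assumes `fd`; the
 * 64 KiB size is arbitrary and error handling is omitted.
 *
 *	struct drm_panthor_bo_create bo_create = {
 *		.size = 65536,			// rounded up to page size by the kernel
 *		.flags = 0,			// CPU mapping allowed
 *		.exclusive_vm_id = 0,		// not tied to a single VM
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_create);
 *
 *	struct drm_panthor_bo_mmap_offset mmap_offset = {
 *		.handle = bo_create.handle,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmap_offset);
 *
 *	void *cpu_ptr = mmap(NULL, bo_create.size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, mmap_offset.offset);
 */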

/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};
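
/*
 * Example (illustrative only): creating a group with a single queue that can
 * use every shader core and tiler reported by the GPU. Assumes `fd`,
 * `gpu_info` and `vm_create` from the previous examples; the 64 KiB ring
 * buffer size is just a plausible choice, not a UAPI requirement.
 *
 *	struct drm_panthor_queue_create queue = {
 *		.priority = 15,			// highest priority inside the group
 *		.ringbuf_size = 64 * 1024,
 *	};
 *	struct drm_panthor_group_create group_create = {
 *		.queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *		.max_compute_cores = __builtin_popcountll(gpu_info.shader_present),
 *		.max_fragment_cores = __builtin_popcountll(gpu_info.shader_present),
 *		.max_tiler_cores = __builtin_popcountll(gpu_info.tiler_present),
 *		.priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *		.compute_core_mask = gpu_info.shader_present,
 *		.fragment_core_mask = gpu_info.shader_present,
 *		.tiler_core_mask = gpu_info.tiler_present,
 *		.vm_id = vm_create.id,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group_create);
 *	// group_create.group_handle identifies the group from now on.
 */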

/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This is describing the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction)
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};
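
/*
 * Example (illustrative only): submitting one job to queue 0 of a group and
 * signaling a syncobj on completion. `cs_gpu_va`/`cs_size` describe a
 * 64-byte-aligned command stream previously written by the UMD,
 * `out_syncobj` is a handle created with DRM_IOCTL_SYNCOBJ_CREATE, and
 * `latest_flush_id` comes from the flush-ID page shown earlier; error
 * handling is omitted.
 *
 *	struct drm_panthor_sync_op signal_op = {
 *		.flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *			 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *		.handle = out_syncobj,
 *	};
 *	struct drm_panthor_queue_submit qsubmit = {
 *		.queue_index = 0,
 *		.stream_size = cs_size,		// multiple of 8
 *		.stream_addr = cs_gpu_va,	// 64-byte aligned
 *		.latest_flush = latest_flush_id,
 *		.syncs = DRM_PANTHOR_OBJ_ARRAY(1, &signal_op),
 *	};
 *	struct drm_panthor_group_submit gsubmit = {
 *		.group_handle = group_create.group_handle,
 *		.queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
 */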

/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ */
	__u32 pad;
};
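
/*
 * Example (illustrative only): checking whether a group is still usable
 * after a job completed with an error. Assumes `fd` and the `group_create`
 * result from the group-creation example; error handling is omitted.
 *
 *	struct drm_panthor_group_get_state get_state = {
 *		.group_handle = group_create.group_handle,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_GROUP_GET_STATE, &get_state);
 *
 *	if (get_state.state & (DRM_PANTHOR_GROUP_STATE_TIMEDOUT |
 *			       DRM_PANTHOR_GROUP_STATE_FATAL_FAULT)) {
 *		// The group can't accept new jobs: destroy it and create a
 *		// replacement before resubmitting.
 *	}
 */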

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight tiler jobs in-flight, the FW will wait for render
	 * passes to finish before queuing new tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to TILER_HEAP_DESTROY. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the tiler heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};
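
/*
 * Example (illustrative only): creating a tiler heap that starts with a
 * single 2 MiB chunk and can grow to 16 chunks. The chunk size, chunk limit
 * and target_in_flight are just plausible values within the documented
 * constraints; error handling is omitted.
 *
 *	struct drm_panthor_tiler_heap_create heap_create = {
 *		.vm_id = vm_create.id,
 *		.initial_chunk_count = 1,
 *		.chunk_size = 2 * 1024 * 1024,
 *		.max_chunks = 16,
 *		.target_in_flight = 2,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap_create);
 *	// heap_create.tiler_heap_ctx_gpu_va and .first_heap_chunk_gpu_va are
 *	// then referenced from the tiler job descriptors built by the UMD.
 */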

/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
 */
struct drm_panthor_bo_set_label {
	/** @handle: Handle of the buffer object to label. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/**
	 * @label: User pointer to a NUL-terminated string
	 *
	 * Length cannot be greater than 4096
	 */
	__u64 label;
};

/**
 * struct drm_panthor_set_user_mmio_offset - Arguments passed to
 * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
 *
 * This ioctl is only really useful if you want to support userspace
 * CPU emulation environments where the size of an unsigned long differs
 * between the host and the guest architectures.
 */
struct drm_panthor_set_user_mmio_offset {
	/**
	 * @offset: User MMIO offset to use.
	 *
	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
	 *
	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
	 * OFFSET_64BIT based on the size of an unsigned long) unless you
	 * have a very good reason to overrule this decision.
	 */
	__u64 offset;
};
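
/*
 * Example (illustrative only): a 64-bit emulator running 32-bit guest code
 * forces the 32-bit MMIO offset so the guest UMD can mmap() the user MMIO
 * ranges through a 32-bit pgoff_t. Assumes `fd`; error handling is omitted.
 *
 *	struct drm_panthor_set_user_mmio_offset mmio_offset = {
 *		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &mmio_offset);
 */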

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx id.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */