master
   1/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
   2 *
   3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
   4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
   5 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
   6 * Copyright 2014 Advanced Micro Devices, Inc.
   7 *
   8 * Permission is hereby granted, free of charge, to any person obtaining a
   9 * copy of this software and associated documentation files (the "Software"),
  10 * to deal in the Software without restriction, including without limitation
  11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12 * and/or sell copies of the Software, and to permit persons to whom the
  13 * Software is furnished to do so, subject to the following conditions:
  14 *
  15 * The above copyright notice and this permission notice shall be included in
  16 * all copies or substantial portions of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  24 * OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 * Authors:
  27 *    Kevin E. Martin <martin@valinux.com>
  28 *    Gareth Hughes <gareth@valinux.com>
  29 *    Keith Whitwell <keith@tungstengraphics.com>
  30 */
  31
  32#ifndef __AMDGPU_DRM_H__
  33#define __AMDGPU_DRM_H__
  34
  35#include "drm.h"
  36
  37#if defined(__cplusplus)
  38extern "C" {
  39#endif
  40
/* AMDGPU-specific ioctl numbers, relative to DRM_COMMAND_BASE.
 * These values are uAPI: existing numbers must never be renumbered.
 * Note the numbering is hexadecimal and jumps from 0x09 to 0x10
 * (0x0a-0x0f are unused).
 */
#define DRM_AMDGPU_GEM_CREATE		0x00
#define DRM_AMDGPU_GEM_MMAP		0x01
#define DRM_AMDGPU_CTX			0x02
#define DRM_AMDGPU_BO_LIST		0x03
#define DRM_AMDGPU_CS			0x04
#define DRM_AMDGPU_INFO			0x05
#define DRM_AMDGPU_GEM_METADATA		0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE	0x07
#define DRM_AMDGPU_GEM_VA		0x08
#define DRM_AMDGPU_WAIT_CS		0x09
#define DRM_AMDGPU_GEM_OP		0x10
#define DRM_AMDGPU_GEM_USERPTR		0x11
#define DRM_AMDGPU_WAIT_FENCES		0x12
#define DRM_AMDGPU_VM			0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE	0x14
#define DRM_AMDGPU_SCHED		0x15
#define DRM_AMDGPU_USERQ		0x16
#define DRM_AMDGPU_USERQ_SIGNAL		0x17
#define DRM_AMDGPU_USERQ_WAIT		0x18
  60
  61#define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
  62#define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
  63#define DRM_IOCTL_AMDGPU_CTX		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
  64#define DRM_IOCTL_AMDGPU_BO_LIST	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
  65#define DRM_IOCTL_AMDGPU_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
  66#define DRM_IOCTL_AMDGPU_INFO		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
  67#define DRM_IOCTL_AMDGPU_GEM_METADATA	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
  68#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
  69#define DRM_IOCTL_AMDGPU_GEM_VA		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
  70#define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
  71#define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
  72#define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
  73#define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
  74#define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
  75#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
  76#define DRM_IOCTL_AMDGPU_SCHED		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
  77#define DRM_IOCTL_AMDGPU_USERQ		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
  78#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
  79#define DRM_IOCTL_AMDGPU_USERQ_WAIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
  80
  81/**
  82 * DOC: memory domains
  83 *
  84 * %AMDGPU_GEM_DOMAIN_CPU	System memory that is not GPU accessible.
  85 * Memory in this pool could be swapped out to disk if there is pressure.
  86 *
  87 * %AMDGPU_GEM_DOMAIN_GTT	GPU accessible system memory, mapped into the
 * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
 * pages of system memory, allowing the GPU to access system memory in a
 * linearized fashion.
  91 *
  92 * %AMDGPU_GEM_DOMAIN_VRAM	Local video memory. For APUs, it is memory
  93 * carved out by the BIOS.
  94 *
  95 * %AMDGPU_GEM_DOMAIN_GDS	Global on-chip data storage used to share data
  96 * across shader threads.
  97 *
  98 * %AMDGPU_GEM_DOMAIN_GWS	Global wave sync, used to synchronize the
  99 * execution of all the waves on a device.
 100 *
 101 * %AMDGPU_GEM_DOMAIN_OA	Ordered append, used by 3D or Compute engines
 102 * for appending data.
 103 *
 104 * %AMDGPU_GEM_DOMAIN_DOORBELL	Doorbell. It is an MMIO region for
 105 * signalling user mode queues.
 106 */
 107#define AMDGPU_GEM_DOMAIN_CPU		0x1
 108#define AMDGPU_GEM_DOMAIN_GTT		0x2
 109#define AMDGPU_GEM_DOMAIN_VRAM		0x4
 110#define AMDGPU_GEM_DOMAIN_GDS		0x8
 111#define AMDGPU_GEM_DOMAIN_GWS		0x10
 112#define AMDGPU_GEM_DOMAIN_OA		0x20
 113#define AMDGPU_GEM_DOMAIN_DOORBELL	0x40
 114#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
 115					 AMDGPU_GEM_DOMAIN_GTT | \
 116					 AMDGPU_GEM_DOMAIN_VRAM | \
 117					 AMDGPU_GEM_DOMAIN_GDS | \
 118					 AMDGPU_GEM_DOMAIN_GWS | \
 119					 AMDGPU_GEM_DOMAIN_OA | \
 120					 AMDGPU_GEM_DOMAIN_DOORBELL)
 121
 122/* Flag that CPU access will be required for the case of VRAM domain */
 123#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
 124/* Flag that CPU access will not work, this VRAM domain is invisible */
 125#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
 126/* Flag that USWC attributes should be used for GTT */
 127#define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
 128/* Flag that the memory should be in VRAM and cleared */
 129#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
 130/* Flag that allocating the BO should use linear VRAM */
 131#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
 132/* Flag that BO is always valid in this VM */
 133#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 134/* Flag that BO sharing will be explicitly synchronized */
 135#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
 136/* Flag that indicates allocating MQD gart on GFX9, where the mtype
 137 * for the second page onward should be set to NC. It should never
 138 * be used by user space applications.
 139 */
 140#define AMDGPU_GEM_CREATE_CP_MQD_GFX9		(1 << 8)
 141/* Flag that BO may contain sensitive data that must be wiped before
 142 * releasing the memory
 143 */
 144#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE	(1 << 9)
 145/* Flag that BO will be encrypted and that the TMZ bit should be
 146 * set in the PTEs when mapping this buffer via GPUVM or
 147 * accessing it with various hw blocks
 148 */
 149#define AMDGPU_GEM_CREATE_ENCRYPTED		(1 << 10)
 150/* Flag that BO will be used only in preemptible context, which does
 151 * not require GTT memory accounting
 152 */
 153#define AMDGPU_GEM_CREATE_PREEMPTIBLE		(1 << 11)
 154/* Flag that BO can be discarded under memory pressure without keeping the
 155 * content.
 156 */
 157#define AMDGPU_GEM_CREATE_DISCARDABLE		(1 << 12)
 158/* Flag that BO is shared coherently between multiple devices or CPU threads.
 159 * May depend on GPU instructions to flush caches to system scope explicitly.
 160 *
 161 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 162 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 163 */
 164#define AMDGPU_GEM_CREATE_COHERENT		(1 << 13)
 165/* Flag that BO should not be cached by GPU. Coherent without having to flush
 166 * GPU caches explicitly
 167 *
 168 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 169 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 170 */
 171#define AMDGPU_GEM_CREATE_UNCACHED		(1 << 14)
 172/* Flag that BO should be coherent across devices when using device-level
 173 * atomics. May depend on GPU instructions to flush caches to device scope
 174 * explicitly, promoting them to system scope automatically.
 175 *
 176 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 177 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 178 */
 179#define AMDGPU_GEM_CREATE_EXT_COHERENT		(1 << 15)
 180/* Set PTE.D and recompress during GTT->VRAM moves according to TILING flags. */
 181#define AMDGPU_GEM_CREATE_GFX12_DCC		(1 << 16)
 182
/** Input for DRM_IOCTL_AMDGPU_GEM_CREATE: describes the BO to allocate. */
struct drm_amdgpu_gem_create_in  {
	/** the requested memory size */
	__u64 bo_size;
	/** physical start_addr alignment in bytes for some HW requirements */
	__u64 alignment;
	/** the requested memory domains (AMDGPU_GEM_DOMAIN_*) */
	__u64 domains;
	/** allocation flags (AMDGPU_GEM_CREATE_*) */
	__u64 domain_flags;
};

/** Output for DRM_IOCTL_AMDGPU_GEM_CREATE. */
struct drm_amdgpu_gem_create_out  {
	/** returned GEM object handle */
	__u32 handle;
	/** structure padding, keeps the union 8-byte aligned */
	__u32 _pad;
};

/** In/out container for DRM_IOCTL_AMDGPU_GEM_CREATE. */
union drm_amdgpu_gem_create {
	struct drm_amdgpu_gem_create_in		in;
	struct drm_amdgpu_gem_create_out	out;
};
 204
 205/** Opcode to create new residency list.  */
 206#define AMDGPU_BO_LIST_OP_CREATE	0
 207/** Opcode to destroy previously created residency list */
 208#define AMDGPU_BO_LIST_OP_DESTROY	1
 209/** Opcode to update resource information in the list */
 210#define AMDGPU_BO_LIST_OP_UPDATE	2
 211
/** Input for DRM_IOCTL_AMDGPU_BO_LIST: create/destroy/update a residency list. */
struct drm_amdgpu_bo_list_in {
	/** Type of operation (AMDGPU_BO_LIST_OP_*) */
	__u32 operation;
	/** Handle of list or 0 if we want to create one */
	__u32 list_handle;
	/** Number of BOs in list  */
	__u32 bo_number;
	/** Size of each element describing BO, allows the array format to grow */
	__u32 bo_info_size;
	/** Pointer to user array of bo_number elements, each bo_info_size bytes
	 * (see struct drm_amdgpu_bo_list_entry) */
	__u64 bo_info_ptr;
};

/** One element of the user array referenced by bo_info_ptr. */
struct drm_amdgpu_bo_list_entry {
	/** Handle of BO */
	__u32 bo_handle;
	/** New (if specified) BO priority to be used during migration */
	__u32 bo_priority;
};

/** Output for DRM_IOCTL_AMDGPU_BO_LIST. */
struct drm_amdgpu_bo_list_out {
	/** Handle of resource list  */
	__u32 list_handle;
	/** structure padding */
	__u32 _pad;
};

/** In/out container for DRM_IOCTL_AMDGPU_BO_LIST. */
union drm_amdgpu_bo_list {
	struct drm_amdgpu_bo_list_in in;
	struct drm_amdgpu_bo_list_out out;
};
 242
 243/* context related */
 244#define AMDGPU_CTX_OP_ALLOC_CTX	1
 245#define AMDGPU_CTX_OP_FREE_CTX	2
 246#define AMDGPU_CTX_OP_QUERY_STATE	3
 247#define AMDGPU_CTX_OP_QUERY_STATE2	4
 248#define AMDGPU_CTX_OP_GET_STABLE_PSTATE	5
 249#define AMDGPU_CTX_OP_SET_STABLE_PSTATE	6
 250
 251/* GPU reset status */
 252#define AMDGPU_CTX_NO_RESET		0
/* this context caused the reset */
 254#define AMDGPU_CTX_GUILTY_RESET		1
 255/* some other context caused it */
 256#define AMDGPU_CTX_INNOCENT_RESET	2
 257/* unknown cause */
 258#define AMDGPU_CTX_UNKNOWN_RESET	3
 259
 260/* indicate gpu reset occurred after ctx created */
 261#define AMDGPU_CTX_QUERY2_FLAGS_RESET    (1<<0)
 262/* indicate vram lost occurred after ctx created */
 263#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
 264/* indicate some job from this context once cause gpu hang */
 265#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY   (1<<2)
 266/* indicate some errors are detected by RAS */
 267#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE   (1<<3)
 268#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE   (1<<4)
 269/* indicate that the reset hasn't completed yet */
 270#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
 271
 272/* Context priority level */
 273#define AMDGPU_CTX_PRIORITY_UNSET       -2048
 274#define AMDGPU_CTX_PRIORITY_VERY_LOW    -1023
 275#define AMDGPU_CTX_PRIORITY_LOW         -512
 276#define AMDGPU_CTX_PRIORITY_NORMAL      0
/*
 * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
 * CAP_SYS_NICE or DRM_MASTER
 */
 281#define AMDGPU_CTX_PRIORITY_HIGH        512
 282#define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023
 283
 284/* select a stable profiling pstate for perfmon tools */
 285#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK  0xf
 286#define AMDGPU_CTX_STABLE_PSTATE_NONE  0
 287#define AMDGPU_CTX_STABLE_PSTATE_STANDARD  1
 288#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK  2
 289#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK  3
 290#define AMDGPU_CTX_STABLE_PSTATE_PEAK  4
 291
/** Input for DRM_IOCTL_AMDGPU_CTX: context alloc/free/query operations. */
struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32	op;
	/** Flags; meaning depends on op (e.g. AMDGPU_CTX_STABLE_PSTATE_* for
	 * the stable-pstate ops) */
	__u32	flags;
	/** Context handle the operation acts on */
	__u32	ctx_id;
	/** AMDGPU_CTX_PRIORITY_* */
	__s32	priority;
};

/** Output for DRM_IOCTL_AMDGPU_CTX; which member is valid depends on op. */
union drm_amdgpu_ctx_out {
		/** Result of AMDGPU_CTX_OP_ALLOC_CTX */
		struct {
			__u32	ctx_id;
			__u32	_pad;
		} alloc;

		/** Result of AMDGPU_CTX_OP_QUERY_STATE / QUERY_STATE2 */
		struct {
			/** For future use, no flags defined so far */
			__u64	flags;
			/** Number of resets caused by this context so far. */
			__u32	hangs;
			/** Reset status since the last call of the ioctl. */
			__u32	reset_status;
		} state;

		/** Result of AMDGPU_CTX_OP_GET_STABLE_PSTATE */
		struct {
			__u32	flags;
			__u32	_pad;
		} pstate;
};

/** In/out container for DRM_IOCTL_AMDGPU_CTX. */
union drm_amdgpu_ctx {
	struct drm_amdgpu_ctx_in in;
	union drm_amdgpu_ctx_out out;
};
 327
 328/* user queue IOCTL operations */
 329#define AMDGPU_USERQ_OP_CREATE	1
 330#define AMDGPU_USERQ_OP_FREE	2
 331
 332/* queue priority levels */
 333/* low < normal low < normal high < high */
 334#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK  0x3
 335#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
 336#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
 337#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
 338#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
 339#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
 340/* for queues that need access to protected content */
 341#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE  (1 << 2)
 342
/*
 * This structure is a container to pass input configuration
 * info for all supported userqueue related operations.
 * For operation AMDGPU_USERQ_OP_CREATE: user is expected
 *  to set all fields, except the parameter 'queue_id'.
 * For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
 *  to be set is 'queue_id', everything else is ignored.
 */
struct drm_amdgpu_userq_in {
	/** AMDGPU_USERQ_OP_* */
	__u32	op;
	/** Queue id passed for operation USERQ_OP_FREE */
	__u32	queue_id;
	/** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
	__u32   ip_type;
	/**
	 * @doorbell_handle: the handle of doorbell GEM object
	 * associated with this userqueue client.
	 */
	__u32   doorbell_handle;
	/**
	 * @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
	 * Kernel will generate absolute doorbell offset using doorbell_handle
	 * and doorbell_offset in the doorbell bo.
	 */
	__u32   doorbell_offset;
	/**
	 * @flags: flags used for queue parameters
	 * (AMDGPU_USERQ_CREATE_FLAGS_*)
	 */
	__u32 flags;
	/**
	 * @queue_va: Virtual address of the GPU memory which holds the queue
	 * object. The queue holds the workload packets.
	 */
	__u64   queue_va;
	/**
	 * @queue_size: Size of the queue in bytes, this needs to be 256-byte
	 * aligned.
	 */
	__u64   queue_size;
	/**
	 * @rptr_va : Virtual address of the GPU memory which holds the ring RPTR.
	 * This object must be at least 8 byte in size and aligned to 8-byte offset.
	 */
	__u64   rptr_va;
	/**
	 * @wptr_va : Virtual address of the GPU memory which holds the ring WPTR.
	 * This object must be at least 8 byte in size and aligned to 8-byte offset.
	 *
	 * Queue, RPTR and WPTR can come from the same object, as long as the size
	 * and alignment related requirements are met.
	 */
	__u64   wptr_va;
	/**
	 * @mqd: MQD (memory queue descriptor) is a set of parameters which allow
	 * the GPU to uniquely define and identify a usermode queue.
	 *
	 * MQD data can be of different size for different GPU IP/engine and
	 * their respective versions/revisions, so this points to a __u64 *
	 * which holds IP specific MQD of this usermode queue.
	 */
	__u64 mqd;
	/**
	 * @size: size of MQD data in bytes, it must match the MQD structure
	 * size of the respective engine/revision defined in UAPI for ex, for
	 * gfx11 workloads, size = sizeof(drm_amdgpu_userq_mqd_gfx11).
	 */
	__u64 mqd_size;
};

/* The structure to carry output of userqueue ops */
struct drm_amdgpu_userq_out {
	/**
	 * For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
	 * queue ID to represent the newly created userqueue in the system, otherwise
	 * it should be ignored.
	 */
	__u32	queue_id;
	/** structure padding */
	__u32 _pad;
};

/** In/out container for DRM_IOCTL_AMDGPU_USERQ. */
union drm_amdgpu_userq {
	struct drm_amdgpu_userq_in in;
	struct drm_amdgpu_userq_out out;
};
 428
 429/* GFX V11 IP specific MQD parameters */
/* GFX V11 IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_gfx11 {
	/**
	 * @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
	 * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
	 */
	__u64   shadow_va;
	/**
	 * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
	 * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
	 */
	__u64   csa_va;
};

/* GFX V11 SDMA IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_sdma_gfx11 {
	/**
	 * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
	 * This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
	 * to get the size.
	 */
	__u64   csa_va;
};

/* GFX V11 Compute IP specific MQD parameters */
struct drm_amdgpu_userq_mqd_compute_gfx11 {
	/**
	 * @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
	 * This must be from a separate GPU object, and use AMDGPU_INFO IOCTL
	 * to get the size.
	 */
	__u64   eop_va;
};
 462
 463/* userq signal/wait ioctl */
/* userq signal/wait ioctl */
struct drm_amdgpu_userq_signal {
	/**
	 * @queue_id: Queue handle used by the userq fence creation function
	 * to retrieve the WPTR.
	 */
	__u32	queue_id;
	/** @pad: structure padding */
	__u32	pad;
	/**
	 * @syncobj_handles: The list of syncobj handles submitted by the user queue
	 * job to be signaled.
	 */
	__u64	syncobj_handles;
	/**
	 * @num_syncobj_handles: A count that represents the number of syncobj handles in
	 * @syncobj_handles.
	 */
	__u64	num_syncobj_handles;
	/**
	 * @bo_read_handles: The list of BO handles that the submitted user queue job
	 * is using for read only. This will update BO fences in the kernel.
	 */
	__u64	bo_read_handles;
	/**
	 * @bo_write_handles: The list of BO handles that the submitted user queue job
	 * is using for write only. This will update BO fences in the kernel.
	 */
	__u64	bo_write_handles;
	/**
	 * @num_bo_read_handles: A count that represents the number of read BO handles in
	 * @bo_read_handles.
	 */
	__u32	num_bo_read_handles;
	/**
	 * @num_bo_write_handles: A count that represents the number of write BO handles in
	 * @bo_write_handles.
	 */
	__u32	num_bo_write_handles;
};
 502
/** One address/value pair returned by DRM_IOCTL_AMDGPU_USERQ_WAIT. */
struct drm_amdgpu_userq_fence_info {
	/**
	 * @va: A gpu address allocated for each queue which stores the
	 * read pointer (RPTR) value.
	 */
	__u64	va;
	/**
	 * @value: A 64 bit value represents the write pointer (WPTR) of the
	 * queue commands which compared with the RPTR value to signal the
	 * fences.
	 */
	__u64	value;
};
 516
/** In/out parameters for DRM_IOCTL_AMDGPU_USERQ_WAIT. */
struct drm_amdgpu_userq_wait {
	/**
	 * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
	 * wait queue and maintain the fence driver references in it.
	 */
	__u32	waitq_id;
	/** @pad: structure padding */
	__u32	pad;
	/**
	 * @syncobj_handles: The list of syncobj handles submitted by the user queue
	 * job to get the va/value pairs.
	 */
	__u64	syncobj_handles;
	/**
	 * @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
	 * the user queue job to get the va/value pairs at given @syncobj_timeline_points.
	 */
	__u64	syncobj_timeline_handles;
	/**
	 * @syncobj_timeline_points: The list of timeline syncobj points submitted by the
	 * user queue job for the corresponding @syncobj_timeline_handles.
	 */
	__u64	syncobj_timeline_points;
	/**
	 * @bo_read_handles: The list of read BO handles submitted by the user queue
	 * job to get the va/value pairs.
	 */
	__u64	bo_read_handles;
	/**
	 * @bo_write_handles: The list of write BO handles submitted by the user queue
	 * job to get the va/value pairs.
	 */
	__u64	bo_write_handles;
	/**
	 * @num_syncobj_timeline_handles: A count that represents the number of timeline
	 * syncobj handles in @syncobj_timeline_handles.
	 */
	__u16	num_syncobj_timeline_handles;
	/**
	 * @num_fences: This field can be used both as input and output. As input it defines
	 * the maximum number of fences that can be returned and as output it will specify
	 * how many fences were actually returned from the ioctl.
	 */
	__u16	num_fences;
	/**
	 * @num_syncobj_handles: A count that represents the number of syncobj handles in
	 * @syncobj_handles.
	 */
	__u32	num_syncobj_handles;
	/**
	 * @num_bo_read_handles: A count that represents the number of read BO handles in
	 * @bo_read_handles.
	 */
	__u32	num_bo_read_handles;
	/**
	 * @num_bo_write_handles: A count that represents the number of write BO handles in
	 * @bo_write_handles.
	 */
	__u32	num_bo_write_handles;
	/**
	 * @out_fences: The field is a return value from the ioctl containing the list of
	 * address/value pairs to wait for (struct drm_amdgpu_userq_fence_info).
	 */
	__u64	out_fences;
};
 581
 582/* vm ioctl */
 583#define AMDGPU_VM_OP_RESERVE_VMID	1
 584#define AMDGPU_VM_OP_UNRESERVE_VMID	2
 585
/** Input for DRM_IOCTL_AMDGPU_VM: reserve or unreserve a VMID. */
struct drm_amdgpu_vm_in {
	/** AMDGPU_VM_OP_* */
	__u32	op;
	/** For future use, no flags defined so far */
	__u32	flags;
};

/** Output for DRM_IOCTL_AMDGPU_VM. */
struct drm_amdgpu_vm_out {
	/** For future use, no flags defined so far */
	__u64	flags;
};

/** In/out container for DRM_IOCTL_AMDGPU_VM. */
union drm_amdgpu_vm {
	struct drm_amdgpu_vm_in in;
	struct drm_amdgpu_vm_out out;
};
 601
 602/* sched ioctl */
 603#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE	1
 604#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE	2
 605
/** Input for DRM_IOCTL_AMDGPU_SCHED: override process/context priority. */
struct drm_amdgpu_sched_in {
	/* AMDGPU_SCHED_OP_* */
	__u32	op;
	/** File descriptor of the DRM file whose priority is overridden */
	__u32	fd;
	/** AMDGPU_CTX_PRIORITY_* */
	__s32	priority;
	/** Context handle, used by AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE */
	__u32   ctx_id;
};

/** Input-only container for DRM_IOCTL_AMDGPU_SCHED (no output). */
union drm_amdgpu_sched {
	struct drm_amdgpu_sched_in in;
};
 618
 619/*
 620 * This is not a reliable API and you should expect it to fail for any
 621 * number of reasons and have fallback path that do not use userptr to
 622 * perform any operation.
 623 */
 624#define AMDGPU_GEM_USERPTR_READONLY	(1 << 0)
 625#define AMDGPU_GEM_USERPTR_ANONONLY	(1 << 1)
 626#define AMDGPU_GEM_USERPTR_VALIDATE	(1 << 2)
 627#define AMDGPU_GEM_USERPTR_REGISTER	(1 << 3)
 628
/** In/out parameters for DRM_IOCTL_AMDGPU_GEM_USERPTR: wrap a user memory
 * range in a GEM object. */
struct drm_amdgpu_gem_userptr {
	/** Start address of the user memory range */
	__u64		addr;
	/** Size of the range in bytes */
	__u64		size;
	/* AMDGPU_GEM_USERPTR_* */
	__u32		flags;
	/* Resulting GEM handle */
	__u32		handle;
};
 637
 638/* SI-CI-VI: */
 639/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
 640#define AMDGPU_TILING_ARRAY_MODE_SHIFT			0
 641#define AMDGPU_TILING_ARRAY_MODE_MASK			0xf
 642#define AMDGPU_TILING_PIPE_CONFIG_SHIFT			4
 643#define AMDGPU_TILING_PIPE_CONFIG_MASK			0x1f
 644#define AMDGPU_TILING_TILE_SPLIT_SHIFT			9
 645#define AMDGPU_TILING_TILE_SPLIT_MASK			0x7
 646#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT		12
 647#define AMDGPU_TILING_MICRO_TILE_MODE_MASK		0x7
 648#define AMDGPU_TILING_BANK_WIDTH_SHIFT			15
 649#define AMDGPU_TILING_BANK_WIDTH_MASK			0x3
 650#define AMDGPU_TILING_BANK_HEIGHT_SHIFT			17
 651#define AMDGPU_TILING_BANK_HEIGHT_MASK			0x3
 652#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT		19
 653#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK		0x3
 654#define AMDGPU_TILING_NUM_BANKS_SHIFT			21
 655#define AMDGPU_TILING_NUM_BANKS_MASK			0x3
 656
 657/* GFX9 - GFX11: */
 658#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
 659#define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f
 660#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT		5
 661#define AMDGPU_TILING_DCC_OFFSET_256B_MASK		0xFFFFFF
 662#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT		29
 663#define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
 664#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
 665#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1
 666#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT	44
 667#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK		0x1
 668#define AMDGPU_TILING_SCANOUT_SHIFT			63
 669#define AMDGPU_TILING_SCANOUT_MASK			0x1
 670
 671/* GFX12 and later: */
 672#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT			0
 673#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK			0x7
 674/* These are DCC recompression settings for memory management: */
 675#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT	3
 676#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK	0x3 /* 0:64B, 1:128B, 2:256B */
 677#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT		5
 678#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK		0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
 679#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT		8
 680#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK		0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
 681/* When clearing the buffer or moving it from VRAM to GTT, don't compress and set DCC metadata
 682 * to uncompressed. Set when parts of an allocation bypass DCC and read raw data. */
 683#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT	14
 684#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK	0x1
 685/* bit gap */
 686#define AMDGPU_TILING_GFX12_SCANOUT_SHIFT			63
 687#define AMDGPU_TILING_GFX12_SCANOUT_MASK			0x1
 688
 689/* Set/Get helpers for tiling flags. */
/* Pack @value into the AMDGPU_TILING_<field> bit-field of a 64-bit tiling
 * flags word; out-of-range bits of @value are masked off before shifting. */
#define AMDGPU_TILING_SET(field, value) \
	(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
/* Extract the AMDGPU_TILING_<field> bit-field from tiling flags word @value. */
#define AMDGPU_TILING_GET(value, field) \
	(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
 694
 695#define AMDGPU_GEM_METADATA_OP_SET_METADATA                  1
 696#define AMDGPU_GEM_METADATA_OP_GET_METADATA                  2
 697
 698/** The same structure is shared for input/output */
/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
	/** GEM Object handle */
	__u32	handle;
	/** Do we want get or set metadata (AMDGPU_GEM_METADATA_OP_*) */
	__u32	op;
	struct {
		/** For future use, no flags defined so far */
		__u64	flags;
		/** family specific tiling info (AMDGPU_TILING_*) */
		__u64	tiling_info;
		/** Number of bytes of data[] actually used */
		__u32	data_size_bytes;
		/** Opaque metadata blob */
		__u32	data[64];
	} data;
};
 713
/** Input for DRM_IOCTL_AMDGPU_GEM_MMAP. */
struct drm_amdgpu_gem_mmap_in {
	/** the GEM object handle */
	__u32 handle;
	/** structure padding */
	__u32 _pad;
};

/** Output for DRM_IOCTL_AMDGPU_GEM_MMAP. */
struct drm_amdgpu_gem_mmap_out {
	/** mmap offset from the vma offset manager */
	__u64 addr_ptr;
};

/** In/out container for DRM_IOCTL_AMDGPU_GEM_MMAP. */
union drm_amdgpu_gem_mmap {
	struct drm_amdgpu_gem_mmap_in   in;
	struct drm_amdgpu_gem_mmap_out out;
};
 729
/** Input for DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE. */
struct drm_amdgpu_gem_wait_idle_in {
	/** GEM object handle */
	__u32 handle;
	/** For future use, no flags defined so far */
	__u32 flags;
	/** Absolute timeout to wait */
	__u64 timeout;
};

/** Output for DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE. */
struct drm_amdgpu_gem_wait_idle_out {
	/** BO status:  0 - BO is idle, 1 - BO is busy */
	__u32 status;
	/** Returned current memory domain */
	__u32 domain;
};

/** In/out container for DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE. */
union drm_amdgpu_gem_wait_idle {
	struct drm_amdgpu_gem_wait_idle_in  in;
	struct drm_amdgpu_gem_wait_idle_out out;
};
 750
/** Input for DRM_IOCTL_AMDGPU_WAIT_CS: wait for a command submission. */
struct drm_amdgpu_wait_cs_in {
	/* Command submission handle
	 * handle equals 0 means none to wait for
	 * handle equals ~0ull means wait for the latest sequence number
	 */
	__u64 handle;
	/** Absolute timeout to wait */
	__u64 timeout;
	/** Ring identification: engine (AMDGPU_HW_IP_*), instance and ring */
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	/** Context the submission belongs to */
	__u32 ctx_id;
};

/** Output for DRM_IOCTL_AMDGPU_WAIT_CS. */
struct drm_amdgpu_wait_cs_out {
	/** CS status:  0 - CS completed, 1 - CS still busy */
	__u64 status;
};

/** In/out container for DRM_IOCTL_AMDGPU_WAIT_CS. */
union drm_amdgpu_wait_cs {
	struct drm_amdgpu_wait_cs_in in;
	struct drm_amdgpu_wait_cs_out out;
};
 774
/** Identifies a single fence: context, ring and sequence number. */
struct drm_amdgpu_fence {
	/** Context the fence belongs to */
	__u32 ctx_id;
	/** Ring identification: engine (AMDGPU_HW_IP_*), instance and ring */
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	/** Sequence number of the fence on that ring */
	__u64 seq_no;
};
 782
/** Input for DRM_IOCTL_AMDGPU_WAIT_FENCES: wait for one or all fences. */
struct drm_amdgpu_wait_fences_in {
	/** This points to uint64_t * which points to fences */
	__u64 fences;
	/** Number of fences in the array behind @fences */
	__u32 fence_count;
	/** Non-zero: wait for all fences; zero: wait for any fence */
	__u32 wait_all;
	/** Timeout in nanoseconds */
	__u64 timeout_ns;
};

/** Output for DRM_IOCTL_AMDGPU_WAIT_FENCES. */
struct drm_amdgpu_wait_fences_out {
	/** Wait result status */
	__u32 status;
	/** Index of the first signaled fence (for wait_all == 0) */
	__u32 first_signaled;
};

/** In/out container for DRM_IOCTL_AMDGPU_WAIT_FENCES. */
union drm_amdgpu_wait_fences {
	struct drm_amdgpu_wait_fences_in in;
	struct drm_amdgpu_wait_fences_out out;
};
 800
 801#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 802#define AMDGPU_GEM_OP_SET_PLACEMENT		1
 803
 804/* Sets or returns a value associated with a buffer. */
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
	/** GEM object handle */
	__u32	handle;
	/** AMDGPU_GEM_OP_* */
	__u32	op;
	/** Input or return value; direction depends on op */
	__u64	value;
};
 813
 814#define AMDGPU_VA_OP_MAP			1
 815#define AMDGPU_VA_OP_UNMAP			2
 816#define AMDGPU_VA_OP_CLEAR			3
 817#define AMDGPU_VA_OP_REPLACE			4
 818
 819/* Delay the page table update till the next CS */
 820#define AMDGPU_VM_DELAY_UPDATE		(1 << 0)
 821
 822/* Mapping flags */
 823/* readable mapping */
 824#define AMDGPU_VM_PAGE_READABLE		(1 << 1)
 825/* writable mapping */
 826#define AMDGPU_VM_PAGE_WRITEABLE	(1 << 2)
 827/* executable mapping, new for VI */
 828#define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)
 829/* partially resident texture */
 830#define AMDGPU_VM_PAGE_PRT		(1 << 4)
 831/* MTYPE flags use bit 5 to 8 */
 832#define AMDGPU_VM_MTYPE_MASK		(0xf << 5)
 833/* Default MTYPE. Pre-AI must use this.  Recommended for newer ASICs. */
 834#define AMDGPU_VM_MTYPE_DEFAULT		(0 << 5)
 835/* Use Non Coherent MTYPE instead of default MTYPE */
 836#define AMDGPU_VM_MTYPE_NC		(1 << 5)
 837/* Use Write Combine MTYPE instead of default MTYPE */
 838#define AMDGPU_VM_MTYPE_WC		(2 << 5)
 839/* Use Cache Coherent MTYPE instead of default MTYPE */
 840#define AMDGPU_VM_MTYPE_CC		(3 << 5)
 841/* Use UnCached MTYPE instead of default MTYPE */
 842#define AMDGPU_VM_MTYPE_UC		(4 << 5)
 843/* Use Read Write MTYPE instead of default MTYPE */
 844#define AMDGPU_VM_MTYPE_RW		(5 << 5)
 845/* don't allocate MALL */
 846#define AMDGPU_VM_PAGE_NOALLOC		(1 << 9)
 847
/** In/out parameters for DRM_IOCTL_AMDGPU_GEM_VA: map/unmap a BO in the
 * GPU virtual address space. */
struct drm_amdgpu_gem_va {
	/** GEM object handle */
	__u32 handle;
	/** structure padding */
	__u32 _pad;
	/** AMDGPU_VA_OP_* */
	__u32 operation;
	/** AMDGPU_VM_PAGE_* */
	__u32 flags;
	/** va address to assign. Must be correctly aligned. */
	__u64 va_address;
	/** Specify offset inside of BO to assign. Must be correctly aligned.*/
	__u64 offset_in_bo;
	/** Specify mapping size. Must be correctly aligned. */
	__u64 map_size;
	/**
	 * vm_timeline_point is a sequence number used to add new timeline point.
	 */
	__u64 vm_timeline_point;
	/**
	 * The vm page table update fence is installed in given vm_timeline_syncobj_out
	 * at vm_timeline_point.
	 */
	__u32 vm_timeline_syncobj_out;
	/** the number of syncobj handles in @input_fence_syncobj_handles */
	__u32 num_syncobj_handles;
	/** Array of sync object handle to wait for given input fences */
	__u64 input_fence_syncobj_handles;
};
 876
 877#define AMDGPU_HW_IP_GFX          0
 878#define AMDGPU_HW_IP_COMPUTE      1
 879#define AMDGPU_HW_IP_DMA          2
 880#define AMDGPU_HW_IP_UVD          3
 881#define AMDGPU_HW_IP_VCE          4
 882#define AMDGPU_HW_IP_UVD_ENC      5
 883#define AMDGPU_HW_IP_VCN_DEC      6
 884/*
 885 * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
 886 * both encoding and decoding jobs.
 887 */
 888#define AMDGPU_HW_IP_VCN_ENC      7
 889#define AMDGPU_HW_IP_VCN_JPEG     8
 890#define AMDGPU_HW_IP_VPE          9
 891#define AMDGPU_HW_IP_NUM          10
 892
 893#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
 894
 895#define AMDGPU_CHUNK_ID_IB		0x01
 896#define AMDGPU_CHUNK_ID_FENCE		0x02
 897#define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
 898#define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 899#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
 900#define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 901#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
 902#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x08
 903#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x09
 904#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW   0x0a
 905
/* One chunk of a command submission; @chunk_data points to a
 * chunk-id-specific payload (e.g. drm_amdgpu_cs_chunk_ib).
 */
struct drm_amdgpu_cs_chunk {
	/** AMDGPU_CHUNK_ID_* */
	__u32		chunk_id;
	/** Payload length in dwords */
	__u32		length_dw;
	/** Userspace pointer to the chunk payload */
	__u64		chunk_data;
};
 911
/* Input half of the CS (command submission) ioctl. */
struct drm_amdgpu_cs_in {
	/** Rendering context id */
	__u32		ctx_id;
	/**  Handle of resource list associated with CS */
	__u32		bo_list_handle;
	/** Number of entries in the chunk pointer array below */
	__u32		num_chunks;
	__u32		flags;
	/** this points to __u64 * which point to cs chunks */
	__u64		chunks;
};
 922
/* Output half of the CS ioctl: sequence handle of the submission. */
struct drm_amdgpu_cs_out {
	__u64 handle;
};
 926
/* Argument of the CS ioctl; @in on entry, @out on return. */
union drm_amdgpu_cs {
	struct drm_amdgpu_cs_in in;
	struct drm_amdgpu_cs_out out;
};
 931
 932/* Specify flags to be used for IB */
 933
 934/* This IB should be submitted to CE */
 935#define AMDGPU_IB_FLAG_CE	(1<<0)
 936
 937/* Preamble flag, which means the IB could be dropped if no context switch */
 938#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
 939
 940/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
 941#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
 942
 943/* The IB fence should do the L2 writeback but not invalidate any shader
 944 * caches (L2/vL1/sL1/I$). */
 945#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
 946
 947/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 948 * This will reset wave ID counters for the IB.
 949 */
 950#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
 951
 952/* Flag the IB as secure (TMZ)
 953 */
 954#define AMDGPU_IB_FLAGS_SECURE  (1 << 5)
 955
 956/* Tell KMD to flush and invalidate caches
 957 */
 958#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC  (1 << 6)
 959
/* Payload for AMDGPU_CHUNK_ID_IB: describes one indirect buffer. */
struct drm_amdgpu_cs_chunk_ib {
	/** Padding, must be zero */
	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */
	__u32 flags;
	/** Virtual address to begin IB execution */
	__u64 va_start;
	/** Size of submission */
	__u32 ib_bytes;
	/** HW IP to submit to */
	__u32 ip_type;
	/** HW IP index of the same type to submit to  */
	__u32 ip_instance;
	/** Ring index to submit to */
	__u32 ring;
};
 975
/* Payload for AMDGPU_CHUNK_ID_DEPENDENCIES: identifies a previous
 * submission (by context and sequence handle) this CS must wait on.
 */
struct drm_amdgpu_cs_chunk_dep {
	/** AMDGPU_HW_IP_* of the dependency */
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	/** Context the dependency was submitted in */
	__u32 ctx_id;
	/** Sequence handle returned by the dependency's CS ioctl */
	__u64 handle;
};
 983
/* Payload for AMDGPU_CHUNK_ID_FENCE: BO handle and offset for the fence. */
struct drm_amdgpu_cs_chunk_fence {
	/** GEM object handle */
	__u32 handle;
	/** Offset into the BO */
	__u32 offset;
};
 988
/* Payload entry for AMDGPU_CHUNK_ID_SYNCOBJ_IN/OUT: one syncobj handle. */
struct drm_amdgpu_cs_chunk_sem {
	__u32 handle;
};
 992
/* Payload entry for AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT/SIGNAL:
 * syncobj handle plus timeline point.
 */
struct drm_amdgpu_cs_chunk_syncobj {
       __u32 handle;
       __u32 flags;
       __u64 point;
};
 998
 999#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
1000#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
1001#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2
1002
/* Converts a CS fence into a syncobj handle, syncobj FD, or sync_file FD,
 * selected by AMDGPU_FENCE_TO_HANDLE_GET_* in @in.what.
 */
union drm_amdgpu_fence_to_handle {
	struct {
		/** Fence to convert */
		struct drm_amdgpu_fence fence;
		/** AMDGPU_FENCE_TO_HANDLE_GET_* */
		__u32 what;
		__u32 pad;
	} in;
	struct {
		/** Resulting handle or file descriptor */
		__u32 handle;
	} out;
};
1013
/* Convenience union over the IB and fence chunk payload types. */
struct drm_amdgpu_cs_chunk_data {
	union {
		struct drm_amdgpu_cs_chunk_ib		ib_data;
		struct drm_amdgpu_cs_chunk_fence	fence_data;
	};
};
1020
1021#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW         0x1
1022
/* Payload for AMDGPU_CHUNK_ID_CP_GFX_SHADOW: virtual addresses of the
 * CP GFX shadow areas.
 */
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
	/** Shadow area VA */
	__u64 shadow_va;
	/** Context save area VA */
	__u64 csa_va;
	/** GDS backup area VA */
	__u64 gds_va;
	/** AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_* */
	__u64 flags;
};
1029
/*
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 */
1034#define AMDGPU_IDS_FLAGS_FUSION         0x1
1035#define AMDGPU_IDS_FLAGS_PREEMPTION     0x2
1036#define AMDGPU_IDS_FLAGS_TMZ            0x4
1037#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
1038
/*
 * Query h/w info: Flag identifying VF/PF/PT mode
 */
1043#define AMDGPU_IDS_FLAGS_MODE_MASK      0x300
1044#define AMDGPU_IDS_FLAGS_MODE_SHIFT     0x8
1045#define AMDGPU_IDS_FLAGS_MODE_PF        0x0
1046#define AMDGPU_IDS_FLAGS_MODE_VF        0x1
1047#define AMDGPU_IDS_FLAGS_MODE_PT        0x2
1048
1049/* indicate if acceleration can be working */
1050#define AMDGPU_INFO_ACCEL_WORKING		0x00
1051/* get the crtc_id from the mode object id? */
1052#define AMDGPU_INFO_CRTC_FROM_ID		0x01
1053/* query hw IP info */
1054#define AMDGPU_INFO_HW_IP_INFO			0x02
1055/* query hw IP instance count for the specified type */
1056#define AMDGPU_INFO_HW_IP_COUNT			0x03
1057/* timestamp for GL_ARB_timer_query */
1058#define AMDGPU_INFO_TIMESTAMP			0x05
1059/* Query the firmware version */
1060#define AMDGPU_INFO_FW_VERSION			0x0e
1061	/* Subquery id: Query VCE firmware version */
1062	#define AMDGPU_INFO_FW_VCE		0x1
1063	/* Subquery id: Query UVD firmware version */
1064	#define AMDGPU_INFO_FW_UVD		0x2
1065	/* Subquery id: Query GMC firmware version */
1066	#define AMDGPU_INFO_FW_GMC		0x03
1067	/* Subquery id: Query GFX ME firmware version */
1068	#define AMDGPU_INFO_FW_GFX_ME		0x04
1069	/* Subquery id: Query GFX PFP firmware version */
1070	#define AMDGPU_INFO_FW_GFX_PFP		0x05
1071	/* Subquery id: Query GFX CE firmware version */
1072	#define AMDGPU_INFO_FW_GFX_CE		0x06
1073	/* Subquery id: Query GFX RLC firmware version */
1074	#define AMDGPU_INFO_FW_GFX_RLC		0x07
1075	/* Subquery id: Query GFX MEC firmware version */
1076	#define AMDGPU_INFO_FW_GFX_MEC		0x08
1077	/* Subquery id: Query SMC firmware version */
1078	#define AMDGPU_INFO_FW_SMC		0x0a
1079	/* Subquery id: Query SDMA firmware version */
1080	#define AMDGPU_INFO_FW_SDMA		0x0b
1081	/* Subquery id: Query PSP SOS firmware version */
1082	#define AMDGPU_INFO_FW_SOS		0x0c
1083	/* Subquery id: Query PSP ASD firmware version */
1084	#define AMDGPU_INFO_FW_ASD		0x0d
1085	/* Subquery id: Query VCN firmware version */
1086	#define AMDGPU_INFO_FW_VCN		0x0e
1087	/* Subquery id: Query GFX RLC SRLC firmware version */
1088	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
1089	/* Subquery id: Query GFX RLC SRLG firmware version */
1090	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
1091	/* Subquery id: Query GFX RLC SRLS firmware version */
1092	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
1093	/* Subquery id: Query DMCU firmware version */
1094	#define AMDGPU_INFO_FW_DMCU		0x12
1095	#define AMDGPU_INFO_FW_TA		0x13
1096	/* Subquery id: Query DMCUB firmware version */
1097	#define AMDGPU_INFO_FW_DMCUB		0x14
1098	/* Subquery id: Query TOC firmware version */
1099	#define AMDGPU_INFO_FW_TOC		0x15
1100	/* Subquery id: Query CAP firmware version */
1101	#define AMDGPU_INFO_FW_CAP		0x16
1102	/* Subquery id: Query GFX RLCP firmware version */
1103	#define AMDGPU_INFO_FW_GFX_RLCP		0x17
1104	/* Subquery id: Query GFX RLCV firmware version */
1105	#define AMDGPU_INFO_FW_GFX_RLCV		0x18
1106	/* Subquery id: Query MES_KIQ firmware version */
1107	#define AMDGPU_INFO_FW_MES_KIQ		0x19
1108	/* Subquery id: Query MES firmware version */
1109	#define AMDGPU_INFO_FW_MES		0x1a
1110	/* Subquery id: Query IMU firmware version */
1111	#define AMDGPU_INFO_FW_IMU		0x1b
1112	/* Subquery id: Query VPE firmware version */
1113	#define AMDGPU_INFO_FW_VPE		0x1c
1114
1115/* number of bytes moved for TTM migration */
1116#define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
1117/* the used VRAM size */
1118#define AMDGPU_INFO_VRAM_USAGE			0x10
1119/* the used GTT size */
1120#define AMDGPU_INFO_GTT_USAGE			0x11
1121/* Information about GDS, etc. resource configuration */
1122#define AMDGPU_INFO_GDS_CONFIG			0x13
1123/* Query information about VRAM and GTT domains */
1124#define AMDGPU_INFO_VRAM_GTT			0x14
1125/* Query information about register in MMR address space*/
1126#define AMDGPU_INFO_READ_MMR_REG		0x15
1127/* Query information about device: rev id, family, etc. */
1128#define AMDGPU_INFO_DEV_INFO			0x16
1129/* visible vram usage */
1130#define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
1131/* number of TTM buffer evictions */
1132#define AMDGPU_INFO_NUM_EVICTIONS		0x18
1133/* Query memory about VRAM and GTT domains */
1134#define AMDGPU_INFO_MEMORY			0x19
1135/* Query vce clock table */
1136#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
1137/* Query vbios related information */
1138#define AMDGPU_INFO_VBIOS			0x1B
1139	/* Subquery id: Query vbios size */
1140	#define AMDGPU_INFO_VBIOS_SIZE		0x1
1141	/* Subquery id: Query vbios image */
1142	#define AMDGPU_INFO_VBIOS_IMAGE		0x2
1143	/* Subquery id: Query vbios info */
1144	#define AMDGPU_INFO_VBIOS_INFO		0x3
1145/* Query UVD handles */
1146#define AMDGPU_INFO_NUM_HANDLES			0x1C
1147/* Query sensor related information */
1148#define AMDGPU_INFO_SENSOR			0x1D
1149	/* Subquery id: Query GPU shader clock */
1150	#define AMDGPU_INFO_SENSOR_GFX_SCLK		0x1
1151	/* Subquery id: Query GPU memory clock */
1152	#define AMDGPU_INFO_SENSOR_GFX_MCLK		0x2
1153	/* Subquery id: Query GPU temperature */
1154	#define AMDGPU_INFO_SENSOR_GPU_TEMP		0x3
1155	/* Subquery id: Query GPU load */
1156	#define AMDGPU_INFO_SENSOR_GPU_LOAD		0x4
1157	/* Subquery id: Query average GPU power	*/
1158	#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER	0x5
1159	/* Subquery id: Query northbridge voltage */
1160	#define AMDGPU_INFO_SENSOR_VDDNB		0x6
1161	/* Subquery id: Query graphics voltage */
1162	#define AMDGPU_INFO_SENSOR_VDDGFX		0x7
1163	/* Subquery id: Query GPU stable pstate shader clock */
1164	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK		0x8
1165	/* Subquery id: Query GPU stable pstate memory clock */
1166	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK		0x9
1167	/* Subquery id: Query GPU peak pstate shader clock */
1168	#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK			0xa
1169	/* Subquery id: Query GPU peak pstate memory clock */
1170	#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK			0xb
1171	/* Subquery id: Query input GPU power	*/
1172	#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER	0xc
1173/* Number of VRAM page faults on CPU access. */
1174#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS	0x1E
1175#define AMDGPU_INFO_VRAM_LOST_COUNTER		0x1F
1176/* query ras mask of enabled features*/
1177#define AMDGPU_INFO_RAS_ENABLED_FEATURES	0x20
1178/* RAS MASK: UMC (VRAM) */
1179#define AMDGPU_INFO_RAS_ENABLED_UMC			(1 << 0)
1180/* RAS MASK: SDMA */
1181#define AMDGPU_INFO_RAS_ENABLED_SDMA			(1 << 1)
1182/* RAS MASK: GFX */
1183#define AMDGPU_INFO_RAS_ENABLED_GFX			(1 << 2)
1184/* RAS MASK: MMHUB */
1185#define AMDGPU_INFO_RAS_ENABLED_MMHUB			(1 << 3)
1186/* RAS MASK: ATHUB */
1187#define AMDGPU_INFO_RAS_ENABLED_ATHUB			(1 << 4)
1188/* RAS MASK: PCIE */
1189#define AMDGPU_INFO_RAS_ENABLED_PCIE			(1 << 5)
1190/* RAS MASK: HDP */
1191#define AMDGPU_INFO_RAS_ENABLED_HDP			(1 << 6)
1192/* RAS MASK: XGMI */
1193#define AMDGPU_INFO_RAS_ENABLED_XGMI			(1 << 7)
1194/* RAS MASK: DF */
1195#define AMDGPU_INFO_RAS_ENABLED_DF			(1 << 8)
1196/* RAS MASK: SMN */
1197#define AMDGPU_INFO_RAS_ENABLED_SMN			(1 << 9)
1198/* RAS MASK: SEM */
1199#define AMDGPU_INFO_RAS_ENABLED_SEM			(1 << 10)
1200/* RAS MASK: MP0 */
1201#define AMDGPU_INFO_RAS_ENABLED_MP0			(1 << 11)
1202/* RAS MASK: MP1 */
1203#define AMDGPU_INFO_RAS_ENABLED_MP1			(1 << 12)
1204/* RAS MASK: FUSE */
1205#define AMDGPU_INFO_RAS_ENABLED_FUSE			(1 << 13)
1206/* query video encode/decode caps */
1207#define AMDGPU_INFO_VIDEO_CAPS			0x21
1208	/* Subquery id: Decode */
1209	#define AMDGPU_INFO_VIDEO_CAPS_DECODE		0
1210	/* Subquery id: Encode */
1211	#define AMDGPU_INFO_VIDEO_CAPS_ENCODE		1
1212/* Query the max number of IBs per gang per submission */
1213#define AMDGPU_INFO_MAX_IBS			0x22
1214/* query last page fault info */
1215#define AMDGPU_INFO_GPUVM_FAULT			0x23
1216/* query FW object size and alignment */
1217#define AMDGPU_INFO_UQ_FW_AREAS			0x24
1218
1219#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
1220#define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
1221#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT	8
1222#define AMDGPU_INFO_MMR_SH_INDEX_MASK	0xff
1223
/* Sub-argument of AMDGPU_INFO_FW_VERSION: selects which firmware to query. */
struct drm_amdgpu_query_fw {
	/** AMDGPU_INFO_FW_* */
	__u32 fw_type;
	/**
	 * Index of the IP if there are more IPs of
	 * the same type.
	 */
	__u32 ip_instance;
	/**
	 * Index of the engine. Whether this is used depends
	 * on the firmware type. (e.g. MEC, SDMA)
	 */
	__u32 index;
	/** Padding, must be zero */
	__u32 _pad;
};
1239
/* Input structure for the INFO ioctl. The union member that applies is
 * selected by @query (AMDGPU_INFO_*); results are written to the
 * user-supplied buffer at @return_pointer.
 */
struct drm_amdgpu_info {
	/* Where the return value will be stored */
	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
	__u32 return_size;
	/* The query request id. */
	__u32 query;

	union {
		struct {
			/** CRTC mode object id */
			__u32 id;
			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
			__u32 ip_instance;
		} query_hw_ip;

		struct {
			/** Register offset in dwords */
			__u32 dword_offset;
			/** number of registers to read */
			__u32 count;
			__u32 instance;
			/** For future use, no flags defined so far */
			__u32 flags;
		} read_mmr_reg;

		/** For AMDGPU_INFO_FW_VERSION */
		struct drm_amdgpu_query_fw query_fw;

		struct {
			/** AMDGPU_INFO_VBIOS_* subquery */
			__u32 type;
			/** Byte offset into the vbios image */
			__u32 offset;
		} vbios_info;

		struct {
			/** AMDGPU_INFO_SENSOR_* subquery */
			__u32 type;
		} sensor_info;

		struct {
			/** AMDGPU_INFO_VIDEO_CAPS_* subquery */
			__u32 type;
		} video_cap;
	};
};
1291
/* Result of AMDGPU_INFO_GDS_CONFIG: GDS/GWS/OA resource sizes. */
struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
	__u32 compute_partition_size;
	/** total GDS memory size */
	__u32 gds_total_size;
	/** GWS size per GFX partition */
	__u32 gws_per_gfx_partition;
	/** GWS size per compute partition */
	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
	__u32 oa_per_compute_partition;
	/** Padding, must be zero */
	__u32 _pad;
};
1309
/* Result of AMDGPU_INFO_VRAM_GTT: sizes of the memory domains in bytes. */
struct drm_amdgpu_info_vram_gtt {
	__u64 vram_size;
	__u64 vram_cpu_accessible_size;
	__u64 gtt_size;
};
1315
/* Per-heap usage information, reported via AMDGPU_INFO_MEMORY. */
struct drm_amdgpu_heap_info {
	/** max. physical memory */
	__u64 total_heap_size;

	/** Theoretical max. available memory in the given heap */
	__u64 usable_heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * total_heap_size.
	 */
	__u64 heap_usage;

	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	__u64 max_allocation;
};
1337
/* Result of AMDGPU_INFO_MEMORY: usage info for all three memory heaps. */
struct drm_amdgpu_memory_info {
	struct drm_amdgpu_heap_info vram;
	struct drm_amdgpu_heap_info cpu_accessible_vram;
	struct drm_amdgpu_heap_info gtt;
};
1343
/* Result of AMDGPU_INFO_FW_VERSION: firmware version and feature level. */
struct drm_amdgpu_info_firmware {
	/** Firmware version */
	__u32 ver;
	/** Firmware feature version */
	__u32 feature;
};
1348
/* Result of the AMDGPU_INFO_VBIOS_INFO subquery. */
struct drm_amdgpu_info_vbios {
	/** Video BIOS name */
	__u8 name[64];
	/** Video BIOS part number */
	__u8 vbios_pn[64];
	__u32 version;
	/** Padding, must be zero */
	__u32 pad;
	/** Human-readable version string */
	__u8 vbios_ver_str[32];
	/** Build date string */
	__u8 date[32];
};
1357
1358#define AMDGPU_VRAM_TYPE_UNKNOWN 0
1359#define AMDGPU_VRAM_TYPE_GDDR1 1
1360#define AMDGPU_VRAM_TYPE_DDR2  2
1361#define AMDGPU_VRAM_TYPE_GDDR3 3
1362#define AMDGPU_VRAM_TYPE_GDDR4 4
1363#define AMDGPU_VRAM_TYPE_GDDR5 5
1364#define AMDGPU_VRAM_TYPE_HBM   6
1365#define AMDGPU_VRAM_TYPE_DDR3  7
1366#define AMDGPU_VRAM_TYPE_DDR4  8
1367#define AMDGPU_VRAM_TYPE_GDDR6 9
1368#define AMDGPU_VRAM_TYPE_DDR5  10
1369#define AMDGPU_VRAM_TYPE_LPDDR4 11
1370#define AMDGPU_VRAM_TYPE_LPDDR5 12
1371#define AMDGPU_VRAM_TYPE_HBM3E 13
1372
/* Result of AMDGPU_INFO_DEV_INFO: static device properties. */
struct drm_amdgpu_info_device {
	/** PCI Device ID */
	__u32 device_id;
	/** Internal chip revision: A0, A1, etc. */
	__u32 chip_rev;
	__u32 external_rev;
	/** Revision id in PCI Config space */
	__u32 pci_rev;
	/** AMDGPU_FAMILY_* */
	__u32 family;
	__u32 num_shader_engines;
	__u32 num_shader_arrays_per_engine;
	/* in KHz */
	__u32 gpu_counter_freq;
	__u64 max_engine_clock;
	__u64 max_memory_clock;
	/* cu information */
	__u32 cu_active_number;
	/* NOTE: cu_ao_mask is INVALID, DON'T use it */
	__u32 cu_ao_mask;
	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
	__u32 enabled_rb_pipes_mask;
	__u32 num_rb_pipes;
	__u32 num_hw_gfx_contexts;
	/* PCIe version (the smaller of the GPU and the CPU/motherboard) */
	__u32 pcie_gen;
	/** AMDGPU_IDS_FLAGS_* */
	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
	__u64 virtual_address_offset;
	/** The maximum virtual address */
	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
	__u32 pte_fragment_size;
	__u32 gart_page_size;
	/** constant engine ram size*/
	__u32 ce_ram_size;
	/** video memory type info (AMDGPU_VRAM_TYPE_*) */
	__u32 vram_type;
	/** video memory bit width*/
	__u32 vram_bit_width;
	/* vce harvesting instance */
	__u32 vce_harvest_config;
	/* gfx double offchip LDS buffers */
	__u32 gc_double_offchip_lds_buf;
	/* NGG Primitive Buffer */
	__u64 prim_buf_gpu_addr;
	/* NGG Position Buffer */
	__u64 pos_buf_gpu_addr;
	/* NGG Control Sideband */
	__u64 cntl_sb_buf_gpu_addr;
	/* NGG Parameter Cache */
	__u64 param_buf_gpu_addr;
	__u32 prim_buf_size;
	__u32 pos_buf_size;
	__u32 cntl_sb_buf_size;
	__u32 param_buf_size;
	/* wavefront size*/
	__u32 wave_front_size;
	/* shader visible vgprs*/
	__u32 num_shader_visible_vgprs;
	/* CU per shader array*/
	__u32 num_cu_per_sh;
	/* number of tcc blocks*/
	__u32 num_tcc_blocks;
	/* gs vgt table depth*/
	__u32 gs_vgt_table_depth;
	/* gs primitive buffer depth*/
	__u32 gs_prim_buffer_depth;
	/* max gs wavefront per vgt*/
	__u32 max_gs_waves_per_vgt;
	/* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
	__u32 pcie_num_lanes;
	/* always on cu bitmap */
	__u32 cu_ao_bitmap[4][4];
	/** Starting high virtual address for UMDs. */
	__u64 high_va_offset;
	/** The maximum high virtual address */
	__u64 high_va_max;
	/* gfx10 pa_sc_tile_steering_override */
	__u32 pa_sc_tile_steering_override;
	/* disabled TCCs */
	__u64 tcc_disabled_mask;
	__u64 min_engine_clock;
	__u64 min_memory_clock;
	/* The following fields are only set on gfx11+, older chips set 0. */
	__u32 tcp_cache_size;       /* AKA GL0, VMEM cache */
	__u32 num_sqc_per_wgp;
	__u32 sqc_data_cache_size;  /* AKA SMEM cache */
	__u32 sqc_inst_cache_size;
	__u32 gl1c_cache_size;
	__u32 gl2c_cache_size;
	__u64 mall_size;            /* AKA infinity cache */
	/* high 32 bits of the rb pipes mask */
	__u32 enabled_rb_pipes_mask_hi;
	/* shadow area size for gfx11 */
	__u32 shadow_size;
	/* shadow area base virtual alignment for gfx11 */
	__u32 shadow_alignment;
	/* context save area size for gfx11 */
	__u32 csa_size;
	/* context save area base virtual alignment for gfx11 */
	__u32 csa_alignment;
	/* Userq IP mask (1 << AMDGPU_HW_IP_*) */
	__u32 userq_ip_mask;
	/* Padding, must be zero */
	__u32 pad;
};
1481
/* Result of AMDGPU_INFO_HW_IP_INFO: per-IP-block capabilities. */
struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
	__u32  hw_ip_version_major;
	__u32  hw_ip_version_minor;
	/** Capabilities */
	__u64  capabilities_flags;
	/** command buffer address start alignment*/
	__u32  ib_start_alignment;
	/** command buffer size alignment*/
	__u32  ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
	__u32  available_rings;
	/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
	__u32  ip_discovery_version;
	/* Userq available slots */
	__u32  userq_num_slots;
};
1499
/* GFX metadata BO sizes and alignment info (in bytes) */
struct drm_amdgpu_info_uq_fw_areas_gfx {
	/* shadow area size */
	__u32 shadow_size;
	/* shadow area base virtual mem alignment */
	__u32 shadow_alignment;
	/* context save area size */
	__u32 csa_size;
	/* context save area base virtual mem alignment */
	__u32 csa_alignment;
};
1511
/* IP specific fw related information used in the
 * subquery AMDGPU_INFO_UQ_FW_AREAS
 */
struct drm_amdgpu_info_uq_fw_areas {
	union {
		/* GFX IP firmware areas */
		struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
	};
};
1520
/* Result of AMDGPU_INFO_NUM_HANDLES: UVD session handle usage. */
struct drm_amdgpu_info_num_handles {
	/** Max handles as supported by firmware for UVD */
	__u32  uvd_max_handles;
	/** Handles currently in use for UVD */
	__u32  uvd_used_handles;
};
1527
1528#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES		6
1529
/* One entry of the VCE clock table (AMDGPU_INFO_VCE_CLOCK_TABLE). */
struct drm_amdgpu_info_vce_clock_table_entry {
	/** System clock */
	__u32 sclk;
	/** Memory clock */
	__u32 mclk;
	/** VCE clock */
	__u32 eclk;
	/** Padding, must be zero */
	__u32 pad;
};
1539
/* Result of AMDGPU_INFO_VCE_CLOCK_TABLE; only the first
 * @num_valid_entries of @entries are meaningful.
 */
struct drm_amdgpu_info_vce_clock_table {
	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
	__u32 num_valid_entries;
	/** Padding, must be zero */
	__u32 pad;
};
1545
1546/* query video encode/decode caps */
1547#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2			0
1548#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4			1
1549#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1			2
1550#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC		3
1551#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC			4
1552#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG			5
1553#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9			6
1554#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1			7
1555#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT			8
1556
/* Per-codec capabilities for AMDGPU_INFO_VIDEO_CAPS. */
struct drm_amdgpu_info_video_codec_info {
	/** Non-zero if this codec is supported */
	__u32 valid;
	__u32 max_width;
	__u32 max_height;
	__u32 max_pixels_per_frame;
	__u32 max_level;
	/** Padding, must be zero */
	__u32 pad;
};
1565
/* Result of AMDGPU_INFO_VIDEO_CAPS, indexed by
 * AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_*.
 */
struct drm_amdgpu_info_video_caps {
	struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
1569
1570#define AMDGPU_VMHUB_TYPE_MASK			0xff
1571#define AMDGPU_VMHUB_TYPE_SHIFT			0
1572#define AMDGPU_VMHUB_TYPE_GFX			0
1573#define AMDGPU_VMHUB_TYPE_MM0			1
1574#define AMDGPU_VMHUB_TYPE_MM1			2
1575#define AMDGPU_VMHUB_IDX_MASK			0xff00
1576#define AMDGPU_VMHUB_IDX_SHIFT			8
1577
/* Result of AMDGPU_INFO_GPUVM_FAULT: last GPU page-fault information. */
struct drm_amdgpu_info_gpuvm_fault {
	/** Faulting address */
	__u64 addr;
	/** Fault status register value */
	__u32 status;
	/** VM hub the fault occurred on (AMDGPU_VMHUB_TYPE_*/IDX fields) */
	__u32 vmhub;
};
1583
/* GFX user-queue metadata sizes and alignments (in bytes). */
struct drm_amdgpu_info_uq_metadata_gfx {
	/* shadow area size for gfx11 */
	__u32 shadow_size;
	/* shadow area base virtual alignment for gfx11 */
	__u32 shadow_alignment;
	/* context save area size for gfx11 */
	__u32 csa_size;
	/* context save area base virtual alignment for gfx11 */
	__u32 csa_alignment;
};
1594
/* Per-IP user-queue metadata container. */
struct drm_amdgpu_info_uq_metadata {
	union {
		/* GFX IP user-queue metadata */
		struct drm_amdgpu_info_uq_metadata_gfx gfx;
	};
};
1600
1601/*
1602 * Supported GPU families
1603 */
1604#define AMDGPU_FAMILY_UNKNOWN			0
1605#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
1606#define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
1607#define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
1608#define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
1609#define AMDGPU_FAMILY_CZ			135 /* Carrizo, Stoney */
1610#define AMDGPU_FAMILY_AI			141 /* Vega10 */
1611#define AMDGPU_FAMILY_RV			142 /* Raven */
1612#define AMDGPU_FAMILY_NV			143 /* Navi10 */
1613#define AMDGPU_FAMILY_VGH			144 /* Van Gogh */
1614#define AMDGPU_FAMILY_GC_11_0_0			145 /* GC 11.0.0 */
1615#define AMDGPU_FAMILY_YC			146 /* Yellow Carp */
1616#define AMDGPU_FAMILY_GC_11_0_1			148 /* GC 11.0.1 */
1617#define AMDGPU_FAMILY_GC_10_3_6			149 /* GC 10.3.6 */
1618#define AMDGPU_FAMILY_GC_10_3_7			151 /* GC 10.3.7 */
1619#define AMDGPU_FAMILY_GC_11_5_0			150 /* GC 11.5.0 */
1620#define AMDGPU_FAMILY_GC_12_0_0			152 /* GC 12.0.0 */
1621
/* FIXME wrong namespace! (drm_color_* rather than amdgpu_*) */
struct drm_color_ctm_3x4 {
	/*
	 * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
	 * (not two's complement!) format. Stored in row-major order.
	 */
	__u64 matrix[12];
};
1630
1631#if defined(__cplusplus)
1632}
1633#endif
1634
1635#endif