/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

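/*
 * A minimal sketch of consuming the uevents above with libudev (the use of
 * libudev is an assumption about the consumer environment; any netlink
 * uevent listener works). The property key is the macro value, e.g. "ERROR"
 * carries "1" on error detection and "0" on reset completion. A real
 * consumer would poll() on udev_monitor_get_fd() rather than spin:
 *
 * .. code-block:: C
 *
 *	#include <libudev.h>
 *	#include <stdio.h>
 *
 *	static void watch_i915_uevents(void)
 *	{
 *		struct udev *udev = udev_new();
 *		struct udev_monitor *mon =
 *			udev_monitor_new_from_netlink(udev, "udev");
 *
 *		udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *		udev_monitor_enable_receiving(mon);
 *		for (;;) {
 *			struct udev_device *dev = udev_monitor_receive_device(mon);
 *			const char *val;
 *
 *			if (!dev)
 *				continue;
 *			val = udev_device_get_property_value(dev, I915_ERROR_UEVENT);
 *			if (val)
 *				printf("i915 error uevent: %s\n", val);
 *			udev_device_unref(dev);
 *		}
 *	}
 */
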
/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/**
 * enum drm_i915_gem_engine_class - uapi engine type enumeration
 *
 * Different engines serve different roles, and there may be more than one
 * engine serving each role.  This enum provides a classification of the role
 * of the engine, which may be used when requesting operations to be performed
 * on a certain subset of engines, or for providing information about that
 * group.
 */
enum drm_i915_gem_engine_class {
	/**
	 * @I915_ENGINE_CLASS_RENDER:
	 *
	 * Render engines support instructions used for 3D, Compute (GPGPU),
	 * and programmable media workloads.  These instructions fetch data and
	 * dispatch individual work items to threads that operate in parallel.
	 * The threads run small programs (called "kernels" or "shaders") on
	 * the GPU's execution units (EUs).
	 */
	I915_ENGINE_CLASS_RENDER	= 0,

	/**
	 * @I915_ENGINE_CLASS_COPY:
	 *
	 * Copy engines (also referred to as "blitters") support instructions
	 * that move blocks of data from one location in memory to another,
	 * or that fill a specified location of memory with fixed data.
	 * Copy engines can perform pre-defined logical or bitwise operations
	 * on the source, destination, or pattern data.
	 */
	I915_ENGINE_CLASS_COPY		= 1,

	/**
	 * @I915_ENGINE_CLASS_VIDEO:
	 *
	 * Video engines (also referred to as "bit stream decode" (BSD) or
	 * "vdbox") support instructions that perform fixed-function media
	 * decode and encode.
	 */
	I915_ENGINE_CLASS_VIDEO		= 2,

	/**
	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
	 *
	 * Video enhancement engines (also referred to as "vebox") support
	 * instructions related to image enhancement.
	 */
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/**
	 * @I915_ENGINE_CLASS_COMPUTE:
	 *
	 * Compute engines support a subset of the instructions available
	 * on render engines:  compute engines support Compute (GPGPU) and
	 * programmable media workloads, but do not support the 3D pipeline.
	 */
	I915_ENGINE_CLASS_COMPUTE	= 4,

	/* Values in this enum should be kept compact. */

	/**
	 * @I915_ENGINE_CLASS_INVALID:
	 *
	 * Placeholder value to represent an invalid engine class assignment.
	 */
	I915_ENGINE_CLASS_INVALID	= -1
};

/**
 * struct i915_engine_class_instance - Engine class/instance identifier
 *
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuplet. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	/**
	 * @engine_class:
	 *
	 * Engine class from enum drm_i915_gem_engine_class
	 */
	__u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2

	/**
	 * @engine_instance:
	 *
	 * Engine instance.
	 */
	__u16 engine_instance;
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

/*
 * Top 4 bits of every non-engine counter are GT id.
 */
#define __I915_PMU_GT_SHIFT (60)

#define ___I915_PMU_OTHER(gt, x) \
	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
	((__u64)(gt) << __I915_PMU_GT_SHIFT))

#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY

#define __I915_PMU_ACTUAL_FREQUENCY(gt)		___I915_PMU_OTHER(gt, 0)
#define __I915_PMU_REQUESTED_FREQUENCY(gt)	___I915_PMU_OTHER(gt, 1)
#define __I915_PMU_INTERRUPTS(gt)		___I915_PMU_OTHER(gt, 2)
#define __I915_PMU_RC6_RESIDENCY(gt)		___I915_PMU_OTHER(gt, 3)
#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___I915_PMU_OTHER(gt, 4)

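/*
 * A minimal sketch of reading one of the counters above through the perf
 * interface. The dynamic PMU type number is read from sysfs; the path and
 * error handling are illustrative, not part of this uAPI:
 *
 * .. code-block:: C
 *
 *	#include <linux/perf_event.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int i915_perf_fd(uint64_t config)
 *	{
 *		struct perf_event_attr attr;
 *		int type = -1;
 *		FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%d", &type) != 1)
 *			type = -1;
 *		fclose(f);
 *		if (type < 0)
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type = type;
 *		attr.size = sizeof(attr);
 *		attr.config = config;
 *
 *		// i915 events are system-wide: pid = -1, cpu = 0
 *		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	}
 *
 *	// e.g. busyness of the first render engine (a monotonic value in ns):
 *	//	int fd = i915_perf_fd(
 *	//		I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
 *	//	uint64_t val;
 *	//	read(fd, &val, sizeof(val));
 */
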
/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device-specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be within [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC     	 	 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT     	 	 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask, nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)

/*
 * Query the status of HuC load.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *  -ENODEV if HuC is not present on this platform,
 *  -EOPNOTSUPP if HuC firmware usage is disabled,
 *  -ENOPKG if HuC firmware fetch failed,
 *  -ENOEXEC if HuC firmware is invalid or mismatched,
 *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
 *  -EIO if the FW transfer or the FW authentication failed.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of the
 * following values:
 *  * 0 if HuC firmware load is not complete,
 *  * 1 if HuC firmware is loaded and fully authenticated,
 *  * 2 if HuC firmware is loaded and authenticated for clear media only
 */
#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user-specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/*
 * Frequency of the timestamps in OA reports. This used to be the same as the CS
 * timestamp frequency, but differs on some platforms.
 */
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57

/*
 * Query the status of PXP support in i915.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *     -ENODEV = PXP support is not available on the GPU device or in the
 *               kernel due to missing component drivers or kernel configs.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of
 * the following values:
 *     1 = PXP feature is supported and is ready for use.
 *     2 = PXP feature is supported but should be ready soon (pending
 *         initialization of non-i915 system dependencies).
 *
 * NOTE: When param is supported (positive return values), user space should
 *       still refer to the GEM PXP context-creation UAPI header specs to be
 *       aware of possible failure due to system state machine at the time.
 */
#define I915_PARAM_PXP_STATUS		 58

/*
 * Query if kernel allows marking a context to send a Freq hint to SLPC. This
 * will enable use of the strategies allowed by the SLPC algorithm.
 */
#define I915_PARAM_HAS_CONTEXT_FREQ_HINT	59

/* Must be kept compact -- no holes and well documented */

/**
 * struct drm_i915_getparam - Driver parameter query structure.
 */
struct drm_i915_getparam {
	/** @param: Driver parameter to query. */
	__s32 param;

	/**
	 * @value: Address of memory where queried value should be put.
	 *
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int *value;
};

/**
 * typedef drm_i915_getparam_t - Driver parameter query structure.
 * See struct drm_i915_getparam.
 */
typedef struct drm_i915_getparam drm_i915_getparam_t;

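/*
 * A minimal sketch of querying one of the I915_PARAM_* values above
 * (illustrative; "fd" is an open DRM device file descriptor):
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	static int i915_getparam(int fd, int param, int *value)
 *	{
 *		struct drm_i915_getparam gp = {
 *			.param = param,
 *			.value = value,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *	}
 *
 *	// e.g. check for a priority-aware scheduler:
 *	//	int caps = 0;
 *	//	if (i915_getparam(fd, I915_PARAM_HAS_SCHEDULER, &caps) == 0 &&
 *	//	    (caps & I915_SCHEDULER_CAP_PRIORITY))
 *	//		... user priorities are honoured ...
 */
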
/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

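/*
 * A minimal sketch of creating a GEM object ("fd" is an open DRM device
 * file descriptor; error handling elided):
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 *	// on success (ret == 0), create.handle names the new object and
 *	// create.size holds the page-aligned allocated size
 */
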
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

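/*
 * A minimal sketch of copying CPU data into an object via pwrite
 * (illustrative; pread is symmetric, swapping the copy direction):
 *
 * .. code-block:: C
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int bo_pwrite(int fd, uint32_t handle, uint64_t offset,
 *			     const void *data, uint64_t size)
 *	{
 *		struct drm_i915_gem_pwrite pwrite = {
 *			.handle = handle,
 *			.offset = offset,
 *			.size = size,
 *			.data_ptr = (uint64_t)(uintptr_t)data,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *	}
 */
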
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

/**
 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
 *
 * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
 * and is used to retrieve the fake offset to mmap an object specified by &handle.
 *
 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
 */
struct drm_i915_gem_mmap_offset {
	/** @handle: Handle for the object being mapped. */
	__u32 handle;
	/** @pad: Must be zero */
	__u32 pad;
	/**
	 * @offset: The fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * @flags: Flags for extended behaviour.
	 *
	 * One of the `MMAP_OFFSET` types must be included:
	 *
	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
	 *
	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
	 * type. On devices without local memory, this caching mode is invalid.
	 *
	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
	 * be used, depending on the object placement on creation. WB will be used
	 * when the object can only exist in system memory, WC otherwise.
	 */
	__u64 flags;

#define I915_MMAP_OFFSET_GTT	0
#define I915_MMAP_OFFSET_WC	1
#define I915_MMAP_OFFSET_WB	2
#define I915_MMAP_OFFSET_UC	3
#define I915_MMAP_OFFSET_FIXED	4

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; mbz.
	 */
	__u64 extensions;
};

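/*
 * A minimal sketch of the two-step mapping flow: query the fake offset for
 * a handle, then hand it to mmap() on the same DRM fd ("fd", "handle" and
 * "size" are placeholders; error handling elided):
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, mmo.offset);
 */
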
/**
 * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
 * preparation for accessing the pages via some CPU domain.
 *
 * Specifying a new write or read domain will flush the object out of the
 * previous domain (if required), before then updating the object's domain
 * tracking with the new domain.
 *
 * Note this might involve waiting for the object first if it is still active on
 * the GPU.
 *
 * Supported values for @read_domains and @write_domain:
 *
 *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
 *	- I915_GEM_DOMAIN_CPU: CPU cache domain
 *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
 *
 * All other domains are rejected.
 *
 * Note that for discrete, starting from DG1, this is no longer supported, and
 * is instead rejected. On such platforms the CPU domain is effectively static,
 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
 * which can't be set explicitly and instead depends on the object placements,
 * as per the below.
 *
 * Implicit caching rules, starting from DG1:
 *
 *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
 *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
 *	  mapped as write-combined only.
 *
 *	- Everything else is always allocated and mapped as write-back, with the
 *	  guarantee that everything is also coherent with the GPU.
 *
 * Note that this is likely to change in the future again, where we might need
 * more flexibility on future devices, so making this all explicit as part of a
 * new &drm_i915_gem_create_ext extension is probable.
 */
struct drm_i915_gem_set_domain {
	/** @handle: Handle for the object. */
	__u32 handle;

	/** @read_domains: New read domains. */
	__u32 read_domains;

	/**
	 * @write_domain: New write domain.
	 *
	 * Note that having something in the write domain implies it's in the
	 * read domain, and only that read domain.
	 */
	__u32 write_domain;
};

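/*
 * A minimal sketch of moving an object to the CPU domain before writing
 * through a WB CPU mapping ("fd" and "handle" are placeholders):
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 *	// note this may block until the GPU is finished with the object
 */
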
struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 *
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 *
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 *
	 * See struct drm_i915_gem_create_ext for the rules when dealing with
	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
	 * minimum page sizes, like DG2.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE		(1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};

/**
 * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
 * ioctl.
 *
 * The request will wait for input fence to signal before submission.
 *
 * The returned output fence will be signaled after the completion of the
 * request.
 */
struct drm_i915_gem_exec_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_EXEC_FENCE_WAIT:
	 * Wait for the input fence before request submission.
	 *
	 * I915_EXEC_FENCE_SIGNAL:
	 * Return request completion fence as output
	 */
	__u32 flags;
#define I915_EXEC_FENCE_WAIT            (1<<0)
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
};

/**
 * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
 * for execbuf ioctl.
 *
 * This structure describes an array of drm_syncobj and associated points for
 * timeline variants of drm_syncobj. It is invalid to append this structure to
 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
 */
struct drm_i915_gem_execbuffer_ext_timeline_fences {
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/**
	 * @fence_count: Number of elements in the @handles_ptr & @values_ptr
	 * arrays.
	 */
	__u64 fence_count;

	/**
	 * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
	 * of length @fence_count.
	 */
	__u64 handles_ptr;

	/**
	 * @values_ptr: Pointer to an array of u64 values of length
	 * @fence_count.
	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 values_ptr;
};

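/*
 * A minimal sketch of attaching timeline fences to an execbuf through the
 * extension chain. "wait_syncobj"/"signal_syncobj" are pre-created
 * drm_syncobj handles, "wait_point"/"signal_point" are timeline points,
 * and "objects"/"num_objects" describe the execbuf objects; all are
 * placeholders. I915_EXEC_USE_EXTENSIONS is defined later in this header:
 *
 * .. code-block:: C
 *
 *	#include <stdint.h>
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj, .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	__u64 points[2] = { wait_point, signal_point };
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base = {
 *			.next_extension = 0,
 *			.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		},
 *		.fence_count = 2,
 *		.handles_ptr = (uintptr_t)fences,
 *		.values_ptr = (uintptr_t)points,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objects,
 *		.buffer_count = num_objects,
 *		.cliprects_ptr = (uintptr_t)&ext,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_USE_EXTENSIONS,
 *	};
 */
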
/**
 * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
 * ioctl.
 */
struct drm_i915_gem_execbuffer2 {
	/** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @batch_start_offset: Offset in the batchbuffer to start execution
	 * from.
	 */
	__u32 batch_start_offset;

	/**
	 * @batch_len: Length in bytes of the batch buffer, starting from the
	 * @batch_start_offset. If 0, length is assumed to be the batch buffer
	 * object size.
	 */
	__u32 batch_len;

	/** @DR1: deprecated */
	__u32 DR1;

	/** @DR4: deprecated */
	__u32 DR4;

	/** @num_cliprects: See @cliprects_ptr */
	__u32 num_cliprects;

	/**
	 * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
	 *
	 * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
	 * I915_EXEC_USE_EXTENSIONS flags are not set.
	 *
	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
	 * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
	 * array.
	 *
	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
	 * single &i915_user_extension and num_cliprects is 0.
	 */
	__u64 cliprects_ptr;

	/** @flags: Execbuf flags */
	__u64 flags;
#define I915_EXEC_RING_MASK              (0x3f)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
1468 * relocation process may be skipped if no buffers need to be moved in
1469 * preparation for the execbuffer.
1470 */
1471#define I915_EXEC_NO_RELOC		(1<<11)
1472
1473/** Use the reloc.handle as an index into the exec object array rather
1474 * than as the per-file handle.
1475 */
1476#define I915_EXEC_HANDLE_LUT		(1<<12)
1477
1478/** Used for switching BSD rings on the platforms with two BSD rings */
1479#define I915_EXEC_BSD_SHIFT	 (13)
1480#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1481/* default ping-pong mode */
1482#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1483#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1484#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
1485
1486/** Tell the kernel that the batchbuffer is processed by
1487 *  the resource streamer.
1488 */
1489#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1490
1491/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1492 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1493 * the batch.
1494 *
1495 * Returns -EINVAL if the sync_file fd cannot be found.
1496 */
1497#define I915_EXEC_FENCE_IN		(1<<16)
1498
1499/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1500 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
1502 * file descriptor and will be cleaned up on process termination. It holds
1503 * a reference to the request, but nothing else.)
1504 *
 * The sync_file fd can be combined with other sync_file fds and passed either
1506 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1507 * will only occur after this request completes), or to other devices.
1508 *
 * Using I915_EXEC_FENCE_OUT requires use of the
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1511 * back to userspace. Failure to do so will cause the out-fence to always
1512 * be reported as zero, and the real fence fd to be leaked.
1513 */
1514#define I915_EXEC_FENCE_OUT		(1<<17)
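
/*
 * Example: retrieving and consuming an out-fence. A minimal sketch, assuming
 * an otherwise fully populated execbuf (see struct drm_i915_gem_execbuffer2
 * below) and libdrm's drmIoctl():
 *
 * .. code-block:: C
 *
 *	execbuf.flags |= I915_EXEC_FENCE_OUT;
 *
 *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0) {
 *		int out_fence_fd = execbuf.rsvd2 >> 32;
 *
 *		// ... pass to another execbuf as an in-fence, to KMS, or poll() ...
 *		close(out_fence_fd); // the caller owns the fd
 *	}
 */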
1515
1516/*
1517 * Traditionally the execbuf ioctl has only considered the final element in
1518 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
1523 * element).
1524 */
1525#define I915_EXEC_BATCH_FIRST		(1<<18)
1526
/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1528 * define an array of i915_gem_exec_fence structures which specify a set of
1529 * dma fences to wait upon or signal.
1530 */
1531#define I915_EXEC_FENCE_ARRAY   (1<<19)
1532
1533/*
1534 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1535 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1536 * the batch.
1537 *
1538 * Returns -EINVAL if the sync_file fd cannot be found.
1539 */
1540#define I915_EXEC_FENCE_SUBMIT		(1 << 20)
1541
1542/*
1543 * Setting I915_EXEC_USE_EXTENSIONS implies that
 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
 * list of i915_user_extension. Each i915_user_extension node is the base of a
 * larger structure. The supported structures are listed in the
 * drm_i915_gem_execbuffer_ext enum.
1548 */
1549#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
1550#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1551
1552	/** @rsvd1: Context id */
1553	__u64 rsvd1;
1554
1555	/**
1556	 * @rsvd2: in and out sync_file file descriptors.
1557	 *
1558	 * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
1559	 * lower 32 bits of this field will have the in sync_file fd (input).
1560	 *
1561	 * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
1562	 * field will have the out sync_file fd (output).
1563	 */
1564	__u64 rsvd2;
1565};
1566
1567#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1568#define i915_execbuffer2_set_context_id(eb2, context) \
1569	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1570#define i915_execbuffer2_get_context_id(eb2) \
1571	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1572
1573struct drm_i915_gem_pin {
1574	/** Handle of the buffer to be pinned. */
1575	__u32 handle;
1576	__u32 pad;
1577
1578	/** alignment required within the aperture */
1579	__u64 alignment;
1580
1581	/** Returned GTT offset of the buffer. */
1582	__u64 offset;
1583};
1584
1585struct drm_i915_gem_unpin {
1586	/** Handle of the buffer to be unpinned. */
1587	__u32 handle;
1588	__u32 pad;
1589};
1590
1591struct drm_i915_gem_busy {
1592	/** Handle of the buffer to check for busy */
1593	__u32 handle;
1594
1595	/** Return busy status
1596	 *
1597	 * A return of 0 implies that the object is idle (after
1598	 * having flushed any pending activity), and a non-zero return that
1599	 * the object is still in-flight on the GPU. (The GPU has not yet
1600	 * signaled completion for all pending requests that reference the
1601	 * object.) An object is guaranteed to become idle eventually (so
1602	 * long as no new GPU commands are executed upon it). Due to the
1603	 * asynchronous nature of the hardware, an object reported
1604	 * as busy may become idle before the ioctl is completed.
1605	 *
1606	 * Furthermore, if the object is busy, which engine is busy is only
1607	 * provided as a guide and only indirectly by reporting its class
1608	 * (there may be more than one engine in each class). There are race
1609	 * conditions which prevent the report of which engines are busy from
1610	 * being always accurate.  However, the converse is not true. If the
1611	 * object is idle, the result of the ioctl, that all engines are idle,
1612	 * is accurate.
1613	 *
1614	 * The returned dword is split into two fields to indicate both
1615	 * the engine classes on which the object is being read, and the
1616	 * engine class on which it is currently being written (if any).
1617	 *
	 * The low word (bits 0:15) indicates if the object is being written
1619	 * to by any engine (there can only be one, as the GEM implicit
1620	 * synchronisation rules force writes to be serialised). Only the
1621	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1622	 * 1 not 0 etc) for the last write is reported.
1623	 *
	 * The high word (bits 16:31) is a bitmask of the engine classes
	 * currently reading from the object. Multiple engines may be
1626	 * reading from the object simultaneously.
1627	 *
1628	 * The value of each engine class is the same as specified in the
1629	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1630	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1631	 * Some hardware may have parallel execution engines, e.g. multiple
1632	 * media engines, which are mapped to the same class identifier and so
1633	 * are not separately reported for busyness.
1634	 *
1635	 * Caveat emptor:
1636	 * Only the boolean result of this query is reliable; that is whether
1637	 * the object is idle or busy. The report of which engines are busy
1638	 * should be only used as a heuristic.
1639	 */
1640	__u32 busy;
1641};
1642
1643/**
1644 * struct drm_i915_gem_caching - Set or get the caching for given object
1645 * handle.
1646 *
1647 * Allow userspace to control the GTT caching bits for a given object when the
 * object is later mapped through the ppGTT (or GGTT on older platforms lacking
 * ppGTT support, or if the object is used for scanout). Note that this might
 * require unbinding the object from the GTT first, if its current caching value
 * doesn't match.
 *
 * Note that this all changes on discrete platforms: starting from DG1, set/get
 * caching is no longer supported and is rejected. Instead the CPU caching
 * attributes (WB vs WC) become an immutable creation time property for the
 * object, along with the GTT caching level. For now we don't expose any new
 * uAPI for this; instead on DG1 this is all implicit, although this largely
 * shouldn't matter since DG1 is coherent by default (without any way of
 * controlling it).
1660 *
1661 * Implicit caching rules, starting from DG1:
1662 *
1663 *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1664 *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1665 *       mapped as write-combined only.
1666 *
1667 *     - Everything else is always allocated and mapped as write-back, with the
1668 *       guarantee that everything is also coherent with the GPU.
1669 *
1670 * Note that this is likely to change in the future again, where we might need
1671 * more flexibility on future devices, so making this all explicit as part of a
1672 * new &drm_i915_gem_create_ext extension is probable.
1673 *
 * Side note: Part of the reason for this is that changing the at-allocation-time
 * CPU caching attributes for the pages might be required (and is expensive) if
 * we need to then CPU map the pages later with different caching attributes. This
1677 * inconsistent caching behaviour, while supported on x86, is not universally
1678 * supported on other architectures. So for simplicity we opt for setting
1679 * everything at creation time, whilst also making it immutable, on discrete
1680 * platforms.
1681 */
1682struct drm_i915_gem_caching {
1683	/**
1684	 * @handle: Handle of the buffer to set/get the caching level.
1685	 */
1686	__u32 handle;
1687
1688	/**
1689	 * @caching: The GTT caching level to apply or possible return value.
1690	 *
1691	 * The supported @caching values:
1692	 *
1693	 * I915_CACHING_NONE:
1694	 *
1695	 * GPU access is not coherent with CPU caches.  Default for machines
1696	 * without an LLC. This means manual flushing might be needed, if we
1697	 * want GPU access to be coherent.
1698	 *
1699	 * I915_CACHING_CACHED:
1700	 *
1701	 * GPU access is coherent with CPU caches and furthermore the data is
1702	 * cached in last-level caches shared between CPU cores and the GPU GT.
1703	 *
1704	 * I915_CACHING_DISPLAY:
1705	 *
1706	 * Special GPU caching mode which is coherent with the scanout engines.
1707	 * Transparently falls back to I915_CACHING_NONE on platforms where no
1708	 * special cache mode (like write-through or gfdt flushing) is
1709	 * available. The kernel automatically sets this mode when using a
1710	 * buffer as a scanout target.  Userspace can manually set this mode to
1711	 * avoid a costly stall and clflush in the hotpath of drawing the first
1712	 * frame.
1713	 */
1714#define I915_CACHING_NONE		0
1715#define I915_CACHING_CACHED		1
1716#define I915_CACHING_DISPLAY		2
1717	__u32 caching;
1718};
1719
1720#define I915_TILING_NONE	0
1721#define I915_TILING_X		1
1722#define I915_TILING_Y		2
1723/*
1724 * Do not add new tiling types here.  The I915_TILING_* values are for
1725 * de-tiling fence registers that no longer exist on modern platforms.  Although
1726 * the hardware may support new types of tiling in general (e.g., Tile4), we
1727 * do not need to add them to the uapi that is specific to now-defunct ioctls.
1728 */
1729#define I915_TILING_LAST	I915_TILING_Y
1730
1731#define I915_BIT_6_SWIZZLE_NONE		0
1732#define I915_BIT_6_SWIZZLE_9		1
1733#define I915_BIT_6_SWIZZLE_9_10		2
1734#define I915_BIT_6_SWIZZLE_9_11		3
1735#define I915_BIT_6_SWIZZLE_9_10_11	4
1736/* Not seen by userland */
1737#define I915_BIT_6_SWIZZLE_UNKNOWN	5
1738/* Seen by userland. */
1739#define I915_BIT_6_SWIZZLE_9_17		6
1740#define I915_BIT_6_SWIZZLE_9_10_17	7
1741
1742struct drm_i915_gem_set_tiling {
1743	/** Handle of the buffer to have its tiling state updated */
1744	__u32 handle;
1745
1746	/**
1747	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1748	 * I915_TILING_Y).
1749	 *
1750	 * This value is to be set on request, and will be updated by the
1751	 * kernel on successful return with the actual chosen tiling layout.
1752	 *
1753	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1754	 * has bit 6 swizzling that can't be managed correctly by GEM.
1755	 *
1756	 * Buffer contents become undefined when changing tiling_mode.
1757	 */
1758	__u32 tiling_mode;
1759
1760	/**
1761	 * Stride in bytes for the object when in I915_TILING_X or
1762	 * I915_TILING_Y.
1763	 */
1764	__u32 stride;
1765
1766	/**
1767	 * Returned address bit 6 swizzling required for CPU access through
1768	 * mmap mapping.
1769	 */
1770	__u32 swizzle_mode;
1771};
1772
1773struct drm_i915_gem_get_tiling {
1774	/** Handle of the buffer to get tiling state for. */
1775	__u32 handle;
1776
1777	/**
1778	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1779	 * I915_TILING_Y).
1780	 */
1781	__u32 tiling_mode;
1782
1783	/**
1784	 * Returned address bit 6 swizzling required for CPU access through
1785	 * mmap mapping.
1786	 */
1787	__u32 swizzle_mode;
1788
1789	/**
1790	 * Returned address bit 6 swizzling required for CPU access through
1791	 * mmap mapping whilst bound.
1792	 */
1793	__u32 phys_swizzle_mode;
1794};
1795
1796struct drm_i915_gem_get_aperture {
1797	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1798	__u64 aper_size;
1799
1800	/**
1801	 * Available space in the aperture used by i915_gem_execbuffer, in
1802	 * bytes
1803	 */
1804	__u64 aper_available_size;
1805};
1806
1807struct drm_i915_get_pipe_from_crtc_id {
1808	/** ID of CRTC being requested **/
1809	__u32 crtc_id;
1810
1811	/** pipe of requested CRTC **/
1812	__u32 pipe;
1813};
1814
1815#define I915_MADV_WILLNEED 0
1816#define I915_MADV_DONTNEED 1
1817#define __I915_MADV_PURGED 2 /* internal state */
1818
1819struct drm_i915_gem_madvise {
1820	/** Handle of the buffer to change the backing store advice */
1821	__u32 handle;
1822
1823	/* Advice: either the buffer will be needed again in the near future,
1824	 *         or won't be and could be discarded under memory pressure.
1825	 */
1826	__u32 madv;
1827
1828	/** Whether the backing store still exists. */
1829	__u32 retained;
1830};
1831
1832/* flags */
1833#define I915_OVERLAY_TYPE_MASK 		0xff
1834#define I915_OVERLAY_YUV_PLANAR 	0x01
1835#define I915_OVERLAY_YUV_PACKED 	0x02
1836#define I915_OVERLAY_RGB		0x03
1837
1838#define I915_OVERLAY_DEPTH_MASK		0xff00
1839#define I915_OVERLAY_RGB24		0x1000
1840#define I915_OVERLAY_RGB16		0x2000
1841#define I915_OVERLAY_RGB15		0x3000
1842#define I915_OVERLAY_YUV422		0x0100
1843#define I915_OVERLAY_YUV411		0x0200
1844#define I915_OVERLAY_YUV420		0x0300
1845#define I915_OVERLAY_YUV410		0x0400
1846
1847#define I915_OVERLAY_SWAP_MASK		0xff0000
1848#define I915_OVERLAY_NO_SWAP		0x000000
1849#define I915_OVERLAY_UV_SWAP		0x010000
1850#define I915_OVERLAY_Y_SWAP		0x020000
1851#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1852
1853#define I915_OVERLAY_FLAGS_MASK		0xff000000
1854#define I915_OVERLAY_ENABLE		0x01000000
1855
1856struct drm_intel_overlay_put_image {
1857	/* various flags and src format description */
1858	__u32 flags;
1859	/* source picture description */
1860	__u32 bo_handle;
1861	/* stride values and offsets are in bytes, buffer relative */
1862	__u16 stride_Y; /* stride for packed formats */
1863	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
1865	__u32 offset_U;
1866	__u32 offset_V;
1867	/* in pixels */
1868	__u16 src_width;
1869	__u16 src_height;
1870	/* to compensate the scaling factors for partially covered surfaces */
1871	__u16 src_scan_width;
1872	__u16 src_scan_height;
1873	/* output crtc description */
1874	__u32 crtc_id;
1875	__u16 dst_x;
1876	__u16 dst_y;
1877	__u16 dst_width;
1878	__u16 dst_height;
1879};
1880
1881/* flags */
1882#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1883#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1884#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1885struct drm_intel_overlay_attrs {
1886	__u32 flags;
1887	__u32 color_key;
1888	__s32 brightness;
1889	__u32 contrast;
1890	__u32 saturation;
1891	__u32 gamma0;
1892	__u32 gamma1;
1893	__u32 gamma2;
1894	__u32 gamma3;
1895	__u32 gamma4;
1896	__u32 gamma5;
1897};
1898
1899/*
1900 * Intel sprite handling
1901 *
1902 * Color keying works with a min/mask/max tuple.  Both source and destination
1903 * color keying is allowed.
1904 *
1905 * Source keying:
1906 * Sprite pixels within the min & max values, masked against the color channels
1907 * specified in the mask field, will be transparent.  All other pixels will
1908 * be displayed on top of the primary plane.  For RGB surfaces, only the min
1909 * and mask fields will be used; ranged compares are not allowed.
1910 *
1911 * Destination keying:
1912 * Primary plane pixels that match the min value, masked against the color
1913 * channels specified in the mask field, will be replaced by corresponding
1914 * pixels from the sprite plane.
1915 *
1916 * Note that source & destination keying are exclusive; only one can be
1917 * active on a given plane.
1918 */
1919
1920#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
1921						* flags==0 to disable colorkeying.
1922						*/
1923#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1924#define I915_SET_COLORKEY_SOURCE	(1<<2)
1925struct drm_intel_sprite_colorkey {
1926	__u32 plane_id;
1927	__u32 min_value;
1928	__u32 channel_mask;
1929	__u32 max_value;
1930	__u32 flags;
1931};
1932
1933struct drm_i915_gem_wait {
1934	/** Handle of BO we shall wait on */
1935	__u32 bo_handle;
1936	__u32 flags;
	/** Number of nanoseconds to wait. Returns the time remaining. */
1938	__s64 timeout_ns;
1939};
1940
1941struct drm_i915_gem_context_create {
	__u32 ctx_id; /* output: id of new context */
1943	__u32 pad;
1944};
1945
1946/**
1947 * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
1948 */
1949struct drm_i915_gem_context_create_ext {
1950	/** @ctx_id: Id of the created context (output) */
1951	__u32 ctx_id;
1952
1953	/**
1954	 * @flags: Supported flags are:
1955	 *
1956	 * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
1957	 *
	 * Extensions may be appended to this structure and the driver must
	 * check for them. See @extensions.
	 *
	 * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE:
	 *
	 * The created context will have a single timeline.
1964	 */
1965	__u32 flags;
1966#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
1967#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1968#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1969	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1970
1971	/**
1972	 * @extensions: Zero-terminated chain of extensions.
1973	 *
1974	 * I915_CONTEXT_CREATE_EXT_SETPARAM:
1975	 * Context parameter to set or query during context creation.
1976	 * See struct drm_i915_gem_context_create_ext_setparam.
1977	 *
1978	 * I915_CONTEXT_CREATE_EXT_CLONE:
1979	 * This extension has been removed. On the off chance someone somewhere
1980	 * has attempted to use it, never re-use this extension number.
1981	 */
1982	__u64 extensions;
1983#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1984#define I915_CONTEXT_CREATE_EXT_CLONE 1
1985};
1986
1987/**
1988 * struct drm_i915_gem_context_param - Context parameter to set or query.
1989 */
1990struct drm_i915_gem_context_param {
1991	/** @ctx_id: Context id */
1992	__u32 ctx_id;
1993
1994	/** @size: Size of the parameter @value */
1995	__u32 size;
1996
1997	/** @param: Parameter to set or query */
1998	__u64 param;
1999#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
2000/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
2001 * someone somewhere has attempted to use it, never re-use this context
2002 * param number.
2003 */
2004#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
2005#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
2006#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
2007#define I915_CONTEXT_PARAM_BANNABLE	0x5
2008#define I915_CONTEXT_PARAM_PRIORITY	0x6
2009#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
2010#define   I915_CONTEXT_DEFAULT_PRIORITY		0
2011#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
2012	/*
2013	 * When using the following param, value should be a pointer to
2014	 * drm_i915_gem_context_param_sseu.
2015	 */
2016#define I915_CONTEXT_PARAM_SSEU		0x7
2017
2018/*
 * Not all clients may want to attempt automatic recovery of a context after
2020 * a hang (for example, some clients may only submit very small incremental
2021 * batches relying on known logical state of previous batches which will never
2022 * recover correctly and each attempt will hang), and so would prefer that
2023 * the context is forever banned instead.
2024 *
2025 * If set to false (0), after a reset, subsequent (and in flight) rendering
2026 * from this context is discarded, and the client will need to create a new
2027 * context to use instead.
2028 *
2029 * If set to true (1), the kernel will automatically attempt to recover the
2030 * context by skipping the hanging batch and executing the next batch starting
2031 * from the default context state (discarding the incomplete logical context
2032 * state lost due to the reset).
2033 *
2034 * On creation, all new contexts are marked as recoverable.
2035 */
2036#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
2037
2038	/*
2039	 * The id of the associated virtual memory address space (ppGTT) of
2040	 * this context. Can be retrieved and passed to another context
2041	 * (on the same fd) for both to use the same ppGTT and so share
2042	 * address layouts, and avoid reloading the page tables on context
2043	 * switches between themselves.
2044	 *
2045	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
2046	 */
2047#define I915_CONTEXT_PARAM_VM		0x9
2048
2049/*
2050 * I915_CONTEXT_PARAM_ENGINES:
2051 *
2052 * Bind this context to operate on this subset of available engines. Henceforth,
2053 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
2054 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
2055 * and upwards. Slots 0...N are filled in using the specified (class, instance).
2056 * Use
2057 *	engine_class: I915_ENGINE_CLASS_INVALID,
2058 *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
2059 * to specify a gap in the array that can be filled in later, e.g. by a
2060 * virtual engine used for load balancing.
2061 *
2062 * Setting the number of engines bound to the context to 0, by passing a zero
 * sized argument, will revert to the default settings.
2064 *
2065 * See struct i915_context_param_engines.
2066 *
2067 * Extensions:
2068 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
2069 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
2070 *   i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
2071 */
2072#define I915_CONTEXT_PARAM_ENGINES	0xa
2073
2074/*
2075 * I915_CONTEXT_PARAM_PERSISTENCE:
2076 *
2077 * Allow the context and active rendering to survive the process until
2078 * completion. Persistence allows fire-and-forget clients to queue up a
2079 * bunch of work, hand the output over to a display server and then quit.
2080 * If the context is marked as not persistent, upon closing (either via
2081 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
2082 * or process termination), the context and any outstanding requests will be
2083 * cancelled (and exported fences for cancelled requests marked as -EIO).
2084 *
2085 * By default, new contexts allow persistence.
2086 */
2087#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
2088
2089/* This API has been removed.  On the off chance someone somewhere has
2090 * attempted to use it, never re-use this context param number.
2091 */
2092#define I915_CONTEXT_PARAM_RINGSIZE	0xc
2093
2094/*
2095 * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2096 *
2097 * Mark that the context makes use of protected content, which will result
2098 * in the context being invalidated when the protected content session is.
2099 * Given that the protected content session is killed on suspend, the device
2100 * is kept awake for the lifetime of a protected context, so the user should
 * make sure to dispose of such contexts once done.
2102 * This flag can only be set at context creation time and, when set to true,
2103 * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
2104 * to false. This flag can't be set to true in conjunction with setting the
2105 * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
2106 *
2107 * .. code-block:: C
2108 *
2109 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
2110 *		.base = {
2111 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2112 *		},
2113 *		.param = {
2114 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
2115 *			.value = 1,
2116 *		}
2117 *	};
2118 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
2119 *		.base = {
2120 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2121 *			.next_extension = to_user_pointer(&p_protected),
2122 *		},
2123 *		.param = {
2124 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
2125 *			.value = 0,
2126 *		}
2127 *	};
2128 *	struct drm_i915_gem_context_create_ext create = {
2129 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2130 *		.extensions = to_user_pointer(&p_norecover);
2131 *	};
2132 *
2133 *	ctx_id = gem_context_create_ext(drm_fd, &create);
2134 *
2135 * In addition to the normal failure cases, setting this flag during context
2136 * creation can result in the following errors:
2137 *
2138 * -ENODEV: feature not available
2139 * -EPERM: trying to mark a recoverable or not bannable context as protected
2140 * -ENXIO: A dependency such as a component driver or firmware is not yet
2141 *         loaded so user space may need to attempt again. Depending on the
2142 *         device, this error may be reported if protected context creation is
2143 *         attempted very early after kernel start because the internal timeout
2144 *         waiting for such dependencies is not guaranteed to be larger than
2145 *         required (numbers differ depending on system and kernel config):
2146 *            - ADL/RPL: dependencies may take up to 3 seconds from kernel start
2147 *                       while context creation internal timeout is 250 milisecs
2148 *            - MTL: dependencies may take up to 8 seconds from kernel start
2149 *                   while context creation internal timeout is 250 milisecs
2150 *         NOTE: such dependencies happen once, so a subsequent call to create a
2151 *         protected context after a prior successful call will not experience
2152 *         such timeouts and will not return -ENXIO (unless the driver is reloaded,
2153 *         or, depending on the device, resumes from a suspended state).
2154 * -EIO: The firmware did not succeed in creating the protected context.
2155 */
2156#define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
2157
2158/*
2159 * I915_CONTEXT_PARAM_LOW_LATENCY:
2160 *
2161 * Mark this context as a low latency workload which requires aggressive GT
2162 * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
2163 * supports this per context flag.
2164 */
2165#define I915_CONTEXT_PARAM_LOW_LATENCY		0xe
2166
2167/*
2168 * I915_CONTEXT_PARAM_CONTEXT_IMAGE:
2169 *
 * Allows userspace to provide its own context images.
2171 *
2172 * Note that this is a debug API not available on production kernel builds.
2173 */
2174#define I915_CONTEXT_PARAM_CONTEXT_IMAGE	0xf
2175/* Must be kept compact -- no holes and well documented */
2176
2177	/** @value: Context parameter value to be set or queried */
2178	__u64 value;
2179};
2180
2181/*
2182 * Context SSEU programming
2183 *
 * It may be necessary for either functional or performance reasons to configure
 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
 * Sub-slice/EU).
 *
 * This is done by applying an SSEU configuration, using the below
 * @struct drm_i915_gem_context_param_sseu, for every supported engine which
 * userspace intends to use.
 *
 * Not all GPUs or engines support this functionality, in which case an error
 * code of -ENODEV will be returned.
 *
 * Also, the flexibility of possible SSEU configuration permutations varies
 * between GPU generations and is subject to software-imposed limitations.
 * Requesting an unsupported combination will return an error code of -EINVAL.
2198 *
2199 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
2200 * favour of a single global setting.
2201 */
2202struct drm_i915_gem_context_param_sseu {
2203	/*
2204	 * Engine class & instance to be configured or queried.
2205	 */
2206	struct i915_engine_class_instance engine;
2207
2208	/*
2209	 * Unknown flags must be cleared to zero.
2210	 */
2211	__u32 flags;
2212#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
2213
2214	/*
2215	 * Mask of slices to enable for the context. Valid values are a subset
2216	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
2217	 */
2218	__u64 slice_mask;
2219
2220	/*
2221	 * Mask of subslices to enable for the context. Valid values are a
	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
2223	 */
2224	__u64 subslice_mask;
2225
2226	/*
2227	 * Minimum/Maximum number of EUs to enable per subslice for the
	 * context. min_eus_per_subslice must be less than or equal to
2229	 * max_eus_per_subslice.
2230	 */
2231	__u16 min_eus_per_subslice;
2232	__u16 max_eus_per_subslice;
2233
2234	/*
2235	 * Unused for now. Must be cleared to zero.
2236	 */
2237	__u32 rsvd;
2238};
2239
2240/**
2241 * DOC: Virtual Engine uAPI
2242 *
2243 * Virtual engine is a concept where userspace is able to configure a set of
2244 * physical engines, submit a batch buffer, and let the driver execute it on any
2245 * engine from the set as it sees fit.
2246 *
 * This is primarily useful on parts which have multiple instances of the same
 * engine class, like for example GT3+ Skylake parts with their two VCS engines.
2249 *
2250 * For instance userspace can enumerate all engines of a certain class using the
2251 * previously described `Engine Discovery uAPI`_. After that userspace can
2252 * create a GEM context with a placeholder slot for the virtual engine (using
2253 * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
2254 * and instance respectively) and finally using the
2255 * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
2256 * the same reserved slot.
2257 *
2258 * Example of creating a virtual engine and submitting a batch buffer to it:
2259 *
2260 * .. code-block:: C
2261 *
2262 * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
2263 * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
2264 * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
2265 * 		.num_siblings = 2,
2266 * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
2267 * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
2268 * 	};
2269 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
2270 * 		.engines = { { I915_ENGINE_CLASS_INVALID,
2271 * 			       I915_ENGINE_CLASS_INVALID_NONE } },
2272 * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
2273 * 	};
2274 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
2275 * 		.base = {
2276 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2277 * 		},
2278 * 		.param = {
2279 * 			.param = I915_CONTEXT_PARAM_ENGINES,
2280 * 			.value = to_user_pointer(&engines),
2281 * 			.size = sizeof(engines),
2282 * 		},
2283 * 	};
2284 * 	struct drm_i915_gem_context_create_ext create = {
2285 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2286 * 		.extensions = to_user_pointer(&p_engines);
2287 * 	};
2288 *
2289 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
2290 *
2291 * 	// Now we have created a GEM context with its engine map containing a
2292 * 	// single virtual engine. Submissions to this slot can go either to
2293 * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
2294 * 	// the driver. The load balancing is dynamic from one batch buffer to
2295 * 	// another and transparent to userspace.
2296 *
2297 * 	...
2298 * 	execbuf.rsvd1 = ctx_id;
2299 * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
2300 * 	gem_execbuf(drm_fd, &execbuf);
2301 */
2302
2303/*
2304 * i915_context_engines_load_balance:
2305 *
2306 * Enable load balancing across this set of engines.
2307 *
2308 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
2309 * used will proxy the execbuffer request onto one of the set of engines
2310 * in such a way as to distribute the load evenly across the set.
2311 *
2312 * The set of engines must be compatible (e.g. the same HW class) as they
2313 * will share the same logical GPU context and ring.
2314 *
2315 * To intermix rendering with the virtual engine and direct rendering onto
2316 * the backing engines (bypassing the load balancing proxy), the context must
2317 * be defined to use a single timeline for all engines.
2318 */
2319struct i915_context_engines_load_balance {
2320	struct i915_user_extension base;
2321
2322	__u16 engine_index;
2323	__u16 num_siblings;
2324	__u32 flags; /* all undefined flags must be zero */
2325
2326	__u64 mbz64; /* reserved for future use; must be zero */
2327
2328	struct i915_engine_class_instance engines[];
2329} __attribute__((packed));
2330
2331#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
2332	struct i915_user_extension base; \
2333	__u16 engine_index; \
2334	__u16 num_siblings; \
2335	__u32 flags; \
2336	__u64 mbz64; \
2337	struct i915_engine_class_instance engines[N__]; \
2338} __attribute__((packed)) name__
2339
2340/*
2341 * i915_context_engines_bond:
2342 *
 * Construct bonded pairs for execution within a virtual engine.
2344 *
2345 * All engines are equal, but some are more equal than others. Given
2346 * the distribution of resources in the HW, it may be preferable to run
2347 * a request on a given subset of engines in parallel to a request on a
2348 * specific engine. We enable this selection of engines within a virtual
2349 * engine by specifying bonding pairs, for any given master engine we will
2350 * only execute on one of the corresponding siblings within the virtual engine.
2351 *
 * Executing a request in parallel on the master engine and a sibling requires
 * coordination with I915_EXEC_FENCE_SUBMIT.
2354 */
2355struct i915_context_engines_bond {
2356	struct i915_user_extension base;
2357
2358	struct i915_engine_class_instance master;
2359
2360	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2361	__u16 num_bonds;
2362
2363	__u64 flags; /* all undefined flags must be zero */
2364	__u64 mbz64[4]; /* reserved for future use; must be zero */
2365
2366	struct i915_engine_class_instance engines[];
2367} __attribute__((packed));
2368
2369#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2370	struct i915_user_extension base; \
2371	struct i915_engine_class_instance master; \
2372	__u16 virtual_index; \
2373	__u16 num_bonds; \
2374	__u64 flags; \
2375	__u64 mbz64[4]; \
2376	struct i915_engine_class_instance engines[N__]; \
2377} __attribute__((packed)) name__
2378
2379/**
2380 * struct i915_context_engines_parallel_submit - Configure engine for
2381 * parallel submission.
2382 *
2383 * Setup a slot in the context engine map to allow multiple BBs to be submitted
2384 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
2385 * in parallel. Multiple hardware contexts are created internally in the i915 to
2386 * run these BBs. Once a slot is configured for N BBs only N BBs can be
 * submitted in each execbuf IOCTL, and this is implicit behavior, e.g. the user
 * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
 * many BBs there are based on the slot's configuration. The N BBs are the last
 * N buffer objects or the first N if I915_EXEC_BATCH_FIRST is set.
 *
 * The default placement behavior is to create implicit bonds between each
 * context if each context maps to more than 1 physical engine (e.g. context is
 * a virtual engine). Also we only allow contexts of the same engine class, and
 * these contexts must be in logically contiguous order. Examples of the
 * placement behavior are described below. Lastly, the default is to not allow
 * BBs to be preempted mid-batch. Rather, coordinated preemption points are
 * inserted on all hardware contexts between each set of BBs. Flags could be
 * added in the future to change both of these default behaviors.
2400 *
2401 * Returns -EINVAL if hardware context placement configuration is invalid or if
2402 * the placement configuration isn't supported on the platform / submission
2403 * interface.
2404 * Returns -ENODEV if extension isn't supported on the platform / submission
2405 * interface.
2406 *
2407 * .. code-block:: none
2408 *
2409 *	Examples syntax:
2410 *	CS[X] = generic engine of same class, logical instance X
2411 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
2412 *
2413 *	Example 1 pseudo code:
2414 *	set_engines(INVALID)
2415 *	set_parallel(engine_index=0, width=2, num_siblings=1,
2416 *		     engines=CS[0],CS[1])
2417 *
2418 *	Results in the following valid placement:
2419 *	CS[0], CS[1]
2420 *
2421 *	Example 2 pseudo code:
2422 *	set_engines(INVALID)
2423 *	set_parallel(engine_index=0, width=2, num_siblings=2,
2424 *		     engines=CS[0],CS[2],CS[1],CS[3])
2425 *
2426 *	Results in the following valid placements:
2427 *	CS[0], CS[1]
2428 *	CS[2], CS[3]
2429 *
2430 *	This can be thought of as two virtual engines, each containing two
2431 *	engines thereby making a 2D array. However, there are bonds tying the
2432 *	entries together and placing restrictions on how they can be scheduled.
2433 *	Specifically, the scheduler can choose only vertical columns from the 2D
2434 *	array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
2435 *	scheduler wants to submit to CS[0], it must also choose CS[1] and vice
2436 *	versa. Same for CS[2] requires also using CS[3].
2437 *	VE[0] = CS[0], CS[2]
2438 *	VE[1] = CS[1], CS[3]
2439 *
2440 *	Example 3 pseudo code:
2441 *	set_engines(INVALID)
2442 *	set_parallel(engine_index=0, width=2, num_siblings=2,
2443 *		     engines=CS[0],CS[1],CS[1],CS[3])
2444 *
2445 *	Results in the following valid and invalid placements:
2446 *	CS[0], CS[1]
2447 *	CS[1], CS[3] - Not logically contiguous, return -EINVAL
2448 */
2449struct i915_context_engines_parallel_submit {
2450	/**
2451	 * @base: base user extension.
2452	 */
2453	struct i915_user_extension base;
2454
2455	/**
2456	 * @engine_index: slot for parallel engine
2457	 */
2458	__u16 engine_index;
2459
2460	/**
2461	 * @width: number of contexts per parallel engine or in other words the
2462	 * number of batches in each submission
2463	 */
2464	__u16 width;
2465
2466	/**
2467	 * @num_siblings: number of siblings per context or in other words the
2468	 * number of possible placements for each submission
2469	 */
2470	__u16 num_siblings;
2471
2472	/**
2473	 * @mbz16: reserved for future use; must be zero
2474	 */
2475	__u16 mbz16;
2476
2477	/**
2478	 * @flags: all undefined flags must be zero, currently not defined flags
2479	 */
2480	__u64 flags;
2481
2482	/**
2483	 * @mbz64: reserved for future use; must be zero
2484	 */
2485	__u64 mbz64[3];
2486
2487	/**
2488	 * @engines: 2-d array of engine instances to configure parallel engine
2489	 *
2490	 * length = width (i) * num_siblings (j)
2491	 * index = j + i * num_siblings
2492	 */
2493	struct i915_engine_class_instance engines[];
2494
2495} __attribute__((packed));
2496
2497#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
2498	struct i915_user_extension base; \
2499	__u16 engine_index; \
2500	__u16 width; \
2501	__u16 num_siblings; \
2502	__u16 mbz16; \
2503	__u64 flags; \
2504	__u64 mbz64[3]; \
2505	struct i915_engine_class_instance engines[N__]; \
2506} __attribute__((packed)) name__
2507
2508/**
2509 * DOC: Context Engine Map uAPI
2510 *
2511 * Context engine map is a new way of addressing engines when submitting batch-
2512 * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
2513 * inside the flags field of `struct drm_i915_gem_execbuffer2`.
2514 *
 * To use it, created GEM contexts need to be configured with a list of engines
2516 * the user is intending to submit to. This is accomplished using the
2517 * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
2518 * i915_context_param_engines`.
2519 *
2520 * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
2521 * configured map.
2522 *
2523 * Example of creating such context and submitting against it:
2524 *
2525 * .. code-block:: C
2526 *
2527 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
2528 * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
2529 * 			     { I915_ENGINE_CLASS_COPY, 0 } }
2530 * 	};
2531 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
2532 * 		.base = {
2533 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2534 * 		},
2535 * 		.param = {
2536 * 			.param = I915_CONTEXT_PARAM_ENGINES,
2537 * 			.value = to_user_pointer(&engines),
2538 * 			.size = sizeof(engines),
2539 * 		},
2540 * 	};
2541 * 	struct drm_i915_gem_context_create_ext create = {
2542 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2543 * 		.extensions = to_user_pointer(&p_engines);
2544 * 	};
2545 *
2546 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
2547 *
2548 * 	// We have now created a GEM context with two engines in the map:
2549 * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
2550 * 	// will not be accessible from this context.
2551 *
2552 * 	...
2553 * 	execbuf.rsvd1 = ctx_id;
2554 * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
2555 * 	gem_execbuf(drm_fd, &execbuf);
2556 *
2557 * 	...
2558 * 	execbuf.rsvd1 = ctx_id;
2559 * 	execbuf.flags = 1; // Submits to index 0, which is bcs0 for this context
2560 * 	gem_execbuf(drm_fd, &execbuf);
2561 */
2562
2563struct i915_context_param_engines {
2564	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
2565#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2566#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2567#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
2568	struct i915_engine_class_instance engines[];
2569} __attribute__((packed));
2570
2571#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2572	__u64 extensions; \
2573	struct i915_engine_class_instance engines[N__]; \
2574} __attribute__((packed)) name__
2575
2576struct i915_gem_context_param_context_image {
2577	/** @engine: Engine class & instance to be configured. */
2578	struct i915_engine_class_instance engine;
2579
2580	/** @flags: One of the supported flags or zero. */
2581	__u32 flags;
2582#define I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX (1u << 0)
2583
2584	/** @size: Size of the image blob pointed to by @image. */
2585	__u32 size;
2586
2587	/** @mbz: Must be zero. */
2588	__u32 mbz;
2589
2590	/** @image: Userspace memory containing the context image. */
2591	__u64 image;
2592} __attribute__((packed));
2593
2594/**
2595 * struct drm_i915_gem_context_create_ext_setparam - Context parameter
2596 * to set or query during context creation.
2597 */
2598struct drm_i915_gem_context_create_ext_setparam {
2599	/** @base: Extension link. See struct i915_user_extension. */
2600	struct i915_user_extension base;
2601
2602	/**
2603	 * @param: Context parameter to set or query.
2604	 * See struct drm_i915_gem_context_param.
2605	 */
2606	struct drm_i915_gem_context_param param;
2607};
2608
2609struct drm_i915_gem_context_destroy {
2610	__u32 ctx_id;
2611	__u32 pad;
2612};
2613
2614/**
2615 * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
2616 *
2617 * DRM_I915_GEM_VM_CREATE -
2618 *
2619 * Create a new virtual memory address space (ppGTT) for use within a context
2620 * on the same file. Extensions can be provided to configure exactly how the
 * address space is set up upon creation.
2622 *
2623 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2624 * returned in the outparam @id.
2625 *
 * An extension chain may be provided, starting with @extensions, and terminated
2627 * by the @next_extension being 0. Currently, no extensions are defined.
2628 *
2629 * DRM_I915_GEM_VM_DESTROY -
2630 *
2631 * Destroys a previously created VM id, specified in @vm_id.
2632 *
2633 * No extensions or flags are allowed currently, and so must be zero.
2634 */
2635struct drm_i915_gem_vm_control {
2636	/** @extensions: Zero-terminated chain of extensions. */
2637	__u64 extensions;
2638
2639	/** @flags: reserved for future usage, currently MBZ */
2640	__u32 flags;
2641
2642	/** @vm_id: Id of the VM created or to be destroyed */
2643	__u32 vm_id;
2644};
2645
2646struct drm_i915_reg_read {
2647	/*
2648	 * Register offset.
2649	 * For 64bit wide registers where the upper 32bits don't immediately
2650	 * follow the lower 32bits, the offset of the lower 32bits must
	 * be specified.
2652	 */
2653	__u64 offset;
2654#define I915_REG_READ_8B_WA (1ul << 0)
2655
2656	__u64 val; /* Return value */
2657};
2658
2659/* Known registers:
2660 *
2661 * Render engine timestamp - 0x2358 + 64bit - gen7+
2662 * - Note this register returns an invalid value if using the default
 *   single instruction 8-byte read; to work around that, pass the
 *   flag I915_REG_READ_8B_WA in the offset field.
2665 *
2666 */
2667
2668/*
2669 * struct drm_i915_reset_stats - Return global reset and other context stats
2670 *
 * The driver keeps a few stats for each context and also a global reset count.
2672 * This struct can be used to query those stats.
2673 */
2674struct drm_i915_reset_stats {
2675	/** @ctx_id: ID of the requested context */
2676	__u32 ctx_id;
2677
2678	/** @flags: MBZ */
2679	__u32 flags;
2680
2681	/** @reset_count: All resets since boot/module reload, for all contexts */
2682	__u32 reset_count;
2683
2684	/** @batch_active: Number of batches lost when active in GPU, for this context */
2685	__u32 batch_active;
2686
2687	/** @batch_pending: Number of batches lost pending for execution, for this context */
2688	__u32 batch_pending;
2689
2690	/** @pad: MBZ */
2691	__u32 pad;
2692};
2693
2694/**
2695 * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
2696 *
2697 * Userptr objects have several restrictions on what ioctls can be used with the
2698 * object handle.
2699 */
2700struct drm_i915_gem_userptr {
2701	/**
2702	 * @user_ptr: The pointer to the allocated memory.
2703	 *
2704	 * Needs to be aligned to PAGE_SIZE.
2705	 */
2706	__u64 user_ptr;
2707
2708	/**
2709	 * @user_size:
2710	 *
2711	 * The size in bytes for the allocated memory. This will also become the
2712	 * object size.
2713	 *
	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE.
2716	 */
2717	__u64 user_size;
2718
2719	/**
2720	 * @flags:
2721	 *
2722	 * Supported flags:
2723	 *
2724	 * I915_USERPTR_READ_ONLY:
2725	 *
2726	 * Mark the object as readonly, this also means GPU access can only be
2727	 * readonly. This is only supported on HW which supports readonly access
2728	 * through the GTT. If the HW can't support readonly access, an error is
2729	 * returned.
2730	 *
2731	 * I915_USERPTR_PROBE:
2732	 *
2733	 * Probe the provided @user_ptr range and validate that the @user_ptr is
2734	 * indeed pointing to normal memory and that the range is also valid.
	 * For example, if some garbage address is given to the kernel, then this
2736	 * should complain.
2737	 *
2738	 * Returns -EFAULT if the probe failed.
2739	 *
2740	 * Note that this doesn't populate the backing pages, and also doesn't
2741	 * guarantee that the object will remain valid when the object is
2742	 * eventually used.
2743	 *
2744	 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
2745	 * returns a non-zero value.
2746	 *
2747	 * I915_USERPTR_UNSYNCHRONIZED:
2748	 *
2749	 * NOT USED. Setting this flag will result in an error.
2750	 */
2751	__u32 flags;
2752#define I915_USERPTR_READ_ONLY 0x1
2753#define I915_USERPTR_PROBE 0x2
2754#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2755	/**
2756	 * @handle: Returned handle for the object.
2757	 *
2758	 * Object handles are nonzero.
2759	 */
2760	__u32 handle;
2761};
2762
2763enum drm_i915_oa_format {
2764	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
2765	I915_OA_FORMAT_A29,	    /* HSW only */
2766	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
2767	I915_OA_FORMAT_B4_C8,	    /* HSW only */
2768	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
2769	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
2770	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
2771
2772	/* Gen8+ */
2773	I915_OA_FORMAT_A12,
2774	I915_OA_FORMAT_A12_B8_C8,
2775	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2776
2777	/* DG2 */
2778	I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
2779	I915_OA_FORMAT_A24u40_A14u32_B8_C8,
2780
2781	/* MTL OAM */
2782	I915_OAM_FORMAT_MPEC8u64_B8_C8,
2783	I915_OAM_FORMAT_MPEC8u32_B8_C8,
2784
2785	I915_OA_FORMAT_MAX	    /* non-ABI */
2786};
2787
2788enum drm_i915_perf_property_id {
2789	/**
2790	 * Open the stream for a specific context handle (as used with
2791	 * execbuffer2). A stream opened for a specific context this way
2792	 * won't typically require root privileges.
2793	 *
2794	 * This property is available in perf revision 1.
2795	 */
2796	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2797
2798	/**
2799	 * A value of 1 requests the inclusion of raw OA unit reports as
2800	 * part of stream samples.
2801	 *
2802	 * This property is available in perf revision 1.
2803	 */
2804	DRM_I915_PERF_PROP_SAMPLE_OA,
2805
2806	/**
2807	 * The value specifies which set of OA unit metrics should be
2808	 * configured, defining the contents of any OA unit reports.
2809	 *
2810	 * This property is available in perf revision 1.
2811	 */
2812	DRM_I915_PERF_PROP_OA_METRICS_SET,
2813
2814	/**
2815	 * The value specifies the size and layout of OA unit reports.
2816	 *
2817	 * This property is available in perf revision 1.
2818	 */
2819	DRM_I915_PERF_PROP_OA_FORMAT,
2820
2821	/**
2822	 * Specifying this property implicitly requests periodic OA unit
2823	 * sampling and (at least on Haswell) the sampling frequency is derived
2824	 * from this exponent as follows:
2825	 *
2826	 *   80ns * 2^(period_exponent + 1)
2827	 *
2828	 * This property is available in perf revision 1.
2829	 */
2830	DRM_I915_PERF_PROP_OA_EXPONENT,
2831
2832	/**
	 * Specifying this property is only valid when specifying a context to
2834	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
2835	 * will hold preemption of the particular context we want to gather
2836	 * performance data about. The execbuf2 submissions must include a
2837	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
2838	 *
2839	 * This property is available in perf revision 3.
2840	 */
2841	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
2842
2843	/**
2844	 * Specifying this pins all contexts to the specified SSEU power
2845	 * configuration for the duration of the recording.
2846	 *
2847	 * This parameter's value is a pointer to a struct
2848	 * drm_i915_gem_context_param_sseu.
2849	 *
2850	 * This property is available in perf revision 4.
2851	 */
2852	DRM_I915_PERF_PROP_GLOBAL_SSEU,
2853
2854	/**
2855	 * This optional parameter specifies the timer interval in nanoseconds
2856	 * at which the i915 driver will check the OA buffer for available data.
2857	 * Minimum allowed value is 100 microseconds. A default value is used by
2858	 * the driver if this parameter is not specified. Note that larger timer
	 * values will reduce CPU consumption during OA perf captures. However,
	 * excessively large values would potentially result in OA buffer
	 * overwrites as captures reach the end of the OA buffer.
2862	 *
2863	 * This property is available in perf revision 5.
2864	 */
2865	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
2866
2867	/**
2868	 * Multiple engines may be mapped to the same OA unit. The OA unit is
2869	 * identified by class:instance of any engine mapped to it.
2870	 *
2871	 * This parameter specifies the engine class and must be passed along
2872	 * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
2873	 *
2874	 * This property is available in perf revision 6.
2875	 */
2876	DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
2877
2878	/**
2879	 * This parameter specifies the engine instance and must be passed along
2880	 * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
2881	 *
2882	 * This property is available in perf revision 6.
2883	 */
2884	DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
2885
2886	DRM_I915_PERF_PROP_MAX /* non-ABI */
2887};
2888
2889struct drm_i915_perf_open_param {
2890	__u32 flags;
2891#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
2892#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
2893#define I915_PERF_FLAG_DISABLED		(1<<2)
2894
2895	/** The number of u64 (id, value) pairs */
2896	__u32 num_properties;
2897
2898	/**
2899	 * Pointer to array of u64 (id, value) pairs configuring the stream
2900	 * to open.
2901	 */
2902	__u64 properties_ptr;
2903};
2904
2905/*
2906 * Enable data capture for a stream that was either opened in a disabled state
2907 * via I915_PERF_FLAG_DISABLED or was later disabled via
2908 * I915_PERF_IOCTL_DISABLE.
2909 *
2910 * It is intended to be cheaper to disable and enable a stream than it may be
2911 * to close and re-open a stream with the same configuration.
2912 *
2913 * It's undefined whether any pending data for the stream will be lost.
2914 *
2915 * This ioctl is available in perf revision 1.
2916 */
2917#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
2918
2919/*
2920 * Disable data capture for a stream.
2921 *
 * It is an error to try to read a stream that is disabled.
2923 *
2924 * This ioctl is available in perf revision 1.
2925 */
2926#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
2927
2928/*
2929 * Change metrics_set captured by a stream.
2930 *
2931 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
2933 * the next execbuf submission.
2934 *
2935 * Returns the previously bound metrics set id, or a negative error code.
2936 *
2937 * This ioctl is available in perf revision 2.
2938 */
2939#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
2940
2941/*
2942 * Common to all i915 perf records
2943 */
2944struct drm_i915_perf_record_header {
2945	__u32 type;
2946	__u16 pad;
2947	__u16 size;
2948};
2949
2950enum drm_i915_perf_record_type {
2951
2952	/**
	 * Samples are the workhorse record type whose contents are extensible
2954	 * and defined when opening an i915 perf stream based on the given
2955	 * properties.
2956	 *
2957	 * Boolean properties following the naming convention
2958	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2959	 * every sample.
2960	 *
2961	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
2963	 * documented here.
2964	 *
2965	 * struct {
2966	 *     struct drm_i915_perf_record_header header;
2967	 *
2968	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2969	 * };
2970	 */
2971	DRM_I915_PERF_RECORD_SAMPLE = 1,
2972
2973	/*
2974	 * Indicates that one or more OA reports were not written by the
2975	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2976	 * command collides with periodic sampling - which would be more likely
2977	 * at higher sampling frequencies.
2978	 */
2979	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2980
2981	/**
2982	 * An error occurred that resulted in all pending OA reports being lost.
2983	 */
2984	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2985
2986	DRM_I915_PERF_RECORD_MAX /* non-ABI */
2987};
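
/*
 * Example (illustrative): walking the records returned by a read() on the
 * stream fd. Every record starts with struct drm_i915_perf_record_header,
 * so the buffer can be advanced by header.size; buffer sizing and error
 * handling are simplified here.
 *
 * .. code-block:: C
 *
 *	const struct drm_i915_perf_record_header *header;
 *	uint8_t buf[16 * 4096];
 *	ssize_t offset, len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (offset = 0; offset < len; offset += header->size) {
 *		header = (const void *)(buf + offset);
 *
 *		switch (header->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:
 *			// Sample data (e.g. the OA report) follows the header.
 *			break;
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
 *			// Reports were dropped; derived counters may be off.
 *			break;
 *		}
 *	}
 */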

/**
 * struct drm_i915_perf_oa_config
 *
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/**
	 * @uuid:
	 *
	 * String formatted like "%08x-%04x-%04x-%04x-%012x"
	 */
	char uuid[36];

	/**
	 * @n_mux_regs:
	 *
	 * Number of mux regs in &mux_regs_ptr.
	 */
	__u32 n_mux_regs;

	/**
	 * @n_boolean_regs:
	 *
	 * Number of boolean regs in &boolean_regs_ptr.
	 */
	__u32 n_boolean_regs;

	/**
	 * @n_flex_regs:
	 *
	 * Number of flex regs in &flex_regs_ptr.
	 */
	__u32 n_flex_regs;

	/**
	 * @mux_regs_ptr:
	 *
	 * Pointer to tuples of u32 values (register address, value) for mux
	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
	 * &n_mux_regs).
	 */
	__u64 mux_regs_ptr;

	/**
	 * @boolean_regs_ptr:
	 *
	 * Pointer to tuples of u32 values (register address, value) for
	 * boolean registers.  Expected length of buffer is (2 * sizeof(u32) *
	 * &n_boolean_regs).
	 */
	__u64 boolean_regs_ptr;

	/**
	 * @flex_regs_ptr:
	 *
	 * Pointer to tuples of u32 values (register address, value) for flex
	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
	 * &n_flex_regs).
	 */
	__u64 flex_regs_ptr;
};
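
/*
 * Example (illustrative): registering a dynamic OA configuration with
 * DRM_IOCTL_I915_PERF_ADD_CONFIG (defined earlier in this file). The uuid
 * and the single register (address, value) tuple are placeholders; real
 * values come from metrics authoring tools. On success the ioctl returns
 * the new metrics set id, usable with DRM_I915_PERF_PROP_OA_METRICS_SET.
 *
 * .. code-block:: C
 *
 *	uint32_t mux_regs[][2] = {
 *		{ 0x9888, 0x10800000 }, // placeholder (address, value)
 *	};
 *	struct drm_i915_perf_oa_config config = {
 *		// exactly 36 chars; uuid[36] is not NUL-terminated
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = sizeof(mux_regs) / sizeof(mux_regs[0]),
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int metrics_set_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	if (metrics_set_id < 0) ...
 */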

/**
 * struct drm_i915_query_item - An individual query for the kernel to process.
 *
 * The behaviour is determined by the @query_id. Note that what gets written
 * to @data_ptr also depends on the specific @query_id.
 */
struct drm_i915_query_item {
	/**
	 * @query_id:
	 *
	 * The id for this query.  Currently accepted query IDs are:
	 *  - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
	 *  - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
	 *  - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
	 *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
	 *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
	 *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
	 *  - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
	 */
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO		1
#define DRM_I915_QUERY_ENGINE_INFO		2
#define DRM_I915_QUERY_PERF_CONFIG		3
#define DRM_I915_QUERY_MEMORY_REGIONS		4
#define DRM_I915_QUERY_HWCONFIG_BLOB		5
#define DRM_I915_QUERY_GEOMETRY_SUBSLICES	6
#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION	7
/* Must be kept compact -- no holes and well documented */

	/**
	 * @length:
	 *
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the @data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/**
	 * @flags:
	 *
	 * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
	 *
	 * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following:
	 *
	 *	- %DRM_I915_QUERY_PERF_CONFIG_LIST
	 *	- %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *	- %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
	 *
	 * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain
	 * a struct i915_engine_class_instance that references a render engine.
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3

	/**
	 * @data_ptr:
	 *
	 * Data will be written at the location pointed to by @data_ptr when
	 * the value of @length matches the length of the data to be written
	 * by the kernel.
	 */
	__u64 data_ptr;
};

/**
 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
 * kernel to fill out.
 *
 * Note that this is generally a two-step process for each struct
 * drm_i915_query_item in the array:
 *
 * 1. Call DRM_IOCTL_I915_QUERY, giving it our array of struct
 *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
 *    kernel will then fill in the size, in bytes, which tells userspace how
 *    much memory it needs to allocate for the blob (say for an array of
 *    properties).
 *
 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
 *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
 *    the &drm_i915_query_item.length should still be the same as what the
 *    kernel previously set. At this point the kernel can fill in the blob.
 *
 * Note that for some query items it can make sense for userspace to just pass
 * in a buffer/blob equal to or larger than the required size. In this case only
 * a single ioctl call is needed. For some smaller query items this can work
 * quite well; see the single-pass sketch after the struct below.
 */
struct drm_i915_query {
	/** @num_items: The number of elements in the @items_ptr array */
	__u32 num_items;

	/**
	 * @flags: Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/**
	 * @items_ptr:
	 *
	 * Pointer to an array of struct drm_i915_query_item. The number of
	 * array elements is @num_items.
	 */
	__u64 items_ptr;
};
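
/*
 * Example (illustrative): the single ioctl variant mentioned above, using a
 * caller-provided buffer assumed to be large enough for the item. Here
 * item.length is set to the buffer size up front instead of being queried
 * first.
 *
 * .. code-block:: C
 *
 *	uint8_t buf[4096]; // assumed large enough for this query item
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *		.length = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err || item.length < 0) ...
 */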

/**
 * struct drm_i915_query_topology_info
 *
 * Describes slice/subslice/EU information queried by
 * %DRM_I915_QUERY_TOPOLOGY_INFO
 */
struct drm_i915_query_topology_info {
	/**
	 * @flags:
	 *
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	/**
	 * @max_slices:
	 *
	 * The number of bits used to express the slice mask.
	 */
	__u16 max_slices;

	/**
	 * @max_subslices:
	 *
	 * The number of bits used to express the subslice mask.
	 */
	__u16 max_subslices;

	/**
	 * @max_eus_per_subslice:
	 *
	 * The number of bits in the EU mask that correspond to a single
	 * subslice's EUs.
	 */
	__u16 max_eus_per_subslice;

	/**
	 * @subslice_offset:
	 *
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/**
	 * @subslice_stride:
	 *
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/**
	 * @eu_offset:
	 *
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/**
	 * @eu_stride:
	 *
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	/**
	 * @data:
	 *
	 * Contains 3 pieces of information:
	 *
	 * - The slice mask with one bit per slice telling whether a slice is
	 *   available. The availability of slice X can be queried with the
	 *   following formula:
	 *
	 *   .. code:: c
	 *
	 *      (data[X / 8] >> (X % 8)) & 1
	 *
	 *   Starting with Xe_HP platforms, Intel hardware no longer has
	 *   traditional slices so i915 will always report a single slice
	 *   (hardcoded slicemask = 0x1) which contains all of the platform's
	 *   subslices.  I.e., the mask here does not reflect any of the newer
	 *   hardware concepts such as "gslices" or "cslices" since userspace
	 *   is capable of inferring those from the subslice mask.
	 *
	 * - The subslice mask for each slice with one bit per subslice telling
	 *   whether a subslice is available.  Starting with Gen12 we use the
	 *   term "subslice" to refer to what the hardware documentation
	 *   describes as a "dual-subslice."  The availability of subslice Y
	 *   in slice X can be queried with the following formula:
	 *
	 *   .. code:: c
	 *
	 *      (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
	 *
	 * - The EU mask for each subslice in each slice, with one bit per EU
	 *   telling whether an EU is available. The availability of EU Z in
	 *   subslice Y in slice X can be queried with the following formula:
	 *
	 *   .. code:: c
	 *
	 *      (data[eu_offset +
	 *            (X * max_subslices + Y) * eu_stride +
	 *            Z / 8
	 *       ] >> (Z % 8)) & 1
	 */
	__u8 data[];
};
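
/*
 * Example (illustrative): helpers implementing the availability formulas
 * above against a queried struct drm_i915_query_topology_info.
 *
 * .. code-block:: C
 *
 *	static bool slice_available(const struct drm_i915_query_topology_info *ti,
 *				    int x)
 *	{
 *		return (ti->data[x / 8] >> (x % 8)) & 1;
 *	}
 *
 *	static bool subslice_available(const struct drm_i915_query_topology_info *ti,
 *				       int x, int y)
 *	{
 *		return (ti->data[ti->subslice_offset + x * ti->subslice_stride +
 *				 y / 8] >> (y % 8)) & 1;
 *	}
 *
 *	static bool eu_available(const struct drm_i915_query_topology_info *ti,
 *				 int x, int y, int z)
 *	{
 *		return (ti->data[ti->eu_offset +
 *				 (x * ti->max_subslices + y) * ti->eu_stride +
 *				 z / 8] >> (z % 8)) & 1;
 *	}
 */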

/**
 * DOC: Engine Discovery uAPI
 *
 * Engine discovery uAPI is a way of enumerating physical engines present in a
 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
 * `I915_PARAM_HAS_BLT`.
 *
 * The need for this interface arose with Icelake and newer GPUs, which
 * established a pattern of having multiple engines of the same class, where
 * not all instances are completely functionally equivalent.
 *
 * The entry point for this uAPI is `DRM_IOCTL_I915_QUERY` with
 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
 *
 * Example for getting the list of engines:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_engine_info *info;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	int err, i;
 *
 *	// First query the size of the blob we need, this needs to be large
 *	// enough to hold our array of engines. The kernel will fill out the
 *	// item.length for us, which is the number of bytes we need.
 *	//
 *	// Alternatively a large buffer can be allocated straightaway enabling
 *	// querying in one pass, in which case item.length should contain the
 *	// length of the provided buffer.
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	info = calloc(1, item.length);
 *	// Now that we allocated the required number of bytes, we call the ioctl
 *	// again, this time with the data_ptr pointing to our newly allocated
 *	// blob, which the kernel can then populate with info on all engines.
 *	item.data_ptr = (uintptr_t)info;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	// We can now access each engine in the array
 *	for (i = 0; i < info->num_engines; i++) {
 *		struct drm_i915_engine_info einfo = info->engines[i];
 *		u16 class = einfo.engine.engine_class;
 *		u16 instance = einfo.engine.engine_instance;
 *		....
 *	}
 *
 *	free(info);
 *
 * Each of the enumerated engines, apart from being defined by its class and
 * instance (see `struct i915_engine_class_instance`), can also have flags and
 * capabilities defined as documented in i915_drm.h.
 *
 * For instance, video engines which support HEVC encoding will have the
 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
 *
 * Engine discovery only fully comes into its own when combined with the new way
 * of addressing engines when submitting batch buffers using contexts with
 * engine maps configured.
 */

/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 */
struct drm_i915_engine_info {
	/** @engine: Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** @rsvd0: Reserved field. */
	__u32 rsvd0;

	/** @flags: Engine flags. */
	__u64 flags;
#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE		(1 << 0)

	/** @capabilities: Capabilities of this engine. */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** @logical_instance: Logical instance of engine */
	__u16 logical_instance;

	/** @rsvd1: Reserved fields. */
	__u16 rsvd1[3];
	/** @rsvd2: Reserved fields. */
	__u64 rsvd2[3];
};

/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @engines: Marker for drm_i915_engine_info structures. */
	struct drm_i915_engine_info engines[];
};

/**
 * struct drm_i915_query_perf_config
 *
 * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
 * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
 */
struct drm_i915_query_perf_config {
	union {
		/**
		 * @n_configs:
		 *
		 * When &drm_i915_query_item.flags ==
		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
		 * the number of configurations available.
		 */
		__u64 n_configs;

		/**
		 * @config:
		 *
		 * When &drm_i915_query_item.flags ==
		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
		 * value in this field as a configuration identifier to decide
		 * what data to write into config_ptr.
		 */
		__u64 config;

		/**
		 * @uuid:
		 *
		 * When &drm_i915_query_item.flags ==
		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
		 * value in this field as a configuration identifier to decide
		 * what data to write into config_ptr.
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 */
		char uuid[36];
	};

	/**
	 * @flags:
	 *
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/**
	 * @data:
	 *
	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
	 * i915 will write an array of __u64 of configuration identifiers.
	 *
	 * When &drm_i915_query_item.flags ==
	 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of struct
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
	 * associated pointers the values submitted when the configuration was
	 * created:
	 *
	 *  - &drm_i915_perf_oa_config.n_mux_regs
	 *  - &drm_i915_perf_oa_config.n_boolean_regs
	 *  - &drm_i915_perf_oa_config.n_flex_regs
	 */
	__u8 data[];
};
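
/*
 * Example (illustrative): listing the available OA configurations with
 * %DRM_I915_QUERY_PERF_CONFIG and %DRM_I915_QUERY_PERF_CONFIG_LIST, using
 * the usual two-step size query. Error handling is abbreviated.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_perf_config *qpc;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_PERF_CONFIG,
 *		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	uint64_t *ids, i;
 *	int err;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills item.length
 *	if (err) ...
 *
 *	qpc = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)qpc;
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	ids = (uint64_t *)qpc->data; // qpc->n_configs identifiers
 *	for (i = 0; i < qpc->n_configs; i++)
 *		... ids[i] ...
 *
 *	free(qpc);
 */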

/**
 * enum drm_i915_gem_memory_class - Supported memory classes
 */
enum drm_i915_gem_memory_class {
	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
	I915_MEMORY_CLASS_SYSTEM = 0,
	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
	I915_MEMORY_CLASS_DEVICE,
};

/**
 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
 */
struct drm_i915_gem_memory_class_instance {
	/** @memory_class: See enum drm_i915_gem_memory_class */
	__u16 memory_class;

	/** @memory_instance: Which instance */
	__u16 memory_instance;
};

/**
 * struct drm_i915_memory_region_info - Describes one region as known to the
 * driver.
 *
 * Note this uses both struct drm_i915_query_item and struct drm_i915_query.
 * For this query we use the query id DRM_I915_QUERY_MEMORY_REGIONS at
 * &drm_i915_query_item.query_id.
 */
struct drm_i915_memory_region_info {
	/** @region: The class:instance pair encoding */
	struct drm_i915_gem_memory_class_instance region;

	/** @rsvd0: MBZ */
	__u32 rsvd0;

	/**
	 * @probed_size: Memory probed by the driver
	 *
	 * Note that it should never be possible to encounter a zero value
	 * here. Also note that no current region type will ever return -1
	 * here, although future region types might; the same applies to the
	 * other size fields.
	 */
	__u64 probed_size;

	/**
	 * @unallocated_size: Estimate of memory remaining
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
	 * Without this (or if this is an older kernel) the value here will
	 * always equal the @probed_size. Note this is only currently tracked
	 * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
	 * will always equal the @probed_size).
	 */
	__u64 unallocated_size;

	union {
		/** @rsvd1: MBZ */
		__u64 rsvd1[8];
		struct {
			/**
			 * @probed_cpu_visible_size: Memory probed by the driver
			 * that is CPU accessible.
			 *
			 * This will always be <= @probed_size, and the
			 * remainder (if there is any) will not be CPU
			 * accessible.
			 *
			 * On systems without small BAR, the @probed_size will
			 * always equal the @probed_cpu_visible_size, since all
			 * of it will be CPU accessible.
			 *
			 * Note this is only tracked for
			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
			 * value here will always equal the @probed_size).
			 *
			 * Note that if the value returned here is zero, then
			 * this must be an old kernel which lacks the relevant
			 * small-bar uAPI support (including
			 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
			 * such systems we should never actually end up with a
			 * small BAR configuration, assuming we are able to load
			 * the kernel module. Hence it should be safe to treat
			 * this the same as when @probed_cpu_visible_size ==
			 * @probed_size.
			 */
			__u64 probed_cpu_visible_size;

			/**
			 * @unallocated_cpu_visible_size: Estimate of CPU
			 * visible memory remaining.
			 *
			 * Note this is only tracked for
			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
			 * value here will always equal the
			 * @probed_cpu_visible_size).
			 *
			 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
			 * accounting. Without this the value here will always
			 * equal the @probed_cpu_visible_size.
			 *
			 * If this is an older kernel the value here will be
			 * zero, see also @probed_cpu_visible_size.
			 */
			__u64 unallocated_cpu_visible_size;
		};
	};
};

/**
 * struct drm_i915_query_memory_regions
 *
 * The region info query enumerates all regions known to the driver by filling
 * in an array of struct drm_i915_memory_region_info structures.
 *
 * Example for getting the list of supported regions:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_memory_regions *info;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	int err, i;
 *
 *	// First query the size of the blob we need, this needs to be large
 *	// enough to hold our array of regions. The kernel will fill out the
 *	// item.length for us, which is the number of bytes we need.
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	info = calloc(1, item.length);
 *	// Now that we allocated the required number of bytes, we call the ioctl
 *	// again, this time with the data_ptr pointing to our newly allocated
 *	// blob, which the kernel can then populate with all the region info.
 *	item.data_ptr = (uintptr_t)info;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	// We can now access each region in the array
 *	for (i = 0; i < info->num_regions; i++) {
 *		struct drm_i915_memory_region_info mr = info->regions[i];
 *		u16 class = mr.region.memory_class;
 *		u16 instance = mr.region.memory_instance;
 *
 *		....
 *	}
 *
 *	free(info);
 */
struct drm_i915_query_memory_regions {
	/** @num_regions: Number of supported regions */
	__u32 num_regions;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @regions: Info about each supported region */
	struct drm_i915_memory_region_info regions[];
};

/**
 * struct drm_i915_query_guc_submission_version - query GuC submission interface version
 */
struct drm_i915_query_guc_submission_version {
	/** @branch: Firmware branch version. */
	__u32 branch;
	/** @major: Firmware major version. */
	__u32 major;
	/** @minor: Firmware minor version. */
	__u32 minor;
	/** @patch: Firmware patch version. */
	__u32 patch;
};
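
/*
 * Example (illustrative): reading the GuC submission interface version in a
 * single pass, since the item has a fixed size.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_guc_submission_version ver = {};
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION,
 *		.length = sizeof(ver),
 *		.data_ptr = (uintptr_t)&ver,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err || item.length < 0) ...
 *	// ver.branch/major/minor/patch now hold the firmware version.
 */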

/**
 * DOC: GuC HWCONFIG blob uAPI
 *
 * The GuC produces a blob with information about the current device.
 * i915 reads this blob from GuC and makes it available via this uAPI.
 *
 * The format and meaning of the blob content are documented in the
 * Programmer's Reference Manual.
 */
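
/*
 * Example (illustrative): fetching the raw HWCONFIG blob with the usual
 * two-step size query; interpreting the blob contents is out of scope here.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_HWCONFIG_BLOB,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	void *blob;
 *	int err;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills item.length
 *	if (err) ...
 *
 *	blob = malloc(item.length);
 *	item.data_ptr = (uintptr_t)blob;
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 */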

/**
 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
 * extension support using struct i915_user_extension.
 *
 * Note that new buffer flags should be added here, at least for the stuff that
 * is immutable. Previously we would have two ioctls, one to create the object
 * with gem_create, and another to apply various parameters, however this
 * creates some ambiguity for the params which are considered immutable. Also in
 * general we're phasing out the various SET/GET ioctls.
 */
struct drm_i915_gem_create_ext {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 *
	 * On platforms like DG2/ATS the kernel will always use 64K or larger
	 * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
	 * minimum of 64K GTT alignment for such objects.
	 *
	 * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
	 * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
	 * where we had the following complications:
	 *
	 *   1) The entire PDE (which covers a 2MB virtual address range) must
	 *   contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
	 *   PDE is forbidden by the hardware.
	 *
	 *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
	 *   objects.
	 *
	 * However on actual production HW this was completely changed to now
	 * allow setting a TLB hint at the PTE level (see PS64), which is a lot
	 * more flexible than the above. With this the 2M restriction was
	 * dropped where we now only require 64K.
	 */
	__u64 size;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/**
	 * @flags: Optional flags.
	 *
	 * Supported values:
	 *
	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
	 * the object will need to be accessed via the CPU.
	 *
	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
	 * strictly required on configurations where some subset of the device
	 * memory is directly visible/mappable through the CPU (which we also
	 * call small BAR), like on some DG2+ systems. Note that this is quite
	 * undesirable, but due to various factors like the client CPU, BIOS,
	 * etc., it's something we can expect to see in the wild. See
	 * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
	 * determine whether this applies to a given system.
	 *
	 * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
	 * ensure the kernel can always spill the allocation to system memory,
	 * if the object can't be allocated in the mappable part of
	 * I915_MEMORY_CLASS_DEVICE.
	 *
	 * Also note that since the kernel only supports flat-CCS on objects
	 * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
	 * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
	 * flat-CCS.
	 *
	 * Without this hint, the kernel will assume that non-mappable
	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
	 * kernel can still migrate the object to the mappable part, as a last
	 * resort, if userspace ever CPU faults this object, but this might be
	 * expensive, and so ideally should be avoided.
	 *
	 * On older kernels which lack the relevant small-bar uAPI support (see
	 * also &drm_i915_memory_region_info.probed_cpu_visible_size),
	 * usage of the flag will result in an error, but it should NEVER be
	 * possible to end up with a small BAR configuration, assuming we can
	 * also successfully load the i915 kernel module. In such cases the
	 * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
	 * such there are zero restrictions on where the object can be placed.
	 */
#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
	__u32 flags;

	/**
	 * @extensions: The chain of extensions to apply to this object.
	 *
	 * This will be useful in the future when we need to support several
	 * different extensions, and we need to apply more than one when
	 * creating the object. See struct i915_user_extension.
	 *
	 * If we don't supply any extensions then we get the same old gem_create
	 * behaviour.
	 *
	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
	 * struct drm_i915_gem_create_ext_memory_regions.
	 *
	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
	 * struct drm_i915_gem_create_ext_protected_content.
	 *
	 * For I915_GEM_CREATE_EXT_SET_PAT usage see
	 * struct drm_i915_gem_create_ext_set_pat.
	 */
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
#define I915_GEM_CREATE_EXT_SET_PAT 2
	__u64 extensions;
};
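
/*
 * Example (illustrative): creating a CPU-mappable object on a small BAR
 * system by combining I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS with a
 * placement list that includes I915_MEMORY_CLASS_SYSTEM, as required above.
 * See also the memory regions extension documented below.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance placements[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.num_regions = 2,
 *		.regions = (uintptr_t)placements,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * 4096,
 *		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */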

/**
 * struct drm_i915_gem_create_ext_memory_regions - The
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
 *
 * Set the object with the desired set of placements/regions in priority
 * order. Each entry must be unique and supported by the device.
 *
 * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
 * an equivalent layout of class:instance pair encodings. See struct
 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
 * query the supported regions for a device.
 *
 * As an example, on discrete devices, if we wish to set the placement as
 * device local-memory we can do something like:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance region_lmem = {
 *		.memory_class = I915_MEMORY_CLASS_DEVICE,
 *		.memory_instance = 0,
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
 * along with the final object size in &drm_i915_gem_create_ext.size, which
 * should account for any rounding up, if required.
 *
 * Note that userspace has no means of knowing the current backing region
 * for objects where @num_regions is larger than one. The kernel will only
 * ensure that the priority order of the @regions array is honoured, either
 * when initially placing the object, or when moving memory around due to
 * memory pressure.
 *
 * On Flat-CCS capable HW, compression is supported for objects residing in
 * I915_MEMORY_CLASS_DEVICE. If such a (compressed) object also lists another
 * memory class in @regions and is migrated by i915, due to memory
 * constraints, to a non-I915_MEMORY_CLASS_DEVICE region, i915 would need to
 * decompress the content, but it does not have the information required to
 * decompress userspace-compressed objects.
 *
 * i915 therefore supports Flat-CCS only on objects which can reside solely
 * in I915_MEMORY_CLASS_DEVICE regions.
 */
struct drm_i915_gem_create_ext_memory_regions {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @pad: MBZ */
	__u32 pad;
	/** @num_regions: Number of elements in the @regions array. */
	__u32 num_regions;
	/**
	 * @regions: The regions/placements array.
	 *
	 * An array of struct drm_i915_gem_memory_class_instance.
	 */
	__u64 regions;
};

/**
 * struct drm_i915_gem_create_ext_protected_content - The
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT extension.
 *
 * If this extension is provided, buffer contents are expected to be protected
 * by PXP encryption and require decryption for scan out and processing. This
 * is only possible on platforms that have PXP enabled; in all other scenarios
 * using this extension will cause the ioctl to fail and return -ENODEV. The
 * flags parameter is reserved for future expansion and must currently be set
 * to zero.
 *
 * The buffer contents are considered invalid after a PXP session teardown.
 *
 * The encryption is guaranteed to be processed correctly only if the object
 * is submitted with a context created using the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
 * at submission time on the validity of the objects involved.
 *
 * Below is an example of how to create a protected object:
 *
 * .. code-block:: C
 *
 *      struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *              .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *              .flags = 0,
 *      };
 *      struct drm_i915_gem_create_ext create_ext = {
 *              .size = PAGE_SIZE,
 *              .extensions = (uintptr_t)&protected_ext,
 *      };
 *
 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *      if (err) ...
 */
struct drm_i915_gem_create_ext_protected_content {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;
	/** @flags: reserved for future usage, currently MBZ */
	__u32 flags;
};

/**
 * struct drm_i915_gem_create_ext_set_pat - The
 * I915_GEM_CREATE_EXT_SET_PAT extension.
 *
 * If this extension is provided, the specified caching policy (PAT index) is
 * applied to the buffer object.
 *
 * Below is an example of how to create an object with a specific caching policy:
 *
 * .. code-block:: C
 *
 *      struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
 *              .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
 *              .pat_index = 0,
 *      };
 *      struct drm_i915_gem_create_ext create_ext = {
 *              .size = PAGE_SIZE,
 *              .extensions = (uintptr_t)&set_pat_ext,
 *      };
 *
 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *      if (err) ...
 */
struct drm_i915_gem_create_ext_set_pat {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;
	/**
	 * @pat_index: PAT index to be set.
	 *
	 * The PAT index is a bit field in the page table entry that controls
	 * caching behaviour for GPU accesses. The definition of the PAT index
	 * is platform dependent and can be found in the hardware
	 * specifications.
	 */
	__u32 pat_index;
	/** @rsvd: reserved for future use */
	__u32 rsvd;
};

/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

#if defined(__cplusplus)
}
#endif

#endif /* _I915_DRM_H_ */