/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_DRM_H_
#define _XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 *   1. IOCTL definition
 *   2. Extension definition and helper structs
 *   3. IOCTL's Query structs in the order of the Query's entries.
 *   4. The rest of IOCTL structs in the order of IOCTL declaration.
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 *   ┌──────────────────────────────────────────────────────────────────┐
 *   │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 *   │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 *   │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 *   │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 *   │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 *   │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 *   │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 *   │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 *   │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 *   │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 *   └─────────────────────────────Device0───────┬──────────────────────┘
 *                                               │
 *                        ───────────────────────┴────────── PCI bus
 */

/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe Xe's IOCTL entries, their structs, and other
 * Xe-related uAPI such as uevents and PMU (Platform Monitoring Unit) related
 * entries and usage.
 *
 * List of supported IOCTLs:
 *  - &DRM_IOCTL_XE_DEVICE_QUERY
 *  - &DRM_IOCTL_XE_GEM_CREATE
 *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *  - &DRM_IOCTL_XE_VM_CREATE
 *  - &DRM_IOCTL_XE_VM_DESTROY
 *  - &DRM_IOCTL_XE_VM_BIND
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *  - &DRM_IOCTL_XE_EXEC
 *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
 *  - &DRM_IOCTL_XE_OBSERVATION
 */

/*
 * xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0), with 0xa0 excluded. The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be in the range [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_EXEC_QUEUE_CREATE	0x06
#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
#define DRM_XE_EXEC			0x09
#define DRM_XE_WAIT_USER_FENCE		0x0a
#define DRM_XE_OBSERVATION		0x0b

/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)

/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct drm_xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct drm_xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of Xe's IOCTLs to be extended
 * with a set_property operation.
 */
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct drm_xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

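/*
 * As an illustrative sketch (not part of the kernel documentation): a
 * set_property extension is chained into an IOCTL by filling @base and
 * pointing the IOCTL's @extensions field at it. The extension name and
 * property below are the ones defined for exec queue creation further
 * down in this file; error handling is omitted for brevity.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property prio = {
 *		.base.next_extension = 0,
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&prio,
 *		...
 *	};
 */
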
/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input for engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles
 *
 * The @engine_class can be:
 *  - %DRM_XE_ENGINE_CLASS_RENDER
 *  - %DRM_XE_ENGINE_CLASS_COPY
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 *  - %DRM_XE_ENGINE_CLASS_COMPUTE
 *  - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel-only class (not an actual
 *    hardware engine class). Used for creating ordered queues of VM
 *    bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
#define DRM_XE_ENGINE_CLASS_VM_BIND		5
	/** @engine_class: engine class id */
	__u16 engine_class;
	/** @engine_instance: engine instance id */
	__u16 engine_instance;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
	/** @instance: The @drm_xe_engine_class_instance */
	struct drm_xe_engine_class_instance instance;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses a single
 * struct @drm_xe_query_engines in .data, which contains an array of
 * struct @drm_xe_engine.
 */
struct drm_xe_query_engines {
	/** @num_engines: number of engines returned in @engines */
	__u32 num_engines;
	/** @pad: MBZ */
	__u32 pad;
	/** @engines: The returned engines for this device */
	struct drm_xe_engine engines[];
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The unique ID for this region, which serves as the
	 * index in the placement bitmask used as argument for
	 * &DRM_IOCTL_XE_GEM_CREATE
	 */
	__u16 instance;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 * Buffer objects with an allowable placement in this region must be
	 * created with a size aligned to this value.
	 * GPU virtual address mappings of (parts of) buffer objects that
	 * may be placed in this region must also have their GPU virtual
	 * address and range aligned to this value.
	 * Affected IOCTLs will return %-EINVAL if alignment restrictions are
	 * not met.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), @cpu_visible_size will
	 * always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: Reserved */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
 */
struct drm_xe_query_mem_regions {
	/** @num_mem_regions: number of memory regions returned in @mem_regions */
	__u32 num_mem_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @mem_regions: The returned memory regions for this device */
	struct drm_xe_mem_region mem_regions[];
};

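/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of reading the memory regions, using the same two-call pattern
 * described for &DRM_IOCTL_XE_DEVICE_QUERY further below. Error handling
 * is omitted for brevity.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_mem_regions *regions;
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); // fills query.size
 *	regions = malloc(query.size);
 *	query.data = (uintptr_t)regions;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (__u32 i = 0; i < regions->num_mem_regions; i++)
 *		printf("instance %u: class %u, total %llu bytes\n",
 *		       regions->mem_regions[i].instance,
 *		       regions->mem_regions[i].mem_class,
 *		       (unsigned long long)regions->mem_regions[i].total_size);
 *	free(regions);
 */
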
/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *    and the device revision (next 8 bits)
 *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *    configuration, see list below
 *
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *      has usable VRAM
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
 *      has low latency hint support
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
 *      device has CPU address mirroring support
 *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *    required by this device, typically SZ_4K or SZ_64K
 *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *    available exec queue priority
 */
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID			0
#define DRM_XE_QUERY_CONFIG_FLAGS				1
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM		(1 << 0)
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY	(1 << 1)
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR	(1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT			2
#define DRM_XE_QUERY_CONFIG_VA_BITS				3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY		4
	/** @info: array of elements containing the config info */
	__u64 info[];
};

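/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of decoding the config array once it has been retrieved with
 * &DRM_IOCTL_XE_DEVICE_QUERY, following the bit layout documented above
 * for %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_config *config = ...; // filled by the query
 *	__u16 device_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 *	__u8 revision = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff;
 *	bool has_vram = config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
 *			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
 */
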
/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with the
 * individual descriptions of all existing GTs.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The index in @type can be:
 *  - %DRM_XE_QUERY_GT_TYPE_MAIN
 *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN	0
#define DRM_XE_QUERY_GT_TYPE_MEDIA	1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @tile_id: Tile ID where this GT lives (Information only) */
	__u16 tile_id;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad[3];
	/** @reference_clock: A clock frequency for timestamps */
	__u32 reference_clock;
	/**
	 * @near_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are nearest to the current engines
	 * of this GT.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 near_mem_regions;
	/**
	 * @far_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are far from the engines of this GT.
	 * In general, they have extra indirections when compared to the
	 * @near_mem_regions. For a discrete device this could mean system
	 * memory and memory living in a different tile.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 far_mem_regions;
	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
	__u16 ip_ver_major;
	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
	__u16 ip_ver_minor;
	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
	__u16 ip_ver_rev;
	/** @pad2: MBZ */
	__u16 pad2;
	/** @reserved: Reserved */
	__u64 reserved[7];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_gt gt_list[];
};

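/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of walking the GT list after querying it via
 * &DRM_IOCTL_XE_DEVICE_QUERY with %DRM_XE_DEVICE_QUERY_GT_LIST, e.g. to
 * find the reference clock of each GT.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_gt_list *gts = ...; // filled by the query
 *	for (__u32 i = 0; i < gts->num_gt; i++)
 *		printf("gt %u: type %u, tile %u, refclk %u\n",
 *		       gts->gt_list[i].gt_id, gts->gt_list[i].type,
 *		       gts->gt_list[i].tile_id,
 *		       gts->gt_list[i].reference_clock);
 */
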
/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *    (DSS) available for geometry operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for geometry.
 *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *    (DSS) available for compute operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for compute.
 *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type
 *    may be omitted if the driver is unable to query the mask from the
 *    hardware.
 *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *    available per Dual Sub Slices (DSS). For example a query response
 *    containing the following in mask:
 *    ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD8 EUs. This type may be omitted if the device
 *    doesn't have SIMD8 EUs.
 *  - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
 *    Units (EU) available per Dual Sub Slices (DSS). For example a query
 *    response containing the following in mask:
 *    ``SIMD16_EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD16 EUs. This type may be omitted if the device
 *    doesn't have SIMD16 EUs.
 */
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY	1
#define DRM_XE_TOPO_DSS_COMPUTE		2
#define DRM_XE_TOPO_L3_BANK		3
#define DRM_XE_TOPO_EU_PER_DSS		4
#define DRM_XE_TOPO_SIMD16_EU_PER_DSS	5
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

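/*
 * For illustration only (not part of the kernel documentation): the
 * topology query fills .data with several struct drm_xe_query_topology_mask
 * records back to back, each followed by its @num_bytes of mask. A minimal
 * sketch of walking them, assuming that layout and that `data`/`size` come
 * from a prior &DRM_IOCTL_XE_DEVICE_QUERY call:
 *
 * .. code-block:: C
 *
 *	char *data = ...;	// query.size bytes filled by the query
 *	for (__u32 pos = 0; pos < size; ) {
 *		struct drm_xe_query_topology_mask *topo =
 *			(struct drm_xe_query_topology_mask *)(data + pos);
 *
 *		printf("gt %u type %u:", topo->gt_id, topo->type);
 *		for (__u32 i = 0; i < topo->num_bytes; i++)
 *			printf(" %02x", topo->mask[i]);
 *		printf("\n");
 *		pos += sizeof(*topo) + topo->num_bytes;
 *	}
 */
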
/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles, which along with the GT's @reference_clock,
 * can be used to calculate the engine timestamp. In addition the
 * query returns a set of CPU timestamps that indicate when the command
 * streamer cycle count was captured.
 */
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which the
	 * command streamer cycle count is queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register
	 * at 0x358 offset.
	 */
	__u64 engine_cycles;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};

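/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of converting the returned cycles to nanoseconds. It assumes the
 * cycle counter ticks at the GT's @reference_clock (in Hz) reported by
 * %DRM_XE_DEVICE_QUERY_GT_LIST, and masks the counter to @width bits
 * before scaling.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_engine_cycles *ec = ...; // filled by the query
 *	__u64 mask = (ec->width == 64) ? ~0ull : (1ull << ec->width) - 1;
 *	__u64 cycles = ec->engine_cycles & mask;
 *	__u64 gpu_ns = cycles * 1000000000ull / reference_clock;
 *	// gpu_ns now correlates with ec->cpu_timestamp (+/- ec->cpu_delta)
 */
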
/**
 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
 *
 * Given a uc_type this will return the branch, major, minor and patch version
 * of the micro-controller firmware.
 */
struct drm_xe_query_uc_fw_version {
	/** @uc_type: The micro-controller type to query firmware version */
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION	0
#define XE_QUERY_UC_TYPE_HUC		1
	__u16 uc_type;

	/** @pad: MBZ */
	__u16 pad;

	/** @branch_ver: branch uc fw version */
	__u32 branch_ver;
	/** @major_ver: major uc fw version */
	__u32 major_ver;
	/** @minor_ver: minor uc fw version */
	__u32 minor_ver;
	/** @patch_ver: patch uc fw version */
	__u32 patch_ver;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved;
};

/**
 * struct drm_xe_query_pxp_status - query if PXP is ready
 *
 * If PXP is enabled and no fatal error has occurred, the status will be set to
 * one of the following values:
 *  - 0: PXP init still in progress
 *  - 1: PXP init complete
 *
 * If PXP is not enabled or something has gone wrong, the query will fail
 * with one of the following error codes:
 *  - -ENODEV: PXP not supported or disabled;
 *  - -EIO: fatal error occurred during init, so PXP will never be enabled;
 *  - -EINVAL: incorrect value provided as part of the query;
 *  - -EFAULT: error copying the memory between kernel and userspace.
 *
 * The status can only be 0 in the first few seconds after driver load. If
 * everything works as expected, the status will transition to init complete in
 * less than 1 second, while in case of errors the driver might take longer to
 * start returning an error code, but it should still take less than 10 seconds.
 *
 * The supported session type bitmask is based on the values in
 * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
 * is not reported in the bitmask.
 */
struct drm_xe_query_pxp_status {
	/** @status: current PXP status */
	__u32 status;

	/** @supported_session_types: bitmask of supported PXP session types */
	__u32 supported_session_types;
};

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 *  - %DRM_XE_DEVICE_QUERY_ENGINES
 *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 *  - %DRM_XE_DEVICE_QUERY_CONFIG
 *  - %DRM_XE_DEVICE_QUERY_GT_LIST
 *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *    configuration of the device such as information on slices, memory,
 *    caches, and so on. It is provided as a table of key / value
 *    attributes.
 *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *  - %DRM_XE_DEVICE_QUERY_PXP_STATUS
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_engines *engines;
 *	struct drm_xe_device_query query = {
 *		.extensions = 0,
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *		.size = 0,
 *		.data = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	engines = malloc(query.size);
 *	query.data = (uintptr_t)engines;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (int i = 0; i < engines->num_engines; i++) {
 *		printf("Engine %d: %s\n", i,
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COPY ? "COPY":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
 *			"UNKNOWN");
 *	}
 *	free(engines);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION	7
#define DRM_XE_DEVICE_QUERY_OA_UNITS		8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS		9
#define DRM_XE_DEVICE_QUERY_EU_STALL		10
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
 *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *    possible placement, ensure that the corresponding VRAM allocation
 *    will always use the CPU accessible part of VRAM. This is important
 *    for small-bar systems (on full-bar systems this gets turned into a
 *    noop).
 *    Note1: System memory can be used as an extra placement if the kernel
 *    should spill the allocation to system memory, if space can't be made
 *    available in the CPU accessible part of VRAM (giving the same
 *    behaviour as the i915 interface, see
 *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *    Note2: For clear-color CCS surfaces the kernel needs to read the
 *    clear-color value stored in the buffer, and on discrete platforms we
 *    need to use VRAM for display surfaces, therefore the kernel requires
 *    setting this flag for such objects, otherwise an error is returned on
 *    small-bar systems.
 *
 * @cpu_caching supports the following values:
 *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *    caching. On iGPU this can't be used for scanout surfaces. Currently
 *    not allowed for objects placed in VRAM.
 *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *    is uncached. Scanout surfaces should likely use this. All objects
 *    that can be placed in VRAM must use this.
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this object will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. Objects used with session of type
 *    %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
 *    event occurs after their creation. Attempting to flip an invalid object
 *    will cause a black frame to be displayed instead. Submissions with invalid
 *    objects mapped in the VM will be rejected.
 */
struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Size of the object to be created, must match region
	 * (system or vram) minimum alignment (&min_page_size).
	 */
	__u64 size;

	/**
	 * @placement: A mask of memory instances of where BO can be placed.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
	/**
	 * @flags: Flags for this object, see the list of
	 * DRM_XE_GEM_CREATE_FLAG_* above
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Cannot be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB	1
#define DRM_XE_GEM_CPU_CACHING_WC	2
	/**
	 * @cpu_caching: The CPU caching mode to select for this object. If
	 * mmapping the object the mode selected here will also be used. The
	 * exception is when mapping system memory (including data evicted
	 * to system) on discrete GPUs. The caching mode selected will
	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
	 * between GPU- and CPU is guaranteed. The caching mode of
	 * existing CPU-mappings will be updated transparently to
	 * user-space clients.
	 */
	__u16 cpu_caching;
	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

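/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of creating a scanout-capable BO in a CPU-visible VRAM instance.
 * `vram_instance` is assumed to come from a prior
 * %DRM_XE_DEVICE_QUERY_MEM_REGIONS query, and `bo_size` is assumed to
 * already respect that region's @min_page_size.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create = {
 *		.size = bo_size,
 *		.placement = 1 << vram_instance,
 *		.flags = DRM_XE_GEM_CREATE_FLAG_SCANOUT |
 *			 DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
 *		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *	// create.handle now holds the (nonzero) GEM handle
 */
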
/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *
 * The @flags can be:
 *  - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset
 *    for use in mmap ioctl. Writing to the returned mmap address will generate a
 *    PCI memory barrier with low overhead (avoiding an IOCTL call as well as writing
 *    to VRAM which would also add overhead), acting like an MI_MEM_FENCE
 *    instruction.
 *
 * Note: The mmap size can be at most 4K, due to HW limitations. As a result
 * this interface is only supported on CPU architectures that support 4K page
 * size. The mmap_offset ioctl will detect this and gracefully return an
 * error, where userspace is expected to have a different fallback method for
 * triggering a barrier.
 *
 * Roughly the usage would be as follows:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_mmap_offset mmo = {
 *		.handle = 0, // must be set to 0
 *		.flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
 *	map[i] = 0xdeadbeef; // issue barrier
 */
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER	(1 << 0)
	/** @flags: Flags */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
 *    space of the VM to the scratch page. A vm_bind would overwrite the scratch
 *    page mapping. This flag is mutually exclusive with the
 *    %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with the exception of the xe2
 *    and xe3 platforms.
 *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running, VM accepts
 *    exec submissions to its exec_queues that don't have an upper time
 *    limit on the job execution time. But exec submissions to these
 *    don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
 *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
 *    together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
 *    LR VMs can be created in recoverable page-fault mode using
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *    If that flag is omitted, the UMD cannot rely on the slightly
 *    different per-VM overcommit semantics that are enabled by
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but the KMD may
 *    still enable recoverable pagefaults if supported by the device.
 *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *    demand when accessed, and also allows per-VM overcommit of memory.
 *    The xe driver internally uses recoverable pagefaults to implement
 *    this.
 */
struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE		(1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

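/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of creating a long-running VM in recoverable page-fault mode, as
 * described above. Whether fault mode is usable depends on the device.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *			 DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
 *	};
 *	if (!ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		vm = create.vm_id; // use with vm_bind / exec_queue_create
 */
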
/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 *  - %DRM_XE_VM_BIND_OP_MAP
 *  - %DRM_XE_VM_BIND_OP_UNMAP
 *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 *  - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
 *    to ensure write protection
 *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
 *    MAP operation immediately rather than deferring the MAP to the page
 *    fault handler. This is implied on a non-faulting VM as there is no
 *    fault handler to defer to.
 *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *    tables are setup with a special bit which indicates writes are
 *    dropped and all reads return zero. In the future, the NULL flag
 *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *    handle MBZ, and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
 *  - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
 *    reject the binding if the encryption key is no longer valid. This
 *    flag has no effect on BOs that are not marked as using PXP.
 *  - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
 *    set, no mappings are created; rather, the range is reserved for CPU address
 *    mirroring, which will be populated on GPU page faults or prefetches. Only
 *    valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
 *    mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *    handle MBZ, and the BO offset MBZ.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/**
	 * @pat_index: The platform defined @pat_index to use for this mapping.
	 * The index basically maps to some predefined memory attributes,
	 * including things like caching, coherency, compression etc. The exact
	 * meaning of the pat_index is platform specific and defined in the
	 * Bspec and PRMs. When the KMD sets up the binding the index here is
	 * encoded into the ppGTT PTE.
	 *
	 * For coherency the @pat_index needs to be at least 1way coherent when
	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
	 * will extract the coherency mode from the @pat_index and reject if
	 * there is a mismatch (see note below for pre-MTL platforms).
	 *
	 * Note: On pre-MTL platforms there is only a caching mode and no
	 * explicit coherency mode, but on such hardware there is always a
	 * shared-LLC (or it is a dgpu) so all GT memory accesses are coherent
	 * with CPU caches even with the caching mode set as uncached. It's only
	 * the display engine that is incoherent (on dgpu it must be in VRAM
	 * which is always mapped as WC on the CPU). However to keep the uapi
	 * somewhat consistent with newer platforms the KMD groups the
	 * different cache levels into the following coherency buckets on all
	 * pre-MTL platforms:
	 *
	 *	ppGTT UC -> COH_NONE
	 *	ppGTT WC -> COH_NONE
	 *	ppGTT WT -> COH_NONE
	 *	ppGTT WB -> COH_AT_LEAST_1WAY
	 *
	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
	 * such platforms (or perhaps in general for dma-buf if shared with
	 * another device) since it is only the display engine that is actually
	 * incoherent. Everything else should typically use WB given that we
	 * have a shared-LLC. On MTL+ this completely changes and the HW
	 * defines the coherency mode as part of the @pat_index, where
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
	 * either 1WAY or 2WAY for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
	 * mapped (the address in the PTE is invalid), so the various PAT memory
	 * attributes likely do not apply. Simply leaving as zero is one
	 * option (still a valid pat_index). The same applies to
	 * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings, as for such mappings
	 * there is no actual memory being mapped.
	 */
	__u16 pat_index;

	/** @pad: MBZ */
	__u16 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;

		/**
		 * @cpu_addr_mirror_offset: Offset from GPU @addr to create
		 * CPU address mirror mappings. MBZ with current level of
		 * support (i.e. only a 1 to 1 mapping between GPU and CPU
		 * mappings is supported).
		 */
		__s64 cpu_addr_mirror_offset;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

#define DRM_XE_VM_BIND_OP_MAP		0x0
#define DRM_XE_VM_BIND_OP_UNMAP		0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define DRM_XE_VM_BIND_FLAG_READONLY		(1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE		(1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL		(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE		(1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP		(1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR	(1 << 5)
	/** @flags: Bind flags */
	__u32 flags;

	/**
	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
	 * It is a region instance, not a mask.
	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
	 */
	__u32 prefetch_mem_region_instance;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
 * illustrate `userptr`. It can be synchronized by using the example
 * provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm,
 *		.num_binds = 1,
 *		.bind.obj = 0,
 *		.bind.obj_offset = to_user_pointer(data),
 *		.bind.range = BO_SIZE,
 *		.bind.addr = BIND_ADDRESS,
 *		.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *		.bind.flags = 0,
 *		.num_syncs = 1,
 *		.syncs = &sync,
 *		.exec_queue_id = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and the exec queue must have the same vm_id. If zero, the default VM bind
	 * engine is used.
	 */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @pad2: MBZ */
	__u32 pad2;

	/** @num_syncs: number of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
 *    CAP_SYS_NICE is required to set a value above normal.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
 *    duration in microseconds.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this queue will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. When a queue of type
 *    %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
 *    (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already running.
 *    The user is expected to query the PXP status via the query ioctl (see
 *    %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
 *    attempting to create a queue with this property. When a queue is created
 *    before PXP is ready, the ioctl will return -EBUSY if init is still in
 *    progress or -EIO if init failed.
 *    Given that going into a power-saving state kills PXP HWDRM sessions,
 *    runtime PM will be blocked while queues of this type are alive.
 *    All PXP queues will be killed if a PXP invalidation event occurs.
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create exec_queue_create = {
 *		.extensions = 0,
 *		.vm_id = vm,
 *		.width = 1,
 *		.num_placements = 1,
 *		.instances = to_user_pointer(&instance),
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
 * A user can also provide a hint to the kernel for cases demanding a low
 * latency profile; note that this may have an impact on power consumption.
 * The low latency hint is indicated with a flag while creating the exec
 * queue, as shown below:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec_queue_create exec_queue_create = {
 *		.flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
 *		.extensions = 0,
 *		.vm_id = vm,
 *		.width = 1,
 *		.num_placements = 1,
 *		.instances = to_user_pointer(&instance),
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE	1
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE	2
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number BB per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT	(1 << 0)
	/** @flags: flags to use for this exec queue */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
 */
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

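/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of checking whether an exec queue has been banned (e.g. after a
 * GPU hang), using the BAN property defined above.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec_queue_get_property prop = {
 *		.exec_queue_id = exec_queue,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
 *	bool banned = prop.value != 0; // recreate the queue if banned
 */
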
/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 *  - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *	};
 *	struct drm_syncobj_create syncobj_create = { 0 };
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *	sync.handle = syncobj_create.handle;
 *	...
 *	use of &sync in drm_xe_exec or drm_xe_vm_bind
 *	...
 *	struct drm_syncobj_wait wait = {
 *		.handles = &sync.handle,
 *		.timeout_nsec = INT64_MAX,
 *		.count_handles = 1,
 *		.flags = 0,
 *		.first_signaled = 0,
 *		.pad = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
	/** @type: Type of this sync object */
	__u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
	/** @flags: Sync Flags */
	__u32 flags;

	union {
		/** @handle: Handle for the object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When the sync is passed in via
		 * the exec IOCTL this is a GPU address in the VM. When the sync
		 * is passed in via the VM bind IOCTL this is a user pointer. In
		 * either case, it is the user's responsibility that this
		 * address is present and mapped when the user fence is
		 * signalled. Must be qword aligned.
		 */
		__u64 addr;
	};

	/**
	 * @timeline_value: Input for the timeline sync object. Needs to be
	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example to use @drm_xe_exec for execution of the object
 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
 * (see example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = exec_queue,
 *		.syncs = &sync,
 *		.num_syncs = 1,
 *		.address = BIND_ADDRESS,
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Number of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on user fence, XE will wake-up on every HW engine interrupt in the
 * instances list and check if the user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 *  - %DRM_XE_UFENCE_WAIT_OP_EQ
 *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
 *  - %DRM_XE_UFENCE_WAIT_OP_GT
 *  - %DRM_XE_UFENCE_WAIT_OP_GTE
 *  - %DRM_XE_UFENCE_WAIT_OP_LT
 *  - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *  - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
 *
 * The @mask values can be for example:
 *  - 0xffu for u8
 *  - 0xffffu for u16
 *  - 0xffffffffu for u32
 *  - 0xffffffffffffffffu for u64
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
#define DRM_XE_UFENCE_WAIT_OP_GT	0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
#define DRM_XE_UFENCE_WAIT_OP_LT	0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

	/** @mask: comparison mask */
	__u64 mask;

	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
	 * timeout) it contains the timeout expressed in nanoseconds to wait
	 * (the fence will expire at now() + timeout).
	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
	 * timeout) the wait will end at timeout (uses system MONOTONIC_CLOCK).
	 * Passing a negative timeout leads to a never-ending wait.
	 *
	 * On a relative timeout this value is updated with the timeout left
	 * (for restarting the call in case of signal delivery).
	 * On an absolute timeout this value stays intact (a restarted call
	 * will still expire at the same point in time).
	 */
	__s64 timeout;

	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
	__u32 exec_queue_id;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

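/*
 * For illustration only (not part of the kernel documentation): a minimal
 * sketch of waiting for a 64-bit user fence, previously attached to a
 * submission via a struct drm_xe_sync of type %DRM_XE_SYNC_TYPE_USER_FENCE,
 * to reach a given value. `fence_addr` and `exec_queue` are assumed to come
 * from that earlier submission.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = fence_addr,		// qword aligned
 *		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *		.value = 1,			// value the GPU will write
 *		.mask = 0xffffffffffffffffu,	// u64 compare
 *		.timeout = 1000000000,		// 1s, relative
 *		.exec_queue_id = exec_queue,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */
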
1542/**
1543 * enum drm_xe_observation_type - Observation stream types
1544 */
1545enum drm_xe_observation_type {
1546 /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
1547 DRM_XE_OBSERVATION_TYPE_OA,
1548 /** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
1549 DRM_XE_OBSERVATION_TYPE_EU_STALL,
1550};

/**
 * enum drm_xe_observation_op - Observation stream ops
 */
enum drm_xe_observation_op {
	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
	DRM_XE_OBSERVATION_OP_STREAM_OPEN,

	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
	DRM_XE_OBSERVATION_OP_ADD_CONFIG,

	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};

/**
 * struct drm_xe_observation_param - Input of &DRM_IOCTL_XE_OBSERVATION
 *
 * The observation layer enables multiplexing observation streams of
 * multiple types. The actual params for a particular stream operation are
 * supplied via the @param pointer (use __copy_from_user to get these
 * params).
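 *
 * For example, opening an OA observation stream could look like the sketch
 * below, where @fd is an open device file descriptor and @properties points
 * to a chain of &drm_xe_ext_set_property structs (both names are
 * illustrative placeholders):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_observation_param p = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *		.param = (__u64)(uintptr_t)properties,
 *	};
 *
 *	// on success, stream open is expected to return the stream fd
 *	int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);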
 */
struct drm_xe_observation_param {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
	__u64 observation_type;
	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
	__u64 observation_op;
	/** @param: Pointer to actual stream params */
	__u64 param;
};

/**
 * enum drm_xe_observation_ioctls - Observation stream fd ioctls
 *
 * Information exchanged between userspace and kernel for observation fd
 * ioctls is stream type specific.
 */
enum drm_xe_observation_ioctls {
	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),

	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),

	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),

	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),

	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};

/**
 * enum drm_xe_oa_unit_type - OA unit types
 */
enum drm_xe_oa_unit_type {
	/**
	 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered
	 * sub-types of OAG. For OAR/OAC, use OAG.
	 */
	DRM_XE_OA_UNIT_TYPE_OAG,

	/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM,

	/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};

/**
 * struct drm_xe_oa_unit - describe OA unit
 */
struct drm_xe_oa_unit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_unit_id: OA unit ID */
	__u32 oa_unit_id;

	/** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */
	__u32 oa_unit_type;

	/** @capabilities: OA capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_OA_CAPS_BASE		(1 << 0)
#define DRM_XE_OA_CAPS_SYNCS		(1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE	(1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS	(1 << 3)
#define DRM_XE_OA_CAPS_OAM		(1 << 4)

	/** @oa_timestamp_freq: OA timestamp freq */
	__u64 oa_timestamp_freq;

	/** @reserved: MBZ */
	__u64 reserved[4];

	/** @num_engines: number of engines in @eci array */
	__u64 num_engines;

	/** @eci: engines attached to this OA unit */
	struct drm_xe_engine_class_instance eci[];
};

/**
 * struct drm_xe_query_oa_units - describe OA units
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct
 * drm_xe_query_oa_units in .data.
 *
 * OA unit properties for all OA units can be accessed using a code block
 * such as the one below:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_oa_units *qoa;
 *	struct drm_xe_oa_unit *oau;
 *	u8 *poau;
 *
 *	// malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then:
 *	poau = (u8 *)&qoa->oa_units[0];
 *	for (int i = 0; i < qoa->num_oa_units; i++) {
 *		oau = (struct drm_xe_oa_unit *)poau;
 *		// Access 'struct drm_xe_oa_unit' fields here
 *		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
 *	}
 */
struct drm_xe_query_oa_units {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @num_oa_units: number of OA units returned in oau[] */
	__u32 num_oa_units;
	/** @pad: MBZ */
	__u32 pad;
	/**
	 * @oa_units: struct @drm_xe_oa_unit array returned for this device.
	 * Written below as a u64 array to avoid problems with nested flexible
	 * arrays with some compilers
	 */
	__u64 oa_units[];
};

/**
 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
 * 52198/60942
 */
enum drm_xe_oa_format_type {
	/** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */
	DRM_XE_OA_FMT_TYPE_OAG,
	/** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */
	DRM_XE_OA_FMT_TYPE_OAR,
	/** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */
	DRM_XE_OA_FMT_TYPE_OAM,
	/** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */
	DRM_XE_OA_FMT_TYPE_OAC,
	/** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */
	DRM_XE_OA_FMT_TYPE_OAM_MPEC,
	/** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */
	DRM_XE_OA_FMT_TYPE_PEC,
};

/**
 * enum drm_xe_oa_property_id - OA stream property ids
 *
 * Stream params are specified as a chain of @drm_xe_ext_set_property
 * structs, with @property values from enum @drm_xe_oa_property_id and
 * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
 * The @param field in struct @drm_xe_observation_param points to the first
 * @drm_xe_ext_set_property struct.
 *
 * Exactly the same mechanism is also used for stream reconfiguration using the
 * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
 * subset of the properties below can be specified for stream reconfiguration.
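 *
 * As a sketch, a minimal two-property chain could be built as below; the
 * @config_id value is an illustrative placeholder (e.g. an id previously
 * returned from @DRM_XE_OBSERVATION_OP_ADD_CONFIG):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property props[2] = {};
 *
 *	props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	props[0].base.next_extension = (__u64)(uintptr_t)&props[1];
 *	props[0].property = DRM_XE_OA_PROPERTY_OA_UNIT_ID;
 *	props[0].value = 0;
 *
 *	props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	props[1].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
 *	props[1].value = config_id;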
 */
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY	0
	/**
	 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open
	 * the OA stream, see @oa_unit_id in 'struct
	 * drm_xe_query_oa_units'. Defaults to 0 if not provided.
	 */
	DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,

	/**
	 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw
	 * OA unit reports or stream samples in a global buffer attached to an
	 * OA unit.
	 */
	DRM_XE_OA_PROPERTY_SAMPLE_OA,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
	 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
	 */
	DRM_XE_OA_PROPERTY_OA_METRIC_SET,

	/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
	DRM_XE_OA_PROPERTY_OA_FORMAT,
	/*
	 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
	 * in terms of the following quantities:
	 *   a. enum @drm_xe_oa_format_type
	 *   b. Counter select
	 *   c. Counter size
	 *   d. BC report
	 * Also refer to the oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
	 */
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE		(0xffu << 0)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL	(0xffu << 8)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE	(0xffu << 16)
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT		(0xffu << 24)
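
	/*
	 * For example, a @DRM_XE_OA_PROPERTY_OA_FORMAT value could be
	 * composed as in the sketch below; counter_sel, counter_size and
	 * bc_report are placeholders whose real values for a given format
	 * come from the Bspec / oa_formats table referenced above:
	 *
	 *	__u64 fmt = (DRM_XE_OA_FMT_TYPE_OAG << 0) |
	 *		    (counter_sel << 8) |
	 *		    (counter_size << 16) |
	 *		    (bc_report << 24);
	 */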

	/**
	 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
	 * sampling with a sampling period proportional to 2^(period_exponent + 1)
	 */
	DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
	 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
	 */
	DRM_XE_OA_PROPERTY_OA_DISABLED,

	/**
	 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
	 * @exec_queue_id. OA queries can be executed on this exec queue.
	 */
	DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to
	 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0.
	 */
	DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,

	/**
	 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing
	 * to be disabled for the stream exec queue.
	 */
	DRM_XE_OA_PROPERTY_NO_PREEMPT,

	/**
	 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array
	 * specified in @DRM_XE_OA_PROPERTY_SYNCS
	 */
	DRM_XE_OA_PROPERTY_NUM_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array
	 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA
	 * configuration will wait until input fences signal. Output fences
	 * will signal after the new OA configuration takes effect. For
	 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar
	 * to the VM bind case.
	 */
	DRM_XE_OA_PROPERTY_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be
	 * allocated by the driver in bytes. Supported sizes are powers of
	 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA
	 * buffer is allocated by default.
	 */
	DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,

	/**
	 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
	 * for before unblocking poll or read
	 */
	DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
};

/**
 * struct drm_xe_oa_config - OA metric configuration
 *
 * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
 * particular config can be specified when opening an OA stream using the
 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
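 *
 * As a sketch, adding a config could look like the snippet below; the uuid
 * string and the single (register address, value) pair are illustrative
 * placeholders only:
 *
 * .. code-block:: C
 *
 *	__u32 regs[2] = { 0x9888, 0x14150001 };	// one (addr, value) pair
 *	struct drm_xe_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_regs = 1,
 *		.regs_ptr = (__u64)(uintptr_t)regs,
 *	};
 *	struct drm_xe_observation_param p = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_ADD_CONFIG,
 *		.param = (__u64)(uintptr_t)&cfg,
 *	};
 *
 *	// on success, ADD_CONFIG is expected to return the new config id
 *	int config_id = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &p);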
 */
struct drm_xe_oa_config {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
	char uuid[36];

	/** @n_regs: Number of regs in @regs_ptr */
	__u32 n_regs;

	/**
	 * @regs_ptr: Pointer to (register address, value) pairs for OA config
	 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs).
	 */
	__u64 regs_ptr;
};

/**
 * struct drm_xe_oa_stream_status - OA stream status returned from
 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
 * call the ioctl to query stream status in response to an EIO errno from
 * observation fd read().
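 *
 * For example, in a sketch where @stream_fd, @buf and @buf_size are
 * illustrative placeholders:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_oa_stream_status status = {};
 *
 *	if (read(stream_fd, buf, buf_size) < 0 && errno == EIO)
 *		ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_STATUS, &status);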
 */
struct drm_xe_oa_stream_status {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_status: OA stream status (see Bspec 46717/61226) */
	__u64 oa_status;
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL		(1 << 3)
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW	(1 << 2)
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW		(1 << 1)
#define DRM_XE_OASTATUS_REPORT_LOST		(1 << 0)

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * struct drm_xe_oa_stream_info - OA stream info returned from
 * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
 */
struct drm_xe_oa_stream_info {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_buf_size: OA buffer size */
	__u64 oa_buf_size;

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * enum drm_xe_pxp_session_type - Supported PXP session types.
 *
 * We currently only support HWDRM sessions, which are used for protected
 * content that ends up being displayed, but the HW supports multiple types, so
 * we might extend support in the future.
 */
enum drm_xe_pxp_session_type {
	/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
	DRM_XE_PXP_TYPE_NONE = 0,
	/**
	 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
	 * up on the display.
	 */
	DRM_XE_PXP_TYPE_HWDRM = 1,
};

/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf

/**
 * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
 *
 * These properties are passed to the driver at open as a chain of
 * @drm_xe_ext_set_property structures, with @property set to these
 * property ids and @value set to the corresponding values of these
 * properties. @drm_xe_user_extension base.name should be set to
 * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
 *
 * With the file descriptor obtained from open, user space must enable
 * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
 * calling read(). An EIO errno from read() indicates that the HW dropped
 * data due to a full buffer.
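 *
 * A minimal capture sequence could look like the sketch below, where
 * @stream_fd is the fd returned from stream open and @buf / @buf_size are
 * illustrative placeholders:
 *
 * .. code-block:: C
 *
 *	ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);
 *
 *	ssize_t n = read(stream_fd, buf, buf_size);
 *	if (n < 0 && errno == EIO)
 *		handle_dropped_data();	// hypothetical recovery helper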
 */
enum drm_xe_eu_stall_property_id {
#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY	0
	/**
	 * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
	 * EU stall data will be captured.
	 */
	DRM_XE_EU_STALL_PROP_GT_ID = 1,

	/**
	 * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
	 * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
	 */
	DRM_XE_EU_STALL_PROP_SAMPLE_RATE,

	/**
	 * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
	 * EU stall data reports to be present in the kernel buffer
	 * before unblocking a blocked poll or read.
	 */
	DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
};

/**
 * struct drm_xe_query_eu_stall - Information about EU stall sampling.
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
 * struct @drm_xe_query_eu_stall in .data.
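 *
 * The returned sampling rates can then be walked as in the sketch below,
 * following the same pattern as the OA units query above:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_eu_stall *qes;
 *
 *	// malloc qes and issue DRM_XE_DEVICE_QUERY_EU_STALL. Then:
 *	for (int i = 0; i < qes->num_sampling_rates; i++)
 *		printf("rate[%d]: %llu GPU cycles\n", i,
 *		       (unsigned long long)qes->sampling_rates[i]);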
 */
struct drm_xe_query_eu_stall {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @capabilities: EU stall capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_EU_STALL_CAPS_BASE		(1 << 0)

	/** @record_size: size of each EU stall data record */
	__u64 record_size;

	/** @per_xecore_buf_size: internal per XeCore buffer size */
	__u64 per_xecore_buf_size;

	/** @reserved: Reserved */
	__u64 reserved[5];

	/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
	__u64 num_sampling_rates;

	/**
	 * @sampling_rates: Flexible array of sampling rates
	 * sorted in the fastest to slowest order.
	 * Sampling rates are specified in GPU clock cycles.
	 */
	__u64 sampling_rates[];
};

#if defined(__cplusplus)
}
#endif

#endif /* _XE_DRM_H_ */