   1/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */
   2/* Copyright (c) 2023 Imagination Technologies Ltd. */
   3
   4#ifndef PVR_DRM_UAPI_H
   5#define PVR_DRM_UAPI_H
   6
   7#include "drm.h"
   8
   9#include <linux/const.h>
  10#include <linux/types.h>
  11
  12#if defined(__cplusplus)
  13extern "C" {
  14#endif
  15
  16/**
  17 * DOC: PowerVR UAPI
  18 *
  19 * The PowerVR IOCTL argument structs have a few limitations in place, in
  20 * addition to the standard kernel restrictions:
  21 *
  22 *  - All members must be type-aligned.
  23 *  - The overall struct must be padded to 64-bit alignment.
  24 *  - Explicit padding is almost always required. This takes the form of
  25 *    ``_padding_[x]`` members of sufficient size to pad to the next power-of-two
  26 *    alignment, where [x] is the offset into the struct in hexadecimal. Arrays
  27 *    are never used for alignment. Padding fields must be zeroed; this is
  28 *    always checked.
  29 *  - Unions may only appear as the last member of a struct.
  30 *  - Individual union members may grow in the future. The space between the
  31 *    end of a union member and the end of its containing union is considered
  32 *    "implicit padding" and must be zeroed. This is always checked.
  33 *
  34 * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of
  35 * DEV_QUERY argument structs. These are used to fetch information about the
  36 * device and runtime. These structs are subject to the same rules set out
  37 * above.
  38 */
  39
/**
 * struct drm_pvr_obj_array - Container used to pass arrays of objects
 *
 * It is not unusual to have to extend objects to pass new parameters, and the DRM
 * ioctl infrastructure is supporting that by padding ioctl arguments with zeros
 * when the data passed by userspace is smaller than the struct defined in the
 * drm_ioctl_desc, thus keeping things backward compatible. This type is just
 * applying the same concepts to indirect objects passed through arrays referenced
 * from the main ioctl arguments structure: the stride basically defines the size
 * of the object passed by userspace, which allows the kernel driver to pad with
 * zeros when it's smaller than the size of the object it expects.
 *
 * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
 * have a very good reason not to.
 */
struct drm_pvr_obj_array {
	/**
	 * @stride: Stride of object struct, i.e. ``sizeof()`` of a single
	 * array element as known to userspace. Used for versioning.
	 */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/**
	 * @array: User pointer to an array of objects, encoded as a __u64 so
	 * the struct layout is identical for 32- and 64-bit userspace.
	 */
	__u64 array;
};
  65
/**
 * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
 * @cnt: Number of elements pointed to by @ptr.
 * @ptr: Pointer to start of a C array.
 *
 * Return: Literal of type &struct drm_pvr_obj_array.
 */
#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
  75
  76/**
  77 * DOC: PowerVR IOCTL interface
  78 */
  79
/**
 * PVR_IOCTL() - Build a PowerVR IOCTL number
 * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
 * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
 * @_data: The type of the args struct passed by this IOCTL.
 *
 * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an
 * ``_args`` suffix. They are therefore omitted from @_data.
 *
 * This should only be used to build the constants described below; it should
 * never be used to call an IOCTL directly.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define PVR_IOCTL(_ioctl, _mode, _data) \
	_mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
  96
/*
 * IOCTL numbers for the PowerVR driver, built with PVR_IOCTL(). The argument
 * struct for each IOCTL is declared further down in this file.
 */
#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
 111
 112/**
 113 * DOC: PowerVR IOCTL DEV_QUERY interface
 114 */
 115
/**
 * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
 * the graphics processor.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET.
 */
struct drm_pvr_dev_query_gpu_info {
	/**
	 * @gpu_id: GPU identifier.
	 *
	 * For all currently supported GPUs this is the BVNC encoded as a 64-bit
	 * value as follows:
	 *
	 *    +--------+--------+--------+-------+
	 *    | 63..48 | 47..32 | 31..16 | 15..0 |
	 *    +========+========+========+=======+
	 *    | B      | V      | N      | C     |
	 *    +--------+--------+--------+-------+
	 */
	__u64 gpu_id;

	/**
	 * @num_phantoms: Number of Phantoms present.
	 */
	__u32 num_phantoms;

	/**
	 * @_padding_c: Reserved; pads the struct to 64-bit alignment. This
	 * field must be zeroed.
	 */
	__u32 _padding_c;
};
 146
/**
 * struct drm_pvr_dev_query_runtime_info - Container used to fetch information
 * about the graphics runtime.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET.
 */
struct drm_pvr_dev_query_runtime_info {
	/**
	 * @free_list_min_pages: Minimum allowed free list size,
	 * in PM physical pages.
	 */
	__u64 free_list_min_pages;

	/**
	 * @free_list_max_pages: Maximum allowed free list size,
	 * in PM physical pages.
	 */
	__u64 free_list_max_pages;

	/**
	 * @common_store_alloc_region_size: Size of the Allocation
	 * Region within the Common Store used for coefficient and shared
	 * registers, in dwords.
	 */
	__u32 common_store_alloc_region_size;

	/**
	 * @common_store_partition_space_size: Size of the
	 * Partition Space within the Common Store for output buffers, in
	 * dwords.
	 */
	__u32 common_store_partition_space_size;

	/**
	 * @max_coeffs: Maximum coefficients, in dwords.
	 */
	__u32 max_coeffs;

	/**
	 * @cdm_max_local_mem_size_regs: Maximum amount of local
	 * memory available to a compute kernel, in dwords.
	 */
	__u32 cdm_max_local_mem_size_regs;
};
 192
/**
 * struct drm_pvr_dev_query_quirks - Container used to fetch information about
 * hardware fixes for which the device may require support in the user mode
 * driver.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_QUIRKS_GET.
 */
struct drm_pvr_dev_query_quirks {
	/**
	 * @quirks: A userspace address for the hardware quirks __u32 array.
	 *
	 * The first @musthave_count items in the list are quirks that the
	 * client must support for this device. If userspace does not support
	 * all these quirks then functionality is not guaranteed and client
	 * initialisation must fail.
	 * The remaining quirks in the list affect userspace and the kernel or
	 * firmware. They are disabled by default and require userspace to
	 * opt-in. The opt-in mechanism depends on the quirk.
	 */
	__u64 quirks;

	/** @count: Length of @quirks (number of __u32 entries, not bytes). */
	__u16 count;

	/**
	 * @musthave_count: The number of entries in @quirks that are
	 * mandatory, starting at index 0.
	 */
	__u16 musthave_count;

	/**
	 * @_padding_c: Reserved; pads the struct to 64-bit alignment. This
	 * field must be zeroed.
	 */
	__u32 _padding_c;
};
 227
/**
 * struct drm_pvr_dev_query_enhancements - Container used to fetch information
 * about optional enhancements supported by the device that require support in
 * the user mode driver.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET.
 */
struct drm_pvr_dev_query_enhancements {
	/**
	 * @enhancements: A userspace address for the hardware enhancements
	 * __u32 array.
	 *
	 * These enhancements affect userspace and the kernel or firmware. They
	 * are disabled by default and require userspace to opt-in. The opt-in
	 * mechanism depends on the enhancement.
	 */
	__u64 enhancements;

	/** @count: Length of @enhancements (number of __u32 entries, not bytes). */
	__u16 count;

	/** @_padding_a: Reserved. This field must be zeroed. */
	__u16 _padding_a;

	/**
	 * @_padding_c: Reserved; pads the struct to 64-bit alignment. This
	 * field must be zeroed.
	 */
	__u32 _padding_c;
};
 256
/**
 * enum drm_pvr_heap_id - Array index for heap info data returned by
 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
 *
 * For compatibility reasons all indices will be present in the returned array,
 * however some heaps may not be present. These are indicated where
 * &struct drm_pvr_heap.size is set to zero.
 */
enum drm_pvr_heap_id {
	/** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
	DRM_PVR_HEAP_GENERAL = 0,
	/** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */
	DRM_PVR_HEAP_PDS_CODE_DATA,
	/** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
	DRM_PVR_HEAP_USC_CODE,
	/** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
	DRM_PVR_HEAP_RGNHDR,
	/** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
	DRM_PVR_HEAP_VIS_TEST,
	/** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
	DRM_PVR_HEAP_TRANSFER_FRAG,

	/**
	 * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
	 *
	 * More heaps may be added, so this also serves as the copy limit when
	 * sent by the caller.
	 */
	DRM_PVR_HEAP_COUNT
	/* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
};
 289
/**
 * struct drm_pvr_heap - Container holding information about a single heap.
 *
 * This will always be fetched as an array, indexed by &enum drm_pvr_heap_id.
 */
struct drm_pvr_heap {
	/** @base: Base address of heap. */
	__u64 base;

	/** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */
	__u64 size;

	/** @flags: Flags for this heap. Currently always 0. */
	__u32 flags;

	/** @page_size_log2: Log2 of the device page size for this heap. */
	__u32 page_size_log2;
};
 308
/**
 * struct drm_pvr_dev_query_heap_info - Container used to fetch information
 * about heaps supported by the device driver.
 *
 * Please note all driver-supported heaps will be returned up to &heaps.count.
 * Some heaps will not be present in all devices, which will be indicated by
 * &struct drm_pvr_heap.size being set to zero.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
 */
struct drm_pvr_dev_query_heap_info {
	/**
	 * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count
	 * and stride will be updated with those known to the driver version, to
	 * facilitate allocation by the caller.
	 */
	struct drm_pvr_obj_array heaps;
};
 328
/**
 * enum drm_pvr_static_data_area_usage - Array index for static data area info
 * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
 *
 * For compatibility reasons all indices will be present in the returned array,
 * however some areas may not be present. These are indicated where
 * &struct drm_pvr_static_data_area.size is set to zero.
 */
enum drm_pvr_static_data_area_usage {
	/**
	 * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment.
	 *
	 * The End of Tile PDS task runs at completion of a tile during a fragment job, and is
	 * responsible for emitting the tile to the Pixel Back End.
	 */
	DRM_PVR_STATIC_DATA_AREA_EOT = 0,

	/**
	 * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
	 * invalidation.
	 *
	 * This must point to valid physical memory but the contents otherwise are not used.
	 */
	DRM_PVR_STATIC_DATA_AREA_FENCE,

	/**
	 * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
	 *
	 * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
	 */
	DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,

	/**
	 * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
	 *
	 * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
	 * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
	 *
	 * The slots are (slots 12 and 13 are unassigned):
	 * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
	 * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
	 * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
	 * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
	 * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
	 * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
	 * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
	 * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
	 * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
	 * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
	 * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
	 * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
	 * 14 = Identity (biased)
	 * 15 = Identity
	 */
	DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
};
 385
/**
 * struct drm_pvr_static_data_area - Container holding information about a
 * single static data area.
 *
 * This will always be fetched as an array.
 */
struct drm_pvr_static_data_area {
	/**
	 * @area_usage: Usage of static data area.
	 * See &enum drm_pvr_static_data_area_usage.
	 */
	__u16 area_usage;

	/**
	 * @location_heap_id: Array index of the heap where this static data
	 * area is located. This array is fetched using
	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
	 */
	__u16 location_heap_id;

	/** @size: Size of static data area. Not present if set to zero. */
	__u32 size;

	/** @offset: Offset of static data area from start of heap. */
	__u64 offset;
};
 412
/**
 * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
 * information about the static data areas in heaps supported by the device
 * driver.
 *
 * Please note all driver-supported static data areas will be returned up to
 * &static_data_areas.count. Some will not be present for all devices, which
 * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
 *
 * Further, some heaps will not be present either. See &struct
 * drm_pvr_dev_query_heap_info.
 *
 * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
 * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
 */
struct drm_pvr_dev_query_static_data_areas {
	/**
	 * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
	 * pointer is NULL, the count and stride will be updated with those
	 * known to the driver version, to facilitate allocation by the caller.
	 */
	struct drm_pvr_obj_array static_data_areas;
};
 436
/**
 * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
 * indicate the type of the receiving container.
 *
 * Append only. Do not reorder.
 */
enum drm_pvr_dev_query {
	/**
	 * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
	 * to &struct drm_pvr_dev_query_gpu_info.
	 */
	DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,

	/**
	 * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
	 * pointer to &struct drm_pvr_dev_query_runtime_info.
	 */
	DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,

	/**
	 * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
	 * to &struct drm_pvr_dev_query_quirks.
	 */
	DRM_PVR_DEV_QUERY_QUIRKS_GET,

	/**
	 * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
	 * pointer to &struct drm_pvr_dev_query_enhancements.
	 */
	DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,

	/**
	 * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
	 * pointer to &struct drm_pvr_dev_query_heap_info.
	 */
	DRM_PVR_DEV_QUERY_HEAP_INFO_GET,

	/**
	 * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
	 * a pointer to &struct drm_pvr_dev_query_static_data_areas.
	 */
	DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
};
 480
/**
 * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
 */
struct drm_pvr_ioctl_dev_query_args {
	/**
	 * @type: [IN] Type of query and output struct. See &enum drm_pvr_dev_query.
	 */
	__u32 type;

	/**
	 * @size: [IN/OUT] Size of the receiving struct, see @type.
	 *
	 * After a successful call this will be updated to the written byte
	 * length.
	 * Can also be used to get the minimum byte length (see @pointer).
	 * This allows additional fields to be appended to the structs in
	 * future.
	 */
	__u32 size;

	/**
	 * @pointer: [IN] Pointer to struct @type.
	 *
	 * Must be large enough to contain @size bytes.
	 * If pointer is NULL, the expected size will be returned in the @size
	 * field, but no other data will be written.
	 */
	__u64 pointer;
};
 510
 511/**
 512 * DOC: PowerVR IOCTL CREATE_BO interface
 513 */
 514
 515/**
 516 * DOC: Flags for CREATE_BO
 517 *
 518 * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some
 519 * fonts.
 520 *
 521 * Device mapping options
 522 *    :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the
 523 *       cache. This is used for buffers that will either be regularly updated by the CPU (eg free
 524 *       lists) or will be accessed only once and therefore isn't worth caching (eg partial render
 525 *       buffers).
 526 *       By default, the device flushes its memory caches after every job, so this is not normally
 527 *       required for coherency.
 528 *    :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware
 529 *       processor should be allowed to access this memory when mapped to the device. It is not
 530 *       valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS.
 531 *
 532 * CPU mapping options
 533 *    :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this
 534 *       memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT.
 535 */
#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
/* Bits 3..63 are reserved. */

/* Mask of all currently defined DRM_PVR_BO_* flags (bits 0..2). */
#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \
			       DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
 543
/**
 * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
 */
struct drm_pvr_ioctl_create_bo_args {
	/**
	 * @size: [IN] Size of buffer object to create. This must be page size
	 * aligned.
	 */
	__u64 size;

	/**
	 * @handle: [OUT] GEM handle of the new buffer object for use in
	 * userspace.
	 */
	__u32 handle;

	/**
	 * @_padding_c: Reserved; keeps @flags 64-bit aligned. This field must
	 * be zeroed.
	 */
	__u32 _padding_c;

	/**
	 * @flags: [IN] Options which will affect the behaviour of this
	 * creation operation and future mapping operations on the created
	 * object. This field must be a valid combination of ``DRM_PVR_BO_*``
	 * values, with all bits marked as reserved set to zero.
	 */
	__u64 flags;
};
 571
 572/**
 573 * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface
 574 */
 575
/**
 * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
 * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
 *
 * Like other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
 * Instead, it allocates a fake offset which refers to the specified buffer
 * object. This offset can be used with a real mmap call on the DRM device
 * itself.
 */
struct drm_pvr_ioctl_get_bo_mmap_offset_args {
	/** @handle: [IN] GEM handle of the buffer object to be mapped. */
	__u32 handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;

	/** @offset: [OUT] Fake offset to use in the real mmap call. */
	__u64 offset;
};
 595
 596/**
 597 * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces
 598 */
 599
/**
 * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
 * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
 */
struct drm_pvr_ioctl_create_vm_context_args {
	/** @handle: [OUT] Handle for new VM context. */
	__u32 handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;
};
 611
/**
 * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
 * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
 */
struct drm_pvr_ioctl_destroy_vm_context_args {
	/**
	 * @handle: [IN] Handle for VM context to be destroyed.
	 */
	__u32 handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;
};
 625
 626/**
 627 * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces
 628 *
 629 * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
 630 *
 631 * The client is responsible for managing GPU address space. It should allocate mappings within
 632 * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
 633 *
 634 * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
 635 * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
 636 * partially map a buffer.
 637 *
 638 * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
 639 * space only if the size of the mapping matches that known to the driver.
 640 */
 641
/**
 * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
 */
struct drm_pvr_ioctl_vm_map_args {
	/**
	 * @vm_context_handle: [IN] Handle for VM context for this mapping to
	 * exist in.
	 */
	__u32 vm_context_handle;

	/** @flags: [IN] Flags which affect this mapping. Currently always 0. */
	__u32 flags;

	/**
	 * @device_addr: [IN] Requested device-virtual address for the mapping.
	 * This must be non-zero and aligned to the device page size for the
	 * heap containing the requested address. It is an error to specify an
	 * address which is not contained within one of the heaps returned by
	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
	 */
	__u64 device_addr;

	/**
	 * @handle: [IN] Handle of the target buffer object. This must be a
	 * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
	 */
	__u32 handle;

	/** @_padding_14: Reserved. This field must be zeroed. */
	__u32 _padding_14;

	/**
	 * @offset: [IN] Offset into the target bo from which to begin the
	 * mapping.
	 */
	__u64 offset;

	/**
	 * @size: [IN] Size of the requested mapping. Must be aligned to
	 * the device page size for the heap containing the requested address,
	 * as well as the host page size. When added to @device_addr, the
	 * result must not overflow the heap which contains @device_addr (i.e.
	 * the range specified by @device_addr and @size must be completely
	 * contained within a single heap specified by
	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
	 */
	__u64 size;
};
 690
/**
 * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
 */
struct drm_pvr_ioctl_vm_unmap_args {
	/**
	 * @vm_context_handle: [IN] Handle for VM context that this mapping
	 * exists in.
	 */
	__u32 vm_context_handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;

	/**
	 * @device_addr: [IN] Device-virtual address at the start of the target
	 * mapping. This must be non-zero.
	 */
	__u64 device_addr;

	/**
	 * @size: [IN] Size in bytes of the target mapping. This must be
	 * non-zero.
	 */
	__u64 size;
};
 715
 716/**
 717 * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces
 718 */
 719
/**
 * enum drm_pvr_ctx_priority - Arguments for
 * &drm_pvr_ioctl_create_context_args.priority
 *
 * Note: the values are signed; the priority field that carries them is __s32.
 */
enum drm_pvr_ctx_priority {
	/** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */
	DRM_PVR_CTX_PRIORITY_LOW = -512,

	/** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
	DRM_PVR_CTX_PRIORITY_NORMAL = 0,

	/**
	 * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal.
	 * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``.
	 */
	DRM_PVR_CTX_PRIORITY_HIGH = 512,
};
 737
/**
 * enum drm_pvr_ctx_type - Arguments for
 * &struct drm_pvr_ioctl_create_context_args.type
 */
enum drm_pvr_ctx_type {
	/**
	 * @DRM_PVR_CTX_TYPE_RENDER: Render context.
	 */
	DRM_PVR_CTX_TYPE_RENDER = 0,

	/**
	 * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
	 */
	DRM_PVR_CTX_TYPE_COMPUTE,

	/**
	 * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
	 * master.
	 */
	DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
};
 759
/**
 * struct drm_pvr_ioctl_create_context_args - Arguments for
 * %DRM_IOCTL_PVR_CREATE_CONTEXT
 */
struct drm_pvr_ioctl_create_context_args {
	/**
	 * @type: [IN] Type of context to create.
	 *
	 * This must be one of the values defined by &enum drm_pvr_ctx_type.
	 */
	__u32 type;

	/** @flags: [IN] Flags for context. */
	__u32 flags;

	/**
	 * @priority: [IN] Priority of new context.
	 *
	 * This must be one of the values defined by &enum drm_pvr_ctx_priority.
	 */
	__s32 priority;

	/** @handle: [OUT] Handle for new context. */
	__u32 handle;

	/**
	 * @static_context_state: [IN] Pointer to static context state stream.
	 */
	__u64 static_context_state;

	/**
	 * @static_context_state_len: [IN] Length of static context state, in bytes.
	 */
	__u32 static_context_state_len;

	/**
	 * @vm_context_handle: [IN] Handle for VM context that this context is
	 * associated with.
	 */
	__u32 vm_context_handle;

	/**
	 * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
	 * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0.
	 */
	__u64 callstack_addr;
};
 807
/**
 * struct drm_pvr_ioctl_destroy_context_args - Arguments for
 * %DRM_IOCTL_PVR_DESTROY_CONTEXT
 */
struct drm_pvr_ioctl_destroy_context_args {
	/**
	 * @handle: [IN] Handle for context to be destroyed.
	 */
	__u32 handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;
};
 821
 822/**
 823 * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces
 824 */
 825
/**
 * struct drm_pvr_ioctl_create_free_list_args - Arguments for
 * %DRM_IOCTL_PVR_CREATE_FREE_LIST
 *
 * Free list arguments have the following constraints :
 *
 * - @max_num_pages must be greater than zero.
 * - @grow_threshold must be between 0 and 100.
 * - @grow_num_pages must be less than or equal to &max_num_pages.
 * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
 *   of 4.
 * - When &grow_num_pages is 0, @initial_num_pages must be equal to
 *   @max_num_pages.
 * - When &grow_num_pages is non-zero, @initial_num_pages must be less than
 *   @max_num_pages.
 */
struct drm_pvr_ioctl_create_free_list_args {
	/**
	 * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
	 * containing memory to be used by free list.
	 *
	 * The mapped region of the buffer object must be at least
	 * @max_num_pages * ``sizeof(__u32)``.
	 *
	 * The buffer object must have been created with
	 * %DRM_PVR_BO_PM_FW_PROTECT set and
	 * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set.
	 */
	__u64 free_list_gpu_addr;

	/** @initial_num_pages: [IN] Pages initially allocated to free list. */
	__u32 initial_num_pages;

	/** @max_num_pages: [IN] Maximum number of pages in free list. */
	__u32 max_num_pages;

	/** @grow_num_pages: [IN] Pages to grow free list by per request. */
	__u32 grow_num_pages;

	/**
	 * @grow_threshold: [IN] Percentage of FL memory used that should
	 * trigger a new grow request.
	 */
	__u32 grow_threshold;

	/**
	 * @vm_context_handle: [IN] Handle for VM context that the free list buffer
	 * object is mapped in.
	 */
	__u32 vm_context_handle;

	/**
	 * @handle: [OUT] Handle for created free list.
	 */
	__u32 handle;
};
 882
/**
 * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
 * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
 */
struct drm_pvr_ioctl_destroy_free_list_args {
	/**
	 * @handle: [IN] Handle for free list to be destroyed.
	 */
	__u32 handle;

	/** @_padding_4: Reserved. This field must be zeroed. */
	__u32 _padding_4;
};
 896
 897/**
 898 * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
 899 */
 900
/**
 * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for
 * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args.
 */
struct drm_pvr_create_hwrt_geom_data_args {
	/** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
	__u64 tpc_dev_addr;

	/** @tpc_size: [IN] Size of TPC, in bytes. */
	__u32 tpc_size;

	/** @tpc_stride: [IN] Stride between layers in TPC, in pages. */
	__u32 tpc_stride;

	/** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
	__u64 vheap_table_dev_addr;

	/** @rtc_dev_addr: [IN] Render Target Cache GPU virtual address. */
	__u64 rtc_dev_addr;
};
 921
 922/**
 923 * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for
 924 * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args.
 925 */
 926struct drm_pvr_create_hwrt_rt_data_args {
 927	/** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
 928	__u64 pm_mlist_dev_addr;
 929
 930	/** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
 931	__u64 macrotile_array_dev_addr;
 932
 933	/** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
 934	__u64 region_header_dev_addr;
 935};
 936
 937#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
 938#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
 939
 940/**
 941 * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
 942 * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
 943 */
struct drm_pvr_ioctl_create_hwrt_dataset_args {
	/** @geom_data_args: [IN] Geometry data arguments. */
	struct drm_pvr_create_hwrt_geom_data_args geom_data_args;

	/**
	 * @rt_data_args: [IN] Array of render target arguments.
	 *
	 * Each entry in this array represents a render target in a double buffered
	 * setup.
	 */
	struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];

	/**
	 * @free_list_handles: [IN] Array of free list handles.
	 *
	 * Index this array with %PVR_DRM_HWRT_FREE_LIST_LOCAL and
	 * %PVR_DRM_HWRT_FREE_LIST_GLOBAL.
	 *
	 * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have initial
	 * size of at least that reported by
	 * &drm_pvr_dev_query_runtime_info.free_list_min_pages.
	 */
	__u32 free_list_handles[2];

	/** @width: [IN] Width in pixels. */
	__u32 width;

	/** @height: [IN] Height in pixels. */
	__u32 height;

	/** @samples: [IN] Number of samples. */
	__u32 samples;

	/** @layers: [IN] Number of layers. */
	__u32 layers;

	/** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
	__u32 isp_merge_lower_x;

	/** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
	__u32 isp_merge_lower_y;

	/** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
	__u32 isp_merge_scale_x;

	/** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
	__u32 isp_merge_scale_y;

	/** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
	__u32 isp_merge_upper_x;

	/** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
	__u32 isp_merge_upper_y;

	/**
	 * @region_header_size: [IN] Size of region header array. This common field is used by
	 * both render targets in this data set.
	 *
	 * The units for this field differ depending on what version of the simple internal
	 * parameter format the device uses. If format 2 is in use then this is interpreted as the
	 * number of region headers. For other formats it is interpreted as the size in dwords.
	 */
	__u32 region_header_size;

	/**
	 * @handle: [OUT] Handle for created HWRT dataset.
	 */
	__u32 handle;
};
1010
1011/**
1012 * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
1013 * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
1014 */
struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
	/** @handle: [IN] Handle of the HWRT dataset that should be destroyed. */
	__u32 handle;

	/** @_padding_4: Reserved; must be zero. */
	__u32 _padding_4;
};
1024
1025/**
1026 * DOC: PowerVR IOCTL SUBMIT_JOBS interface
1027 */
1028
1029/**
1030 * DOC: Flags for the drm_pvr_sync_op object.
1031 *
 * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK
1033 *
1034 *    Handle type mask for the drm_pvr_sync_op::flags field.
1035 *
1036 * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
1037 *
1038 *    Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
1039 *    This is the default type.
1040 *
1041 * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
1042 *
1043 *    Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
1044 *
1045 * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
1046 *
1047 *    Signal operation requested. The out-fence bound to the job will be attached to
1048 *    the syncobj whose handle is passed in drm_pvr_sync_op::handle.
1049 *
1050 * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
1051 *
1052 *    Wait operation requested. The job will wait for this particular syncobj or syncobj
1053 *    point to be signaled before being started.
1054 *    This is the default operation.
1055 */
/* Bits [3:0] of drm_pvr_sync_op::flags select the handle type. */
#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
/* Bit 31 set requests a signal operation; clear requests a wait (the default). */
#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
#define DRM_PVR_SYNC_OP_FLAG_WAIT 0

/* Union of all flag bits defined above. */
#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
				    DRM_PVR_SYNC_OP_FLAG_SIGNAL)
1064
1065/**
1066 * struct drm_pvr_sync_op - Object describing a sync operation
1067 */
struct drm_pvr_sync_op {
	/**
	 * @handle: Handle of sync object.
	 *
	 * This is either a syncobj or a timeline syncobj handle, as selected
	 * by the ``DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_`` bits in @flags.
	 */
	__u32 handle;

	/** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */
	__u32 flags;

	/** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
	__u64 value;
};
1078
1079/**
1080 * DOC: Flags for SUBMIT_JOB ioctl geometry command.
1081 *
1082 * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
1083 *
 *    Indicates if this is the first command to be issued for a render.
1085 *
1086 * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
1087 *
 *    Indicates if this is the last command to be issued for a render.
1089 *
1090 * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
1091 *
1092 *    Forces to use single core in a multi core device.
1093 *
1094 * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
1095 *
1096 *    Logical OR of all the geometry cmd flags.
1097 */
#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \
	(DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \
	 DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \
	 DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
1105
1106/**
1107 * DOC: Flags for SUBMIT_JOB ioctl fragment command.
1108 *
1109 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
1110 *
1111 *    Use single core in a multi core setup.
1112 *
1113 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
1114 *
1115 *    Indicates whether a depth buffer is present.
1116 *
1117 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
1118 *
1119 *    Indicates whether a stencil buffer is present.
1120 *
1121 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
1122 *
1123 *    Disallow compute overlapped with this render.
1124 *
1125 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
1126 *
1127 *    Indicates whether this render produces visibility results.
1128 *
 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER
 *
 *    Indicates whether partial renders write to a scratch buffer instead of
 *    the final surface. It also forces the full screen copy expected to be
 *    present on the last render after all partial renders have completed.
 *
 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER
 *
 *    Indicates whether this fragment job is a partial render.
1134 *
1135 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE
1136 *
1137 *    Disable pixel merging for this render.
1138 *
1139 * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
1140 *
1141 *    Logical OR of all the fragment cmd flags.
1142 */
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \
	(DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \
	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
1160
1161/**
1162 * DOC: Flags for SUBMIT_JOB ioctl compute command.
1163 *
1164 * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
1165 *
1166 *    Disallow other jobs overlapped with this compute.
1167 *
1168 * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
1169 *
1170 *    Forces to use single core in a multi core device.
1171 *
1172 * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK
1173 *
1174 *    Logical OR of all the compute cmd flags.
1175 */
#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \
	(DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
	 DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
1181
1182/**
1183 * DOC: Flags for SUBMIT_JOB ioctl transfer command.
1184 *
1185 * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
1186 *
1187 *    Forces job to use a single core in a multi core device.
1188 *
1189 * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK
1190 *
1191 *    Logical OR of all the transfer cmd flags.
1192 */
#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)

#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
	(DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
1197
1198/**
1199 * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type
1200 */
/*
 * Enumerator values are UAPI-stable; each one is spelled out explicitly so an
 * accidental reorder or insertion cannot silently change the ABI.
 */
enum drm_pvr_job_type {
	/** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */
	DRM_PVR_JOB_TYPE_GEOMETRY = 0,

	/** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */
	DRM_PVR_JOB_TYPE_FRAGMENT = 1,

	/** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
	DRM_PVR_JOB_TYPE_COMPUTE = 2,

	/** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
	DRM_PVR_JOB_TYPE_TRANSFER_FRAG = 3,
};
1214
1215/**
1216 * struct drm_pvr_hwrt_data_ref - Reference HWRT data
1217 */
struct drm_pvr_hwrt_data_ref {
	/**
	 * @set_handle: HWRT data set handle, as returned in
	 * &struct drm_pvr_ioctl_create_hwrt_dataset_args.handle.
	 */
	__u32 set_handle;

	/** @data_index: Index of the HWRT data inside the data set. */
	__u32 data_index;
};
1225
1226/**
1227 * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
1228 */
struct drm_pvr_job {
	/**
	 * @type: [IN] Type of job being submitted
	 *
	 * This must be one of the values defined by &enum drm_pvr_job_type.
	 */
	__u32 type;

	/**
	 * @context_handle: [IN] Context handle.
	 *
	 * When @type is %DRM_PVR_JOB_TYPE_GEOMETRY, %DRM_PVR_JOB_TYPE_FRAGMENT,
	 * %DRM_PVR_JOB_TYPE_COMPUTE or %DRM_PVR_JOB_TYPE_TRANSFER_FRAG, this
	 * must be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT. The
	 * type of context must be compatible with the type of job being
	 * submitted.
	 */
	__u32 context_handle;

	/**
	 * @flags: [IN] Flags for command.
	 *
	 * Those are job-dependent. See all ``DRM_PVR_SUBMIT_JOB_*``.
	 */
	__u32 flags;

	/**
	 * @cmd_stream_len: [IN] Length of command stream, in bytes.
	 */
	__u32 cmd_stream_len;

	/**
	 * @cmd_stream: [IN] Pointer to command stream for command.
	 *
	 * The command stream must be u64-aligned.
	 */
	__u64 cmd_stream;

	/** @sync_ops: [IN] Sync operations to perform for this job. */
	struct drm_pvr_obj_array sync_ops;

	/**
	 * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
	 *
	 * Must be zero for non-render jobs.
	 */
	struct drm_pvr_hwrt_data_ref hwrt;
};
1278
1279/**
 * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
1281 *
1282 * If the syscall returns an error it is important to check the value of
1283 * @jobs.count. This indicates the index into @jobs.array where the
1284 * error occurred.
1285 */
struct drm_pvr_ioctl_submit_jobs_args {
	/**
	 * @jobs: [IN] Array of jobs to submit.
	 *
	 * Each entry in the array is a &struct drm_pvr_job. On error,
	 * @jobs.count is updated to the index of the failing job.
	 */
	struct drm_pvr_obj_array jobs;
};
1290
1291#if defined(__cplusplus)
1292}
1293#endif
1294
1295#endif /* PVR_DRM_UAPI_H */