1/*
   2 * Header for the Direct Rendering Manager
   3 *
   4 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
   5 *
   6 * Acknowledgments:
   7 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
   8 */
   9
  10/*
  11 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  12 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  13 * All rights reserved.
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a
  16 * copy of this software and associated documentation files (the "Software"),
  17 * to deal in the Software without restriction, including without limitation
  18 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  19 * and/or sell copies of the Software, and to permit persons to whom the
  20 * Software is furnished to do so, subject to the following conditions:
  21 *
  22 * The above copyright notice and this permission notice (including the next
  23 * paragraph) shall be included in all copies or substantial portions of the
  24 * Software.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  29 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  30 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  31 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  32 * OTHER DEALINGS IN THE SOFTWARE.
  33 */
  34
  35#ifndef _DRM_H_
  36#define _DRM_H_
  37
  38#if   defined(__linux__)
  39
  40#include <linux/types.h>
  41#include <asm/ioctl.h>
  42typedef unsigned int drm_handle_t;
  43
  44#else /* One of the BSDs */
  45
  46#include <stdint.h>
  47#include <sys/ioccom.h>
  48#include <sys/types.h>
  49typedef int8_t   __s8;
  50typedef uint8_t  __u8;
  51typedef int16_t  __s16;
  52typedef uint16_t __u16;
  53typedef int32_t  __s32;
  54typedef uint32_t __u32;
  55typedef int64_t  __s64;
  56typedef uint64_t __u64;
  57typedef size_t   __kernel_size_t;
  58typedef unsigned long drm_handle_t;
  59
  60#endif
  61
  62#if defined(__cplusplus)
  63extern "C" {
  64#endif
  65
  66#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
  67#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
  68#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
  69#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */
  70
  71#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
  72#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
  73#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
  74#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
  75#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
  76
  77typedef unsigned int drm_context_t;
  78typedef unsigned int drm_drawable_t;
  79typedef unsigned int drm_magic_t;
  80
  81/*
  82 * Cliprect.
  83 *
  84 * \warning: If you change this structure, make sure you change
  85 * XF86DRIClipRectRec in the server as well
  86 *
  87 * \note KW: Actually it's illegal to change either for
  88 * backwards-compatibility reasons.
  89 */
struct drm_clip_rect {
	unsigned short x1;	/**< Left edge */
	unsigned short y1;	/**< Top edge */
	unsigned short x2;	/**< Right edge */
	unsigned short y2;	/**< Bottom edge */
};
  96
  97/*
  98 * Drawable information.
  99 */
struct drm_drawable_info {
	unsigned int num_rects;		/**< Number of entries in \p rects */
	struct drm_clip_rect *rects;	/**< Cliprect array for the drawable */
};
 104
 105/*
 106 * Texture region,
 107 */
struct drm_tex_region {
	unsigned char next;	/**< Index of the next region in the list */
	unsigned char prev;	/**< Index of the previous region in the list */
	unsigned char in_use;	/**< Non-zero while the region is in use */
	unsigned char padding;	/**< Explicit padding to a 4-byte boundary */
	unsigned int age;	/**< Age stamp; presumably used for LRU-style eviction ordering -- NOTE(review): confirm */
};
 115
 116/*
 117 * Hardware lock.
 118 *
 119 * The lock structure is a simple cache-line aligned integer.  To avoid
 120 * processor bus contention on a multiprocessor system, there should not be any
 121 * other data stored in the same cache line.
 122 */
 123struct drm_hw_lock {
 124	__volatile__ unsigned int lock;		/**< lock variable */
 125	char padding[60];			/**< Pad to cache line */
 126};
 127
 128/*
 129 * DRM_IOCTL_VERSION ioctl argument type.
 130 *
 131 * \sa drmGetVersion().
 132 */
 133struct drm_version {
 134	int version_major;	  /**< Major version */
 135	int version_minor;	  /**< Minor version */
 136	int version_patchlevel;	  /**< Patch level */
 137	__kernel_size_t name_len;	  /**< Length of name buffer */
 138	char *name;	  /**< Name of driver */
 139	__kernel_size_t date_len;	  /**< Length of date buffer */
 140	char *date;	  /**< User-space buffer to hold date */
 141	__kernel_size_t desc_len;	  /**< Length of desc buffer */
 142	char *desc;	  /**< User-space buffer to hold desc */
 143};
 144
 145/*
 146 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 147 *
 148 * \sa drmGetBusid() and drmSetBusId().
 149 */
 150struct drm_unique {
 151	__kernel_size_t unique_len;	  /**< Length of unique */
 152	char *unique;	  /**< Unique name for driver instantiation */
 153};
 154
struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version *version;	/**< User-space array of \p count drm_version entries */
};
 159
struct drm_block {
	int unused;	/**< Unused; present so the structure has a defined, non-zero size */
};
 163
 164/*
 165 * DRM_IOCTL_CONTROL ioctl argument type.
 166 *
 167 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 168 */
struct drm_control {
	/* Requested operation. NOTE(review): only the IRQ-handler operations
	 * are obviously tied to this ioctl's documented use (drmCtlInstHandler
	 * / drmCtlUninstHandler above); ADD/RM_COMMAND usage is not visible
	 * from this header -- confirm before relying on them. */
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,	/**< Install the driver's IRQ handler */
		DRM_UNINST_HANDLER	/**< Uninstall the driver's IRQ handler */
	} func;
	int irq;	/**< IRQ number, used with the handler operations */
};
 178
 179/*
 180 * Type of memory to map.
 181 */
 182enum drm_map_type {
 183	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
 184	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
 185	_DRM_SHM = 2,		  /**< shared, cached */
 186	_DRM_AGP = 3,		  /**< AGP/GART */
 187	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 188	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
 189};
 190
 191/*
 192 * Memory mapping flags.
 193 */
 194enum drm_map_flags {
 195	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
 196	_DRM_READ_ONLY = 0x02,
 197	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
 198	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
 199	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
 200	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
 201	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
 202	_DRM_DRIVER = 0x80	     /**< Managed by driver */
 203};
 204
 205struct drm_ctx_priv_map {
 206	unsigned int ctx_id;	 /**< Context requesting private mapping */
 207	void *handle;		 /**< Handle of map */
 208};
 209
 210/*
 211 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 212 * argument type.
 213 *
 214 * \sa drmAddMap().
 215 */
 216struct drm_map {
 217	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
 218	unsigned long size;	 /**< Requested physical size (bytes) */
 219	enum drm_map_type type;	 /**< Type of memory to map */
 220	enum drm_map_flags flags;	 /**< Flags */
 221	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
 222				 /**< Kernel-space: kernel-virtual address */
 223	int mtrr;		 /**< MTRR slot used */
 224	/*   Private data */
 225};
 226
 227/*
 228 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 229 */
 230struct drm_client {
 231	int idx;		/**< Which client desired? */
 232	int auth;		/**< Is client authenticated? */
 233	unsigned long pid;	/**< Process ID */
 234	unsigned long uid;	/**< User ID */
 235	unsigned long magic;	/**< Magic */
 236	unsigned long iocs;	/**< Ioctl count */
 237};
 238
 239enum drm_stat_type {
 240	_DRM_STAT_LOCK,
 241	_DRM_STAT_OPENS,
 242	_DRM_STAT_CLOSES,
 243	_DRM_STAT_IOCTLS,
 244	_DRM_STAT_LOCKS,
 245	_DRM_STAT_UNLOCKS,
 246	_DRM_STAT_VALUE,	/**< Generic value */
 247	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
 248	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */
 249
 250	_DRM_STAT_IRQ,		/**< IRQ */
 251	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
 252	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
 253	_DRM_STAT_DMA,		/**< DMA */
 254	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
 255	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
 256	    /* Add to the *END* of the list */
 257};
 258
 259/*
 260 * DRM_IOCTL_GET_STATS ioctl argument type.
 261 */
 262struct drm_stats {
 263	unsigned long count;
 264	struct {
 265		unsigned long value;
 266		enum drm_stat_type type;
 267	} data[15];
 268};
 269
 270/*
 271 * Hardware locking flags.
 272 */
 273enum drm_lock_flags {
 274	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
 275	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
 276	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
 277	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
 278	/* These *HALT* flags aren't supported yet
 279	   -- they will be used to support the
 280	   full-screen DGA-like mode. */
 281	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
 282	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
 283};
 284
 285/*
 286 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 287 *
 288 * \sa drmGetLock() and drmUnlock().
 289 */
 290struct drm_lock {
 291	int context;
 292	enum drm_lock_flags flags;
 293};
 294
 295/*
 296 * DMA flags
 297 *
 298 * \warning
 299 * These values \e must match xf86drm.h.
 300 *
 301 * \sa drm_dma.
 302 */
 303enum drm_dma_flags {
 304	/* Flags for DMA buffer dispatch */
 305	_DRM_DMA_BLOCK = 0x01,	      /**<
 306				       * Block until buffer dispatched.
 307				       *
 308				       * \note The buffer may not yet have
 309				       * been processed by the hardware --
 310				       * getting a hardware lock with the
 311				       * hardware quiescent will ensure
 312				       * that the buffer has been
 313				       * processed.
 314				       */
 315	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
 316	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
 317
 318	/* Flags for DMA buffer request */
 319	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
 320	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
 321	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
 322};
 323
 324/*
 325 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 326 *
 327 * \sa drmAddBufs().
 328 */
 329struct drm_buf_desc {
 330	int count;		 /**< Number of buffers of this size */
 331	int size;		 /**< Size in bytes */
 332	int low_mark;		 /**< Low water mark */
 333	int high_mark;		 /**< High water mark */
 334	enum {
 335		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
 336		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
 337		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
 338		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
 339		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
 340	} flags;
 341	unsigned long agp_start; /**<
 342				  * Start address of where the AGP buffers are
 343				  * in the AGP aperture
 344				  */
 345};
 346
 347/*
 348 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 349 */
 350struct drm_buf_info {
 351	int count;		/**< Entries in list */
 352	struct drm_buf_desc *list;
 353};
 354
 355/*
 356 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 357 */
struct drm_buf_free {
	int count;	/**< Number of entries in \p list */
	int *list;	/**< Indices of the buffers to free */
};
 362
 363/*
 364 * Buffer information
 365 *
 366 * \sa drm_buf_map.
 367 */
 368struct drm_buf_pub {
 369	int idx;		       /**< Index into the master buffer list */
 370	int total;		       /**< Buffer size */
 371	int used;		       /**< Amount of buffer in use (for DMA) */
 372	void *address;	       /**< Address of buffer */
 373};
 374
 375/*
 376 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 377 */
 378struct drm_buf_map {
 379	int count;		/**< Length of the buffer list */
 380#ifdef __cplusplus
 381	void *virt;
 382#else
 383	void *virtual;		/**< Mmap'd area in user-virtual */
 384#endif
 385	struct drm_buf_pub *list;	/**< Buffer information */
 386};
 387
 388/*
 389 * DRM_IOCTL_DMA ioctl argument type.
 390 *
 391 * Indices here refer to the offset into the buffer list in drm_buf_get.
 392 *
 393 * \sa drmDMA().
 394 */
 395struct drm_dma {
 396	int context;			  /**< Context handle */
 397	int send_count;			  /**< Number of buffers to send */
 398	int *send_indices;	  /**< List of handles to buffers */
 399	int *send_sizes;		  /**< Lengths of data to send */
 400	enum drm_dma_flags flags;	  /**< Flags */
 401	int request_count;		  /**< Number of buffers requested */
 402	int request_size;		  /**< Desired size for buffers */
 403	int *request_indices;	  /**< Buffer information */
 404	int *request_sizes;
 405	int granted_count;		  /**< Number of buffers granted */
 406};
 407
enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,	/**< Context state is preserved across switches */
	_DRM_CONTEXT_2DONLY = 0x02	/**< Context is restricted to 2D use */
};
 412
 413/*
 414 * DRM_IOCTL_ADD_CTX ioctl argument type.
 415 *
 416 * \sa drmCreateContext() and drmDestroyContext().
 417 */
 418struct drm_ctx {
 419	drm_context_t handle;
 420	enum drm_ctx_flags flags;
 421};
 422
 423/*
 424 * DRM_IOCTL_RES_CTX ioctl argument type.
 425 */
struct drm_ctx_res {
	int count;			/**< Number of entries in \p contexts */
	struct drm_ctx *contexts;	/**< User-space array of contexts */
};
 430
 431/*
 432 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 433 */
struct drm_draw {
	drm_drawable_t handle;	/**< Handle of the drawable to add or remove */
};
 437
 438/*
 439 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 440 */
 441typedef enum {
 442	DRM_DRAWABLE_CLIPRECTS
 443} drm_drawable_info_type_t;
 444
struct drm_update_draw {
	drm_drawable_t handle;	/**< Drawable being updated */
	unsigned int type;	/**< Kind of data, from drm_drawable_info_type_t */
	unsigned int num;	/**< Element count of the data -- presumably cliprects for DRM_DRAWABLE_CLIPRECTS */
	unsigned long long data;	/**< User pointer to the new data, cast to u64 */
};
 451
 452/*
 453 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 454 */
struct drm_auth {
	drm_magic_t magic;	/**< Magic token used to authenticate a client */
};
 458
 459/*
 460 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 461 *
 462 * \sa drmGetInterruptFromBusID().
 463 */
 464struct drm_irq_busid {
 465	int irq;	/**< IRQ number */
 466	int busnum;	/**< bus number */
 467	int devnum;	/**< device number */
 468	int funcnum;	/**< function number */
 469};
 470
 471enum drm_vblank_seq_type {
 472	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 473	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
 474	/* bits 1-6 are reserved for high crtcs */
 475	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
 476	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
 477	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
 478	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 479	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 480	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
 481};
 482#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
 483
 484#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
 485#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
 486				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
 487
struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;	/**< _DRM_VBLANK_* type plus flag bits */
	unsigned int sequence;		/**< Target vblank sequence (absolute or relative per \p type) */
	unsigned long signal;		/**< User data for _DRM_VBLANK_SIGNAL/_DRM_VBLANK_EVENT completion -- NOTE(review): confirm exact semantics */
};
 493
struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;	/**< Echoed request type */
	unsigned int sequence;		/**< Vblank sequence when the wait completed */
	long tval_sec;			/**< Completion timestamp, seconds part */
	long tval_usec;			/**< Completion timestamp, microseconds part */
};
 500
 501/*
 502 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 503 *
 504 * \sa drmWaitVBlank().
 505 */
 506union drm_wait_vblank {
 507	struct drm_wait_vblank_request request;
 508	struct drm_wait_vblank_reply reply;
 509};
 510
 511#define _DRM_PRE_MODESET 1
 512#define _DRM_POST_MODESET 2
 513
 514/*
 515 * DRM_IOCTL_MODESET_CTL ioctl argument type
 516 *
 517 * \sa drmModesetCtl().
 518 */
struct drm_modeset_ctl {
	__u32 crtc;	/**< CRTC the modeset notification applies to */
	__u32 cmd;	/**< _DRM_PRE_MODESET or _DRM_POST_MODESET */
};
 523
 524/*
 525 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 526 *
 527 * \sa drmAgpEnable().
 528 */
 529struct drm_agp_mode {
 530	unsigned long mode;	/**< AGP mode */
 531};
 532
 533/*
 534 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 535 *
 536 * \sa drmAgpAlloc() and drmAgpFree().
 537 */
 538struct drm_agp_buffer {
 539	unsigned long size;	/**< In bytes -- will round to page boundary */
 540	unsigned long handle;	/**< Used for binding / unbinding */
 541	unsigned long type;	/**< Type of memory to allocate */
 542	unsigned long physical;	/**< Physical used by i810 */
 543};
 544
 545/*
 546 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 547 *
 548 * \sa drmAgpBind() and drmAgpUnbind().
 549 */
 550struct drm_agp_binding {
 551	unsigned long handle;	/**< From drm_agp_buffer */
 552	unsigned long offset;	/**< In bytes -- will round to page boundary */
 553};
 554
 555/*
 556 * DRM_IOCTL_AGP_INFO ioctl argument type.
 557 *
 558 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 559 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 560 * drmAgpVendorId() and drmAgpDeviceId().
 561 */
 562struct drm_agp_info {
 563	int agp_version_major;
 564	int agp_version_minor;
 565	unsigned long mode;
 566	unsigned long aperture_base;	/* physical address */
 567	unsigned long aperture_size;	/* bytes */
 568	unsigned long memory_allowed;	/* bytes */
 569	unsigned long memory_used;
 570
 571	/* PCI information */
 572	unsigned short id_vendor;
 573	unsigned short id_device;
 574};
 575
 576/*
 577 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 578 */
 579struct drm_scatter_gather {
 580	unsigned long size;	/**< In bytes -- will round to page boundary */
 581	unsigned long handle;	/**< Used for mapping / unmapping */
 582};
 583
 584/*
 585 * DRM_IOCTL_SET_VERSION ioctl argument type.
 586 */
struct drm_set_version {
	/* NOTE(review): "di" presumably means DRM interface and "dd" the
	 * device-dependent (driver) interface -- confirm against drm docs. */
	int drm_di_major;	/**< Requested DRM interface major version */
	int drm_di_minor;	/**< Requested DRM interface minor version */
	int drm_dd_major;	/**< Requested driver-dependent major version */
	int drm_dd_minor;	/**< Requested driver-dependent minor version */
};
 593
 594/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
 595struct drm_gem_close {
 596	/** Handle of the object to be closed. */
 597	__u32 handle;
 598	__u32 pad;
 599};
 600
 601/* DRM_IOCTL_GEM_FLINK ioctl argument type */
 602struct drm_gem_flink {
 603	/** Handle for the object being named */
 604	__u32 handle;
 605
 606	/** Returned global name */
 607	__u32 name;
 608};
 609
 610/* DRM_IOCTL_GEM_OPEN ioctl argument type */
 611struct drm_gem_open {
 612	/** Name of object being opened */
 613	__u32 name;
 614
 615	/** Returned handle for the object */
 616	__u32 handle;
 617
 618	/** Returned size of the object */
 619	__u64 size;
 620};
 621
 622/**
 623 * DRM_CAP_DUMB_BUFFER
 624 *
 625 * If set to 1, the driver supports creating dumb buffers via the
 626 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
 627 */
 628#define DRM_CAP_DUMB_BUFFER		0x1
 629/**
 630 * DRM_CAP_VBLANK_HIGH_CRTC
 631 *
 632 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
 633 * in the high bits of &drm_wait_vblank_request.type.
 634 *
 635 * Starting kernel version 2.6.39, this capability is always set to 1.
 636 */
 637#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
 638/**
 639 * DRM_CAP_DUMB_PREFERRED_DEPTH
 640 *
 641 * The preferred bit depth for dumb buffers.
 642 *
 643 * The bit depth is the number of bits used to indicate the color of a single
 644 * pixel excluding any padding. This is different from the number of bits per
 645 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
 646 * pixel.
 647 *
 648 * Note that this preference only applies to dumb buffers, it's irrelevant for
 649 * other types of buffers.
 650 */
 651#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
 652/**
 653 * DRM_CAP_DUMB_PREFER_SHADOW
 654 *
 655 * If set to 1, the driver prefers userspace to render to a shadow buffer
 656 * instead of directly rendering to a dumb buffer. For best speed, userspace
 657 * should do streaming ordered memory copies into the dumb buffer and never
 658 * read from it.
 659 *
 660 * Note that this preference only applies to dumb buffers, it's irrelevant for
 661 * other types of buffers.
 662 */
 663#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
 664/**
 665 * DRM_CAP_PRIME
 666 *
 667 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
 668 * and &DRM_PRIME_CAP_EXPORT.
 669 *
 670 * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
 671 * &DRM_PRIME_CAP_EXPORT are always advertised.
 672 *
 673 * PRIME buffers are exposed as dma-buf file descriptors.
 674 * See :ref:`prime_buffer_sharing`.
 675 */
 676#define DRM_CAP_PRIME			0x5
 677/**
 678 * DRM_PRIME_CAP_IMPORT
 679 *
 680 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
 681 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
 682 *
 683 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 684 */
 685#define  DRM_PRIME_CAP_IMPORT		0x1
 686/**
 687 * DRM_PRIME_CAP_EXPORT
 688 *
 689 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
 690 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
 691 *
 692 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 693 */
 694#define  DRM_PRIME_CAP_EXPORT		0x2
 695/**
 696 * DRM_CAP_TIMESTAMP_MONOTONIC
 697 *
 698 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
 699 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
 700 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
 701 * clocks.
 702 *
 703 * Starting from kernel version 2.6.39, the default value for this capability
 704 * is 1. Starting kernel version 4.15, this capability is always set to 1.
 705 */
 706#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
 707/**
 708 * DRM_CAP_ASYNC_PAGE_FLIP
 709 *
 710 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
 711 * page-flips.
 712 */
 713#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
 714/**
 715 * DRM_CAP_CURSOR_WIDTH
 716 *
 717 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
 718 * width x height combination for the hardware cursor. The intention is that a
 719 * hardware agnostic userspace can query a cursor plane size to use.
 720 *
 721 * Note that the cross-driver contract is to merely return a valid size;
 722 * drivers are free to attach another meaning on top, eg. i915 returns the
 723 * maximum plane size.
 724 */
 725#define DRM_CAP_CURSOR_WIDTH		0x8
 726/**
 727 * DRM_CAP_CURSOR_HEIGHT
 728 *
 729 * See &DRM_CAP_CURSOR_WIDTH.
 730 */
 731#define DRM_CAP_CURSOR_HEIGHT		0x9
 732/**
 733 * DRM_CAP_ADDFB2_MODIFIERS
 734 *
 735 * If set to 1, the driver supports supplying modifiers in the
 736 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
 737 */
 738#define DRM_CAP_ADDFB2_MODIFIERS	0x10
 739/**
 740 * DRM_CAP_PAGE_FLIP_TARGET
 741 *
 742 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
 743 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
 744 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
 745 * ioctl.
 746 */
 747#define DRM_CAP_PAGE_FLIP_TARGET	0x11
 748/**
 749 * DRM_CAP_CRTC_IN_VBLANK_EVENT
 750 *
 751 * If set to 1, the kernel supports reporting the CRTC ID in
 752 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
 753 * &DRM_EVENT_FLIP_COMPLETE events.
 754 *
 755 * Starting kernel version 4.12, this capability is always set to 1.
 756 */
 757#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
 758/**
 759 * DRM_CAP_SYNCOBJ
 760 *
 761 * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
 762 */
 763#define DRM_CAP_SYNCOBJ		0x13
 764/**
 765 * DRM_CAP_SYNCOBJ_TIMELINE
 766 *
 767 * If set to 1, the driver supports timeline operations on sync objects. See
 768 * :ref:`drm_sync_objects`.
 769 */
 770#define DRM_CAP_SYNCOBJ_TIMELINE	0x14
 771/**
 772 * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
 773 *
 774 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
 775 * commits.
 776 */
 777#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP	0x15
 778
 779/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;	/* [in] capability to query, one of DRM_CAP_* */
	__u64 value;		/* [out] value of the capability */
};
 784
 785/**
 786 * DRM_CLIENT_CAP_STEREO_3D
 787 *
 788 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 789 * monitor by advertising the supported 3D layouts in the flags of struct
 790 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
 791 *
 792 * This capability is always supported for all drivers starting from kernel
 793 * version 3.13.
 794 */
 795#define DRM_CLIENT_CAP_STEREO_3D	1
 796
 797/**
 798 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 799 *
 800 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 801 * cursor) to userspace.
 802 *
 803 * This capability has been introduced in kernel version 3.15. Starting from
 804 * kernel version 3.17, this capability is always supported for all drivers.
 805 */
 806#define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2
 807
 808/**
 809 * DRM_CLIENT_CAP_ATOMIC
 810 *
 811 * If set to 1, the DRM core will expose atomic properties to userspace. This
 812 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
 813 * &DRM_CLIENT_CAP_ASPECT_RATIO.
 814 *
 815 * If the driver doesn't support atomic mode-setting, enabling this capability
 816 * will fail with -EOPNOTSUPP.
 817 *
 818 * This capability has been introduced in kernel version 4.0. Starting from
 819 * kernel version 4.2, this capability is always supported for atomic-capable
 820 * drivers.
 821 */
 822#define DRM_CLIENT_CAP_ATOMIC	3
 823
 824/**
 825 * DRM_CLIENT_CAP_ASPECT_RATIO
 826 *
 827 * If set to 1, the DRM core will provide aspect ratio information in modes.
 828 * See ``DRM_MODE_FLAG_PIC_AR_*``.
 829 *
 830 * This capability is always supported for all drivers starting from kernel
 831 * version 4.18.
 832 */
 833#define DRM_CLIENT_CAP_ASPECT_RATIO    4
 834
 835/**
 836 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 837 *
 838 * If set to 1, the DRM core will expose special connectors to be used for
 839 * writing back to memory the scene setup in the commit. The client must enable
 840 * &DRM_CLIENT_CAP_ATOMIC first.
 841 *
 842 * This capability is always supported for atomic-capable drivers starting from
 843 * kernel version 4.19.
 844 */
 845#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
 846
 847/**
 848 * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
 849 *
 850 * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
 851 * virtualbox) have additional restrictions for cursor planes (thus
 852 * making cursor planes on those drivers not truly universal,) e.g.
 853 * they need cursor planes to act like one would expect from a mouse
 854 * cursor and have correctly set hotspot properties.
 855 * If this client cap is not set the DRM core will hide cursor plane on
 856 * those virtualized drivers because not setting it implies that the
 * client is not capable of dealing with those extra restrictions.
 858 * Clients which do set cursor hotspot and treat the cursor plane
 859 * like a mouse cursor should set this property.
 860 * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
 861 *
 862 * Setting this property on drivers which do not special case
 863 * cursor planes (i.e. non-virtualized drivers) will return
 864 * EOPNOTSUPP, which can be used by userspace to gauge
 865 * requirements of the hardware/drivers they're running on.
 866 *
 867 * This capability is always supported for atomic-capable virtualized
 868 * drivers starting from kernel version 6.6.
 869 */
 870#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT	6
 871
 872/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;	/* capability to set, one of DRM_CLIENT_CAP_* */
	__u64 value;		/* value to set the capability to */
};
 877
 878#define DRM_RDWR O_RDWR
 879#define DRM_CLOEXEC O_CLOEXEC
/*
 * PRIME handle<->fd conversion argument type (handle->fd exports a dmabuf,
 * fd->handle imports one).
 */
struct drm_prime_handle {
	__u32 handle;	/**< GEM handle: input for handle->fd, output for fd->handle */

	/** Flags.. only applicable for handle->fd; DRM_CLOEXEC and/or DRM_RDWR */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};
 889
struct drm_syncobj_create {
	__u32 handle;	/**< Returned handle of the new syncobj */
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;	/**< Creation flags, e.g. DRM_SYNCOBJ_CREATE_SIGNALED */
};
 895
struct drm_syncobj_destroy {
	__u32 handle;	/**< Handle of the syncobj to destroy */
	__u32 pad;	/**< Structure padding; should be zero */
};
 900
 901#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
 902#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE         (1 << 1)
 903#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
 904#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE         (1 << 1)
struct drm_syncobj_handle {
	__u32 handle;	/**< Syncobj handle: input for handle->fd, output for fd->handle */
	__u32 flags;	/**< DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_* / DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_* */

	__s32 fd;	/**< File descriptor: output for handle->fd, input for fd->handle */
	__u32 pad;	/**< Structure padding; should be zero */

	__u64 point;	/**< Timeline point, used with the *_TIMELINE flags */
};
 914
struct drm_syncobj_transfer {
	__u32 src_handle;	/**< Source syncobj handle */
	__u32 dst_handle;	/**< Destination syncobj handle */
	__u64 src_point;	/**< Timeline point on the source (zero for binary syncobjs) */
	__u64 dst_point;	/**< Timeline point on the destination (zero for binary syncobjs) */
	__u32 flags;		/**< Flags -- NOTE(review): no flag values are defined in this header */
	__u32 pad;		/**< Structure padding; should be zero */
};
 923
 924#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 925#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 926#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
 927#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
	__u64 handles;	/* user pointer to an array of __u32 syncobj handles */
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;	/* number of entries in the handles array */
	__u32 flags;		/* DRM_SYNCOBJ_WAIT_FLAGS_* */
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;	/* structure padding; should be zero */
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};
 945
struct drm_syncobj_timeline_wait {
	__u64 handles;	/* user pointer to an array of __u32 syncobj handles */
	/* user pointer to the timeline point to wait on for each handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;	/* number of entries in the handles/points arrays */
	__u32 flags;		/* DRM_SYNCOBJ_WAIT_FLAGS_* */
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;	/* structure padding; should be zero */
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};
 965
 966/**
 967 * struct drm_syncobj_eventfd
 968 * @handle: syncobj handle.
 969 * @flags: Zero to wait for the point to be signalled, or
 970 *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
 971 *         available for the point.
 972 * @point: syncobj timeline point (set to zero for binary syncobjs).
 973 * @fd: Existing eventfd to sent events to.
 974 * @pad: Must be zero.
 975 *
 976 * Register an eventfd to be signalled by a syncobj. The eventfd counter will
 977 * be incremented by one.
 978 */
 979struct drm_syncobj_eventfd {
 980	__u32 handle;
 981	__u32 flags;
 982	__u64 point;
 983	__s32 fd;
 984	__u32 pad;
 985};
 986
 987
struct drm_syncobj_array {
	__u64 handles;		/* user pointer to an array of __u32 syncobj handles */
	__u32 count_handles;	/* number of entries in the handles array */
	__u32 pad;		/* structure padding; should be zero */
};
 993
 994#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;		/* user pointer to an array of __u32 syncobj handles */
	__u64 points;		/* user pointer to an array of __u64 timeline points */
	__u32 count_handles;	/* number of entries in both arrays */
	__u32 flags;		/* e.g. DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED */
};
1001
1002
1003/* Query current scanout sequence number */
1004struct drm_crtc_get_sequence {
1005	__u32 crtc_id;		/* requested crtc_id */
1006	__u32 active;		/* return: crtc output is active */
1007	__u64 sequence;		/* return: most recent vblank sequence */
1008	__s64 sequence_ns;	/* return: most recent time of first pixel out */
1009};
1010
/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display.
 * Used as the argument of &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
 */
#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;		/* crtc to queue the event on */
	__u32 flags;		/* DRM_CRTC_SEQUENCE_* flags above */
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};
1024
#define DRM_CLIENT_NAME_MAX_LEN		64
/* Argument of &DRM_IOCTL_SET_CLIENT_NAME; see that ioctl's documentation
 * for the naming constraints.
 */
struct drm_set_client_name {
	__u64 name_len;	/* length of the name, excluding the terminating NUL */
	__u64 name;	/* user-space address (cast to __u64) of the name string */
};
1030
1031
1032#if defined(__cplusplus)
1033}
1034#endif
1035
1036#include "drm_mode.h"
1037
1038#if defined(__cplusplus)
1039extern "C" {
1040#endif
1041
/* DRM ioctl encoding helpers: every DRM ioctl uses ioctl type 'd' */
#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
/**
 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
 *
 * GEM handles are not reference-counted by the kernel. User-space is
 * responsible for managing their lifetime. For example, if user-space imports
 * the same memory object twice on the same DRM file description, the same GEM
 * handle is returned by both imports, and user-space needs to ensure
 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
 * when a memory object is allocated, then exported and imported again on the
 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
 * and always returns fresh new GEM handles even if an existing GEM handle
 * already refers to the same memory object before the IOCTL is performed.
 */
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)

/* Acquire/relinquish DRM master status on this file description */
#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

/**
 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
 *
 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
 * &drm_prime_handle.fd.
 *
 * The export can fail for any driver-specific reason, e.g. because export is
 * not supported for this specific GEM handle (but might be for others).
 *
 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
/**
 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
 *
 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
 * import, and gets back a GEM handle in &drm_prime_handle.handle.
 * &drm_prime_handle.flags is unused.
 *
 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
 * that GEM handle is returned. Therefore user-space which needs to handle
 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
 * reference-count duplicated GEM handles. For more information see
 * &DRM_IOCTL_GEM_CLOSE.
 *
 * The import can fail for any driver-specific reason, e.g. because import is
 * only supported for DMA-BUFs allocated on this DRM device.
 *
 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
 */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)

/* AGP and scatter/gather memory management ioctls */
#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
1161
/* KMS mode-setting ioctls; generic ioctls restart at 0xA0 (see the
 * device-specific range note below)
 */
#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
 *
 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * Warning: removing a framebuffer currently in-use on an enabled plane will
 * disable that plane. The CRTC the plane is linked to may also be disabled
 * (depending on driver capabilities).
 */
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

/**
 * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
 *
 * KMS dumb buffers provide a very primitive way to allocate a buffer object
 * suitable for scanout and map it for software rendering. KMS dumb buffers are
 * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
 * buffers are not suitable to be displayed on any other device than the KMS
 * device where they were allocated from. Also see
 * :ref:`kms_dumb_buffer_objects`.
 *
 * The IOCTL argument is a struct drm_mode_create_dumb.
 *
 * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
 * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
 * &DRM_IOCTL_MODE_MAP_DUMB.
 *
 * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
 * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
 * driver preferences for dumb buffers.
 */
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

/* Sync object (drm_syncobj) ioctls */
#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)

/* DRM lease ioctls */
#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
1243
1244/**
1245 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
1246 *
1247 * This queries metadata about a framebuffer. User-space fills
1248 * &drm_mode_fb_cmd2.fb_id as the input, and the kernels fills the rest of the
1249 * struct as the output.
1250 *
1251 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
1252 * will be filled with GEM buffer handles. Fresh new GEM handles are always
1253 * returned, even if another GEM handle referring to the same memory object
1254 * already exists on the DRM file description. The caller is responsible for
1255 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
1256 * new handle will be returned for multiple planes in case they use the same
1257 * memory object. Planes are valid until one has a zero handle -- this can be
1258 * used to compute the number of planes.
1259 *
1260 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
1261 * until one has a zero &drm_mode_fb_cmd2.pitches.
1262 *
1263 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
1264 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
1265 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
1266 *
1267 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
1268 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
1269 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
1270 * double-close handles which are specified multiple times in the array.
1271 */
1272#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
1273
1274#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
1275
1276/**
1277 * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
1278 *
1279 * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1280 * argument is a framebuffer object ID.
1281 *
1282 * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
1283 * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
1284 * alive. When the plane no longer uses the framebuffer (because the
1285 * framebuffer is replaced with another one, or the plane is disabled), the
1286 * framebuffer is cleaned up.
1287 *
1288 * This is useful to implement flicker-free transitions between two processes.
1289 *
1290 * Depending on the threat model, user-space may want to ensure that the
1291 * framebuffer doesn't expose any sensitive user information: closed
1292 * framebuffers attached to a plane can be read back by the next DRM master.
1293 */
1294#define DRM_IOCTL_MODE_CLOSEFB		DRM_IOWR(0xD0, struct drm_mode_closefb)
1295
1296/**
1297 * DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file
1298 *
1299 * Having a name allows for easier tracking and debugging.
1300 * The length of the name (without null ending char) must be
1301 * <= DRM_CLIENT_NAME_MAX_LEN.
1302 * The call will fail if the name contains whitespaces or non-printable chars.
1303 */
1304#define DRM_IOCTL_SET_CLIENT_NAME	DRM_IOWR(0xD1, struct drm_set_client_name)
1305
1306/*
1307 * Device specific ioctls should only be in their respective headers
1308 * The device specific ioctl range is from 0x40 to 0x9f.
1309 * Generic IOCTLS restart at 0xA0.
1310 *
1311 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1312 * drmCommandReadWrite().
1313 */
1314#define DRM_COMMAND_BASE                0x40
1315#define DRM_COMMAND_END			0xA0
1316
1317/**
1318 * struct drm_event - Header for DRM events
1319 * @type: event type.
1320 * @length: total number of payload bytes (including header).
1321 *
1322 * This struct is a header for events written back to user-space on the DRM FD.
1323 * A read on the DRM FD will always only return complete events: e.g. if the
1324 * read buffer is 100 bytes large and there are two 64 byte events pending,
1325 * only one will be returned.
1326 *
1327 * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
1328 * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
1329 * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
1330 */
1331struct drm_event {
1332	__u32 type;
1333	__u32 length;
1334};
1335
1336/**
1337 * DRM_EVENT_VBLANK - vertical blanking event
1338 *
1339 * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
1340 * &_DRM_VBLANK_EVENT flag set.
1341 *
1342 * The event payload is a struct drm_event_vblank.
1343 */
1344#define DRM_EVENT_VBLANK 0x01
1345/**
1346 * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
1347 *
1348 * This event is sent in response to an atomic commit or legacy page-flip with
1349 * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
1350 *
1351 * The event payload is a struct drm_event_vblank.
1352 */
1353#define DRM_EVENT_FLIP_COMPLETE 0x02
1354/**
1355 * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
1356 *
1357 * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
1358 *
1359 * The event payload is a struct drm_event_crtc_sequence.
1360 */
1361#define DRM_EVENT_CRTC_SEQUENCE	0x03
1362
/* Payload for &DRM_EVENT_VBLANK and &DRM_EVENT_FLIP_COMPLETE events */
struct drm_event_vblank {
	struct drm_event base;	/* generic event header */
	__u64 user_data;	/* user data from the originating request */
	__u32 tv_sec;		/* event timestamp, seconds part */
	__u32 tv_usec;		/* event timestamp, microseconds part */
	__u32 sequence;		/* vblank sequence number */
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};
1371
/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display.
 * Payload for &DRM_EVENT_CRTC_SEQUENCE events.
 */
struct drm_event_crtc_sequence {
	struct drm_event	base;		/* generic event header */
	__u64			user_data;	/* from drm_crtc_queue_sequence.user_data */
	__s64			time_ns;	/* time of first pixel out, in ns */
	__u64			sequence;	/* sequence at which the event fired */
};
1381
/* typedef area: plain *_t aliases for the struct/enum tags above, kept so
 * existing user-space that spells the typedef names keeps compiling; new
 * code should use the struct/enum tags directly.
 */
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
1424
1425#if defined(__cplusplus)
1426}
1427#endif
1428
1429#endif