/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION	0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU		1
#define VFIO_SPAPR_TCE_IOMMU		2
#define VFIO_TYPE1v2_IOMMU		3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU		4

/* Check if EEH is supported */
#define VFIO_EEH			5

/* Two-stage IOMMU */
#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU	6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU		7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU		8

/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL			9

/*
 * Supports the vaddr flag for DMA map and unmap.  Not supported for mediated
 * devices, so this capability is subject to change as groups are added or
 * removed.
 */
#define VFIO_UPDATE_VADDR		10

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE	(';')
#define VFIO_BASE	100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
	__u16	id;		/* Identifies capability */
	__u16	version;	/* Version specific to the capability ID */
	__u32	next;		/* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */
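
/*
 * Example: locating a capability within an INFO buffer.  This is an
 * illustrative sketch, not part of the ABI; find_cap() and its
 * arguments are hypothetical.
 *
 *	static struct vfio_info_cap_header *
 *	find_cap(void *buf, __u32 cap_offset, __u16 id)
 *	{
 *		__u32 offset = cap_offset;
 *		struct vfio_info_cap_header *hdr;
 *
 *		while (offset) {
 *			hdr = (struct vfio_info_cap_header *)((char *)buf + offset);
 *			if (hdr->id == id)
 *				return hdr;
 *			offset = hdr->next;
 *		}
 *		return NULL;
 *	}
 */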

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION		_IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION		_IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU			_IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *						struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32	argsz;
	__u32	flags;
#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
};
#define VFIO_GROUP_GET_STATUS		_IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)
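
/*
 * Example: a typical open sequence through the group/container interface,
 * as also described in Documentation/driver-api/vfio.rst.  Illustrative
 * sketch only; error handling is omitted and the group number and device
 * name are placeholders.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU))
 *		ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */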

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *						struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FLAGS_RESET	(1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI	(1 << 1)	/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)	/* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW	(1 << 4)	/* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP	(1 << 5)	/* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)	/* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS	(1 << 7)	/* Info supports caps */
#define VFIO_DEVICE_FLAGS_CDX	(1 << 8)	/* vfio-cdx device */
	__u32	num_regions;	/* Max region index + 1 */
	__u32	num_irqs;	/* Max IRQ index + 1 */
	__u32   cap_offset;	/* Offset within info struct of first cap */
	__u32   pad;
};
#define VFIO_DEVICE_GET_INFO		_IO(VFIO_TYPE, VFIO_BASE + 7)

/*
 * A vendor driver using the mediated device framework should provide a
 * device_api attribute in the supported type attribute groups.  The device
 * API string should be one of the following, corresponding to the device
 * flags in the vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING		"vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING		"vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING		"vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING		"vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING		"vfio-ap"

/*
 * The following capabilities are unique to s390 zPCI devices.  Their contents
 * are further-defined in vfio_zdev.h
 */
#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE		1
#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP		2
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL		3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP		4

/*
 * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
 * completion to the root bus with supported widths provided via flags.
 */
#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP	5
struct vfio_device_info_cap_pci_atomic_comp {
	struct vfio_info_cap_header header;
	__u32 flags;
#define VFIO_PCI_ATOMIC_COMP32	(1 << 0)
#define VFIO_PCI_ATOMIC_COMP64	(1 << 1)
#define VFIO_PCI_ATOMIC_COMP128	(1 << 2)
	__u32 reserved;
};

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS	(1 << 3) /* Info supports caps */
	__u32	index;		/* Region index */
	__u32	cap_offset;	/* Offset within info struct of first cap */
	__aligned_u64	size;	/* Region size (bytes) */
	__aligned_u64	offset;	/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)
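
/*
 * Example: the two-call argsz pattern used by INFO ioctls that support
 * capability chains.  Illustrative sketch only, not part of the ABI;
 * error handling is omitted.
 *
 *	struct vfio_region_info hdr = { .argsz = sizeof(hdr), .index = idx };
 *	struct vfio_region_info *info = &hdr;
 *
 *	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &hdr);
 *	if (hdr.argsz > sizeof(hdr)) {
 *		info = calloc(1, hdr.argsz);
 *		info->argsz = hdr.argsz;
 *		info->index = idx;
 *		ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *	}
 *
 * If the second call succeeds and VFIO_REGION_INFO_FLAG_CAPS is set,
 * info->cap_offset points at the first vfio_info_cap_header.
 */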

/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP	1

struct vfio_region_sparse_mmap_area {
	__aligned_u64	offset;	/* Offset of mmap'able area within region */
	__aligned_u64	size;	/* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
	struct vfio_info_cap_header header;
	__u32	nr_areas;
	__u32	reserved;
	struct vfio_region_sparse_mmap_area areas[];
};
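
/*
 * Example: honoring the sparse mmap capability.  Illustrative sketch
 * only; assumes "sparse" points at a vfio_region_info_cap_sparse_mmap
 * found via the capability chain of the vfio_region_info "info".
 *
 *	for (i = 0; i < sparse->nr_areas; i++)
 *		mmap(NULL, sparse->areas[i].size, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, device,
 *		     info->offset + sparse->areas[i].offset);
 */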

/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE	2

struct vfio_region_info_cap_type {
	struct vfio_info_cap_header header;
	__u32 type;	/* global per bus driver */
	__u32 subtype;	/* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE	(1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK	(0xffff)
#define VFIO_REGION_TYPE_GFX                    (1)
#define VFIO_REGION_TYPE_CCW			(2)
#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED   (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION	(1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM	(1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD	(1)

/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically qemu) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the EDID blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On Linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
	__u32 edid_offset;
	__u32 edid_max_size;
	__u32 edid_size;
	__u32 max_xres;
	__u32 max_yres;
	__u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
};
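
/*
 * Example: one possible implementation of the EDID update protocol
 * above, using pread()/pwrite() at the region's device fd offset.
 * Illustrative sketch only; "region_offset", "blob" and "blob_size"
 * are placeholders.
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *	pread(device, &hdr, sizeof(hdr), region_offset);
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 *	pwrite(device, blob, blob_size, region_offset + hdr.edid_offset);
 *	pwrite(device, &blob_size, sizeof(blob_size), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 */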

/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB		(2)
#define VFIO_REGION_SUBTYPE_CCW_CRW		(3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)

struct vfio_device_migration_info {
	__u32 device_state;         /* VFIO device state */
#define VFIO_DEVICE_STATE_V1_STOP      (0)
#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
				     VFIO_DEVICE_STATE_V1_SAVING |  \
				     VFIO_DEVICE_STATE_V1_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
	(state & VFIO_DEVICE_STATE_V1_RESUMING ? \
	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
					      VFIO_DEVICE_STATE_V1_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
					     VFIO_DEVICE_STATE_V1_RESUMING)

	__u32 reserved;
	__aligned_u64 pending_bytes;
	__aligned_u64 data_offset;
	__aligned_u64 data_size;
};

/*
 * The MSI-X mappable capability informs that the MSI-X data of a BAR can be
 * mmapped, which allows direct access to non-MSI-X registers that happen to
 * be within the same system page.
 *
 * Even though userspace gets direct access to the MSI-X data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSI-X configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE	3

/*
 * Capability with compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus.  Used by a GPU for DMA routing
 * and by userspace to associate an NVLink bridge with a GPU.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT	4

struct vfio_region_info_cap_nvlink2_ssatgt {
	struct vfio_info_cap_header header;
	__aligned_u64 tgt;
};

/*
 * Capability with an NVLink link speed.  The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree.  The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working with no indication from the driver why.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD	5

struct vfio_region_info_cap_nvlink2_lnkspd {
	struct vfio_info_cap_header header;
	__u32 link_speed;
	__u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are set up as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 * Absence of the NORESIZE flag indicates that vectors can be enabled
 * and disabled dynamically without impacting other vectors within the
 * index.
 */
struct vfio_irq_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
	__u32	index;		/* IRQ index */
	__u32	count;		/* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
	__u32	index;
	__u32	start;
	__u32	count;
	__u8	data[];
};
#define VFIO_DEVICE_SET_IRQS		_IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)
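
/*
 * Example: enabling two MSI-X vectors with eventfd signaling.
 * Illustrative sketch only; error handling is omitted and
 * VFIO_PCI_MSIX_IRQ_INDEX is defined further below.
 *
 *	int fds[2] = { eventfd(0, 0), eventfd(0, 0) };
 *	struct vfio_irq_set *set = malloc(sizeof(*set) + sizeof(fds));
 *
 *	set->argsz = sizeof(*set) + sizeof(fds);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 2;
 *	memcpy(set->data, fds, sizeof(fds));
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 */
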
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET		_IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
	 * range is found at its identity mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
				 /* device specific cap to define content. */
};

enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_CCW_CONFIG_REGION_INDEX,
	VFIO_CCW_NUM_REGIONS
};

enum {
	VFIO_CCW_IO_IRQ_INDEX,
	VFIO_CCW_CRW_IRQ_INDEX,
	VFIO_CCW_REQ_IRQ_INDEX,
	VFIO_CCW_NUM_IRQS
};

/*
 * The vfio-ap bus driver makes use of the following IRQ index mapping.
 * Unimplemented IRQ types return a count of zero.
 */
enum {
	VFIO_AP_REQ_IRQ_INDEX,
	VFIO_AP_CFG_CHG_IRQ_INDEX,
	VFIO_AP_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * This command is used to query the affected devices in the hot reset for
 * a given device.
 *
 * This command always reports the segment, bus, and devfn information for
 * each affected device, and selectively reports either the group_id or the
 * devid, depending on how the calling device is opened.
 *
 *	- If the calling device is opened via the traditional group/container
 *	  API, group_id is reported.  The user should check that it owns all
 *	  the affected devices and provide a set of group fds to prove
 *	  ownership in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
 *
 *	- If the calling device is opened as a cdev, devid is reported.
 *	  Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
 *	  data type.  All the affected devices should be represented in
 *	  the dev_set, ex. bound to a vfio driver, and also be owned by
 *	  this interface, which is determined by the following conditions:
 *	  1) Has a valid devid within the iommufd_ctx of the calling device.
 *	     Ownership cannot be determined across separate iommufd_ctx and
 *	     the cdev calling conventions do not support a proof-of-ownership
 *	     model as provided in the legacy group interface.  In this case
 *	     a valid devid with value greater than zero is provided in the
 *	     return structure.
 *	  2) Does not have a valid devid within the iommufd_ctx of the calling
 *	     device, but belongs to the same IOMMU group as the calling device
 *	     or another opened device that has a valid devid within the
 *	     iommufd_ctx of the calling device.  This provides implicit ownership
 *	     for devices within the same DMA isolation context.  In this case
 *	     the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
 *	     structure.
 *
 *	  A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
 *	  structure for affected devices where the device is NOT represented in
 *	  the dev_set or ownership is not available.  Such devices prevent the
 *	  use of the VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the
 *	  proof-of-ownership calling conventions (ie. via legacy group accessed
 *	  devices).  Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED is set when all
 *	  the affected devices are represented in the dev_set and also owned by
 *	  the user.  This flag is available only when
 *	  flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise reserved.
 *	  When set, the user can invoke VFIO_DEVICE_PCI_HOT_RESET with a zero
 *	  length fd array on the calling device as the ownership is validated
 *	  by iommufd_ctx.
 *
 * Return: 0 on success, -errno on failure:
 *	-enospc = insufficient buffer, -enodev = unsupported for device.
 */
struct vfio_pci_dependent_device {
	union {
		__u32   group_id;
		__u32	devid;
#define VFIO_PCI_DEVID_OWNED		0
#define VFIO_PCI_DEVID_NOT_OWNED	-1
	};
	__u16	segment;
	__u8	bus;
	__u8	devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID		(1 << 0)
#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED	(1 << 1)
	__u32	count;
	struct vfio_pci_dependent_device	devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * A PCI hot reset results in either a bus or slot reset which may affect
 * other devices sharing the bus/slot.  The calling user must have
 * ownership of the full set of affected devices as determined by the
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
 *
 * When called on a device file descriptor acquired through the vfio
 * group interface, the user is required to provide proof of ownership
 * of those affected devices via the group_fds array in struct
 * vfio_pci_hot_reset.
 *
 * When called on a direct cdev opened vfio device, the flags field of
 * struct vfio_pci_hot_reset_info reports the ownership status of the
 * affected devices and this ioctl must be called with an empty group_fds
 * array.  See above INFO ioctl definition for ownership requirements.
 *
 * Mixed usage of legacy groups and cdevs across the set of affected
 * devices is not supported.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	__s32	group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)
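
/*
 * Example: hot reset through the legacy group interface.  Illustrative
 * sketch only; assumes the user already holds open group fds covering
 * every group reported by VFIO_DEVICE_GET_PCI_HOT_RESET_INFO.
 *
 *	struct vfio_pci_hot_reset *reset;
 *
 *	reset = malloc(sizeof(*reset) + ngroups * sizeof(__s32));
 *	reset->argsz = sizeof(*reset) + ngroups * sizeof(__s32);
 *	reset->flags = 0;
 *	reset->count = ngroups;
 *	memcpy(reset->group_fds, group_fds, ngroups * sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 */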

/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *                                    struct vfio_device_query_gfx_plane)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *   to ask if the mdev supports dma-buf: returns 0 if dma-buf is
 *   supported, -EINVAL if not.
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *   to ask if the mdev supports region: returns 0 if region is
 *   supported, -EINVAL if not.
 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *   with each call to query the plane info.
 * - Others are invalid and return -EINVAL.
 *
 * Note:
 * 1. The plane could be disabled by the guest.  In that case, success will
 *    be returned with zero-initialized drm_format, size, width and height
 *    fields.
 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is
 *    available.
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
	/* in */
	__u32 drm_plane_type;	/* type of plane: DRM_PLANE_TYPE_* */
	/* out */
	__u32 drm_format;	/* drm format of plane */
	__aligned_u64 drm_format_mod;   /* tiled mode */
	__u32 width;	/* width of plane */
	__u32 height;	/* height of plane */
	__u32 stride;	/* stride of plane */
	__u32 size;	/* size of plane in bytes, aligned on page */
	__u32 x_pos;	/* horizontal position of cursor plane */
	__u32 y_pos;	/* vertical position of cursor plane */
	__u32 x_hot;    /* horizontal position of cursor hotspot */
	__u32 y_hot;    /* vertical position of cursor hotspot */
	union {
		__u32 region_index;	/* region index */
		__u32 dmabuf_id;	/* dma-buf id */
	};
	__u32 reserved;
};

#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id.  The dmabuf_id is returned from
 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)

/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *                              struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
	__aligned_u64	offset;		/* device fd offset of write */
	__aligned_u64	data;		/* data to be written */
	__s32	fd;			/* -1 for de-assignment */
	__u32	reserved;
};

#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)
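
/*
 * Example: registering an ioeventfd for a 4-byte doorbell write.
 * Illustrative sketch only; "doorbell_offset" is a placeholder for a
 * device fd offset within a BAR region.
 *
 *	struct vfio_device_ioeventfd io = {
 *		.argsz	= sizeof(io),
 *		.flags	= VFIO_DEVICE_IOEVENTFD_32,
 *		.offset	= doorbell_offset,
 *		.data	= 1,
 *		.fd	= eventfd(0, 0),
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_IOEVENTFD, &io);
 */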

/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *			       struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_feature {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FEATURE_MASK	(0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET		(1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET		(1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE	(1 << 18) /* Probe feature support */
	__u8	data[];
};

#define VFIO_DEVICE_FEATURE		_IO(VFIO_TYPE, VFIO_BASE + 17)
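
/*
 * Example: probing whether a feature is supported and readable.
 * Illustrative sketch only; VFIO_DEVICE_FEATURE_MIGRATION is defined
 * further below.
 *
 *	struct vfio_device_feature probe = {
 *		.argsz = sizeof(probe),
 *		.flags = VFIO_DEVICE_FEATURE_MIGRATION |
 *			 VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_GET,
 *	};
 *
 *	int ret = ioctl(device, VFIO_DEVICE_FEATURE, &probe);
 *
 * A zero return indicates the feature is supported and the GET method
 * is available.
 */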

/*
 * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
 *				   struct vfio_device_bind_iommufd)
 * @argsz:	 User filled size of this data.
 * @flags:	 Must be 0 or a bit flags of VFIO_DEVICE_BIND_*
 * @iommufd:	 iommufd to bind.
 * @out_devid:	 The device id generated by this bind. devid is a handle for
 *		 this device/iommufd bond and can be used in IOMMUFD commands.
 * @token_uuid_ptr: Valid if VFIO_DEVICE_BIND_FLAG_TOKEN. Points to a 16 byte
 *                  UUID in the same format as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN.
 *
 * Bind a vfio_device to the specified iommufd.
 *
 * The user is restricted from accessing the device before the binding
 * operation is completed.  Only allowed on cdev fds.
 *
 * Unbind is automatically conducted when the device fd is closed.
 *
 * A token is sometimes required to open the device.  Unless this is known to
 * be needed, VFIO_DEVICE_BIND_FLAG_TOKEN should not be set and token_uuid_ptr
 * is ignored.  The only case today is a PF/VF relationship where the VF bind
 * must be provided the same token as the VFIO_DEVICE_FEATURE_PCI_VF_TOKEN
 * provided to the PF.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_bind_iommufd {
	__u32		argsz;
	__u32		flags;
#define VFIO_DEVICE_BIND_FLAG_TOKEN (1 << 0)
	__s32		iommufd;
	__u32		out_devid;
	__aligned_u64	token_uuid_ptr;
};

#define VFIO_DEVICE_BIND_IOMMUFD	_IO(VFIO_TYPE, VFIO_BASE + 18)

/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
 *					struct vfio_device_attach_iommufd_pt)
 * @argsz:	User filled size of this data.
 * @flags:	Flags for attach.
 * @pt_id:	Input the target id which can represent an ioas or a hwpt
 *		allocated via the iommufd subsystem.
 *		Output the input ioas id or the attached hwpt id which could
 *		be the specified hwpt itself or a hwpt automatically created
 *		for the specified ioas by the kernel during the attachment.
 * @pasid:	The pasid to be attached, only meaningful when
 *		VFIO_DEVICE_ATTACH_PASID is set in @flags
 *
 * Associate the device with an address space within the bound iommufd.
 * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close.  This is only
 * allowed on cdev fds.
 *
 * If a vfio device or a pasid of this device is currently attached to a valid
 * hw_pagetable (hwpt), without doing a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hwpt id is allowed.
 * This action, also known as a hw_pagetable replacement, will replace the
 * currently attached hwpt of the device or the pasid of this device with a new
 * hwpt corresponding to the given pt_id.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_attach_iommufd_pt {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_ATTACH_PASID	(1 << 0)
	__u32	pt_id;
	__u32	pasid;
};

#define VFIO_DEVICE_ATTACH_IOMMUFD_PT		_IO(VFIO_TYPE, VFIO_BASE + 19)
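
/*
 * Example: a typical cdev open sequence.  Illustrative sketch only; the
 * device path is a placeholder and "ioas_id" is assumed to have been
 * allocated through the iommufd uAPI (<linux/iommufd.h>).
 *
 *	int iommufd = open("/dev/iommu", O_RDWR);
 *	int device = open("/dev/vfio/devices/vfio0", O_RDWR);
 *	struct vfio_device_bind_iommufd bind = {
 *		.argsz = sizeof(bind),
 *		.iommufd = iommufd,
 *	};
 *	struct vfio_device_attach_iommufd_pt attach = {
 *		.argsz = sizeof(attach),
 *		.pt_id = ioas_id,
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_BIND_IOMMUFD, &bind);
 *	ioctl(device, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
 */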

/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
 *					struct vfio_device_detach_iommufd_pt)
 * @argsz:	User filled size of this data.
 * @flags:	Flags for detach.
 * @pasid:	The pasid to be detached, only meaningful when
 *		VFIO_DEVICE_DETACH_PASID is set in @flags
 *
 * Remove the association between the device, or a pasid of the device, and
 * its currently associated address space.  Afterwards, the device or the
 * pasid should be in a blocked DMA state.  This is only allowed on cdev fds.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_detach_iommufd_pt {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_DETACH_PASID	(1 << 0)
	__u32	pasid;
};

#define VFIO_DEVICE_DETACH_IOMMUFD_PT		_IO(VFIO_TYPE, VFIO_BASE + 20)

/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)

/*
 * Indicates the device can support the migration API through
 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
 * ERROR states are always supported. Support for additional states is
 * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
 * set.
 *
 * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
 * RESUMING are supported.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
 * is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
 * PRE_COPY is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
 * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
 * in addition to the STOP_COPY states.
 *
 * Other combinations of flags have behavior to be defined in the future.
 */
struct vfio_device_feature_migration {
	__aligned_u64 flags;
#define VFIO_MIGRATION_STOP_COPY	(1 << 0)
#define VFIO_MIGRATION_P2P		(1 << 1)
#define VFIO_MIGRATION_PRE_COPY		(1 << 2)
};
#define VFIO_DEVICE_FEATURE_MIGRATION 1

/*
 * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
 * device. The new state is supplied in device_state, see enum
 * vfio_device_mig_state for details.
 *
 * The kernel migration driver must fully transition the device to the new
 * state value before the operation returns to the user.
 *
 * The kernel migration driver must not generate asynchronous device state
 * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
 * ioctl as described above.
 *
 * If this function fails then the current device_state may be the original
 * operating state or some other state along the combination transition path.
 * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
 * to return to the original state, or attempt to return to some other state
 * such as RUNNING or STOP.
 *
 * If the new_state starts a new data transfer session then the FD associated
 * with that session is returned in data_fd. The user is responsible to close
 * this FD when it is finished. The user must consider the migration data stream
 * carried over the FD to be opaque and must preserve the byte order of the
 * stream. The user is not required to preserve buffer segmentation when writing
 * the data stream during the RESUMING operation.
 *
 * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
 * device, data_fd will be -1.
 */
struct vfio_device_feature_mig_state {
	__u32 device_state; /* From enum vfio_device_mig_state */
	__s32 data_fd;
};
#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
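
/*
 * Example: requesting a migration state change to STOP_COPY and reading
 * the resulting data stream.  Illustrative sketch only; error and
 * ERROR-state handling are omitted and enum vfio_device_mig_state is
 * defined further below.
 *
 *	__u8 buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_mig_state)];
 *	struct vfio_device_feature *feature = (void *)buf;
 *	struct vfio_device_feature_mig_state *mig = (void *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
 *	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *	while ((n = read(mig->data_fd, chunk, sizeof(chunk))) > 0)
 *		write_to_target(chunk, n);
 *
 * write_to_target() stands in for forwarding the stream to the data_fd
 * of a compatible device in RESUMING.
 */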

/*
 * The device migration Finite State Machine is described by the enum
 * vfio_device_mig_state. Some of the FSM arcs will create a migration data
 * transfer session by returning a FD, in this case the migration data will
 * flow over the FD using read() and write() as discussed below.
 *
 * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
 *  RUNNING - The device is running normally
 *  STOP - The device does not change the internal or external state
 *  STOP_COPY - The device internal state can be read out
 *  RESUMING - The device is stopped and is loading a new internal state
 *  ERROR - The device has failed and must be reset
 *
 * And optional states to support VFIO_MIGRATION_P2P:
 *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
 * And VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY - The device is running normally but tracking internal state
 *             changes
 * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
 *
 * The FSM takes actions on the arcs between FSM states. The driver implements
 * the following behavior for the FSM arcs:
 *
 * RUNNING_P2P -> STOP
 * STOP_COPY -> STOP
 *   While in STOP the device must stop the operation of the device. The device
 *   must not generate interrupts, DMA, or any other change to external state.
 *   It must not change its internal state. When stopped the device and kernel
 *   migration driver must accept and respond to interaction to support external
 *   subsystems in the STOP state, for example PCI MSI-X and PCI config space.
 *   Failure by the user to restrict device access while in STOP must not result
 *   in error conditions outside the user context (ex. host system faults).
 *
 *   The STOP_COPY arc will terminate a data transfer session.
 *
 * RESUMING -> STOP
 *   Leaving RESUMING terminates a data transfer session and indicates the
 *   device should complete processing of the data delivered by write(). The
 *   kernel migration driver should complete the incorporation of data written
 *   to the data transfer FD into the device internal state and perform
 *   final validity and consistency checking of the new device state. If the
 *   user provided data is found to be incomplete, inconsistent, or otherwise
 *   invalid, the migration driver must fail the SET_STATE ioctl and
 *   optionally go to the ERROR state as described below.
 *
 *   While in STOP the device has the same behavior as other STOP states
 *   described above.
 *
 *   To abort a RESUMING session the device must be reset.
 *
 * PRE_COPY -> RUNNING
 * RUNNING_P2P -> RUNNING
 *   While in RUNNING the device is fully operational, the device may generate
 *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
 *   and the device may advance its internal state.
 *
 *   The PRE_COPY arc will terminate a data transfer session.
 *
 * PRE_COPY_P2P -> RUNNING_P2P
 * RUNNING -> RUNNING_P2P
 * STOP -> RUNNING_P2P
 *   While in RUNNING_P2P the device is partially running in the P2P quiescent
 *   state defined below.
 *
 *   The PRE_COPY_P2P arc will terminate a data transfer session.
 *
 * RUNNING -> PRE_COPY
 * RUNNING_P2P -> PRE_COPY_P2P
 * STOP -> STOP_COPY
 *   PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
 *   which share a data transfer session. Moving between these states alters
 *   what is streamed in the session, but does not terminate or otherwise
 *   affect the associated fd.
 *
 *   These arcs begin the process of saving the device state and will return a
 *   new data_fd. The migration driver may perform actions such as enabling
 *   dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
 *
 *   Each arc does not change the device operation, the device remains
 *   RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
 *   in PRE_COPY_P2P -> STOP_COPY.
 *
 * PRE_COPY -> PRE_COPY_P2P
 *   Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
 *   However, while in the PRE_COPY_P2P state, the device is partially running
 *   in the P2P quiescent state defined below, like RUNNING_P2P.
 *
 * PRE_COPY_P2P -> PRE_COPY
 *   This arc allows returning the device to a full RUNNING behavior while
 *   continuing all the behaviors of PRE_COPY.
 *
 * PRE_COPY_P2P -> STOP_COPY
 *   While in the STOP_COPY state the device has the same behavior as STOP
 *   with the addition that the data transfer session continues to stream the
 *   migration state. End of stream on the FD indicates the entire device
 *   state has been transferred.
 *
 *   The user should take steps to restrict access to vfio device regions while
 *   the device is in STOP_COPY or risk corruption of the device migration data
 *   stream.
 *
 * STOP -> RESUMING
 *   Entering the RESUMING state starts a process of restoring the device state
 *   and will return a new data_fd. The data stream fed into the data_fd should
 *   be taken from the data transfer output of a single FD during saving from
 *   a compatible device. The migration driver may alter/reset the internal
 *   device state for this arc if required to prepare the device to receive the
 *   migration data.
 *
 * STOP_COPY -> PRE_COPY
 * STOP_COPY -> PRE_COPY_P2P
 *   These arcs are not permitted and return error if requested. Future
 *   revisions of this API may define behaviors for these arcs, in this case
 *   support will be discoverable by a new flag in
 *   VFIO_DEVICE_FEATURE_MIGRATION.
 *
 * any -> ERROR
 *   ERROR cannot be specified as a device state, however any transition request
 *   can be failed with an errno return and may then move the device_state into
 *   ERROR. In this case the device was unable to execute the requested arc and
 *   was also unable to restore the device to any valid device_state.
 *   To recover from ERROR VFIO_DEVICE_RESET must be used to return the
 *   device_state back to RUNNING.
 *
 * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
 * state for the device for the purposes of managing multiple devices within a
 * user context where peer-to-peer DMA between devices may be active. The
 * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
 * any new P2P DMA transactions. If the device can identify P2P transactions
 * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
 * driver must complete any such outstanding operations prior to completing the
 * FSM arc into a P2P state. If the P2P states are not supported then, for the
 * purpose of specification, they behave as though the device was fully
 * running. As while in STOP or STOP_COPY, the user must not touch the device,
 * otherwise the state can be exited.
 *
 * The remaining possible transitions are interpreted as combinations of the
 * above FSM arcs. As there are multiple paths through the FSM arcs the path
 * should be selected based on the following rules:
 *   - Select the shortest path.
 *   - The path cannot have saving group states as interior arcs, only
 *     starting/end states.
 * Refer to vfio_mig_get_next_state() for the result of the algorithm.
 *
 * The automatic transit through the FSM arcs that make up the combination
 * transition is invisible to the user. When working with combination arcs the
 * user may see any step along the path in the device_state if SET_STATE
 * fails. When handling these types of errors users should anticipate future
 * revisions of this protocol using new states and those states becoming
 * visible in this case.
 *
 * The optional states cannot be used with SET_STATE if the device does not
 * support them. The user can discover if these states are supported by using
 * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
 * avoid knowing about these optional states if the kernel driver supports them.
 *
 * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
 * is not present.
 */
enum vfio_device_mig_state {
	VFIO_DEVICE_STATE_ERROR = 0,
	VFIO_DEVICE_STATE_STOP = 1,
	VFIO_DEVICE_STATE_RUNNING = 2,
	VFIO_DEVICE_STATE_STOP_COPY = 3,
	VFIO_DEVICE_STATE_RESUMING = 4,
	VFIO_DEVICE_STATE_RUNNING_P2P = 5,
	VFIO_DEVICE_STATE_PRE_COPY = 6,
	VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
	VFIO_DEVICE_STATE_NR,
};
1244
1245/**
1246 * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
1247 *
1248 * This ioctl is used on the migration data FD in the precopy phase of the
1249 * migration data transfer. It returns an estimate of the current data sizes
1250 * remaining to be transferred. It allows the user to judge when it is
1251 * appropriate to leave PRE_COPY for STOP_COPY.
1252 *
1253 * This ioctl is valid only in PRE_COPY states and kernel driver should
1254 * return -EINVAL from any other migration state.
1255 *
1256 * The vfio_precopy_info data structure returned by this ioctl provides
1257 * estimates of data available from the device during the PRE_COPY states.
1258 * This estimate is split into two categories, initial_bytes and
1259 * dirty_bytes.
1260 *
1261 * The initial_bytes field indicates the amount of initial precopy
1262 * data available from the device. This field should have a non-zero initial
1263 * value and decrease as migration data is read from the device.
1264 * It is recommended to leave PRE_COPY for STOP_COPY only after this field
1265 * reaches zero. Leaving PRE_COPY earlier might make things slower.
1266 *
1267 * The dirty_bytes field tracks device state changes relative to data
1268 * previously retrieved.  This field starts at zero and may increase as
1269 * the internal device state is modified or decrease as that modified
1270 * state is read from the device.
1271 *
1272 * Userspace may use the combination of these fields to estimate the
1273 * potential data size available during the PRE_COPY phases, as well as
1274 * trends relative to the rate the device is dirtying its internal
1275 * state, but these fields are not required to have any bearing relative
1276 * to the data size available during the STOP_COPY phase.
1277 *
1278 * Drivers have a lot of flexibility in when and what they transfer during the
1279 * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
1280 *
1281 * During pre-copy the migration data FD has a temporary "end of stream" that is
1282 * reached when both initial_bytes and dirty_byte are zero. For instance, this
1283 * may indicate that the device is idle and not currently dirtying any internal
1284 * state. When read() is done on this temporary end of stream the kernel driver
1285 * should return ENOMSG from read(). Userspace can wait for more data (which may
1286 * never come) by using poll.
1287 *
1288 * Once in STOP_COPY the migration data FD has a permanent end of stream
1289 * signaled in the usual way by read() always returning 0 and poll always
1290 * returning readable. ENOMSG may not be returned in STOP_COPY.
1291 * Support for this ioctl is mandatory if a driver claims to support
1292 * VFIO_MIGRATION_PRE_COPY.
1293 *
1294 * Return: 0 on success, -1 and errno set on failure.
1295 */
1296struct vfio_precopy_info {
1297	__u32 argsz;
1298	__u32 flags;
1299	__aligned_u64 initial_bytes;
1300	__aligned_u64 dirty_bytes;
1301};
1302
1303#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
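
/*
 * Example (illustrative sketch): the recommended PRE_COPY loop reads
 * migration data until initial_bytes reaches zero, then transitions to
 * STOP_COPY.  data_fd and send_to_destination() are assumptions of the
 * example; error handling is omitted.
 *
 *	struct vfio_precopy_info info = { .argsz = sizeof(info) };
 *	char chunk[65536];
 *	ssize_t n;
 *
 *	do {
 *		while ((n = read(data_fd, chunk, sizeof(chunk))) > 0)
 *			send_to_destination(chunk, n);
 *		if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
 *			break;
 *	} while (info.initial_bytes > 0);
 */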
1304
1305/*
1306 * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
1307 * state with the platform-based power management.  Device use of lower power
1308 * states depends on factors managed by the runtime power management core,
1309 * including system level support and coordinating support among dependent
1310 * devices.  Enabling device low power entry does not guarantee lower power
1311 * usage by the device, nor is a mechanism provided through this feature to
1312 * know the current power state of the device.  If any device access happens
1313 * (either from the host or through the vfio uAPI) when the device is in the
1314 * low power state, then the host will move the device out of the low power
1315 * state as necessary prior to the access.  Once the access is completed, the
1316 * device may re-enter the low power state.  For single shot low power support
1317 * with wake-up notification, see
1318 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below.  Access to mmap'd
1319 * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
1320 * calling LOW_POWER_EXIT.
1321 */
1322#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
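
/*
 * Example (illustrative sketch): this feature carries no payload, so only
 * the vfio_device_feature header defined earlier in this header is passed.
 * device_fd is an assumption of the example.
 *
 *	struct vfio_device_feature feature = {
 *		.argsz = sizeof(feature),
 *		.flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, &feature);
 */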
1323
1324/*
1325 * This device feature has the same behavior as
1326 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
1327 * provides an eventfd for wake-up notification.  When the device moves out of
1328 * the low power state for the wake-up, the host will not allow the device to
1329 * re-enter a low power state without a subsequent user call to one of the low
1330 * power entry device feature IOCTLs.  Access to mmap'd device regions is
1331 * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
1332 * low power exit.  The low power exit can happen either through LOW_POWER_EXIT
1333 * or through any other access (where the wake-up notification has been
1334 * generated).  The access to mmap'd device regions will not trigger low power
1335 * exit.
1336 *
1337 * The notification through the provided eventfd will be generated only when
1338 * the device has entered and is resumed from a low power state after
1339 * calling this device feature IOCTL.  A device that has not entered low power
1340 * state, as managed through the runtime power management core, will not
1341 * generate a notification through the provided eventfd on access.  Calling the
1342 * LOW_POWER_EXIT feature is optional in the case where notification has been
1343 * signaled on the provided eventfd that a resume from low power has occurred.
1344 */
1345struct vfio_device_low_power_entry_with_wakeup {
1346	__s32 wakeup_eventfd;
1347	__u32 reserved;
1348};
1349
1350#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
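
/*
 * Example (illustrative sketch): arm low power entry with an eventfd for
 * the wake-up notification.  device_fd is an assumption of the example and
 * eventfd(2) comes from <sys/eventfd.h>.
 *
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_low_power_entry_with_wakeup)] = {};
 *	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
 *	struct vfio_device_low_power_entry_with_wakeup *entry =
 *		(struct vfio_device_low_power_entry_with_wakeup *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
 *	entry->wakeup_eventfd = eventfd(0, EFD_CLOEXEC);
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 *
 * A later poll() or read() on the eventfd observes the wake-up.
 */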
1351
1352/*
1353 * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
1354 * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
1355 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
1356 * This device feature IOCTL may itself generate a wakeup eventfd notification
1357 * in the latter case if the device had previously entered a low power state.
1358 */
1359#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
1360
1361/*
1362 * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
1363 * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
1364 * DMA logging.
1365 *
1366 * DMA logging allows a device to internally record what DMAs the device is
1367 * initiating and report them back to userspace. It is part of the VFIO
1368 * migration infrastructure that allows implementing dirty page tracking
 * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
1370 * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
1371 *
1372 * When DMA logging is started a range of IOVAs to monitor is provided and the
1373 * device can optimize its logging to cover only the IOVA range given. Each
1374 * DMA that the device initiates inside the range will be logged by the device
1375 * for later retrieval.
1376 *
 * page_size is an input that hints what tracking granularity the device
 * should try to achieve. If the device cannot do the hinted page size then
 * it is the driver's choice which page size to pick based on its support.
 * On output the device will return the page size it selected.
1381 *
1382 * ranges is a pointer to an array of
1383 * struct vfio_device_feature_dma_logging_range.
1384 *
 * The core kernel code guarantees to support at minimum a num_ranges that fits
 * into a single kernel page. Userspace can try higher values, but should fall
 * back to this minimum if the request fails due to driver limitations.
1388 *
1389 * A single call to start device DMA logging can be issued and a matching stop
1390 * should follow at the end. Another start is not allowed in the meantime.
1391 */
1392struct vfio_device_feature_dma_logging_control {
1393	__aligned_u64 page_size;
1394	__u32 num_ranges;
1395	__u32 __reserved;
1396	__aligned_u64 ranges;
1397};
1398
1399struct vfio_device_feature_dma_logging_range {
1400	__aligned_u64 iova;
1401	__aligned_u64 length;
1402};
1403
1404#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
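
/*
 * Example (illustrative sketch): start logging over one IOVA range.
 * device_fd, GUEST_IOVA and GUEST_SIZE are assumptions of the example.
 *
 *	struct vfio_device_feature_dma_logging_range range = {
 *		.iova = GUEST_IOVA,
 *		.length = GUEST_SIZE,
 *	};
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_dma_logging_control)] = {};
 *	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
 *	struct vfio_device_feature_dma_logging_control *ctrl =
 *		(struct vfio_device_feature_dma_logging_control *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
 *	ctrl->page_size = 4096;
 *	ctrl->num_ranges = 1;
 *	ctrl->ranges = (__u64)(uintptr_t)&range;
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 */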
1405
1406/*
1407 * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
1408 * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
1409 */
1410#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
1411
1412/*
1413 * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
1414 *
1415 * Query the device's DMA log for written pages within the given IOVA range.
 * During the query, the log is cleared for the IOVA range.
1417 *
1418 * bitmap is a pointer to an array of u64s that will hold the output bitmap
1419 * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
 * is given by (where page = (addr - iova) / page_size):
 *  bitmap[page / 64] & (1ULL << (page % 64))
1422 *
1423 * The input page_size can be any power of two value and does not have to
1424 * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
1425 * will format its internal logging to match the reporting page size, possibly
1426 * by replicating bits if the internal page size is lower than requested.
1427 *
 * The LOGGING_REPORT will only set bits in the bitmap and never clear or
 * perform any initialization of the user-provided bitmap.
1430 *
1431 * If any error is returned userspace should assume that the dirty log is
1432 * corrupted. Error recovery is to consider all memory dirty and try to
1433 * restart the dirty tracking, or to abort/restart the whole migration.
1434 *
 * If DMA logging is not enabled, an error will be returned.
 */
1438struct vfio_device_feature_dma_logging_report {
1439	__aligned_u64 iova;
1440	__aligned_u64 length;
1441	__aligned_u64 page_size;
1442	__aligned_u64 bitmap;
1443};
1444
1445#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
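
/*
 * Example (illustrative sketch): fetch the dirty log for one range at a
 * 4KiB reporting granularity.  device_fd, GUEST_IOVA and GUEST_SIZE
 * (a multiple of 4096 * 64 here, for simplicity) are assumptions of the
 * example; the bitmap must be zeroed by the caller.
 *
 *	__u64 bitmap[GUEST_SIZE / 4096 / 64] = {};
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_dma_logging_report)] = {};
 *	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
 *	struct vfio_device_feature_dma_logging_report *report =
 *		(struct vfio_device_feature_dma_logging_report *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_GET |
 *			 VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
 *	report->iova = GUEST_IOVA;
 *	report->length = GUEST_SIZE;
 *	report->page_size = 4096;
 *	report->bitmap = (__u64)(uintptr_t)bitmap;
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 */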
1446
1447/*
1448 * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
1449 * be required to complete stop copy.
1450 *
 * Note: Can be called in any device state.
1452 */
1453
1454struct vfio_device_feature_mig_data_size {
1455	__aligned_u64 stop_copy_length;
1456};
1457
1458#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
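
/*
 * Example (illustrative sketch): query the estimate, e.g. to pre-size
 * migration buffers.  device_fd is an assumption of the example.
 *
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_mig_data_size)] = {};
 *	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
 *	struct vfio_device_feature_mig_data_size *size =
 *		(struct vfio_device_feature_mig_data_size *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_GET |
 *			 VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;
 *	if (!ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
 *		reserve_buffer(size->stop_copy_length);
 *
 * reserve_buffer() is a placeholder for whatever sizing policy userspace
 * applies.
 */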
1459
1460/**
1461 * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
1462 * based on the operation specified in op flag.
1463 *
 * The functionality is provided for devices that need bus master control but
 * whose in-band device interface lacks that support. Consequently, it is not
 * applicable to PCI devices, as bus master control for PCI devices is managed
 * in-band through the configuration space. At present, this feature is
 * supported only for CDX devices.
 *
 * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming
 * DMA requests from the device, whereas configuring it as SET (enable) grants
 * the device the capability to perform DMA to the host memory.
1473 */
1474struct vfio_device_feature_bus_master {
1475	__u32 op;
1476#define		VFIO_DEVICE_FEATURE_CLEAR_MASTER	0	/* Clear Bus Master */
1477#define		VFIO_DEVICE_FEATURE_SET_MASTER		1	/* Set Bus Master */
1478};
1479#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
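
/*
 * Example (illustrative sketch): enable bus mastering on a CDX device.
 * device_fd is an assumption of the example.
 *
 *	char buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_bus_master)] = {};
 *	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
 *	struct vfio_device_feature_bus_master *bm =
 *		(struct vfio_device_feature_bus_master *)feature->data;
 *
 *	feature->argsz = sizeof(buf);
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_BUS_MASTER;
 *	bm->op = VFIO_DEVICE_FEATURE_SET_MASTER;
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
 */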
1480
1481/* -------- API for Type1 VFIO IOMMU -------- */
1482
1483/**
1484 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
1485 *
1486 * Retrieve information about the IOMMU object. Fills in provided
1487 * struct vfio_iommu_info. Caller sets argsz.
1488 *
1489 * XXX Should we do these by CHECK_EXTENSION too?
1490 */
1491struct vfio_iommu_type1_info {
1492	__u32	argsz;
1493	__u32	flags;
1494#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
1495#define VFIO_IOMMU_INFO_CAPS	(1 << 1)	/* Info supports caps */
1496	__aligned_u64	iova_pgsizes;		/* Bitmap of supported page sizes */
1497	__u32   cap_offset;	/* Offset within info struct of first cap */
1498	__u32   pad;
1499};
1500
1501/*
 * The IOVA capability allows reporting the valid IOVA range(s)
 * excluding any non-relaxable reserved regions exposed by
 * devices attached to the container. Any DMA map attempt
 * outside the valid IOVA ranges will return an error.
1506 *
1507 * The structures below define version 1 of this capability.
1508 */
1509#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1
1510
1511struct vfio_iova_range {
1512	__u64	start;
1513	__u64	end;
1514};
1515
1516struct vfio_iommu_type1_info_cap_iova_range {
1517	struct	vfio_info_cap_header header;
1518	__u32	nr_iovas;
1519	__u32	reserved;
1520	struct	vfio_iova_range iova_ranges[];
1521};
1522
1523/*
 * The migration capability allows reporting the supported features for migration.
1525 *
1526 * The structures below define version 1 of this capability.
1527 *
 * The existence of this capability indicates that the IOMMU kernel driver
 * supports dirty page logging.
1530 *
1531 * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
1532 * page logging.
1533 * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
1534 * size in bytes that can be used by user applications when getting the dirty
1535 * bitmap.
1536 */
1537#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
1538
1539struct vfio_iommu_type1_info_cap_migration {
1540	struct	vfio_info_cap_header header;
1541	__u32	flags;
1542	__u64	pgsize_bitmap;
1543	__u64	max_dirty_bitmap_size;		/* in bytes */
1544};
1545
1546/*
 * The DMA available capability allows reporting the current number of
 * simultaneously outstanding DMA mappings that are allowed.
1549 *
1550 * The structure below defines version 1 of this capability.
1551 *
1552 * avail: specifies the current number of outstanding DMA mappings allowed.
1553 */
1554#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
1555
1556struct vfio_iommu_type1_info_dma_avail {
1557	struct	vfio_info_cap_header header;
1558	__u32	avail;
1559};
1560
1561#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
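
/*
 * Example (illustrative sketch): the usual two-call pattern sizes the
 * buffer, then walks the capability chain.  container_fd and
 * handle_iova_ranges() are assumptions of the example; error handling is
 * omitted.
 *
 *	struct vfio_iommu_type1_info hdr = { .argsz = sizeof(hdr) };
 *	struct vfio_iommu_type1_info *info = &hdr;
 *
 *	ioctl(container_fd, VFIO_IOMMU_GET_INFO, &hdr);
 *	if (hdr.argsz > sizeof(hdr)) {
 *		info = calloc(1, hdr.argsz);
 *		info->argsz = hdr.argsz;
 *		ioctl(container_fd, VFIO_IOMMU_GET_INFO, info);
 *	}
 *	if (info->flags & VFIO_IOMMU_INFO_CAPS) {
 *		struct vfio_info_cap_header *cap =
 *			(void *)((char *)info + info->cap_offset);
 *		for (;;) {
 *			if (cap->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
 *				handle_iova_ranges((void *)cap);
 *			if (!cap->next)
 *				break;
 *			cap = (void *)((char *)info + cap->next);
 *		}
 *	}
 */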
1562
1563/**
1564 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
1565 *
1566 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_dma_map. Caller sets argsz. The READ and/or WRITE
 * flags are required.
1568 *
1569 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
1570 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
1571 * maintain memory consistency within the user application, the updated vaddr
1572 * must address the same memory object as originally mapped.  Failure to do so
1573 * will result in user memory corruption and/or device misbehavior.  iova and
1574 * size must match those in the original MAP_DMA call.  Protection is not
1575 * changed, and the READ & WRITE flags must be 0.
1576 */
1577struct vfio_iommu_type1_dma_map {
1578	__u32	argsz;
1579	__u32	flags;
1580#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
1581#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
1582#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
1583	__u64	vaddr;				/* Process virtual address */
1584	__u64	iova;				/* IO virtual address */
1585	__u64	size;				/* Size of mapping (bytes) */
1586};
1587
1588#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
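
/*
 * Example (illustrative sketch): map one page of anonymous memory at IOVA 0
 * for device reads and writes.  container_fd is an assumption of the
 * example and must already have an IOMMU model set.
 *
 *	void *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)mem,
 *		.iova = 0,
 *		.size = 4096,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */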
1589
1590struct vfio_bitmap {
1591	__u64        pgsize;	/* page size for bitmap in bytes */
1592	__u64        size;	/* in bytes */
1593	__u64 *data;	/* one bit per page */
1594};
1595
1596/**
1597 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
1598 *							struct vfio_dma_unmap)
1599 *
1600 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
1601 * Caller sets argsz.  The actual unmapped size is returned in the size
1602 * field.  No guarantee is made to the user that arbitrary unmaps of iova
1603 * or size different from those used in the original mapping call will
1604 * succeed.
1605 *
 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
 * before unmapping IO virtual addresses. When this flag is set, the user must
 * provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
 * via vfio_bitmap.data and its size in the vfio_bitmap.size field. Each bit
 * in the bitmap represents one page of the user-provided page size in the
 * vfio_bitmap.pgsize field, consecutively starting from the iova offset. A
 * set bit indicates that the page at that offset from iova is dirty. A bitmap
 * of the pages in the range of the unmapped size is returned in the
 * user-provided vfio_bitmap.data.
1615 *
1616 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
1617 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
1618 *
1619 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
1620 * virtual addresses in the iova range.  DMA to already-mapped pages continues.
1621 * Groups may not be added to the container while any addresses are invalid.
1622 * This cannot be combined with the get-dirty-bitmap flag.
1623 */
1624struct vfio_iommu_type1_dma_unmap {
1625	__u32	argsz;
1626	__u32	flags;
1627#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
1628#define VFIO_DMA_UNMAP_FLAG_ALL		     (1 << 1)
1629#define VFIO_DMA_UNMAP_FLAG_VADDR	     (1 << 2)
1630	__u64	iova;				/* IO virtual address */
1631	__u64	size;				/* Size of mapping (bytes) */
1632	__u8    data[];
1633};
1634
1635#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
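
/*
 * Example (illustrative sketch): unmap a range while collecting the dirty
 * bitmap.  container_fd, IOVA, SIZE and PGSIZE are assumptions of the
 * example; the bitmap holds one bit per PGSIZE page, rounded up to whole
 * 64-bit words.
 *
 *	char buf[sizeof(struct vfio_iommu_type1_dma_unmap) +
 *		 sizeof(struct vfio_bitmap)] = {};
 *	struct vfio_iommu_type1_dma_unmap *unmap =
 *		(struct vfio_iommu_type1_dma_unmap *)buf;
 *	struct vfio_bitmap *bitmap = (struct vfio_bitmap *)unmap->data;
 *
 *	unmap->argsz = sizeof(buf);
 *	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *	unmap->iova = IOVA;
 *	unmap->size = SIZE;
 *	bitmap->pgsize = PGSIZE;
 *	bitmap->size = ((SIZE / PGSIZE + 63) / 64) * 8;
 *	bitmap->data = calloc(1, bitmap->size);
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
 */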
1636
1637/*
1638 * IOCTLs to enable/disable IOMMU container usage.
1639 * No parameters are supported.
1640 */
1641#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
1642#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)
1643
1644/**
1645 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
1646 *                                     struct vfio_iommu_type1_dirty_bitmap)
 * This IOCTL is used for dirty page logging.
 * The caller should set the flag for the operation to perform, as detailed
 * below:
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
 * instructs the IOMMU driver to log pages that are dirtied or potentially
 * dirtied by the device; it is designed to be used when a migration is in
 * progress. Dirty pages are logged until logging is disabled by the user
 * application by calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP
 * flag.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
 * instructs the IOMMU driver to stop logging dirtied pages.
1659 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
 * returns the dirty page bitmap for the IOMMU container for a given IOVA
 * range.
1662 * The user must specify the IOVA range and the pgsize through the structure
1663 * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
 * supports getting a bitmap of the smallest supported pgsize only, and may be
 * extended in the future to get a bitmap of any specified supported pgsize.
 * The user must provide a zeroed memory area for the bitmap and specify its
 * size in bitmap.size. One bit is used to represent one page, consecutively
 * starting from the iova offset. The user should provide the page size in the
 * bitmap.pgsize field. A bit set in the bitmap indicates that the page at
 * that offset from iova is dirty. The caller must set argsz to a value
 * including the size of the structure vfio_iommu_type1_dirty_bitmap_get, but
 * excluding the size of the actual bitmap. If dirty page logging is not
 * enabled, an error will be returned.
1674 *
 * Only one of the flags _START, _STOP and _GET may be specified at a time.
 */
1678struct vfio_iommu_type1_dirty_bitmap {
1679	__u32        argsz;
1680	__u32        flags;
1681#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START	(1 << 0)
1682#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP	(1 << 1)
1683#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP	(1 << 2)
1684	__u8         data[];
1685};
1686
1687struct vfio_iommu_type1_dirty_bitmap_get {
1688	__u64              iova;	/* IO virtual address */
1689	__u64              size;	/* Size of iova range */
1690	struct vfio_bitmap bitmap;
1691};
1692
1693#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
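
/*
 * Example (illustrative sketch): one tracking cycle of start, get, stop.
 * container_fd, IOVA, SIZE and PGSIZE are assumptions of the example;
 * error handling is omitted.
 *
 *	struct vfio_iommu_type1_dirty_bitmap start = {
 *		.argsz = sizeof(start),
 *		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
 *	};
 *	char buf[sizeof(struct vfio_iommu_type1_dirty_bitmap) +
 *		 sizeof(struct vfio_iommu_type1_dirty_bitmap_get)] = {};
 *	struct vfio_iommu_type1_dirty_bitmap *get =
 *		(struct vfio_iommu_type1_dirty_bitmap *)buf;
 *	struct vfio_iommu_type1_dirty_bitmap_get *range =
 *		(struct vfio_iommu_type1_dirty_bitmap_get *)get->data;
 *
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start);
 *	get->argsz = sizeof(buf);
 *	get->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *	range->iova = IOVA;
 *	range->size = SIZE;
 *	range->bitmap.pgsize = PGSIZE;
 *	range->bitmap.size = ((SIZE / PGSIZE + 63) / 64) * 8;
 *	range->bitmap.data = calloc(1, range->bitmap.size);
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, get);
 *	start.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start);
 */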
1694
1695/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
1696
1697/*
 * The SPAPR TCE DDW info struct provides information about
 * the details of the dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask; 4K/64K/16M are supported.
1702 * @max_dynamic_windows_supported tells the maximum number of windows
1703 * which the platform can create.
1704 * @levels tells the maximum number of levels in multi-level IOMMU tables;
1705 * this allows splitting a table into smaller chunks which reduces
1706 * the amount of physically contiguous memory required for the table.
1707 */
1708struct vfio_iommu_spapr_tce_ddw_info {
1709	__u64 pgsizes;			/* Bitmap of supported page sizes */
1710	__u32 max_dynamic_windows_supported;
1711	__u32 levels;
1712};
1713
1714/*
 * The SPAPR TCE info struct provides information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware, so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too, so the window works as a filter rather than an offset
 * for IOVA addresses.
1723 *
1724 * Flags supported:
1725 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
1726 *   (DDW) support is present. @ddw is only supported when DDW is present.
1727 */
1728struct vfio_iommu_spapr_tce_info {
1729	__u32 argsz;
1730	__u32 flags;
1731#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
1732	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
1733	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
1734	struct vfio_iommu_spapr_tce_ddw_info ddw;
1735};
1736
1737#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
1738
1739/*
1740 * EEH PE operation struct provides ways to:
1741 * - enable/disable EEH functionality;
1742 * - unfreeze IO/DMA for frozen PE;
1743 * - read PE state;
1744 * - reset PE;
1745 * - configure PE;
1746 * - inject EEH error.
1747 */
1748struct vfio_eeh_pe_err {
1749	__u32 type;
1750	__u32 func;
1751	__u64 addr;
1752	__u64 mask;
1753};
1754
1755struct vfio_eeh_pe_op {
1756	__u32 argsz;
1757	__u32 flags;
1758	__u32 op;
1759	union {
1760		struct vfio_eeh_pe_err err;
1761	};
1762};
1763
1764#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
1765#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
1766#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
1767#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
1768#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
1769#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
1770#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
1771#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
1772#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
1773#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
1774#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
1775#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
1776#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
1777#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
1778#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */
1779
1780#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)
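
/*
 * Example (illustrative sketch): enable EEH and read back the PE state;
 * for VFIO_EEH_PE_GET_STATE the state is the ioctl return value.
 * container_fd is an assumption of the example.
 *
 *	struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
 *	int state;
 *
 *	op.op = VFIO_EEH_PE_ENABLE;
 *	ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 *	op.op = VFIO_EEH_PE_GET_STATE;
 *	state = ioctl(container_fd, VFIO_EEH_PE_OP, &op);
 */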
1781
1782/**
1783 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
1784 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and does the locked memory accounting, so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * are faster.
1789 */
1790struct vfio_iommu_spapr_register_memory {
1791	__u32	argsz;
1792	__u32	flags;
1793	__u64	vaddr;				/* Process virtual address */
1794	__u64	size;				/* Size of mapping (bytes) */
1795};
1796#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)
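
/*
 * Example (illustrative sketch): preregister a page-aligned buffer so later
 * map/unmap calls avoid per-call pinning costs.  container_fd, buffer and
 * buffer_size are assumptions of the example.
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.vaddr = (__u64)(uintptr_t)buffer,
 *		.size = buffer_size,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 */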
1797
1798/**
1799 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
1800 *
1801 * Unregisters user space memory registered with
1802 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
1803 * Uses vfio_iommu_spapr_register_memory for parameters.
1804 */
1805#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)
1806
1807/**
1808 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
1809 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * into every IOMMU group in the container. It receives the page shift, the
 * window size and the number of levels of the TCE table being created.
 *
 * It allocates the new DMA window and returns its offset on the PCI bus.
1815 */
1816struct vfio_iommu_spapr_tce_create {
1817	__u32 argsz;
1818	__u32 flags;
1819	/* in */
1820	__u32 page_shift;
1821	__u32 __resv1;
1822	__u64 window_size;
1823	__u32 levels;
1824	__u32 __resv2;
1825	/* out */
1826	__u64 start_addr;
1827};
1828#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)
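
/*
 * Example (illustrative sketch): create a 1GiB window of 64KiB pages with
 * a single-level TCE table.  container_fd is an assumption of the example;
 * on success the new window starts at create.start_addr.
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = 1ULL << 30,
 *		.levels = 1,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 */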
1829
1830/**
1831 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
1832 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as the window id.
1835 */
1836struct vfio_iommu_spapr_tce_remove {
1837	__u32 argsz;
1838	__u32 flags;
1839	/* in */
1840	__u64 start_addr;
1841};
1842#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)
1843
1844/* ***************************************************************** */
1845
1846#endif /* VFIO_H */