author     pk33 <pk33@pk33.space>  2024-11-01 23:51:15 +0100
committer  pk33 <pk33@pk33.space>  2024-11-01 23:51:15 +0100
commit     294b585994da4427ac98a8353ff41aed5f301d54 (patch)
tree       aafcd885c1fe28819ef1a511da5e6344411b5597 /include/libdrm
parent     f0958551b43959174d28b2056f584d2081494e6c (diff)
download   engine33-294b585994da4427ac98a8353ff41aed5f301d54.tar.gz
Import and fix old code
Diffstat (limited to 'include/libdrm')
-rw-r--r--  include/libdrm/libdrm/amdgpu.h  1876
-rw-r--r--  include/libdrm/libdrm/amdgpu_drm.h  1161
-rw-r--r--  include/libdrm/libdrm/drm.h  1183
-rw-r--r--  include/libdrm/libdrm/drm_fourcc.h  1459
-rw-r--r--  include/libdrm/libdrm/drm_mode.h  1129
-rw-r--r--  include/libdrm/libdrm/drm_sarea.h  92
-rw-r--r--  include/libdrm/libdrm/i915_drm.h  3724
-rw-r--r--  include/libdrm/libdrm/intel_aub.h  153
-rw-r--r--  include/libdrm/libdrm/intel_bufmgr.h  341
-rw-r--r--  include/libdrm/libdrm/intel_debug.h  44
-rw-r--r--  include/libdrm/libdrm/mach64_drm.h  256
-rw-r--r--  include/libdrm/libdrm/mga_drm.h  427
-rw-r--r--  include/libdrm/libdrm/msm_drm.h  308
-rw-r--r--  include/libdrm/libdrm/nouveau/nouveau.h  280
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/cl0080.h  45
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/cl9097.h  44
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/class.h  141
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/if0002.h  38
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/if0003.h  33
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/ioctl.h  132
-rw-r--r--  include/libdrm/libdrm/nouveau/nvif/unpack.h  28
-rw-r--r--  include/libdrm/libdrm/nouveau_drm.h  256
-rw-r--r--  include/libdrm/libdrm/qxl_drm.h  158
-rw-r--r--  include/libdrm/libdrm/r128_drm.h  336
-rw-r--r--  include/libdrm/libdrm/r600_pci_ids.h  487
-rw-r--r--  include/libdrm/libdrm/radeon_bo.h  73
-rw-r--r--  include/libdrm/libdrm/radeon_bo_gem.h  48
-rw-r--r--  include/libdrm/libdrm/radeon_bo_int.h  45
-rw-r--r--  include/libdrm/libdrm/radeon_cs.h  141
-rw-r--r--  include/libdrm/libdrm/radeon_cs_gem.h  41
-rw-r--r--  include/libdrm/libdrm/radeon_cs_int.h  67
-rw-r--r--  include/libdrm/libdrm/radeon_drm.h  1079
-rw-r--r--  include/libdrm/libdrm/radeon_surface.h  149
-rw-r--r--  include/libdrm/libdrm/savage_drm.h  220
-rw-r--r--  include/libdrm/libdrm/sis_drm.h  77
-rw-r--r--  include/libdrm/libdrm/tegra_drm.h  1060
-rw-r--r--  include/libdrm/libdrm/vc4_drm.h  442
-rw-r--r--  include/libdrm/libdrm/via_drm.h  283
-rw-r--r--  include/libdrm/libdrm/virtgpu_drm.h  182
-rw-r--r--  include/libdrm/libdrm/vmwgfx_drm.h  1128
-rw-r--r--  include/libdrm/libsync.h  148
-rw-r--r--  include/libdrm/xf86drm.h  969
-rw-r--r--  include/libdrm/xf86drmMode.h  536
43 files changed, 20819 insertions, 0 deletions
diff --git a/include/libdrm/libdrm/amdgpu.h b/include/libdrm/libdrm/amdgpu.h
new file mode 100644
index 0000000..5ef2524
--- /dev/null
+++ b/include/libdrm/libdrm/amdgpu.h
@@ -0,0 +1,1876 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/**
+ * \file amdgpu.h
+ *
+ * Declare public libdrm_amdgpu API
+ *
+ * This file defines the API exposed by the libdrm_amdgpu library.
+ * Users who want to use libdrm_amdgpu functionality must include
+ * this file.
+ *
+ */
+#ifndef _AMDGPU_H_
+#define _AMDGPU_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct drm_amdgpu_info_hw_ip;
+struct drm_amdgpu_bo_list_entry;
+
+/*--------------------------------------------------------------------------*/
+/* --------------------------- Defines ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define the max. number of Command Buffers (IBs) which can be sent to a
+ * single hardware IP to accommodate CE/DE requirements
+ *
+ * \sa amdgpu_cs_ib_info
+*/
+#define AMDGPU_CS_MAX_IBS_PER_SUBMIT 4
+
+/**
+ * Special timeout value meaning that the timeout is infinite.
+ */
+#define AMDGPU_TIMEOUT_INFINITE 0xffffffffffffffffull
+
+/**
+ * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout
+ * is absolute.
+ */
+#define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE (1 << 0)
+
+/*--------------------------------------------------------------------------*/
+/* ----------------------------- Enums ------------------------------------ */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Enum describing possible handle types
+ *
+ * \sa amdgpu_bo_import, amdgpu_bo_export
+ *
+*/
+enum amdgpu_bo_handle_type {
+ /** GEM flink name (needs DRM authentication, used by DRI2) */
+ amdgpu_bo_handle_type_gem_flink_name = 0,
+
+ /** KMS handle which is used by all driver ioctls */
+ amdgpu_bo_handle_type_kms = 1,
+
+ /** DMA-buf fd handle */
+ amdgpu_bo_handle_type_dma_buf_fd = 2,
+
+	/** Deprecated in favour of amdgpu_bo_handle_type_kms, which has the
+	 * same behaviour; use that instead of this
+	 */
+ amdgpu_bo_handle_type_kms_noimport = 3,
+};
+
+/** Define known types of GPU VM VA ranges */
+enum amdgpu_gpu_va_range
+{
+ /** Allocate from "normal"/general range */
+ amdgpu_gpu_va_range_general = 0
+};
+
+enum amdgpu_sw_info {
+ amdgpu_sw_info_address32_hi = 0,
+};
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Datatypes ----------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Define an opaque pointer to the context associated with an fd.
+ * This context is returned by the "initialize" function and must be
+ * passed as the first parameter to any API call
+ */
+typedef struct amdgpu_device *amdgpu_device_handle;
+
+/**
+ * Define GPU Context type as a pointer to an opaque structure.
+ * An example of a GPU Context is the "rendering" context associated
+ * with an OpenGL context (glCreateContext)
+ */
+typedef struct amdgpu_context *amdgpu_context_handle;
+
+/**
+ * Define handle for amdgpu resources: buffer, GDS, etc.
+ */
+typedef struct amdgpu_bo *amdgpu_bo_handle;
+
+/**
+ * Define handle for list of BOs
+ */
+typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
+
+/**
+ * Define handle to be used to work with VA allocated ranges
+ */
+typedef struct amdgpu_va *amdgpu_va_handle;
+
+/**
+ * Define handle for semaphore
+ */
+typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
+
+/*--------------------------------------------------------------------------*/
+/* -------------------------- Structures ---------------------------------- */
+/*--------------------------------------------------------------------------*/
+
+/**
+ * Structure describing memory allocation request
+ *
+ * \sa amdgpu_bo_alloc()
+ *
+*/
+struct amdgpu_bo_alloc_request {
+	/** Requested allocation size. It must be correctly aligned. */
+ uint64_t alloc_size;
+
+	/**
+	 * Some use cases (e.g. displayable surfaces) may impose specific
+	 * alignment requirements on the physical backing storage.
+	 * If 0, there is no special alignment requirement
+	 */
+ uint64_t phys_alignment;
+
+ /**
+ * UMD should specify where to allocate memory and how it
+ * will be accessed by the CPU.
+ */
+ uint32_t preferred_heap;
+
+ /** Additional flags passed on allocation */
+ uint64_t flags;
+};
+
+/**
+ * Special UMD specific information associated with buffer.
+ *
+ * It may be necessary to pass some buffer characteristics as part
+ * of buffer sharing. Such information is defined by the UMD and is
+ * opaque to libdrm_amdgpu as well as to the kernel driver.
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
+ * amdgpu_bo_import(), amdgpu_bo_export
+ *
+*/
+struct amdgpu_bo_metadata {
+ /** Special flag associated with surface */
+ uint64_t flags;
+
+ /**
+ * ASIC-specific tiling information (also used by DCE).
+ * The encoding is defined by the AMDGPU_TILING_* definitions.
+ */
+ uint64_t tiling_info;
+
+ /** Size of metadata associated with the buffer, in bytes. */
+ uint32_t size_metadata;
+
+ /** UMD specific metadata. Opaque for kernel */
+ uint32_t umd_metadata[64];
+};
+
+/**
+ * Structure describing an allocated buffer. A client may need
+ * to query this information as part of the buffer-sharing mechanism
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
+ * amdgpu_bo_import(), amdgpu_bo_export()
+*/
+struct amdgpu_bo_info {
+ /** Allocated memory size */
+ uint64_t alloc_size;
+
+	/**
+	 * Specific alignment requirement, if any, of the physical
+	 * backing storage.
+	 */
+ uint64_t phys_alignment;
+
+ /** Heap where to allocate memory. */
+ uint32_t preferred_heap;
+
+ /** Additional allocation flags. */
+ uint64_t alloc_flags;
+
+ /** Metadata associated with buffer if any. */
+ struct amdgpu_bo_metadata metadata;
+};
+
+/**
+ * Structure with information about "imported" buffer
+ *
+ * \sa amdgpu_bo_import()
+ *
+ */
+struct amdgpu_bo_import_result {
+ /** Handle of memory/buffer to use */
+ amdgpu_bo_handle buf_handle;
+
+ /** Buffer size */
+ uint64_t alloc_size;
+};
+
+/**
+ *
+ * Structure to describe GDS partitioning information.
+ * \note OA and GWS resources are associated with GDS partition
+ *
+ * \sa amdgpu_gpu_resource_query_gds_info
+ *
+*/
+struct amdgpu_gds_resource_info {
+ uint32_t gds_gfx_partition_size;
+ uint32_t compute_partition_size;
+ uint32_t gds_total_size;
+ uint32_t gws_per_gfx_partition;
+ uint32_t gws_per_compute_partition;
+ uint32_t oa_per_gfx_partition;
+ uint32_t oa_per_compute_partition;
+};
+
+/**
+ * Structure describing CS fence
+ *
+ * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_fence {
+
+ /** In which context IB was sent to execution */
+ amdgpu_context_handle context;
+
+ /** To which HW IP type the fence belongs */
+ uint32_t ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ uint32_t ip_instance;
+
+ /** Ring index of the HW IP */
+ uint32_t ring;
+
+ /** Specify fence for which we need to check submission status.*/
+ uint64_t fence;
+};
+
+/**
+ * Structure describing IB
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_submit()
+ *
+*/
+struct amdgpu_cs_ib_info {
+ /** Special flags */
+ uint64_t flags;
+
+ /** Virtual MC address of the command buffer */
+ uint64_t ib_mc_address;
+
+ /**
+ * Size of Command Buffer to be submitted.
+ * - The size is in units of dwords (4 bytes).
+ * - Could be 0
+ */
+ uint32_t size;
+};
+
+/**
+ * Structure describing fence information
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
+ * amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
+*/
+struct amdgpu_cs_fence_info {
+ /** buffer object for the fence */
+ amdgpu_bo_handle handle;
+
+ /** fence offset in the unit of sizeof(uint64_t) */
+ uint64_t offset;
+};
+
+/**
+ * Structure describing submission request
+ *
+ * \note We may have several IBs in one packet, e.g. the CE, CE, DE case for gfx
+ *
+ * \sa amdgpu_cs_submit()
+*/
+struct amdgpu_cs_request {
+ /** Specify flags with additional information */
+ uint64_t flags;
+
+ /** Specify HW IP block type to which to send the IB. */
+ unsigned ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ unsigned ip_instance;
+
+ /**
+ * Specify ring index of the IP. We could have several rings
+ * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
+ */
+ uint32_t ring;
+
+ /**
+ * List handle with resources used by this request.
+ */
+ amdgpu_bo_list_handle resources;
+
+ /**
+ * Number of dependencies this Command submission needs to
+ * wait for before starting execution.
+ */
+ uint32_t number_of_dependencies;
+
+ /**
+ * Array of dependencies which need to be met before
+ * execution can start.
+ */
+ struct amdgpu_cs_fence *dependencies;
+
+ /** Number of IBs to submit in the field ibs. */
+ uint32_t number_of_ibs;
+
+ /**
+	 * IBs to submit. Those IBs will be submitted together as a single entity
+ */
+ struct amdgpu_cs_ib_info *ibs;
+
+ /**
+ * The returned sequence number for the command submission
+ */
+ uint64_t seq_no;
+
+ /**
+ * The fence information
+ */
+ struct amdgpu_cs_fence_info fence_info;
+};
+
+/**
+ * Structure which provides information about GPU VM MC address space
+ * alignment requirements
+ *
+ * \sa amdgpu_query_buffer_size_alignment
+ */
+struct amdgpu_buffer_size_alignments {
+ /** Size alignment requirement for allocation in
+ * local memory */
+ uint64_t size_local;
+
+ /**
+ * Size alignment requirement for allocation in remote memory
+ */
+ uint64_t size_remote;
+};
+
+/**
+ * Structure which provides information about a heap
+ *
+ * \sa amdgpu_query_heap_info()
+ *
+ */
+struct amdgpu_heap_info {
+ /** Theoretical max. available memory in the given heap */
+ uint64_t heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * heap_size.
+ */
+ uint64_t heap_usage;
+
+ /**
+ * Theoretical possible max. size of buffer which
+ * could be allocated in the given heap
+ */
+ uint64_t max_allocation;
+};
+
+/**
+ * Describes the GPU h/w info needed for correct UMD initialization
+ *
+ * \sa amdgpu_query_gpu_info()
+*/
+struct amdgpu_gpu_info {
+ /** Asic id */
+ uint32_t asic_id;
+ /** Chip revision */
+ uint32_t chip_rev;
+ /** Chip external revision */
+ uint32_t chip_external_rev;
+ /** Family ID */
+ uint32_t family_id;
+ /** Special flags */
+ uint64_t ids_flags;
+ /** max engine clock*/
+ uint64_t max_engine_clk;
+ /** max memory clock */
+ uint64_t max_memory_clk;
+ /** number of shader engines */
+ uint32_t num_shader_engines;
+ /** number of shader arrays per engine */
+ uint32_t num_shader_arrays_per_engine;
+ /** Number of available good shader pipes */
+ uint32_t avail_quad_shader_pipes;
+	/** Max. number of shader pipes (including good and bad pipes) */
+ uint32_t max_quad_shader_pipes;
+ /** Number of parameter cache entries per shader quad pipe */
+ uint32_t cache_entries_per_quad_pipe;
+	/** Number of available graphics contexts */
+ uint32_t num_hw_gfx_contexts;
+ /** Number of render backend pipes */
+ uint32_t rb_pipes;
+ /** Enabled render backend pipe mask */
+ uint32_t enabled_rb_pipes_mask;
+ /** Frequency of GPU Counter */
+ uint32_t gpu_counter_freq;
+ /** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
+ uint32_t backend_disable[4];
+ /** Value of MC_ARB_RAMCFG register*/
+ uint32_t mc_arb_ramcfg;
+ /** Value of GB_ADDR_CONFIG */
+ uint32_t gb_addr_cfg;
+ /** Values of the GB_TILE_MODE0..31 registers */
+ uint32_t gb_tile_mode[32];
+ /** Values of GB_MACROTILE_MODE0..15 registers */
+ uint32_t gb_macro_tile_mode[16];
+ /** Value of PA_SC_RASTER_CONFIG register per SE */
+ uint32_t pa_sc_raster_cfg[4];
+ /** Value of PA_SC_RASTER_CONFIG_1 register per SE */
+ uint32_t pa_sc_raster_cfg1[4];
+ /* CU info */
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t cu_bitmap[4][4];
+ /* video memory type info*/
+ uint32_t vram_type;
+ /* video memory bit width*/
+ uint32_t vram_bit_width;
+ /** constant engine ram size*/
+ uint32_t ce_ram_size;
+ /* vce harvesting instance */
+ uint32_t vce_harvest_config;
+ /* PCI revision ID */
+ uint32_t pci_rev_id;
+};
+
+
+/*--------------------------------------------------------------------------*/
+/*------------------------- Functions --------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Initialization / Cleanup
+ *
+*/
+
+/**
+ *
+ * \param fd - \c [in] File descriptor for AMD GPU device
+ * received previously as the result of
+ * e.g. drmOpen() call.
+ * For legacy fd type, the DRI2/DRI3
+ * authentication should be done before
+ * calling this function.
+ * \param major_version - \c [out] Major version of library. It is assumed
+ * that adding new functionality will cause
+ * increase in major version
+ * \param minor_version - \c [out] Minor version of library
+ * \param device_handle - \c [out] Pointer to opaque context which should
+ * be passed as the first parameter on each
+ * API call
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ *
+ * \sa amdgpu_device_deinitialize()
+*/
+int amdgpu_device_initialize(int fd,
+ uint32_t *major_version,
+ uint32_t *minor_version,
+ amdgpu_device_handle *device_handle);
+
+/**
+ *
+ * When access to the library is no longer needed, this function must be
+ * called, giving the opportunity to clean up any resources if
+ * needed.
+ *
+ * \param device_handle - \c [in] Context associated with file
+ * descriptor for AMD GPU device
+ * received previously as the
+ * result e.g. of drmOpen() call.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_device_initialize()
+ *
+*/
+int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
+
+/**
+ *
+ * \param device_handle - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ *
+ * \return Returns the drm fd used for operations on this
+ * device. This is still owned by the library and hence
+ * should not be closed. Guaranteed to be valid until
+ * #amdgpu_device_deinitialize gets called.
+ *
+*/
+int amdgpu_device_get_fd(amdgpu_device_handle device_handle);
+
+/*
+ * Memory Management
+ *
+*/
+
+/**
+ * Allocate memory to be used by UMD for GPU related operations
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param alloc_buffer - \c [in] Pointer to the structure describing an
+ * allocation request
+ * \param buf_handle - \c [out] Allocated buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_free()
+*/
+int amdgpu_bo_alloc(amdgpu_device_handle dev,
+ struct amdgpu_bo_alloc_request *alloc_buffer,
+ amdgpu_bo_handle *buf_handle);
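+
+/* Minimal allocation sketch (illustrative; error handling omitted).
+ * Assumes a device handle "dev"; AMDGPU_GEM_DOMAIN_GTT comes from
+ * amdgpu_drm.h.
+ *
+ * \code
+ * struct amdgpu_bo_alloc_request req = {0};
+ * amdgpu_bo_handle bo;
+ * req.alloc_size = 4096;
+ * req.phys_alignment = 4096;
+ * req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
+ * if (amdgpu_bo_alloc(dev, &req, &bo) == 0) {
+ *         // ... use bo ...
+ *         amdgpu_bo_free(bo);
+ * }
+ * \endcode
+ */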
+
+/**
+ * Associate opaque data with buffer to be queried by another UMD
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info       - \c [in] Metadata to associate with the buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_metadata *info);
+
+/**
+ * Query buffer information, including metadata previously associated with
+ * the buffer.
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param info - \c [out] Structure describing buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+*/
+int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
+ struct amdgpu_bo_info *info);
+
+/**
+ * Allow others to get access to buffer
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [out] Special "shared" handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_import()
+ *
+*/
+int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
+ enum amdgpu_bo_handle_type type,
+ uint32_t *shared_handle);
+
+/**
+ * Request access to "shared" buffer
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param type - \c [in] Type of handle requested
+ * \param shared_handle - \c [in] Shared handle received as the result of
+ *			   an "export" operation
+ * \param output - \c [out] Pointer to structure with information
+ * about imported buffer
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note The buffer must be "imported" only using a new "fd" (different from
+ *	 the one used by the "exporter").
+ *
+ * \sa amdgpu_bo_export()
+ *
+*/
+int amdgpu_bo_import(amdgpu_device_handle dev,
+ enum amdgpu_bo_handle_type type,
+ uint32_t shared_handle,
+ struct amdgpu_bo_import_result *output);
+
+/**
+ * Request GPU access to user allocated memory e.g. via "malloc"
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory, to be
+ *			referenced on submission and used in other operations.
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note
+ * This call doesn't guarantee that such memory will be persistently
+ * "locked" / made non-pageable. The purpose of this call is to provide the
+ * opportunity for the GPU to access this resource during submission.
+ *
+ * The maximum amount of memory which can be mapped in this call depends
+ * on whether overcommit is disabled. If overcommit is disabled, the maximum
+ * amount of memory that can be pinned is limited by the remaining "free"
+ * portion of the total amount of memory which can be locked simultaneously
+ * (the "GART" size).
+ *
+ * The supported (theoretical) max. size of a mapping is restricted only by
+ * the "GART" size.
+ *
+ * It is the caller's responsibility to correctly specify access rights
+ * on VA assignment.
+*/
+int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
+ void *cpu, uint64_t size,
+ amdgpu_bo_handle *buf_handle);
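+
+/* Userptr sketch (illustrative): wrap page-aligned malloc'ed memory in a BO.
+ * Assumes a device handle "dev"; posix_memalign() is from stdlib.h.
+ *
+ * \code
+ * void *ptr = NULL;
+ * amdgpu_bo_handle ubo;
+ * if (posix_memalign(&ptr, 4096, 65536) == 0 &&
+ *     amdgpu_create_bo_from_user_mem(dev, ptr, 65536, &ubo) == 0) {
+ *         // ... map a VA and use ubo on the GPU ...
+ *         amdgpu_bo_free(ubo);
+ * }
+ * free(ptr);
+ * \endcode
+ */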
+
+/**
+ * Validate if the user memory comes from BO
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param cpu - [in] CPU address of user allocated memory which we
+ * want to map to GPU address space (make GPU accessible)
+ * (This address must be correctly aligned).
+ * \param size - [in] Size of allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the userptr memory
+ * if the user memory is not from BO, the buf_handle will be NULL.
+ * \param offset_in_bo - [out] offset in this BO for this user memory
+ *
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
+ void *cpu,
+ uint64_t size,
+ amdgpu_bo_handle *buf_handle,
+ uint64_t *offset_in_bo);
+
+/**
+ * Free previously allocated memory
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle to free
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note In the case of memory shared between different applications, all
+ *	 resources will be "physically" freed only once all such applications
+ *	 have terminated
+ * \note It is the UMD's responsibility to 'free' a buffer only when there is
+ *	 no more GPU access
+ *
+ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
+ *
+*/
+int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
+
+/**
+ * Increase the reference count of a buffer object
+ *
+ * \param bo - \c [in] Buffer object handle to increase the reference count
+ *
+ * \sa amdgpu_bo_alloc(), amdgpu_bo_free()
+ *
+*/
+void amdgpu_bo_inc_ref(amdgpu_bo_handle bo);
+
+/**
+ * Request CPU access to GPU accessible memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ * \param cpu - \c [out] CPU address to be used for access
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_unmap()
+ *
+*/
+int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
+
+/**
+ * Release CPU access to GPU memory
+ *
+ * \param buf_handle - \c [in] Buffer handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_cpu_map()
+ *
+*/
+int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
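+
+/* CPU access sketch (illustrative): map, write, unmap. Assumes "bo" is a
+ * 4096-byte CPU-mappable buffer; memset() is from string.h.
+ *
+ * \code
+ * void *cpu;
+ * if (amdgpu_bo_cpu_map(bo, &cpu) == 0) {
+ *         memset(cpu, 0, 4096);
+ *         amdgpu_bo_cpu_unmap(bo);
+ * }
+ * \endcode
+ */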
+
+/**
+ * Wait until a buffer is not used by the device.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param buf_handle - \c [in] Buffer handle.
+ * \param timeout_ns - Timeout in nanoseconds.
+ * \param buffer_busy - 0 if buffer is idle, all GPU access was completed
+ *			 and no GPU access is scheduled.
+ *			 1 if GPU access is in flight or scheduled
+ *
+ * \return 0 - on success
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
+ uint64_t timeout_ns,
+ bool *buffer_busy);
+
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_buffers - \c [in] Number of BOs in the list
+ * \param buffers - \c [in] List of BO handles
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy_raw(), amdgpu_cs_submit_raw2()
+*/
+int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
+ uint32_t number_of_buffers,
+ struct drm_amdgpu_bo_list_entry *buffers,
+ uint32_t *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param bo_list - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
+*/
+int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);
+
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy()
+*/
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param handle - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);
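+
+/* BO list sketch (illustrative): a single-entry list with default
+ * priorities (NULL for resource_prios, since priorities are optional).
+ *
+ * \code
+ * amdgpu_bo_list_handle list;
+ * if (amdgpu_bo_list_create(dev, 1, &bo, NULL, &list) == 0) {
+ *         // ... submit work referencing the list ...
+ *         amdgpu_bo_list_destroy(list);
+ * }
+ * \endcode
+ */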
+
+/**
+ * Update resources for existing BO list
+ *
+ * \param handle - \c [in] BO list handle
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios);
+
+/*
+ * GPU Execution context
+ *
+*/
+
+/**
+ * Create GPU execution Context
+ *
+ * For the purposes of the GPU Scheduler and GPU Robustness extensions it is
+ * necessary to have information identifying rendering/compute contexts.
+ * It may also be needed to associate some specific requirements with such
+ * contexts. The kernel driver guarantees that submissions from the same
+ * context will always be executed in order (first come, first served).
+ *
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
+ * \param context - \c [out] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_free()
+ *
+*/
+int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
+ uint32_t priority,
+ amdgpu_context_handle *context);
+/**
+ * Create GPU execution Context
+ *
+ * Refer to amdgpu_cs_ctx_create2 for full documentation. This call
+ * is missing the priority parameter.
+ *
+ * \sa amdgpu_cs_ctx_create2()
+ *
+*/
+int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
+ amdgpu_context_handle *context);
+
+/**
+ *
+ * Destroy GPU execution context when not needed any more
+ *
+ * \param context - \c [in] GPU Context handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_ctx_free(amdgpu_context_handle context);
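+
+/* Context lifecycle sketch (illustrative), assuming a device handle "dev":
+ *
+ * \code
+ * amdgpu_context_handle ctx;
+ * if (amdgpu_cs_ctx_create(dev, &ctx) == 0) {
+ *         // ... submit work in ctx ...
+ *         amdgpu_cs_ctx_free(ctx);
+ * }
+ * \endcode
+ */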
+
+/**
+ * Override the submission priority for the given context using a master fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param master_fd - \c [in] The master fd to authorize the override.
+ * \param priority - \c [in] The priority to assign to the context.
+ *
+ * \return 0 on success or a negative POSIX error code on failure.
+ */
+int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ int master_fd,
+ unsigned priority);
+
+/**
+ * Set or query the stable power state for GPU profiling.
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param op - \c [in] AMDGPU_CTX_OP_{GET,SET}_STABLE_PSTATE
+ * \param flags - \c [in] AMDGPU_CTX_STABLE_PSTATE_*
+ * \param out_flags - \c [out] output current stable pstate
+ *
+ * \return 0 on success otherwise POSIX Error code.
+ */
+int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
+ uint32_t op,
+ uint32_t flags,
+ uint32_t *out_flags);
+
+/**
+ * Query reset state for the specific GPU Context
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param state - \c [out] One of AMDGPU_CTX_*_RESET
+ * \param hangs - \c [out] Number of hangs caused by the context.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
+ uint32_t *state, uint32_t *hangs);
+
+/**
+ * Query reset state for the specific GPU Context.
+ *
+ * \param context - \c [in] GPU Context handle
+ * \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_cs_ctx_create()
+ *
+*/
+int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
+ uint64_t *flags);
+
+/*
+ * Command Buffers Management
+ *
+*/
+
+/**
+ * Send request to submit command buffers to hardware.
+ *
+ * The kernel driver may use the GPU Scheduler to decide when to physically
+ * send this request to the hardware. Accordingly, this request may be queued
+ * and sent for execution later. The only guarantee is that requests
+ * from the same GPU context to the same ip:ip_instance:ring will be executed
+ * in order.
+ *
+ * The caller can specify the user fence buffer/location with the fence_info
+ * in the cs_request. The sequence number is returned via the 'seq_no' field
+ * in the ibs_request structure.
+ *
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param context - \c [in] GPU Context
+ * \param flags - \c [in] Global submission flags
+ * \param ibs_request - \c [in/out] Pointer to submission requests.
+ *				    We can submit to several
+ *				    engines/rings simultaneously as an
+ *				    'atomic' operation
+ * \param number_of_requests - \c [in] Number of submission requests
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note It is required to pass a correct resource list with the buffer
+ *	 handles which will be accessed by the command buffers of the
+ *	 submission. This allows the kernel driver to correctly implement
+ *	 "paging". Failure to do so will have unpredictable results.
+ *
+ * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
+ * amdgpu_cs_query_fence_status()
+ *
+*/
+int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests);
+
+/**
+ * Query status of Command Buffer Submission
+ *
+ * \param fence - \c [in] Structure describing fence to query
+ * \param timeout_ns - \c [in] Timeout value to wait
+ * \param flags - \c [in] Flags for the query
+ * \param expired - \c [out] If fence expired or not.\n
+ * 0 – if fence is not expired\n
+ * !0 - otherwise
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note If the UMD only wants to check the operation status and return
+ *	 immediately, then a timeout value of 0 must be passed. In this case
+ *	 success is returned if the submission has completed; otherwise a
+ *	 timeout error code is returned.
+ *
+ * \sa amdgpu_cs_submit()
+*/
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
+ uint64_t timeout_ns,
+ uint64_t flags,
+ uint32_t *expired);
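+
+/* Fence wait sketch (illustrative): block until the submission from the
+ * sketch above retires (reuses the hypothetical "ctx" and "req").
+ *
+ * \code
+ * struct amdgpu_cs_fence fence = {0};
+ * uint32_t expired = 0;
+ * fence.context = ctx;
+ * fence.ip_type = AMDGPU_HW_IP_GFX;
+ * fence.fence = req.seq_no;
+ * amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE, 0, &expired);
+ * \endcode
+ */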
+
+/**
+ * Wait for multiple fences
+ *
+ * \param fences - \c [in] The fence array to wait
+ * \param fence_count - \c [in] The fence count
+ * \param wait_all - \c [in] If true, wait for all fences to be signaled;
+ *			     otherwise, wait for at least one fence
+ * \param timeout_ns - \c [in] The timeout to wait, in nanoseconds
+ * \param status - \c [out] '1' for signaled, '0' for timeout
+ * \param first - \c [out] the index of the first signaled fence from @fences
+ *
+ * \return 0 on success
+ * <0 - Negative POSIX Error code
+ *
+ * \note Currently it supports only one amdgpu_device. All fences come from
+ * the same amdgpu_device with the same fd.
+*/
+int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
+ uint32_t fence_count,
+ bool wait_all,
+ uint64_t timeout_ns,
+ uint32_t *status, uint32_t *first);
+
+/*
+ * Query / Info API
+ *
+*/
+
+/**
+ * Query allocation size alignments
+ *
+ * The UMD should query the GPU VM MC size alignment requirements
+ * to be able to correctly choose the required allocation size and implement
+ * internal optimizations if needed.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get size alignment
+ * requirements
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
+ struct amdgpu_buffer_size_alignments
+ *info);
+
+/**
+ * Query firmware versions
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param fw_type - \c [in] AMDGPU_INFO_FW_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param index - \c [in] Index of the engine. (for SDMA and MEC)
+ * \param version - \c [out] Pointer to the "version" return value
+ * \param feature - \c [out] Pointer to the "feature" return value
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
+ unsigned ip_instance, unsigned index,
+ uint32_t *version, uint32_t *feature);
+
+/**
+ * Query the number of HW IP instances of a certain type.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param count - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
+ uint32_t *count);
+
+/**
+ * Query engine information
+ *
+ * This query allows the UMD to obtain information about the different
+ * engines and their capabilities.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type.
+ * \param info - \c [out] Pointer to structure to get information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+*/
+int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
+ unsigned ip_instance,
+ struct drm_amdgpu_info_hw_ip *info);
+
+/**
+ * Query heap information
+ *
+ * This query allows the UMD to query potentially available memory resources
+ * and adjust its logic if necessary.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param heap - \c [in] Heap type
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
+ uint32_t flags, struct amdgpu_heap_info *info);
+
+/**
+ * Get the CRTC ID from the mode object ID
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param id - \c [in] Mode object ID
+ * \param result - \c [out] Pointer to the CRTC ID
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
+ int32_t *result);
+
+/**
+ * Query GPU H/w Info
+ *
+ * Query hardware specific information
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [out] Pointer to structure to get needed information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gpu_info(amdgpu_device_handle dev,
+ struct amdgpu_gpu_info *info);
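+
+/* Query sketch (illustrative; printf() is from stdio.h):
+ *
+ * \code
+ * struct amdgpu_gpu_info gi;
+ * if (amdgpu_query_gpu_info(dev, &gi) == 0)
+ *         printf("family 0x%x, %u shader engines\n",
+ *                gi.family_id, gi.num_shader_engines);
+ * \endcode
+ */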
+
+/**
+ * Query hardware or driver information.
+ *
+ * The return size is query-specific and depends on the "info_id" parameter.
+ * No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info_id - \c [in] AMDGPU_INFO_*
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
+ unsigned size, void *value);
+
+/**
+ * Query hardware or driver information.
+ *
+ * The return size is query-specific and depends on the "info_id" parameter.
+ * No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param info - \c [in] amdgpu_sw_info_*
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
+ void *value);
+
+/**
+ * Query information about GDS
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param gds_info - \c [out] Pointer to structure to get GDS information
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_gds_info(amdgpu_device_handle dev,
+ struct amdgpu_gds_resource_info *gds_info);
+
+/**
+ * Query information about sensor.
+ *
+ * The return size is query-specific and depends on the "sensor_type"
+ * parameter. No more than "size" bytes is returned.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param sensor_type - \c [in] AMDGPU_INFO_SENSOR_*
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
+ unsigned size, void *value);
+
+/**
+ * Query information about video capabilities
+ *
+ * The returned size is sizeof(struct drm_amdgpu_info_video_caps).
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param cap_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+ unsigned size, void *value);
+
+/**
+ * Read a set of consecutive memory-mapped registers.
+ * Not all registers are allowed to be read by userspace.
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param dword_offset - \c [in] Register offset in dwords
+ * \param count - \c [in] The number of registers to read starting
+ * from the offset
+ * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other
+ * uses. Set it to 0xffffffff if unsure.
+ * \param flags - \c [in] Flags with additional information.
+ * \param values - \c [out] The pointer to return values.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX error code
+ *
+*/
+int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
+ unsigned count, uint32_t instance, uint32_t flags,
+ uint32_t *values);
+
+/**
+ * Flag to request VA address range in the 32bit address space
+*/
+#define AMDGPU_VA_RANGE_32_BIT 0x1
+#define AMDGPU_VA_RANGE_HIGH 0x2
+#define AMDGPU_VA_RANGE_REPLAYABLE 0x4
+
+/**
+ * Allocate virtual address range
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param va_range_type - \c [in] Type of MC va range from which to allocate
+ * \param size - \c [in] Size of range. The size must be correctly aligned.
+ * It is the client's responsibility to correctly align the size based on the
+ * future usage of the allocated range.
+ * \param va_base_alignment - \c [in] Overwrite base address alignment
+ * requirement for GPU VM MC virtual
+ * address assignment. Must be a multiple of the size alignments received as
+ * 'amdgpu_buffer_size_alignments'.
+ * If 0, use the default one.
+ * \param va_base_required - \c [in] Specified required va base address.
+ * If 0, then the library chooses an available one.
+ * If a non-zero value is passed and that value is already "in use", a
+ * corresponding error status will be returned.
+ * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
+ * by client.
+ * \param va_range_handle - \c [out] On return: Handle assigned to allocation
+ * \param flags - \c [in] flags for special VA range
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \notes \n
+ * It is the client's responsibility to correctly handle VA assignments and
+ * usage. Neither the kernel driver nor libdrm_amdgpu is able to prevent or
+ * detect wrong va assignments.
+ *
+ * It is the client's responsibility to correctly handle multi-GPU cases and
+ * to pass the corresponding arrays of all device handles where the
+ * corresponding VA will be used.
+ *
+*/
+int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range va_range_type,
+ uint64_t size,
+ uint64_t va_base_alignment,
+ uint64_t va_base_required,
+ uint64_t *va_base_allocated,
+ amdgpu_va_handle *va_range_handle,
+ uint64_t flags);
+
+/**
+ * Free previously allocated virtual address range
+ *
+ *
+ * \param va_range_handle - \c [in] Handle assigned to VA allocation
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);
+
+/**
+* Query virtual address range
+*
+* UMD can query GPU VM range supported by each device
+* to initialize its own VAM accordingly.
+*
+* \param dev - [in] Device handle. See #amdgpu_device_initialize()
+* \param type - \c [in] Type of virtual address range
+* \param start - \c [out] Start of the virtual address range
+* \param end - \c [out] End of the virtual address range
+*
+* \return 0 on success\n
+* <0 - Negative POSIX Error code
+*
+*/
+
+int amdgpu_va_range_query(amdgpu_device_handle dev,
+ enum amdgpu_gpu_va_range type,
+ uint64_t *start,
+ uint64_t *end);
+
+/**
+ * VA mapping/unmapping for the buffer object
+ *
+ * \param bo - \c [in] BO handle
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
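+
+/* VA mapping sketch (illustrative): allocate a VA range, then map the BO
+ * into it. AMDGPU_VA_OP_MAP comes from amdgpu_drm.h; "size" is assumed to
+ * be the BO's correctly aligned size.
+ *
+ * \code
+ * uint64_t va;
+ * amdgpu_va_handle va_handle;
+ * if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
+ *                           0, 0, &va, &va_handle, 0) == 0 &&
+ *     amdgpu_bo_va_op(bo, 0, size, va, 0, AMDGPU_VA_OP_MAP) == 0) {
+ *         // the BO is now GPU-visible at va
+ * }
+ * \endcode
+ */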
+
+/**
+ * VA mapping/unmapping for a buffer object or PRT region.
+ *
+ * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
+ * parameters are treated "raw", i.e. size is not automatically aligned, and
+ * all flags must be specified explicitly.
+ *
+ * \param dev - \c [in] device handle
+ * \param bo - \c [in] BO handle (may be NULL)
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
+ amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
+
+/**
+ * create semaphore
+ *
+ * \param sem - \c [out] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem);
+
+/**
+ * signal semaphore
+ *
+ * \param context - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * wait semaphore
+ *
+ * \param context - \c [in] GPU Context
+ * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
+ * \param ip_instance - \c [in] Index of the IP block of the same type
+ * \param ring - \c [in] Specify ring index of the IP
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
+ uint32_t ip_type,
+ uint32_t ip_instance,
+ uint32_t ring,
+ amdgpu_semaphore_handle sem);
+
+/**
+ * destroy semaphore
+ *
+ * \param sem - \c [in] semaphore handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
+
+/**
+ * Get the ASIC marketing name
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ *
+ * \return A constant string containing the marketing name;
+ *	   NULL means the ASIC is not found
+*/
+const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
+
+/**
+ * Create kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] flags that affect creation
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
+ uint32_t flags,
+ uint32_t *syncobj);
+
+/**
+ * Create kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
+ uint32_t *syncobj);
+/**
+ * Destroy kernel sync object
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj);
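+
+/* Syncobj lifecycle sketch (illustrative), assuming a device handle "dev":
+ *
+ * \code
+ * uint32_t syncobj;
+ * if (amdgpu_cs_create_syncobj(dev, &syncobj) == 0) {
+ *         // ... attach fences via submissions or imports ...
+ *         amdgpu_cs_destroy_syncobj(dev, syncobj);
+ * }
+ * \endcode
+ */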
+
+/**
+ * Reset kernel sync objects to unsignalled state.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
+ const uint32_t *syncobjs, uint32_t syncobj_count);
+
+/**
+ * Signal kernel sync objects.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs, uint32_t syncobj_count);
+
+/**
+ * Signal kernel timeline sync objects.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobjs - \c [in] array of sync object handles
+ * \param points - \c [in] array of timeline points
+ * \param syncobj_count - \c [in] number of handles in syncobjs
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
+ const uint32_t *syncobjs,
+ uint64_t *points,
+ uint32_t syncobj_count);
+
+/**
+ * Wait for one or all sync objects to signal.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param num_handles - \c [in] self-explanatory
+ * \param timeout_nsec - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
+ * \param first_signaled - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
+ uint32_t *handles, unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+
+/**
+ * Wait for one or all sync objects on their points to signal.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [in] array of sync points to wait
+ * \param num_handles - \c [in] self-explanatory
+ * \param timeout_nsec - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
+ * \param first_signaled - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+/**
+ * Query sync objects payloads.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [out] array of sync points returned, which represent
+ * the syncobj payloads.
+ * \param num_handles - \c [in] self-explanatory
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles);
+/**
+ * Query sync objects last signaled or submitted point.
+ *
+ * \param dev - \c [in] self-explanatory
+ * \param handles - \c [in] array of sync object handles
+ * \param points - \c [out] array of sync points returned, which represent
+ * the syncobj payloads.
+ * \param num_handles - \c [in] self-explanatory
+ * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_QUERY_FLAGS_*
+ *
+ * \return 0 on success\n
+ * -ETIME - Timeout
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
+ uint32_t *handles, uint64_t *points,
+ unsigned num_handles, uint32_t flags);
+
+/**
+ * Export kernel sync object to shareable fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param shared_fd - \c [out] shared file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *shared_fd);
+/**
+ * Import kernel sync object from shareable fd.
+ *
+ * \param dev - \c [in] device handle
+ * \param shared_fd - \c [in] shared file descriptor.
+ * \param syncobj - \c [out] sync object handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
+ int shared_fd,
+ uint32_t *syncobj);
+
+/**
+ * Export kernel sync object to a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param sync_file_fd - \c [out] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *sync_file_fd);
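+
+/* sync_file interop sketch (illustrative): hand a syncobj's current fence to
+ * another API as a sync_file fd. close() is from unistd.h; "syncobj" is the
+ * handle from the lifecycle sketch above.
+ *
+ * \code
+ * int sf_fd;
+ * if (amdgpu_cs_syncobj_export_sync_file(dev, syncobj, &sf_fd) == 0) {
+ *         // ... pass sf_fd to e.g. EGL or V4L2 ...
+ *         close(sf_fd);
+ * }
+ * \endcode
+ */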
+
+/**
+ * Import kernel sync object from a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param sync_file_fd - \c [in] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int sync_file_fd);
+/**
+ * Export kernel timeline sync object to a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param point - \c [in] timeline point
+ * \param flags - \c [in] flags
+ * \param sync_file_fd - \c [out] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ uint32_t flags,
+ int *sync_file_fd);
+
+/**
+ * Import kernel timeline sync object from a sync_file.
+ *
+ * \param dev - \c [in] device handle
+ * \param syncobj - \c [in] sync object handle
+ * \param point - \c [in] timeline point
+ * \param sync_file_fd - \c [in] sync_file file descriptor.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ uint64_t point,
+ int sync_file_fd);
+
+/**
+ * Transfer between syncobjs.
+ *
+ * \param dev - \c [in] device handle
+ * \param dst_handle - \c [in] sync object handle
+ * \param dst_point - \c [in] timeline point, 0 means dst is a binary syncobj
+ * \param src_handle - \c [in] sync object handle
+ * \param src_point - \c [in] timeline point, 0 means src is a binary syncobj
+ * \param flags - \c [in] flags
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
+ uint32_t dst_handle,
+ uint64_t dst_point,
+ uint32_t src_handle,
+ uint64_t src_point,
+ uint32_t flags);
+
+/**
+ * Export an amdgpu fence as a handle (syncobj or fd).
+ *
+ * \param what AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD}
+ * \param out_handle returned handle
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
+ struct amdgpu_cs_fence *fence,
+ uint32_t what,
+ uint32_t *out_handle);
+
+/**
+ * Submit raw command submission to kernel
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param bo_list_handle - \c [in] request bo list handle (0 for none)
+ * \param num_chunks - \c [in] number of CS chunks to submit
+ * \param chunks - \c [in] array of CS chunks
+ * \param seq_no - \c [out] output sequence number for submission.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ */
+struct drm_amdgpu_cs_chunk;
+struct drm_amdgpu_cs_chunk_dep;
+struct drm_amdgpu_cs_chunk_data;
+
+int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ amdgpu_bo_list_handle bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no);
+
+/**
+ * Submit raw command submission to the kernel with a raw BO list handle.
+ *
+ * \param dev - \c [in] device handle
+ * \param context - \c [in] context handle for context id
+ * \param bo_list_handle - \c [in] raw bo list handle (0 for none)
+ * \param num_chunks - \c [in] number of CS chunks to submit
+ * \param chunks - \c [in] array of CS chunks
+ * \param seq_no - \c [out] output sequence number for submission.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
+ */
+int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
+ amdgpu_context_handle context,
+ uint32_t bo_list_handle,
+ int num_chunks,
+ struct drm_amdgpu_cs_chunk *chunks,
+ uint64_t *seq_no);
+
+void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
+ struct drm_amdgpu_cs_chunk_dep *dep);
+void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
+ struct drm_amdgpu_cs_chunk_data *data);
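+
+/*
+ * Illustrative usage sketch (not part of the header): a single-IB
+ * submission through the raw chunk interface. `dev`, `context` and a
+ * GPU-visible indirect buffer at `ib_va` of `ib_bytes` bytes are assumed
+ * to already exist; the chunk definitions live in amdgpu_drm.h.
+ *
+ *   struct drm_amdgpu_cs_chunk_ib ib = {
+ *           .va_start = ib_va,
+ *           .ib_bytes = ib_bytes,
+ *           .ip_type = AMDGPU_HW_IP_GFX,
+ *   };
+ *   struct drm_amdgpu_cs_chunk chunk = {
+ *           .chunk_id = AMDGPU_CHUNK_ID_IB,
+ *           .length_dw = sizeof(ib) / 4,
+ *           .chunk_data = (uintptr_t)&ib,
+ *   };
+ *   uint64_t seq_no;
+ *   int r = amdgpu_cs_submit_raw2(dev, context, 0 /* no BO list */,
+ *                                 1, &chunk, &seq_no);
+ */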
+
+/**
+ * Reserve VMID
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] TBD
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
+
+/**
+ * Free reserved VMID
+ * \param dev - \c [in] device handle
+ * \param flags - \c [in] TBD
+ *
+ * \return 0 on success otherwise POSIX Error code
+*/
+int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* #ifdef _AMDGPU_H_ */
diff --git a/include/libdrm/libdrm/amdgpu_drm.h b/include/libdrm/libdrm/amdgpu_drm.h
new file mode 100644
index 0000000..c0a0ad1
--- /dev/null
+++ b/include/libdrm/libdrm/amdgpu_drm.h
@@ -0,0 +1,1161 @@
+/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __AMDGPU_DRM_H__
+#define __AMDGPU_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_AMDGPU_GEM_CREATE 0x00
+#define DRM_AMDGPU_GEM_MMAP 0x01
+#define DRM_AMDGPU_CTX 0x02
+#define DRM_AMDGPU_BO_LIST 0x03
+#define DRM_AMDGPU_CS 0x04
+#define DRM_AMDGPU_INFO 0x05
+#define DRM_AMDGPU_GEM_METADATA 0x06
+#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
+#define DRM_AMDGPU_GEM_VA 0x08
+#define DRM_AMDGPU_WAIT_CS 0x09
+#define DRM_AMDGPU_GEM_OP 0x10
+#define DRM_AMDGPU_GEM_USERPTR 0x11
+#define DRM_AMDGPU_WAIT_FENCES 0x12
+#define DRM_AMDGPU_VM 0x13
+#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
+#define DRM_AMDGPU_SCHED 0x15
+
+#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
+#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
+#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
+#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
+#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
+#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
+#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
+#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
+#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
+#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
+#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
+#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
+#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
+#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
+#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
+ * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
+ * pages of system memory, allowing the GPU to access system memory in a
+ * linearized fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
+#define AMDGPU_GEM_DOMAIN_CPU 0x1
+#define AMDGPU_GEM_DOMAIN_GTT 0x2
+#define AMDGPU_GEM_DOMAIN_VRAM 0x4
+#define AMDGPU_GEM_DOMAIN_GDS 0x8
+#define AMDGPU_GEM_DOMAIN_GWS 0x10
+#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
+ AMDGPU_GEM_DOMAIN_GTT | \
+ AMDGPU_GEM_DOMAIN_VRAM | \
+ AMDGPU_GEM_DOMAIN_GDS | \
+ AMDGPU_GEM_DOMAIN_GWS | \
+ AMDGPU_GEM_DOMAIN_OA)
+
+/* Flag that CPU access will be required for the case of VRAM domain */
+#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
+/* Flag that CPU access will not work, this VRAM domain is invisible */
+#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
+/* Flag that USWC attributes should be used for GTT */
+#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
+/* Flag that the memory should be in VRAM and cleared */
+#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
+/* Flag that allocating the BO should use linear VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
+/* Flag that BO is always valid in this VM */
+#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
+/* Flag that BO sharing will be explicitly synchronized */
+#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
+/* Flag that indicates allocating MQD gart on GFX9, where the mtype
+ * for the second page onward should be set to NC. It should never
+ * be used by user space applications.
+ */
+#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
+/* Flag that BO may contain sensitive data that must be wiped before
+ * releasing the memory
+ */
+#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
+/* Flag that BO will be encrypted and that the TMZ bit should be
+ * set in the PTEs when mapping this buffer via GPUVM or
+ * accessing it with various hw blocks
+ */
+#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
+
+struct drm_amdgpu_gem_create_in {
+ /** the requested memory size */
+ __u64 bo_size;
+ /** physical start_addr alignment in bytes for some HW requirements */
+ __u64 alignment;
+ /** the requested memory domains */
+ __u64 domains;
+ /** allocation flags */
+ __u64 domain_flags;
+};
+
+struct drm_amdgpu_gem_create_out {
+ /** returned GEM object handle */
+ __u32 handle;
+ __u32 _pad;
+};
+
+union drm_amdgpu_gem_create {
+ struct drm_amdgpu_gem_create_in in;
+ struct drm_amdgpu_gem_create_out out;
+};
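+
+/*
+ * Illustrative usage sketch (not part of the header): allocating a 1 MiB,
+ * page-aligned, write-combined GTT buffer with the raw ioctl. `fd` is
+ * assumed to be an open amdgpu render node; drmIoctl() comes from
+ * xf86drm.h.
+ *
+ *   union drm_amdgpu_gem_create args = {0};
+ *   args.in.bo_size = 1024 * 1024;
+ *   args.in.alignment = 4096;
+ *   args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+ *   args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ *   if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
+ *           use_handle(args.out.handle);  /* hypothetical consumer */
+ */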
+
+/** Opcode to create new residency list. */
+#define AMDGPU_BO_LIST_OP_CREATE 0
+/** Opcode to destroy previously created residency list */
+#define AMDGPU_BO_LIST_OP_DESTROY 1
+/** Opcode to update resource information in the list */
+#define AMDGPU_BO_LIST_OP_UPDATE 2
+
+struct drm_amdgpu_bo_list_in {
+ /** Type of operation */
+ __u32 operation;
+ /** Handle of list or 0 if we want to create one */
+ __u32 list_handle;
+ /** Number of BOs in list */
+ __u32 bo_number;
+ /** Size of each element describing BO */
+ __u32 bo_info_size;
+ /** Pointer to array describing BOs */
+ __u64 bo_info_ptr;
+};
+
+struct drm_amdgpu_bo_list_entry {
+ /** Handle of BO */
+ __u32 bo_handle;
+ /** New (if specified) BO priority to be used during migration */
+ __u32 bo_priority;
+};
+
+struct drm_amdgpu_bo_list_out {
+ /** Handle of resource list */
+ __u32 list_handle;
+ __u32 _pad;
+};
+
+union drm_amdgpu_bo_list {
+ struct drm_amdgpu_bo_list_in in;
+ struct drm_amdgpu_bo_list_out out;
+};
+
+/* context related */
+#define AMDGPU_CTX_OP_ALLOC_CTX 1
+#define AMDGPU_CTX_OP_FREE_CTX 2
+#define AMDGPU_CTX_OP_QUERY_STATE 3
+#define AMDGPU_CTX_OP_QUERY_STATE2 4
+#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
+#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
+
+/* GPU reset status */
+#define AMDGPU_CTX_NO_RESET 0
+/* this context caused it */
+#define AMDGPU_CTX_GUILTY_RESET 1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET 2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET 3
+
+/* indicates a GPU reset occurred after the context was created */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
+/* indicates VRAM was lost after the context was created */
+#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
+/* indicates that a job from this context once caused a GPU hang */
+#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
+/* indicates errors detected by RAS (correctable/uncorrectable below) */
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
+
+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_UNSET -2048
+#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
+#define AMDGPU_CTX_PRIORITY_LOW -512
+#define AMDGPU_CTX_PRIORITY_NORMAL 0
+/*
+ * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
+ * CAP_SYS_NICE or DRM_MASTER
+*/
+#define AMDGPU_CTX_PRIORITY_HIGH 512
+#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
+
+/* select a stable profiling pstate for perfmon tools */
+#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
+#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
+#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
+#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
+
+struct drm_amdgpu_ctx_in {
+ /** AMDGPU_CTX_OP_* */
+ __u32 op;
+ /** Flags */
+ __u32 flags;
+ __u32 ctx_id;
+ /** AMDGPU_CTX_PRIORITY_* */
+ __s32 priority;
+};
+
+union drm_amdgpu_ctx_out {
+ struct {
+ __u32 ctx_id;
+ __u32 _pad;
+ } alloc;
+
+ struct {
+ /** For future use, no flags defined so far */
+ __u64 flags;
+ /** Number of resets caused by this context so far. */
+ __u32 hangs;
+ /** Reset status since the last call of the ioctl. */
+ __u32 reset_status;
+ } state;
+
+ struct {
+ __u32 flags;
+ __u32 _pad;
+ } pstate;
+};
+
+union drm_amdgpu_ctx {
+ struct drm_amdgpu_ctx_in in;
+ union drm_amdgpu_ctx_out out;
+};
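+
+/*
+ * Illustrative usage sketch (not part of the header): allocating a GPU
+ * context at normal priority with the raw ioctl. `fd` is assumed to be
+ * an open amdgpu device fd.
+ *
+ *   union drm_amdgpu_ctx ctx = {0};
+ *   ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+ *   ctx.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
+ *   if (drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx) == 0)
+ *           ctx_id = ctx.out.alloc.ctx_id;
+ */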
+
+/* vm ioctl */
+#define AMDGPU_VM_OP_RESERVE_VMID 1
+#define AMDGPU_VM_OP_UNRESERVE_VMID 2
+
+struct drm_amdgpu_vm_in {
+ /** AMDGPU_VM_OP_* */
+ __u32 op;
+ __u32 flags;
+};
+
+struct drm_amdgpu_vm_out {
+ /** For future use, no flags defined so far */
+ __u64 flags;
+};
+
+union drm_amdgpu_vm {
+ struct drm_amdgpu_vm_in in;
+ struct drm_amdgpu_vm_out out;
+};
+
+/* sched ioctl */
+#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
+#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2
+
+struct drm_amdgpu_sched_in {
+ /* AMDGPU_SCHED_OP_* */
+ __u32 op;
+ __u32 fd;
+ /** AMDGPU_CTX_PRIORITY_* */
+ __s32 priority;
+ __u32 ctx_id;
+};
+
+union drm_amdgpu_sched {
+ struct drm_amdgpu_sched_in in;
+};
+
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons and have a fallback path that does not use userptr
+ * to perform any operation.
+ */
+#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
+#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
+#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
+#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
+
+struct drm_amdgpu_gem_userptr {
+ __u64 addr;
+ __u64 size;
+ /* AMDGPU_GEM_USERPTR_* */
+ __u32 flags;
+ /* Resulting GEM handle */
+ __u32 handle;
+};
+
+/* SI-CI-VI: */
+/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
+#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
+#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
+#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
+#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
+#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
+#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
+#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
+#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
+#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
+#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
+#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
+#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
+#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
+#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
+
+/* GFX9 and later: */
+#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
+#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT 63
+#define AMDGPU_TILING_SCANOUT_MASK 0x1
+
+/* Set/Get helpers for tiling flags. */
+#define AMDGPU_TILING_SET(field, value) \
+ (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
+#define AMDGPU_TILING_GET(value, field) \
+ (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
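+
+/*
+ * Illustrative usage sketch: packing and unpacking GFX9+ tiling flags
+ * with the helpers above. `swizzle` is a hypothetical swizzle-mode value.
+ *
+ *   __u64 tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle) |
+ *                  AMDGPU_TILING_SET(SCANOUT, 1);
+ *   __u64 mode = AMDGPU_TILING_GET(tiling, SWIZZLE_MODE);
+ */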
+
+#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
+#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
+
+/** The same structure is shared for input/output */
+struct drm_amdgpu_gem_metadata {
+ /** GEM Object handle */
+ __u32 handle;
+ /** Do we want get or set metadata */
+ __u32 op;
+ struct {
+ /** For future use, no flags defined so far */
+ __u64 flags;
+ /** family specific tiling info */
+ __u64 tiling_info;
+ __u32 data_size_bytes;
+ __u32 data[64];
+ } data;
+};
+
+struct drm_amdgpu_gem_mmap_in {
+ /** the GEM object handle */
+ __u32 handle;
+ __u32 _pad;
+};
+
+struct drm_amdgpu_gem_mmap_out {
+ /** mmap offset from the vma offset manager */
+ __u64 addr_ptr;
+};
+
+union drm_amdgpu_gem_mmap {
+ struct drm_amdgpu_gem_mmap_in in;
+ struct drm_amdgpu_gem_mmap_out out;
+};
+
+struct drm_amdgpu_gem_wait_idle_in {
+ /** GEM object handle */
+ __u32 handle;
+ /** For future use, no flags defined so far */
+ __u32 flags;
+ /** Absolute timeout to wait */
+ __u64 timeout;
+};
+
+struct drm_amdgpu_gem_wait_idle_out {
+ /** BO status: 0 - BO is idle, 1 - BO is busy */
+ __u32 status;
+ /** Returned current memory domain */
+ __u32 domain;
+};
+
+union drm_amdgpu_gem_wait_idle {
+ struct drm_amdgpu_gem_wait_idle_in in;
+ struct drm_amdgpu_gem_wait_idle_out out;
+};
+
+struct drm_amdgpu_wait_cs_in {
+ /* Command submission handle
+ * handle equals 0 means none to wait for
+ * handle equals ~0ull means wait for the latest sequence number
+ */
+ __u64 handle;
+ /** Absolute timeout to wait */
+ __u64 timeout;
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u32 ctx_id;
+};
+
+struct drm_amdgpu_wait_cs_out {
+ /** CS status: 0 - CS completed, 1 - CS still busy */
+ __u64 status;
+};
+
+union drm_amdgpu_wait_cs {
+ struct drm_amdgpu_wait_cs_in in;
+ struct drm_amdgpu_wait_cs_out out;
+};
+
+struct drm_amdgpu_fence {
+ __u32 ctx_id;
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+	/** Pointer to an array of struct drm_amdgpu_fence to wait on */
+ __u64 fences;
+ __u32 fence_count;
+ __u32 wait_all;
+ __u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+ __u32 status;
+ __u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+ struct drm_amdgpu_wait_fences_in in;
+ struct drm_amdgpu_wait_fences_out out;
+};
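+
+/*
+ * Illustrative usage sketch (not part of the header): waiting up to one
+ * second for any of `n` fences. `fence_array` is assumed to point at `n`
+ * struct drm_amdgpu_fence entries.
+ *
+ *   union drm_amdgpu_wait_fences wait = {0};
+ *   wait.in.fences = (uintptr_t)fence_array;
+ *   wait.in.fence_count = n;
+ *   wait.in.wait_all = 0;                /* any one fence is enough */
+ *   wait.in.timeout_ns = 1000000000ull;  /* 1 second */
+ *   if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wait) == 0)
+ *           idx = wait.out.first_signaled;
+ */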
+
+#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
+#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+
+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+ /** GEM object handle */
+ __u32 handle;
+ /** AMDGPU_GEM_OP_* */
+ __u32 op;
+ /** Input or return value */
+ __u64 value;
+};
+
+#define AMDGPU_VA_OP_MAP 1
+#define AMDGPU_VA_OP_UNMAP 2
+#define AMDGPU_VA_OP_CLEAR 3
+#define AMDGPU_VA_OP_REPLACE 4
+
+/* Delay the page table update till the next CS */
+#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
+
+/* Mapping flags */
+/* readable mapping */
+#define AMDGPU_VM_PAGE_READABLE (1 << 1)
+/* writable mapping */
+#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
+/* executable mapping, new for VI */
+#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
+/* partially resident texture */
+#define AMDGPU_VM_PAGE_PRT (1 << 4)
+/* MTYPE flags use bit 5 to 8 */
+#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
+/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
+#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
+/* Use Non Coherent MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_NC (1 << 5)
+/* Use Write Combine MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_WC (2 << 5)
+/* Use Cache Coherent MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_CC (3 << 5)
+/* Use UnCached MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_UC (4 << 5)
+/* Use Read Write MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_RW (5 << 5)
+
+struct drm_amdgpu_gem_va {
+ /** GEM object handle */
+ __u32 handle;
+ __u32 _pad;
+ /** AMDGPU_VA_OP_* */
+ __u32 operation;
+ /** AMDGPU_VM_PAGE_* */
+ __u32 flags;
+ /** va address to assign . Must be correctly aligned.*/
+ __u64 va_address;
+ /** Specify offset inside of BO to assign. Must be correctly aligned.*/
+ __u64 offset_in_bo;
+ /** Specify mapping size. Must be correctly aligned. */
+ __u64 map_size;
+};
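+
+/*
+ * Illustrative usage sketch (not part of the header): mapping a whole BO
+ * read/write into the GPU virtual address space. `bo_handle`, `va` and
+ * `bo_size` are assumed to satisfy the alignment requirements above.
+ *
+ *   struct drm_amdgpu_gem_va va_args = {0};
+ *   va_args.handle = bo_handle;
+ *   va_args.operation = AMDGPU_VA_OP_MAP;
+ *   va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
+ *   va_args.va_address = va;
+ *   va_args.map_size = bo_size;
+ *   drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va_args);
+ */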
+
+#define AMDGPU_HW_IP_GFX 0
+#define AMDGPU_HW_IP_COMPUTE 1
+#define AMDGPU_HW_IP_DMA 2
+#define AMDGPU_HW_IP_UVD 3
+#define AMDGPU_HW_IP_VCE 4
+#define AMDGPU_HW_IP_UVD_ENC 5
+#define AMDGPU_HW_IP_VCN_DEC 6
+/*
+ * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
+ * both encoding and decoding jobs.
+ */
+#define AMDGPU_HW_IP_VCN_ENC 7
+#define AMDGPU_HW_IP_VCN_JPEG 8
+#define AMDGPU_HW_IP_NUM 9
+
+#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
+
+#define AMDGPU_CHUNK_ID_IB 0x01
+#define AMDGPU_CHUNK_ID_FENCE 0x02
+#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
+#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
+#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
+
+struct drm_amdgpu_cs_chunk {
+ __u32 chunk_id;
+ __u32 length_dw;
+ __u64 chunk_data;
+};
+
+struct drm_amdgpu_cs_in {
+ /** Rendering context id */
+ __u32 ctx_id;
+ /** Handle of resource list associated with CS */
+ __u32 bo_list_handle;
+ __u32 num_chunks;
+ __u32 flags;
+	/** This points to an array of __u64 values, each pointing to a CS chunk */
+ __u64 chunks;
+};
+
+struct drm_amdgpu_cs_out {
+ __u64 handle;
+};
+
+union drm_amdgpu_cs {
+ struct drm_amdgpu_cs_in in;
+ struct drm_amdgpu_cs_out out;
+};
+
+/* Specify flags to be used for IB */
+
+/* This IB should be submitted to CE */
+#define AMDGPU_IB_FLAG_CE (1<<0)
+
+/* Preamble flag, which means the IB could be dropped if no context switch */
+#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
+
+/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
+#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
+
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
+/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
+ * This will reset wave ID counters for the IB.
+ */
+#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+
+/* Flag the IB as secure (TMZ)
+ */
+#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
+
+/* Tell KMD to flush and invalidate caches
+ */
+#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)
+
+struct drm_amdgpu_cs_chunk_ib {
+ __u32 _pad;
+ /** AMDGPU_IB_FLAG_* */
+ __u32 flags;
+ /** Virtual address to begin IB execution */
+ __u64 va_start;
+ /** Size of submission */
+ __u32 ib_bytes;
+ /** HW IP to submit to */
+ __u32 ip_type;
+ /** HW IP index of the same type to submit to */
+ __u32 ip_instance;
+ /** Ring index to submit to */
+ __u32 ring;
+};
+
+struct drm_amdgpu_cs_chunk_dep {
+ __u32 ip_type;
+ __u32 ip_instance;
+ __u32 ring;
+ __u32 ctx_id;
+ __u64 handle;
+};
+
+struct drm_amdgpu_cs_chunk_fence {
+ __u32 handle;
+ __u32 offset;
+};
+
+struct drm_amdgpu_cs_chunk_sem {
+ __u32 handle;
+};
+
+struct drm_amdgpu_cs_chunk_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
+
+union drm_amdgpu_fence_to_handle {
+ struct {
+ struct drm_amdgpu_fence fence;
+ __u32 what;
+ __u32 pad;
+ } in;
+ struct {
+ __u32 handle;
+ } out;
+};
+
+struct drm_amdgpu_cs_chunk_data {
+ union {
+ struct drm_amdgpu_cs_chunk_ib ib_data;
+ struct drm_amdgpu_cs_chunk_fence fence_data;
+ };
+};
+
+/*
+ * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
+ *
+ */
+#define AMDGPU_IDS_FLAGS_FUSION 0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
+#define AMDGPU_IDS_FLAGS_TMZ 0x4
+
+/* indicate if acceleration can be working */
+#define AMDGPU_INFO_ACCEL_WORKING 0x00
+/* get the crtc_id from the mode object id? */
+#define AMDGPU_INFO_CRTC_FROM_ID 0x01
+/* query hw IP info */
+#define AMDGPU_INFO_HW_IP_INFO 0x02
+/* query hw IP instance count for the specified type */
+#define AMDGPU_INFO_HW_IP_COUNT 0x03
+/* timestamp for GL_ARB_timer_query */
+#define AMDGPU_INFO_TIMESTAMP 0x05
+/* Query the firmware version */
+#define AMDGPU_INFO_FW_VERSION 0x0e
+ /* Subquery id: Query VCE firmware version */
+ #define AMDGPU_INFO_FW_VCE 0x1
+ /* Subquery id: Query UVD firmware version */
+ #define AMDGPU_INFO_FW_UVD 0x2
+ /* Subquery id: Query GMC firmware version */
+ #define AMDGPU_INFO_FW_GMC 0x03
+ /* Subquery id: Query GFX ME firmware version */
+ #define AMDGPU_INFO_FW_GFX_ME 0x04
+ /* Subquery id: Query GFX PFP firmware version */
+ #define AMDGPU_INFO_FW_GFX_PFP 0x05
+ /* Subquery id: Query GFX CE firmware version */
+ #define AMDGPU_INFO_FW_GFX_CE 0x06
+ /* Subquery id: Query GFX RLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC 0x07
+ /* Subquery id: Query GFX MEC firmware version */
+ #define AMDGPU_INFO_FW_GFX_MEC 0x08
+ /* Subquery id: Query SMC firmware version */
+ #define AMDGPU_INFO_FW_SMC 0x0a
+ /* Subquery id: Query SDMA firmware version */
+ #define AMDGPU_INFO_FW_SDMA 0x0b
+ /* Subquery id: Query PSP SOS firmware version */
+ #define AMDGPU_INFO_FW_SOS 0x0c
+ /* Subquery id: Query PSP ASD firmware version */
+ #define AMDGPU_INFO_FW_ASD 0x0d
+ /* Subquery id: Query VCN firmware version */
+ #define AMDGPU_INFO_FW_VCN 0x0e
+ /* Subquery id: Query GFX RLC SRLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+ /* Subquery id: Query GFX RLC SRLG firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+ /* Subquery id: Query GFX RLC SRLS firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
+ /* Subquery id: Query DMCU firmware version */
+ #define AMDGPU_INFO_FW_DMCU 0x12
+ #define AMDGPU_INFO_FW_TA 0x13
+ /* Subquery id: Query DMCUB firmware version */
+ #define AMDGPU_INFO_FW_DMCUB 0x14
+ /* Subquery id: Query TOC firmware version */
+ #define AMDGPU_INFO_FW_TOC 0x15
+
+/* number of bytes moved for TTM migration */
+#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
+/* the used VRAM size */
+#define AMDGPU_INFO_VRAM_USAGE 0x10
+/* the used GTT size */
+#define AMDGPU_INFO_GTT_USAGE 0x11
+/* Information about GDS, etc. resource configuration */
+#define AMDGPU_INFO_GDS_CONFIG 0x13
+/* Query information about VRAM and GTT domains */
+#define AMDGPU_INFO_VRAM_GTT 0x14
+/* Query information about registers in MMR address space */
+#define AMDGPU_INFO_READ_MMR_REG 0x15
+/* Query information about device: rev id, family, etc. */
+#define AMDGPU_INFO_DEV_INFO 0x16
+/* visible vram usage */
+#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS 0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY 0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
+/* Query vbios related information */
+#define AMDGPU_INFO_VBIOS 0x1B
+ /* Subquery id: Query vbios size */
+ #define AMDGPU_INFO_VBIOS_SIZE 0x1
+ /* Subquery id: Query vbios image */
+ #define AMDGPU_INFO_VBIOS_IMAGE 0x2
+ /* Subquery id: Query vbios info */
+ #define AMDGPU_INFO_VBIOS_INFO 0x3
+/* Query UVD handles */
+#define AMDGPU_INFO_NUM_HANDLES 0x1C
+/* Query sensor related information */
+#define AMDGPU_INFO_SENSOR 0x1D
+ /* Subquery id: Query GPU shader clock */
+ #define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
+ /* Subquery id: Query GPU memory clock */
+ #define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
+ /* Subquery id: Query GPU temperature */
+ #define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
+ /* Subquery id: Query GPU load */
+ #define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
+ /* Subquery id: Query average GPU power */
+ #define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
+ /* Subquery id: Query northbridge voltage */
+ #define AMDGPU_INFO_SENSOR_VDDNB 0x6
+ /* Subquery id: Query graphics voltage */
+ #define AMDGPU_INFO_SENSOR_VDDGFX 0x7
+ /* Subquery id: Query GPU stable pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
+ /* Subquery id: Query GPU stable pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
+/* Number of VRAM page faults on CPU access. */
+#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
+#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
+/* query ras mask of enabled features*/
+#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
+/* RAS MASK: UMC (VRAM) */
+#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
+/* RAS MASK: SDMA */
+#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
+/* RAS MASK: GFX */
+#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
+/* RAS MASK: MMHUB */
+#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
+/* RAS MASK: ATHUB */
+#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
+/* RAS MASK: PCIE */
+#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
+/* RAS MASK: HDP */
+#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
+/* RAS MASK: XGMI */
+#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
+/* RAS MASK: DF */
+#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
+/* RAS MASK: SMN */
+#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
+/* RAS MASK: SEM */
+#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
+/* RAS MASK: MP0 */
+#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
+/* RAS MASK: MP1 */
+#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
+/* RAS MASK: FUSE */
+#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
+
+#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
+#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
+#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
+#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
+
+struct drm_amdgpu_query_fw {
+ /** AMDGPU_INFO_FW_* */
+ __u32 fw_type;
+ /**
+ * Index of the IP if there are more IPs of
+ * the same type.
+ */
+ __u32 ip_instance;
+ /**
+ * Index of the engine. Whether this is used depends
+ * on the firmware type. (e.g. MEC, SDMA)
+ */
+ __u32 index;
+ __u32 _pad;
+};
+
+/* Input structure for the INFO ioctl */
+struct drm_amdgpu_info {
+ /* Where the return value will be stored */
+ __u64 return_pointer;
+ /* The size of the return value. Just like "size" in "snprintf",
+ * it limits how many bytes the kernel can write. */
+ __u32 return_size;
+ /* The query request id. */
+ __u32 query;
+
+ union {
+ struct {
+ __u32 id;
+ __u32 _pad;
+ } mode_crtc;
+
+ struct {
+ /** AMDGPU_HW_IP_* */
+ __u32 type;
+ /**
+ * Index of the IP if there are more IPs of the same
+ * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
+ */
+ __u32 ip_instance;
+ } query_hw_ip;
+
+ struct {
+ __u32 dword_offset;
+ /** number of registers to read */
+ __u32 count;
+ __u32 instance;
+ /** For future use, no flags defined so far */
+ __u32 flags;
+ } read_mmr_reg;
+
+ struct drm_amdgpu_query_fw query_fw;
+
+ struct {
+ __u32 type;
+ __u32 offset;
+ } vbios_info;
+
+ struct {
+ __u32 type;
+ } sensor_info;
+
+ struct {
+ __u32 type;
+ } video_cap;
+ };
+};
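+
+/*
+ * Illustrative usage sketch (not part of the header): querying the device
+ * information block. `fd` is assumed to be an open amdgpu fd; the kernel
+ * writes at most return_size bytes through return_pointer.
+ *
+ *   struct drm_amdgpu_info_device dev_info = {0};
+ *   struct drm_amdgpu_info request = {0};
+ *   request.return_pointer = (uintptr_t)&dev_info;
+ *   request.return_size = sizeof(dev_info);
+ *   request.query = AMDGPU_INFO_DEV_INFO;
+ *   drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
+ */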
+
+struct drm_amdgpu_info_gds {
+ /** GDS GFX partition size */
+ __u32 gds_gfx_partition_size;
+ /** GDS compute partition size */
+ __u32 compute_partition_size;
+ /** total GDS memory size */
+ __u32 gds_total_size;
+ /** GWS size per GFX partition */
+ __u32 gws_per_gfx_partition;
+	/** GWS size per compute partition */
+ __u32 gws_per_compute_partition;
+ /** OA size per GFX partition */
+ __u32 oa_per_gfx_partition;
+ /** OA size per compute partition */
+ __u32 oa_per_compute_partition;
+ __u32 _pad;
+};
+
+struct drm_amdgpu_info_vram_gtt {
+ __u64 vram_size;
+ __u64 vram_cpu_accessible_size;
+ __u64 gtt_size;
+};
+
+struct drm_amdgpu_heap_info {
+ /** max. physical memory */
+ __u64 total_heap_size;
+
+ /** Theoretical max. available memory in the given heap */
+ __u64 usable_heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+	 * total_heap_size.
+ */
+ __u64 heap_usage;
+
+ /**
+ * Theoretical possible max. size of buffer which
+ * could be allocated in the given heap
+ */
+ __u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+ struct drm_amdgpu_heap_info vram;
+ struct drm_amdgpu_heap_info cpu_accessible_vram;
+ struct drm_amdgpu_heap_info gtt;
+};
+
+struct drm_amdgpu_info_firmware {
+ __u32 ver;
+ __u32 feature;
+};
+
+struct drm_amdgpu_info_vbios {
+ __u8 name[64];
+ __u8 vbios_pn[64];
+ __u32 version;
+ __u32 pad;
+ __u8 vbios_ver_str[32];
+ __u8 date[32];
+};
+
+#define AMDGPU_VRAM_TYPE_UNKNOWN 0
+#define AMDGPU_VRAM_TYPE_GDDR1 1
+#define AMDGPU_VRAM_TYPE_DDR2 2
+#define AMDGPU_VRAM_TYPE_GDDR3 3
+#define AMDGPU_VRAM_TYPE_GDDR4 4
+#define AMDGPU_VRAM_TYPE_GDDR5 5
+#define AMDGPU_VRAM_TYPE_HBM 6
+#define AMDGPU_VRAM_TYPE_DDR3 7
+#define AMDGPU_VRAM_TYPE_DDR4 8
+#define AMDGPU_VRAM_TYPE_GDDR6 9
+#define AMDGPU_VRAM_TYPE_DDR5 10
+
+struct drm_amdgpu_info_device {
+ /** PCI Device ID */
+ __u32 device_id;
+	/** Internal chip revision (A0, A1, etc.) */
+ __u32 chip_rev;
+ __u32 external_rev;
+ /** Revision id in PCI Config space */
+ __u32 pci_rev;
+ __u32 family;
+ __u32 num_shader_engines;
+ __u32 num_shader_arrays_per_engine;
+ /* in KHz */
+ __u32 gpu_counter_freq;
+ __u64 max_engine_clock;
+ __u64 max_memory_clock;
+ /* cu information */
+ __u32 cu_active_number;
+ /* NOTE: cu_ao_mask is INVALID, DON'T use it */
+ __u32 cu_ao_mask;
+ __u32 cu_bitmap[4][4];
+ /** Render backend pipe mask. One render backend is CB+DB. */
+ __u32 enabled_rb_pipes_mask;
+ __u32 num_rb_pipes;
+ __u32 num_hw_gfx_contexts;
+ __u32 _pad;
+ __u64 ids_flags;
+ /** Starting virtual address for UMDs. */
+ __u64 virtual_address_offset;
+ /** The maximum virtual address */
+ __u64 virtual_address_max;
+ /** Required alignment of virtual addresses. */
+ __u32 virtual_address_alignment;
+ /** Page table entry - fragment size */
+ __u32 pte_fragment_size;
+ __u32 gart_page_size;
+ /** constant engine ram size*/
+ __u32 ce_ram_size;
+ /** video memory type info*/
+ __u32 vram_type;
+ /** video memory bit width*/
+ __u32 vram_bit_width;
+ /* vce harvesting instance */
+ __u32 vce_harvest_config;
+ /* gfx double offchip LDS buffers */
+ __u32 gc_double_offchip_lds_buf;
+ /* NGG Primitive Buffer */
+ __u64 prim_buf_gpu_addr;
+ /* NGG Position Buffer */
+ __u64 pos_buf_gpu_addr;
+ /* NGG Control Sideband */
+ __u64 cntl_sb_buf_gpu_addr;
+ /* NGG Parameter Cache */
+ __u64 param_buf_gpu_addr;
+ __u32 prim_buf_size;
+ __u32 pos_buf_size;
+ __u32 cntl_sb_buf_size;
+ __u32 param_buf_size;
+ /* wavefront size*/
+ __u32 wave_front_size;
+ /* shader visible vgprs*/
+ __u32 num_shader_visible_vgprs;
+ /* CU per shader array*/
+ __u32 num_cu_per_sh;
+ /* number of tcc blocks*/
+ __u32 num_tcc_blocks;
+ /* gs vgt table depth*/
+ __u32 gs_vgt_table_depth;
+ /* gs primitive buffer depth*/
+ __u32 gs_prim_buffer_depth;
+ /* max gs wavefront per vgt*/
+ __u32 max_gs_waves_per_vgt;
+ __u32 _pad1;
+ /* always on cu bitmap */
+ __u32 cu_ao_bitmap[4][4];
+ /** Starting high virtual address for UMDs. */
+ __u64 high_va_offset;
+ /** The maximum high virtual address */
+ __u64 high_va_max;
+ /* gfx10 pa_sc_tile_steering_override */
+ __u32 pa_sc_tile_steering_override;
+ /* disabled TCCs */
+ __u64 tcc_disabled_mask;
+};
+
+struct drm_amdgpu_info_hw_ip {
+ /** Version of h/w IP */
+ __u32 hw_ip_version_major;
+ __u32 hw_ip_version_minor;
+ /** Capabilities */
+ __u64 capabilities_flags;
+ /** command buffer address start alignment*/
+ __u32 ib_start_alignment;
+ /** command buffer size alignment*/
+ __u32 ib_size_alignment;
+ /** Bitmask of available rings. Bit 0 means ring 0, etc. */
+ __u32 available_rings;
+ __u32 _pad;
+};
+
+struct drm_amdgpu_info_num_handles {
+ /** Max handles as supported by firmware for UVD */
+ __u32 uvd_max_handles;
+ /** Handles currently in use for UVD */
+ __u32 uvd_used_handles;
+};
+
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+ /** System clock */
+ __u32 sclk;
+ /** Memory clock */
+ __u32 mclk;
+ /** VCE clock */
+ __u32 eclk;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+ struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+ __u32 num_valid_entries;
+ __u32 pad;
+};
+
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
+
+struct drm_amdgpu_info_video_codec_info {
+ __u32 valid;
+ __u32 max_width;
+ __u32 max_height;
+ __u32 max_pixels_per_frame;
+ __u32 max_level;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_video_caps {
+ struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
+};
+
+/*
+ * Supported GPU families
+ */
+#define AMDGPU_FAMILY_UNKNOWN 0
+#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
+#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
+#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
+#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
+#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
+#define AMDGPU_FAMILY_AI 141 /* Vega10 */
+#define AMDGPU_FAMILY_RV 142 /* Raven */
+#define AMDGPU_FAMILY_NV 143 /* Navi10 */
+#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
+#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/drm.h b/include/libdrm/libdrm/drm.h
new file mode 100644
index 0000000..398c396
--- /dev/null
+++ b/include/libdrm/libdrm/drm.h
@@ -0,0 +1,1183 @@
+/*
+ * Header for the Direct Rendering Manager
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs */
+
+#include <stdint.h>
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef size_t __kernel_size_t;
+typedef unsigned long drm_handle_t;
+
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/*
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/*
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/*
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/*
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/*
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ __kernel_size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ __kernel_size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ __kernel_size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
+
+/*
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ __kernel_size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/*
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/*
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
+};
+
+/*
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/*
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/*
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/*
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/*
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/*
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/*
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/*
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+ } flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/*
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc *list;
+};
+
+/*
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int *list;
+};
+
+/*
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void *address; /**< Address of buffer */
+};
+
+/*
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub *list; /**< Buffer information */
+};
+
+/*
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/*
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/*
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx *contexts;
+};
+
+/*
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/*
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/*
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/*
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/*
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
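+
+/*
+ * Illustrative usage sketch (not part of the header): blocking until the
+ * next vblank on the first CRTC; drmWaitVBlank() in xf86drm.h wraps this
+ * ioctl. `fd` is assumed to be an open DRM device fd.
+ *
+ *   union drm_wait_vblank vbl;
+ *   memset(&vbl, 0, sizeof(vbl));
+ *   vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *   vbl.request.sequence = 1;
+ *   drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ */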
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/*
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ __u32 crtc;
+ __u32 cmd;
+};
+
+/*
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/*
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/*
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/*
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/*
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/*
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ __u32 handle;
+
+ /** Returned global name */
+ __u32 name;
+};
+
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+ /** Name of object being opened */
+ __u32 name;
+
+ /** Returned handle for the object */
+ __u32 handle;
+
+ /** Returned size of the object */
+ __u64 size;
+};
+
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
+#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a CRTC index in the high bits of
+ * &drm_wait_vblank_request.type.
+ *
+ * Starting kernel version 2.6.39, this capability is always set to 1.
+ */
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
+#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
+#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
+#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting kernel version 4.15, this capability is always set to 1.
+ */
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
+ *
+ * Note that the cross-driver contract is to merely return a valid size;
+ * drivers are free to attach another meaning on top, eg. i915 returns the
+ * maximum plane size.
+ */
+#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
+#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
+#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting from kernel version 4.12, this capability is always set to 1.
+ */
+#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
+#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+
+/* DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
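+
+/*
+ * Illustrative sketch (not part of the upstream header): querying a
+ * capability via DRM_IOCTL_GET_CAP, defined further below. "fd" is assumed
+ * to be an open DRM device node such as /dev/dri/card0.
+ *
+ *	struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };
+ *
+ *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 &&
+ *	    (cap.value & DRM_PRIME_CAP_EXPORT))
+ *		printf("driver can export dma-buf fds\n");
+ */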
+
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * If set to 1, the DRM core will expose all planes (overlay, primary, and
+ * cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
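+
+/*
+ * Illustrative sketch (not part of the upstream header): enabling atomic
+ * mode-setting with DRM_IOCTL_SET_CLIENT_CAP (defined below). "fd" is an
+ * assumed open DRM device node; a failing ioctl means the driver is not
+ * atomic-capable.
+ *
+ *	struct drm_set_client_cap cc = {
+ *		.capability = DRM_CLIENT_CAP_ATOMIC,
+ *		.value = 1,
+ *	};
+ *
+ *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cc) != 0)
+ *		fprintf(stderr, "atomic mode-setting unavailable\n");
+ */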
+
+#define DRM_RDWR O_RDWR
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+	/** Flags; only applicable for the handle->fd direction */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
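+
+/*
+ * Illustrative sketch (not part of the upstream header): exporting a GEM
+ * handle as a dma-buf fd via DRM_IOCTL_PRIME_HANDLE_TO_FD. "fd" and
+ * "bo_handle" are assumed inputs, and use_dmabuf() is a hypothetical
+ * consumer.
+ *
+ *	struct drm_prime_handle args = {
+ *		.handle = bo_handle,
+ *		.flags = DRM_CLOEXEC,
+ *		.fd = -1,
+ *	};
+ *
+ *	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
+ *		use_dmabuf(args.fd);
+ */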
+
+struct drm_syncobj_create {
+ __u32 handle;
+#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
+ __u32 flags;
+};
+
+struct drm_syncobj_destroy {
+ __u32 handle;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+struct drm_syncobj_handle {
+ __u32 handle;
+ __u32 flags;
+
+ __s32 fd;
+ __u32 pad;
+};
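+
+/*
+ * Illustrative sketch (not part of the upstream header): exporting the
+ * current fence of a syncobj as a sync_file fd. "fd" and "syncobj" are
+ * assumed inputs, and consume_sync_file() is a hypothetical consumer of
+ * the resulting fd.
+ *
+ *	struct drm_syncobj_handle args = {
+ *		.handle = syncobj,
+ *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
+ *		.fd = -1,
+ *	};
+ *
+ *	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args) == 0)
+ *		consume_sync_file(args.fd);
+ */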
+
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+struct drm_syncobj_wait {
+ __u64 handles;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
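+
+/*
+ * Illustrative sketch (not part of the upstream header): waiting for two
+ * syncobjs to signal. timeout_nsec is absolute and is compared by the
+ * kernel against CLOCK_MONOTONIC. "fd", "h0", "h1" and "now_ns" are
+ * assumed inputs.
+ *
+ *	__u32 handles[2] = { h0, h1 };
+ *	struct drm_syncobj_wait wait = {
+ *		.handles = (__u64)(uintptr_t)handles,
+ *		.timeout_nsec = now_ns + 1000000000,
+ *		.count_handles = 2,
+ *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */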
+
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+	/* wait on a specific timeline point for each handle */
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+
+struct drm_syncobj_array {
+ __u64 handles;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 flags;
+};
+
+
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+ __u32 crtc_id; /* requested crtc_id */
+ __u32 active; /* return: crtc output is active */
+ __u64 sequence; /* return: most recent vblank sequence */
+ __s64 sequence_ns; /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+ __u32 crtc_id;
+ __u32 flags;
+ __u64 sequence; /* on input, target sequence. on output, actual sequence */
+ __u64 user_data; /* user data passed to event */
+};
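+
+/*
+ * Illustrative sketch (not part of the upstream header): requesting a
+ * DRM_EVENT_CRTC_SEQUENCE event at the next vblank of a CRTC. "fd" and
+ * "crtc_id" are assumed inputs; "cookie" is an arbitrary user pointer
+ * echoed back in the event.
+ *
+ *	struct drm_crtc_queue_sequence q = {
+ *		.crtc_id = crtc_id,
+ *		.flags = DRM_CRTC_SEQUENCE_RELATIVE,
+ *		.sequence = 1,
+ *		.user_data = (__u64)(uintptr_t)cookie,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &q);
+ */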
+
+#if defined(__cplusplus)
+}
+#endif
+
+#include "drm_mode.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+
+#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create)
+#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
+#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
+#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
+#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
+
+#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
+/*
+ * Device specific ioctls should only be in their respective headers.
+ * The device specific ioctl range is from 0x40 to 0x9f.
+ * Generic ioctls restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
+
+/*
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will only ever return complete events;
+ * for example, if the read buffer is 100 bytes and two 64-byte events
+ * are pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE 0x03
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 sequence;
+ __u32 crtc_id; /* 0 on older kernels that do not support this */
+};
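+
+/*
+ * Illustrative sketch (not part of the upstream header): draining events
+ * from the drm fd. Because reads only ever return whole events, advancing
+ * by base.length is safe. "fd" is an assumed input and handle_vblank() is
+ * a hypothetical consumer.
+ *
+ *	char buf[4096];
+ *	ssize_t len = read(fd, buf, sizeof(buf));
+ *	ssize_t off = 0;
+ *
+ *	while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
+ *		struct drm_event *e = (struct drm_event *)(buf + off);
+ *
+ *		if (e->type == DRM_EVENT_VBLANK)
+ *			handle_vblank((struct drm_event_vblank *)e);
+ *		off += e->length;
+ *	}
+ */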
+
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+ struct drm_event base;
+ __u64 user_data;
+ __s64 time_ns;
+ __u64 sequence;
+};
+
+/* typedef area */
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/drm_fourcc.h b/include/libdrm/libdrm/drm_fourcc.h
new file mode 100644
index 0000000..2c9051f
--- /dev/null
+++ b/include/libdrm/libdrm/drm_fourcc.h
@@ -0,0 +1,1459 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: overview
+ *
+ * In the DRM subsystem, framebuffer pixel formats are described using the
+ * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
+ * fourcc code, a Format Modifier may optionally be provided, in order to
+ * further describe the buffer's format - for example tiling or compression.
+ *
+ * Format Modifiers
+ * ----------------
+ *
+ * Format modifiers are used in conjunction with a fourcc code, forming a
+ * unique fourcc:modifier pair. This format:modifier pair must fully define the
+ * format and data layout of the buffer, and should be the only way to describe
+ * that particular buffer.
+ *
+ * Having multiple fourcc:modifier pairs which describe the same layout should
+ * be avoided, as such aliases run the risk of different drivers exposing
+ * different names for the same data format, forcing userspace to understand
+ * that they are aliases.
+ *
+ * Format modifiers may change any property of the buffer, including the number
+ * of planes and/or the required allocation size. Format modifiers are
+ * vendor-namespaced, and as such the relationship between a fourcc code and a
+ * modifier is specific to the modifier being used. For example, some modifiers
+ * may preserve meaning - such as number of planes - from the fourcc code,
+ * whereas others may not.
+ *
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
+ * Vendors should document their modifier usage in as much detail as
+ * possible, to ensure maximum compatibility across devices, drivers and
+ * applications.
+ *
+ * The authoritative list of format modifier codes is found in
+ * `include/uapi/drm/drm_fourcc.h`
+ */
+
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+ ((__u32)(c) << 16) | ((__u32)(d) << 24))
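+
+/*
+ * Worked example (not part of the upstream header): fourcc_code() packs its
+ * four characters little-endian, so fourcc_code('X', 'R', '2', '4') is
+ * 0x58 | (0x52 << 8) | (0x32 << 16) | (0x34 << 24) == 0x34325258, the value
+ * of DRM_FORMAT_XRGB8888 defined below.
+ */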
+
+#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
+
+/* Reserve 0 for the invalid format specifier */
+#define DRM_FORMAT_INVALID 0
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp Red */
+#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 10 bpp Red */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
+/* 16 bpp Red */
+#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
+/* 32 bpp RG */
+#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */
+#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
+/*
+ * Floating point 64bpp RGB
+ * IEEE 754-2008 binary16 half-precision float
+ * [15:0] sign:exponent:mantissa 1:5:10
+ */
+#define DRM_FORMAT_XRGB16161616F fourcc_code('X', 'R', '4', 'H') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616F fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
+#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
+
+/*
+ * In the packed Y2xx formats, for each component the xx valid data bits
+ * occupy the MSBs and the 16-xx padding bits occupy the LSBs.
+ */
+#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 10:6:10:6:10:6:10:6 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y212 fourcc_code('Y', '2', '1', '2') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 12:4:12:4:12:4:12:4 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y216 fourcc_code('Y', '2', '1', '6') /* [63:0] Cr0:Y1:Cb0:Y0 16:16:16:16 little endian per 2 Y pixels */
+
+/*
+ * In the packed Y4xx formats, for each component the xx valid data bits
+ * occupy the MSBs and the 16-xx padding bits occupy the LSBs, except for Y410.
+ */
+#define DRM_FORMAT_Y410 fourcc_code('Y', '4', '1', '0') /* [31:0] A:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_Y412 fourcc_code('Y', '4', '1', '2') /* [63:0] A:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_Y416 fourcc_code('Y', '4', '1', '6') /* [63:0] A:Cr:Y:Cb 16:16:16:16 little endian */
+
+#define DRM_FORMAT_XVYU2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] X:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_XVYU12_16161616 fourcc_code('X', 'V', '3', '6') /* [63:0] X:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_XVYU16161616 fourcc_code('X', 'V', '4', '8') /* [63:0] X:Cr:Y:Cb 16:16:16:16 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
+
+/*
+ * 1-plane YUV 4:2:0
+ * In these formats, the component ordering is specified (Y, followed by U
+ * then V), but the exact Linear layout is undefined.
+ * These formats can only be used with a non-Linear modifier.
+ */
+#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
+#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
+
+/*
+ * 2 plane RGB + A
+ * index 0 = RGB plane, same format as the corresponding non _A8 format has
+ * index 1 = A plane, [7:0] A
+ */
+#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8')
+#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8')
+#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8')
+#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8')
+#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8')
+#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8')
+#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8')
+#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8')
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
+ * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
+ */
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+ */
+#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') /* 2x1 subsampled Cr:Cb plane, 10 bit per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+ */
+#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [12:4] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
+ */
+#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian
+ */
+#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
+/* 3 plane non-subsampled (444) YCbCr
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cb plane, [15:0] Cb:x [10:6] little endian
+ * index 2: Cr plane, [15:0] Cr:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
+
+/* 3 plane non-subsampled (444) YCrCb
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cr plane, [15:0] Cr:x [10:6] little endian
+ * index 2: Cb plane, [15:0] Cb:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
+#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
+#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
+#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
+
+/* add more to the end as needed */
+
+#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
+#define fourcc_mod_code(vendor, val) \
+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
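+
+/*
+ * Worked example (not part of the upstream header): fourcc_mod_code(INTEL, 2)
+ * evaluates to 0x0100000000000002ULL. fourcc_mod_get_vendor() on that value
+ * recovers 0x01 (DRM_FORMAT_MOD_VENDOR_INTEL), so
+ * fourcc_mod_is_vendor(modifier, INTEL) is non-zero for it.
+ */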
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ *
+ * Generic modifier names:
+ *
+ * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
+ * for layouts which are common across multiple vendors. To preserve
+ * compatibility, in cases where a vendor-specific definition already exists and
+ * a generic name for it is desired, the common name is a purely symbolic alias
+ * and must use the same numerical value as the original definition.
+ *
+ * Note that generic names should only be used for modifiers which describe
+ * generic layouts (such as pixel re-ordering), which may have
+ * independently-developed support across multiple vendors.
+ *
+ * In future cases where a generic layout is identified before merging with a
+ * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
+ * 'NONE' could be considered. This should only be for obvious, exceptional
+ * cases to avoid polluting the 'GENERIC' namespace with modifiers which only
+ * apply to a single vendor.
+ *
+ * Generic names should not be used for cases where multiple hardware vendors
+ * have implementations of the same standardised compression scheme (such as
+ * AFBC). In those cases, all implementations should use the same format
+ * modifier(s), reflecting the vendor of the standard.
+ */
+
+#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
+
+/*
+ * Invalid Modifier
+ *
+ * This modifier can be used as a sentinel to terminate the format modifiers
+ * list, or to initialize a variable with an invalid modifier. It might also be
+ * used to report an error back to userspace for certain APIs.
+ */
+#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
+
+/*
+ * Linear Layout
+ *
+ * Just plain linear layout. Note that this is different from not specifying any
+ * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
+ * which tells the driver to also take driver-internal information into account
+ * and so might actually result in a tiled framebuffer.
+ */
+#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit, instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
+ * in row-major layout. Within the tile bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms the layout is highly platform-specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
+ * in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes)
+ * chunks column-major, with a platform-dependent height. On top of that the
+ * memory can apply platform-dependent swizzling of some higher address bits
+ * into bit6.
+ *
+ * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
+ * On earlier platforms the layout is highly platform-specific and not useful for
+ * cross-driver sharing. It exists since on a given platform it does uniquely
+ * identify the layout in a simple way for i915-specific userspace, which
+ * facilitated conversion of userspace to modifiers. Additionally the exact
+ * format on some really old platforms is not known.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile pixels are laid out in sixteen 256 byte units / sub-tiles which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256 byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256 byte units are made out of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width
+ * in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Intel color control surface (CCS) for render compression
+ *
+ * The framebuffer format must be one of the 8:8:8:8 RGB formats.
+ * The main surface will be plane index 0 and must be Y/Yf-tiled,
+ * the CCS will be plane index 1.
+ *
+ * Each CCS tile matches a 1024x512 pixel area of the main surface.
+ * To match certain aspects of the 3D hardware the CCS is
+ * considered to be made up of normal 128Bx32 Y tiles. Thus
+ * the CCS pitch must be specified in multiples of 128 bytes.
+ *
+ * In reality the CCS tile appears to be a 64Bx64 Y tile, composed
+ * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
+ * But that fact is not relevant unless the memory is accessed
+ * directly.
+ */
+#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
+#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * the main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 media compression
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * the main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64 bytes
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid out in a Z-shape, and the pixel data within each
+ * macroblock follows the standard NV12 layout.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+
+/*
+ * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
+ *
+ * This is a simple tiled layout using tiles of 16x16 pixels in a row-major
+ * layout. For YCbCr formats Cb/Cr components are taken in such a way that
+ * they correspond to their 16x16 luma block.
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE fourcc_mod_code(SAMSUNG, 2)
+
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned to 4k bytes.
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned to 4k bytes.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
+
+/* Vivante framebuffer modifiers */
+
+/*
+ * Vivante 4x4 tiling layout
+ *
+ * This is a simple tiled layout using tiles of 4x4 pixels in a row-major
+ * layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1)
+
+/*
+ * Vivante 64x64 super-tiling layout
+ *
+ * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile
+ * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row-
+ * major layout.
+ *
+ * For more information: see
+ * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2)
+
+/*
+ * Vivante 4x4 tiling layout for dual-pipe
+ *
+ * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a
+ * different base address. Offsets from the base addresses are therefore halved
+ * compared to the non-split tiled layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3)
+
+/*
+ * Vivante 64x64 super-tiling layout for dual-pipe
+ *
+ * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile
+ * starts at a different base address. Offsets from the base addresses are
+ * therefore halved compared to the non-split super-tiled layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+
+/* NVIDIA frame buffer modifiers */
+
+/*
+ * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
+ *
+ * Pixels are arranged in simple tiles of 16 x 16 bytes.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
+
+/*
+ * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
+ * and Tegra GPUs starting with Tegra K1.
+ *
+ * Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout vary
+ * based on the architecture generation. GOBs themselves are then arranged in
+ * 3D blocks, with the block dimensions (in terms of GOBs) always being a power
+ * of two, and hence expressible as their log2 equivalent (E.g., "2" represents
+ * a block depth or height of "4").
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ *
+ * Macro
+ * Bits Param Description
+ * ---- ----- -----------------------------------------------------------------
+ *
+ * 3:0 h log2(height) of each block, in GOBs. Placed here for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 4:4 - Must be 1, to indicate block-linear layout. Necessary for
+ * compatibility with the existing
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
+ *
+ * 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
+ * size). Must be zero.
+ *
+ * Note there is no log2(width) parameter. Some portions of the
+ * hardware support a block width of two gobs, but it is impractical
+ * to use due to lack of support elsewhere, and has no known
+ * benefits.
+ *
+ * 11:9 - Reserved (To support 2D-array textures with variable array stride
+ * in blocks, specified via log2(tile width in blocks)). Must be
+ * zero.
+ *
+ * 19:12 k Page Kind. This value directly maps to a field in the page
+ * tables of all GPUs >= NV50. It affects the exact layout of bits
+ * in memory and can be derived from the tuple
+ *
+ * (format, GPU model, compression type, samples per pixel)
+ *
+ * Where compression type is defined below. If GPU model were
+ * implied by the format modifier, format, or memory buffer, page
+ * kind would not need to be included in the modifier itself, but
+ * since the modifier should define the layout of the associated
+ * memory buffer independent from any device or other context, it
+ * must be included here.
+ *
+ * 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
+ * starting with Fermi GPUs. Additionally, the mapping between page
+ * kind and bit layout has changed at various points.
+ *
+ * 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
+ * 1 = Gob Height 4, G80 - GT2XX Page Kind mapping
+ * 2 = Gob Height 8, Turing+ Page Kind mapping
+ * 3 = Reserved for future use.
+ *
+ * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
+ * bit remapping step that occurs at an even lower level than the
+ * page kind and block linear swizzles. This causes the layout of
+ * surfaces mapped in those SOC's GPUs to be incompatible with the
+ * equivalent mapping on other GPUs in the same system.
+ *
+ * 0 = Tegra K1 - Tegra Parker/TX2 Layout.
+ * 1 = Desktop GPU and Tegra Xavier+ Layout
+ *
+ * 25:23 c Lossless Framebuffer Compression type.
+ *
+ * 0 = none
+ * 1 = ROP/3D, layout 1, exact compression format implied by Page
+ * Kind field
+ * 2 = ROP/3D, layout 2, exact compression format implied by Page
+ * Kind field
+ * 3 = CDE horizontal
+ * 4 = CDE vertical
+ * 5 = Reserved for future use
+ * 6 = Reserved for future use
+ * 7 = Reserved for future use
+ *
+ * 55:26 - Reserved for future use. Must be zero.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
+ fourcc_mod_code(NVIDIA, (0x10 | \
+ ((h) & 0xf) | \
+ (((k) & 0xff) << 12) | \
+ (((g) & 0x3) << 20) | \
+ (((s) & 0x1) << 22) | \
+ (((c) & 0x7) << 23)))
+
+/* To grandfather in prior block linear format modifiers to the above layout,
+ * the page kind "0", which corresponds to "pitch/linear" and hence is unusable
+ * with block-linear layouts, is remapped within drivers to the value 0xfe,
+ * which corresponds to the "generic" kind used for simple single-sample
+ * uncompressed color formats on Fermi - Volta GPUs.
+ */
+static __inline__ __u64
+drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+{
+ if (!(modifier & 0x10) || (modifier & (0xff << 12)))
+ return modifier;
+ else
+ return modifier | (0xfe << 12);
+}
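+
+/*
+ * Worked example (not part of the upstream header):
+ * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) (defined below) has bit 4 set and a
+ * zero Page Kind field, so drm_fourcc_canonicalize_nvidia_format_mod()
+ * returns it with the generic page kind patched in, i.e. the input OR-ed
+ * with (0xfe << 12).
+ */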
+
+/*
+ * 16Bx2 Block Linear layout, used by Tegra K1 and later
+ *
+ * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
+ * vertically by a power of 2 (1 to 32 GOBs) to form a block.
+ *
+ * Within a GOB, data is ordered as 16B x 2 line sectors laid out in a Z-shape.
+ *
+ * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
+ * Valid values are:
+ *
+ * 0 == ONE_GOB
+ * 1 == TWO_GOBS
+ * 2 == FOUR_GOBS
+ * 3 == EIGHT_GOBS
+ * 4 == SIXTEEN_GOBS
+ * 5 == THIRTYTWO_GOBS
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ */
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
+
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
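+
+/* Example (an illustrative sketch, not part of the UAPI): the legacy 16Bx2
+ * defines above leave the page kind field zero, so after canonicalization
+ * they compare equal to the equivalent new-style modifier using the generic
+ * page kind 0xfe:
+ *
+ *   __u64 legacy = DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB;
+ *   __u64 canon = DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0xfe, 4);
+ *
+ *   drm_fourcc_canonicalize_nvidia_format_mod(legacy) == canon
+ */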
+
+/*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 32 bits for modifier
+ * type, and the next 24 bits for parameters. Top 8 bits are the
+ * vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+ fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+ ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
+ ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+ ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
+ __fourcc_mod_broadcom_param_shift))
+
+/*
+ * Broadcom VC4 "T" format
+ *
+ * This is the primary layout that the V3D GPU can texture from (it
+ * can't do linear). The T format has:
+ *
+ * - 64b utiles of pixels in a raster-order grid according to cpp. It's 4x4
+ * pixels at 32 bit depth.
+ *
+ * - 1k subtiles made of a 4x4 raster-order grid of 64b utiles (so usually
+ * 16x16 pixels).
+ *
+ * - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On
+ * even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows
+ * they're (TR, BR, BL, TL), where bottom left is start of memory.
+ *
+ * - an image made of 4k tiles in rows either left-to-right (even rows of 4k
+ * tiles) or right-to-left (odd rows of 4k tiles).
+ */
+#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
+
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses. For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory. The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV. Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+ DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+ DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+ DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+ DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
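+
+/* Example (an illustrative sketch): a SAND128 modifier carrying a column
+ * height of 96 lines packs the parameter into the bits above the modifier
+ * type, and the helpers above split it back apart:
+ *
+ *   __u64 mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);
+ *
+ *   fourcc_mod_broadcom_mod(mod) == DRM_FORMAT_MOD_BROADCOM_SAND128
+ *   fourcc_mod_broadcom_param(mod) == 96
+ */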
+
+/* Broadcom UIF format
+ *
+ * This is the common format for the current Broadcom multimedia
+ * blocks, including V3D 3.x and newer, newer video codecs, and
+ * displays.
+ *
+ * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
+ * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
+ * stored in columns, with padding between the columns to ensure that
+ * moving from one column to the next doesn't hit the same SDRAM page
+ * bank.
+ *
+ * To calculate the padding, it is assumed that each hardware block
+ * and the software driving it knows the platform's SDRAM page size,
+ * number of banks, and XOR address, and that it's identical between
+ * all blocks using the format. This tiling modifier will use XOR as
+ * necessary to reduce the padding. If a hardware block can't do XOR,
+ * the assumption is that a no-XOR tiling modifier will be created.
+ */
+#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
+
+/*
+ * Arm Framebuffer Compression (AFBC) modifiers
+ *
+ * AFBC is a proprietary lossless image compression protocol and format.
+ * It provides fine-grained random access and minimizes the amount of data
+ * transferred between IP blocks.
+ *
+ * AFBC has several features which may be supported and/or used, which are
+ * represented using bits in the modifier. Not all combinations are valid,
+ * and different devices or use-cases may support different combinations.
+ *
+ * Further information on the use of AFBC modifiers can be found in
+ * Documentation/gpu/afbc.rst
+ */
+
+/*
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor-specific
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers, i.e. AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
+ */
+#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
+ fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00
+#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01
+
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode)
+
+/*
+ * AFBC superblock size
+ *
+ * Indicates the superblock size(s) used for the AFBC buffer. The buffer
+ * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ *
+ * Where one superblock size is specified, it applies to all planes of the
+ * buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified,
+ * the first applies to the Luma plane and the second applies to the Chroma
+ * plane(s). e.g. (32x8_64x4 means 32x8 Luma, with 64x4 Chroma).
+ * Multiple superblock sizes are only valid for multi-plane YCbCr formats.
+ */
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4 (3ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL)
+
+/*
+ * AFBC lossless colorspace transform
+ *
+ * Indicates that the buffer makes use of the AFBC lossless colorspace
+ * transform.
+ */
+#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
+
+/*
+ * AFBC block-split
+ *
+ * Indicates that the payload of each superblock is split. The second
+ * half of the payload is positioned at a predefined offset from the start
+ * of the superblock payload.
+ */
+#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
+
+/*
+ * AFBC sparse layout
+ *
+ * This flag indicates that the payload of each superblock must be stored at a
+ * predefined position relative to the other superblocks in the same AFBC
+ * buffer. This order is the same order used by the header buffer. In this mode
+ * each superblock is given the same amount of space as an uncompressed
+ * superblock of the particular format would require, rounding up to the next
+ * multiple of 128 bytes in size.
+ */
+#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
+
+/*
+ * AFBC copy-block restrict
+ *
+ * Buffers with this flag must obey the copy-block restriction. The restriction
+ * is such that there are no copy-blocks referring across the border of 8x8
+ * blocks. For the subsampled data the 8x8 limitation is also subsampled.
+ */
+#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
+
+/*
+ * AFBC tiled layout
+ *
+ * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
+ * superblocks inside a tile are stored together in memory. 8x8 tiles are used
+ * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
+ * larger bpp formats. The order between the tiles is scan line order.
+ * When the tiled layout is used, the buffer size (in pixels) must be aligned
+ * to the tile size.
+ */
+#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
+
+/*
+ * AFBC solid color blocks
+ *
+ * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
+ * can be reduced if a whole superblock is a single color.
+ */
+#define AFBC_FORMAT_MOD_SC (1ULL << 9)
+
+/*
+ * AFBC double-buffer
+ *
+ * Indicates that the buffer is allocated in a layout safe for front-buffer
+ * rendering.
+ */
+#define AFBC_FORMAT_MOD_DB (1ULL << 10)
+
+/*
+ * AFBC buffer content hints
+ *
+ * Indicates that the buffer includes per-superblock content hints.
+ */
+#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
+
+/* AFBC uncompressed storage mode
+ *
+ * Indicates that the buffer is using AFBC uncompressed storage mode.
+ * In this mode all superblock payloads in the buffer use the uncompressed
+ * storage mode, which is usually only used for data which cannot be compressed.
+ * The buffer layout is the same as for AFBC buffers without USM set, this only
+ * affects the storage mode of the individual superblocks. Note that even a
+ * buffer without USM set may use uncompressed storage mode for some or all
+ * superblocks, USM just guarantees it for all.
+ */
+#define AFBC_FORMAT_MOD_USM (1ULL << 12)
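+
+/* Example (an illustrative sketch, not a statement about any particular
+ * device): one AFBC combination commonly advertised by recent Arm GPUs is
+ * 16x16 superblocks with the YTR, sparse and solid-color features enabled:
+ *
+ *   __u64 mod = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ *                                       AFBC_FORMAT_MOD_YTR |
+ *                                       AFBC_FORMAT_MOD_SPARSE |
+ *                                       AFBC_FORMAT_MOD_SC);
+ *
+ * Which combinations are actually valid is device-dependent; see
+ * Documentation/gpu/afbc.rst.
+ */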
+
+/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates that an AFRC encoded buffer uses the scanline-optimised
+ * layout; otherwise, the rotation-optimised layout is used.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
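+
+/* Example (an illustrative sketch): a semi-planar YUV buffer compressed to
+ * 16 bytes per luma coding unit and 24 bytes per chroma coding unit, laid
+ * out for scanline access:
+ *
+ *   __u64 mod = DRM_FORMAT_MOD_ARM_AFRC(
+ *           AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) |
+ *           AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_24) |
+ *           AFRC_FORMAT_MOD_LAYOUT_SCAN);
+ */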
+
+/*
+ * Arm 16x16 Block U-Interleaved modifier
+ *
+ * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
+ * into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels
+ * in the block are reordered.
+ */
+#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
+
+/*
+ * Allwinner tiled modifier
+ *
+ * This tiling mode is implemented by the VPU found on all Allwinner platforms,
+ * codenamed sunxi. It is associated with a YUV format that uses either 2 or 3
+ * planes.
+ *
+ * With this tiling, the luminance samples are disposed in tiles representing
+ * 32x32 pixels and the chrominance samples in tiles representing 32x64 pixels.
+ * The pixel order in each tile is linear and the tiles are disposed linearly,
+ * both in row-major order.
+ */
+#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+
+/*
+ * Amlogic Video Framebuffer Compression modifiers
+ *
+ * Amlogic uses a proprietary lossless image compression protocol and format
+ * for their hardware video codec accelerators, either video decoders or
+ * video input encoders.
+ *
+ * It considerably reduces memory bandwidth while writing and reading
+ * frames in memory.
+ *
+ * The underlying storage is considered to be 3 components, 8-bit or 10-bit
+ * per component YCbCr 420, single plane:
+ * - DRM_FORMAT_YUV420_8BIT
+ * - DRM_FORMAT_YUV420_10BIT
+ *
+ * The first 8 bits of the mode define the layout, and the following 8 bits
+ * define the options changing the layout.
+ *
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+#define __fourcc_mod_amlogic_layout_mask 0xff
+#define __fourcc_mod_amlogic_options_shift 8
+#define __fourcc_mod_amlogic_options_mask 0xff
+
+#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+ ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
+ (((__options) & __fourcc_mod_amlogic_options_mask) \
+ << __fourcc_mod_amlogic_options_shift))
+
+/* Amlogic FBC Layouts */
+
+/*
+ * Amlogic FBC Basic Layout
+ *
+ * The basic layout is composed of:
+ * - a body content organized in 64x32 superblocks with 4096 bytes per
+ * superblock in default mode.
+ * - a 32 bytes per 128x64 header block
+ *
+ * This layout is transferable between Amlogic SoCs supporting this modifier.
+ */
+#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
+
+/*
+ * Amlogic FBC Scatter Memory layout
+ *
+ * Indicates the header contains IOMMU references to the compressed
+ * frames content to optimize memory access and layout.
+ *
+ * In this mode, only the header memory address is needed, thus the
+ * content memory organization is tied to the current producer
+ * execution and cannot be saved/dumped nor transferred between
+ * Amlogic SoCs supporting this modifier.
+ *
+ * Due to the nature of the layout, these buffers are not expected to
+ * be accessible by the user-space clients, but only accessible by the
+ * hardware producers and consumers.
+ *
+ * The user-space clients should expect a failure while trying to mmap
+ * the DMA-BUF handle returned by the producer.
+ */
+#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
+
+/* Amlogic FBC Layout Options Bit Mask */
+
+/*
+ * Amlogic FBC Memory Saving mode
+ *
+ * Indicates the storage is packed when the pixel size is a multiple of word
+ * boundaries, i.e. 8-bit data should be stored in this mode to save
+ * allocation memory.
+ *
+ * This mode reduces body layout to 3072 bytes per 64x32 superblock with
+ * the basic layout and 3200 bytes per 64x32 superblock combined with
+ * the scatter layout.
+ */
+#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
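+
+/* Example (an illustrative sketch): an 8-bit stream using the basic layout
+ * with the memory-saving option enabled:
+ *
+ *   __u64 mod = DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
+ *                                          AMLOGIC_FBC_OPTION_MEM_SAVING);
+ */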
+
+/*
+ * AMD modifiers
+ *
+ * Memory layout:
+ *
+ * without DCC:
+ * - main surface
+ *
+ * with DCC & without DCC_RETILE:
+ * - main surface in plane 0
+ * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
+ *
+ * with DCC & DCC_RETILE:
+ * - main surface in plane 0
+ * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
+ * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
+ *
+ * For multi-plane formats the above surfaces get merged into one plane for
+ * each format plane, based on the required alignment only.
+ *
+ * Bits Parameter Notes
+ * ----- ------------------------ ---------------------------------------------
+ *
+ * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
+ * 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
+ * 13 DCC
+ * 14 DCC_RETILE
+ * 15 DCC_PIPE_ALIGN
+ * 16 DCC_INDEPENDENT_64B
+ * 17 DCC_INDEPENDENT_128B
+ * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
+ * 20 DCC_CONSTANT_ENCODE
+ * 23:21 PIPE_XOR_BITS Only for some chips
+ * 26:24 BANK_XOR_BITS Only for some chips
+ * 29:27 PACKERS Only for some chips
+ * 32:30 RB Only for some chips
+ * 35:33 PIPE Only for some chips
+ * 55:36 - Reserved for future use, must be zero
+ */
+#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
+
+#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
+
+/* Reserve 0 for GFX8 and older */
+#define AMD_FMT_MOD_TILE_VER_GFX9 1
+#define AMD_FMT_MOD_TILE_VER_GFX10 2
+#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
+
+/*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+ * version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
+
+/*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
+#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
+#define AMD_FMT_MOD_DCC_BLOCK_64B 0
+#define AMD_FMT_MOD_DCC_BLOCK_128B 1
+#define AMD_FMT_MOD_DCC_BLOCK_256B 2
+
+#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
+#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
+#define AMD_FMT_MOD_TILE_SHIFT 8
+#define AMD_FMT_MOD_TILE_MASK 0x1F
+
+/* Whether DCC compression is enabled. */
+#define AMD_FMT_MOD_DCC_SHIFT 13
+#define AMD_FMT_MOD_DCC_MASK 0x1
+
+/*
+ * Whether to include two DCC surfaces, one which is rb & pipe aligned, and
+ * one which is not aligned.
+ */
+#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
+#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
+
+/* Only set if DCC_RETILE = false */
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
+
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
+
+/*
+ * DCC supports embedding some clear colors directly in the DCC surface.
+ * However, on older GPUs the rendering HW ignores the embedded clear color
+ * and prefers the driver provided color. This necessitates doing a fastclear
+ * eliminate operation before a process transfers control.
+ *
+ * If this bit is set that means the fastclear eliminate is not needed for these
+ * embeddable colors.
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The fields below account for per-GPU differences. They are only
+ * relevant for GFX9 and later, and only if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+ ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+ (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
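+
+/* Example (an illustrative sketch; the field values are made up and not a
+ * valid configuration for any specific chip): building a GFX9 64K_S_X
+ * modifier with DCC enabled, then reading a field back and clearing one:
+ *
+ *   __u64 mod = AMD_FMT_MOD |
+ *               AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ *               AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ *               AMD_FMT_MOD_SET(DCC, 1) |
+ *               AMD_FMT_MOD_SET(PIPE_XOR_BITS, 2);
+ *
+ *   AMD_FMT_MOD_GET(TILE, mod) == AMD_FMT_MOD_TILE_GFX9_64K_S_X
+ *   mod &= AMD_FMT_MOD_CLEAR(DCC);   clears the DCC bit again
+ */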
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* DRM_FOURCC_H */
diff --git a/include/libdrm/libdrm/drm_mode.h b/include/libdrm/libdrm/drm_mode.h
new file mode 100644
index 0000000..9b6722d
--- /dev/null
+++ b/include/libdrm/libdrm/drm_mode.h
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: overview
+ *
+ * DRM exposes many UAPI and structure definitions to provide a consistent
+ * and standardized interface with userspace.
+ * Userspace can refer to these structure definitions and UAPI formats
+ * to communicate with the driver.
+ */
+
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+#define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \
+ DRM_MODE_TYPE_USERDEF | \
+ DRM_MODE_TYPE_DRIVER)
+
+/* Video mode flags */
+/* bit compatible with the xrandr RR_ definitions (bits 0-13)
+ *
+ * ABI warning: Existing userspace really expects
+ * the mode flags to match the xrandr definitions. Any
+ * changes that don't match the xrandr definitions will
+ * likely need a new client cap or some other mechanism
+ * to avoid breaking existing userspace. This includes
+ * allocating new flags in the previously unused bits!
+ */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
+#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+ /*
+ * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+#define DRM_MODE_PICTURE_ASPECT_64_27 3
+#define DRM_MODE_PICTURE_ASPECT_256_135 4
+
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
+#define DRM_MODE_CONTENT_TYPE_PHOTO 2
+#define DRM_MODE_CONTENT_TYPE_CINEMA 3
+#define DRM_MODE_CONTENT_TYPE_GAME 4
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_PIC_AR_64_27 \
+ (DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define DRM_MODE_FLAG_PIC_AR_256_135 \
+ (DRM_MODE_PICTURE_ASPECT_256_135<<19)
+
+#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
+ DRM_MODE_FLAG_NHSYNC | \
+ DRM_MODE_FLAG_PVSYNC | \
+ DRM_MODE_FLAG_NVSYNC | \
+ DRM_MODE_FLAG_INTERLACE | \
+ DRM_MODE_FLAG_DBLSCAN | \
+ DRM_MODE_FLAG_CSYNC | \
+ DRM_MODE_FLAG_PCSYNC | \
+ DRM_MODE_FLAG_NCSYNC | \
+ DRM_MODE_FLAG_HSKEW | \
+ DRM_MODE_FLAG_DBLCLK | \
+ DRM_MODE_FLAG_CLKDIV2 | \
+ DRM_MODE_FLAG_3D_MASK)
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+/* Link Status options */
+#define DRM_MODE_LINK_STATUS_GOOD 0
+#define DRM_MODE_LINK_STATUS_BAD 1
+
+/*
+ * DRM_MODE_ROTATE_<degrees>
+ *
+ * Signals that a drm plane has been rotated <degrees> degrees in the
+ * counter-clockwise direction.
+ *
+ * This define is provided as a convenience; looking up the property id
+ * via the name->prop id lookup is the preferred method.
+ */
+#define DRM_MODE_ROTATE_0 (1<<0)
+#define DRM_MODE_ROTATE_90 (1<<1)
+#define DRM_MODE_ROTATE_180 (1<<2)
+#define DRM_MODE_ROTATE_270 (1<<3)
+
+/*
+ * DRM_MODE_ROTATE_MASK
+ *
+ * Bitmask used to look for drm plane rotations.
+ */
+#define DRM_MODE_ROTATE_MASK (\
+ DRM_MODE_ROTATE_0 | \
+ DRM_MODE_ROTATE_90 | \
+ DRM_MODE_ROTATE_180 | \
+ DRM_MODE_ROTATE_270)
+
+/*
+ * DRM_MODE_REFLECT_<axis>
+ *
+ * Signals that the contents of a drm plane are reflected along the <axis> axis,
+ * in the same way as mirroring.
+ * See kerneldoc chapter "Plane Composition Properties" for more details.
+ *
+ * This define is provided as a convenience; looking up the property id
+ * via the name->prop id lookup is the preferred method.
+ */
+#define DRM_MODE_REFLECT_X (1<<4)
+#define DRM_MODE_REFLECT_Y (1<<5)
+
+/*
+ * DRM_MODE_REFLECT_MASK
+ *
+ * Bitmask used to look for drm plane reflections.
+ */
+#define DRM_MODE_REFLECT_MASK (\
+ DRM_MODE_REFLECT_X | \
+ DRM_MODE_REFLECT_Y)
+
+/* Content Protection Flags */
+#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
+#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
+#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
+struct drm_mode_modeinfo {
+ __u32 clock;
+ __u16 hdisplay;
+ __u16 hsync_start;
+ __u16 hsync_end;
+ __u16 htotal;
+ __u16 hskew;
+ __u16 vdisplay;
+ __u16 vsync_start;
+ __u16 vsync_end;
+ __u16 vtotal;
+ __u16 vscan;
+
+ __u32 vrefresh;
+
+ __u32 flags;
+ __u32 type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
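+
+/* A rough sketch of how the timings relate (the simple progressive case,
+ * without interlace or doublescan adjustments): the vertical refresh rate
+ * follows from clock, htotal and vtotal. E.g. for a 1920x1080 mode with
+ * clock = 148500 kHz, htotal = 2200 and vtotal = 1125:
+ *
+ *   vrefresh = clock * 1000 / (htotal * vtotal)
+ *            = 148500000 / 2475000
+ *            = 60 Hz
+ *
+ * @vrefresh stores this value rounded to an integer.
+ */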
+
+struct drm_mode_card_res {
+ __u64 fb_id_ptr;
+ __u64 crtc_id_ptr;
+ __u64 connector_id_ptr;
+ __u64 encoder_id_ptr;
+ __u32 count_fbs;
+ __u32 count_crtcs;
+ __u32 count_connectors;
+ __u32 count_encoders;
+ __u32 min_width;
+ __u32 max_width;
+ __u32 min_height;
+ __u32 max_height;
+};
+
+struct drm_mode_crtc {
+ __u64 set_connectors_ptr;
+ __u32 count_connectors;
+
+ __u32 crtc_id; /**< Id */
+ __u32 fb_id; /**< Id of framebuffer */
+
+ __u32 x; /**< x Position on the framebuffer */
+ __u32 y; /**< y Position on the framebuffer */
+
+ __u32 gamma_size;
+ __u32 mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags; /* see above flags */
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x;
+ __s32 crtc_y;
+ __u32 crtc_w;
+ __u32 crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x;
+ __u32 src_y;
+ __u32 src_h;
+ __u32 src_w;
+};
+
+struct drm_mode_get_plane {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+#define DRM_MODE_ENCODER_DPMST 7
+#define DRM_MODE_ENCODER_DPI 8
+
+struct drm_mode_get_encoder {
+ __u32 encoder_id;
+ __u32 encoder_type;
+
+ __u32 crtc_id; /**< Id of crtc */
+
+ __u32 possible_crtcs;
+ __u32 possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+enum drm_mode_subconnector {
+ DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */
+ DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */
+ DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */
+ DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */
+ DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */
+ DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */
+ DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */
+ DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */
+ DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */
+ DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */
+ DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */
+ DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */
+ DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */
+};
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+#define DRM_MODE_CONNECTOR_DPI 17
+#define DRM_MODE_CONNECTOR_WRITEBACK 18
+#define DRM_MODE_CONNECTOR_SPI 19
+#define DRM_MODE_CONNECTOR_USB 20
+
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
+ */
+struct drm_mode_get_connector {
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
+ __u64 encoders_ptr;
+ /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
+ __u64 modes_ptr;
+ /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
+ __u64 props_ptr;
+ /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
+ __u64 prop_values_ptr;
+
+ /** @count_modes: Number of modes. */
+ __u32 count_modes;
+ /** @count_props: Number of properties. */
+ __u32 count_props;
+ /** @count_encoders: Number of encoders. */
+ __u32 count_encoders;
+
+ /** @encoder_id: Object ID of the current encoder. */
+ __u32 encoder_id;
+ /** @connector_id: Object ID of the connector. */
+ __u32 connector_id;
+ /**
+ * @connector_type: Type of the connector.
+ *
+ * See DRM_MODE_CONNECTOR_* defines.
+ */
+ __u32 connector_type;
+ /**
+ * @connector_type_id: Type-specific connector number.
+ *
+ * This is not an object ID. This is a per-type connector number. Each
+ * (type, type_id) combination is unique across all connectors of a DRM
+ * device.
+ */
+ __u32 connector_type_id;
+
+ /**
+ * @connection: Status of the connector.
+ *
+ * See enum drm_connector_status.
+ */
+ __u32 connection;
+ /** @mm_width: Width of the connected sink in millimeters. */
+ __u32 mm_width;
+ /** @mm_height: Height of the connected sink in millimeters. */
+ __u32 mm_height;
+ /**
+ * @subpixel: Subpixel order of the connected sink.
+ *
+ * See enum subpixel_order.
+ */
+ __u32 subpixel;
+
+ /** @pad: Padding, must be zero. */
+ __u32 pad;
+};
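+
+/* Example (an illustrative sketch using raw ioctls; error handling and the
+ * hotplug retry loop described above are omitted):
+ *
+ *   struct drm_mode_get_connector conn = { .connector_id = id };
+ *   struct drm_mode_modeinfo probe_mode;
+ *
+ *   conn.count_modes = 1;
+ *   conn.modes_ptr = (__u64)(uintptr_t)&probe_mode;
+ *   ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);   first pass: get counts
+ *
+ *   struct drm_mode_modeinfo *modes = calloc(conn.count_modes, sizeof(*modes));
+ *   conn.modes_ptr = (__u64)(uintptr_t)modes;
+ *   ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);   second pass: get data
+ *
+ * The encoder and property arrays would be allocated and passed in the same
+ * way before the second pass.
+ */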
+
+#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+
+/* the PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.
+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
+struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ __u64 values_ptr; /* values and blob lengths */
+ __u64 enum_blob_ptr; /* enum and blob id ptrs */
+
+ __u32 prop_id;
+ __u32 flags;
+ char name[DRM_PROP_NAME_LEN];
+
+ __u32 count_values;
+	/* This is only used to count enum values, not blobs. The _blobs in
+	 * the name is purely historical, i.e. kept for backwards compat. */
+ __u32 count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 connector_id;
+};
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_get_blob {
+ __u32 blob_id;
+ __u32 length;
+ __u64 data;
+};
+
+struct drm_mode_fb_cmd {
+ __u32 fb_id;
+ __u32 width;
+ __u32 height;
+ __u32 pitch;
+ __u32 bpp;
+ __u32 depth;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS	(1<<1) /* enables ->modifier[] */
+
+struct drm_mode_fb_cmd2 {
+ __u32 fb_id;
+ __u32 width;
+ __u32 height;
+ __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+ __u32 flags; /* see above flags */
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+ * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offsets[0] and UV as
+ * offsets[1]. Note that offsets[0] will generally
+ * be 0 (but this is not required).
+ *
+	 * To accommodate tiled, compressed, etc. formats, a
+	 * vendor-specific modifier token can be specified. The
+	 * default value of zero indicates the "native" format as
+	 * specified by the fourcc. Note that even though it looks
+	 * like we have a modifier per plane, we in fact do not.
+	 * The modifier for each plane must be identical. Thus all
+	 * combinations of different data layouts for multi-plane
+	 * formats must be enumerated as separate modifiers.
+ */
+ __u32 handles[4];
+ __u32 pitches[4]; /* pitch for each plane */
+ __u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compress */
+};
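+
+/* Example (an illustrative sketch): creating an NV12 framebuffer from two
+ * planes in a single buffer object, with the half-height interleaved UV
+ * plane placed right after the Y plane:
+ *
+ *   struct drm_mode_fb_cmd2 fb = {
+ *           .width = w,
+ *           .height = h,
+ *           .pixel_format = DRM_FORMAT_NV12,   from drm_fourcc.h
+ *           .handles = { bo_handle, bo_handle },
+ *           .pitches = { w, w },
+ *           .offsets = { 0, w * h },
+ *   };
+ *   ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb);
+ */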
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; an annotation is a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half of the num_clips given,
+ * where the clip rects are paired in src and dst. The width and
+ * height of each one of the pairs must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
+ * promises that the region specified of the clip rects is filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
+
+struct drm_mode_mode_cmd {
+ __u32 connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+#define DRM_MODE_CURSOR_FLAGS 0x03
+
+/*
+ * depending on the value in flags different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc_id
+ * width
+ * height
+ * handle - if 0 turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc_id
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+struct drm_mode_cursor2 {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+ __s32 hot_x;
+ __s32 hot_y;
+};
+
+struct drm_mode_crtc_lut {
+ __u32 crtc_id;
+ __u32 gamma_size;
+
+ /* pointers to arrays */
+ __u64 red;
+ __u64 green;
+ __u64 blue;
+};
+
+struct drm_color_ctm {
+ /*
+ * Conversion matrix in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[9];
+};
+
+struct drm_color_lut {
+ /*
+ * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
+ * 0xffff == 1.0.
+ */
+ __u16 red;
+ __u16 green;
+ __u16 blue;
+ __u16 reserved;
+};
+
+/**
+ * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
+ *
+ * HDR Metadata Infoframe as per CTA 861.G spec. This is expected
+ * to match exactly with the spec.
+ *
+ * Userspace is expected to pass the metadata information as per
+ * the format described in this structure.
+ */
+struct hdr_metadata_infoframe {
+ /**
+ * @eotf: Electro-Optical Transfer Function (EOTF)
+ * used in the stream.
+ */
+ __u8 eotf;
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u8 metadata_type;
+ /**
+ * @display_primaries: Color Primaries of the Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+	 * @display_primaries.x: X coordinate of color primary.
+	 * @display_primaries.y: Y coordinate of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } display_primaries[3];
+ /**
+ * @white_point: White Point of Colorspace Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+	 * @white_point.x: X coordinate of whitepoint of color primary.
+	 * @white_point.y: Y coordinate of whitepoint of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } white_point;
+ /**
+ * @max_display_mastering_luminance: Max Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_display_mastering_luminance;
+ /**
+ * @min_display_mastering_luminance: Min Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of
+ * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
+ * represents 6.5535 cd/m2.
+ */
+ __u16 min_display_mastering_luminance;
+ /**
+ * @max_cll: Max Content Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_cll;
+ /**
+ * @max_fall: Max Frame Average Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_fall;
+};
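+
+/* Example (an illustrative sketch): encoding the BT.2020 red primary
+ * (x = 0.708, y = 0.292) into the 0.00002 units used by
+ * display_primaries:
+ *
+ *   x = 0.708 / 0.00002 = 35400
+ *   y = 0.292 / 0.00002 = 14600
+ *
+ * A 1000 cd/m2 mastering peak maps directly to
+ * max_display_mastering_luminance = 1000.
+ */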
+
+/**
+ * struct hdr_output_metadata - HDR output metadata
+ *
+ * Metadata Information to be passed from userspace
+ */
+struct hdr_output_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_metadata_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_metadata_infoframe hdmi_metadata_type1;
+ };
+};
+
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+ DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+ DRM_MODE_PAGE_FLIP_ASYNC | \
+ DRM_MODE_PAGE_FLIP_TARGET)
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
+ * event (see drm.h: struct drm_event_vblank) when the page flip is
+ * done. The user_data field passed in with this ioctl will be
+ * returned as the user_data field in the vblank event struct.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
+ * 'as soon as possible', meaning that it will not delay waiting for vblank.
+ * This may cause tearing on the screen.
+ *
+ * The reserved field must be zero.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
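+
+/* Example (an illustrative sketch): flipping to a new framebuffer and
+ * requesting a completion event, which is later read back from the DRM fd
+ * as a struct drm_event_vblank with type DRM_EVENT_FLIP_COMPLETE and the
+ * same user_data:
+ *
+ *   struct drm_mode_crtc_page_flip flip = {
+ *           .crtc_id = crtc_id,
+ *           .fb_id = new_fb_id,
+ *           .flags = DRM_MODE_PAGE_FLIP_EVENT,
+ *           .user_data = (__u64)(uintptr_t)my_state,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+ */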
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 sequence;
+ __u64 user_data;
+};
+
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+ /* handle, pitch, size will be returned */
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
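+
+/* Example (an illustrative sketch; error handling omitted): allocating a
+ * 32 bpp dumb buffer, mapping it through the fake offset returned by
+ * MAP_DUMB, and destroying it when done:
+ *
+ *   struct drm_mode_create_dumb create = {
+ *           .width = 1920, .height = 1080, .bpp = 32,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
+ *
+ *   struct drm_mode_map_dumb map = { .handle = create.handle };
+ *   ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
+ *   void *fbmem = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
+ *                      MAP_SHARED, fd, map.offset);
+ *
+ *   struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
+ *   ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
+ */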
+
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
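+
+/* A sketch of the expected array layout (as this ioctl is commonly used;
+ * not normative): objs_ptr points to count_objs __u32 object IDs,
+ * count_props_ptr to count_objs __u32 per-object property counts, and
+ * props_ptr / prop_values_ptr to the flattened property ID and value
+ * arrays. E.g. setting two properties on one plane:
+ *
+ *   __u32 objs[] = { plane_id };
+ *   __u32 count_props[] = { 2 };
+ *   __u32 props[] = { fb_id_prop, crtc_id_prop };   property IDs, not values
+ *   __u64 values[] = { fb_id, crtc_id };
+ *
+ *   struct drm_mode_atomic atomic = {
+ *           .flags = DRM_MODE_ATOMIC_NONBLOCK,
+ *           .count_objs = 1,
+ *           .objs_ptr = (__u64)(uintptr_t)objs,
+ *           .count_props_ptr = (__u64)(uintptr_t)count_props,
+ *           .props_ptr = (__u64)(uintptr_t)props,
+ *           .prop_values_ptr = (__u64)(uintptr_t)values,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_MODE_ATOMIC, &atomic);
+ */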
+
+struct drm_format_modifier_blob {
+#define FORMAT_BLOB_CURRENT 1
+ /* Version of this blob format */
+ __u32 version;
+
+ /* Flags */
+ __u32 flags;
+
+ /* Number of fourcc formats supported */
+ __u32 count_formats;
+
+ /* Where in this blob the formats exist (in bytes) */
+ __u32 formats_offset;
+
+ /* Number of drm_format_modifiers */
+ __u32 count_modifiers;
+
+ /* Where in this blob the modifiers exist (in bytes) */
+ __u32 modifiers_offset;
+
+ /* __u32 formats[] */
+ /* struct drm_format_modifier modifiers[] */
+};
+
+struct drm_format_modifier {
+	/* Bitmask of the formats in the get_plane format list that this info
+	 * applies to. The offset selects which window of 64 formats (bits)
+	 * the bitmask covers.
+	 *
+	 * Some examples:
+	 * In today's world with fewer than 65 formats, where formats 0 and 2
+	 * are supported:
+ * 0x0000000000000005
+ * ^-offset = 0, formats = 5
+ *
+	 * If the number of formats grew to 128, and formats 98-102 are
+ * supported with the modifier:
+ *
+ * 0x0000007c00000000 0000000000000000
+ * ^
+ * |__offset = 64, formats = 0x7c00000000
+ *
+ */
+ __u64 formats;
+ __u32 offset;
+ __u32 pad;
+
+	/* The modifier that applies to the get_plane format list bitmask above. */
+ __u64 modifier;
+};
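+
+/* Example (an illustrative sketch): walking an IN_FORMATS blob using the
+ * offsets from the header above:
+ *
+ *   struct drm_format_modifier_blob *blob = blob_data;
+ *   __u32 *formats = (__u32 *)((char *)blob + blob->formats_offset);
+ *   struct drm_format_modifier *mods = (struct drm_format_modifier *)
+ *           ((char *)blob + blob->modifiers_offset);
+ *
+ * Format index i is then supported with modifier m when
+ *
+ *   i >= mods[m].offset && i < mods[m].offset + 64 &&
+ *   (mods[m].formats & (1ULL << (i - mods[m].offset)))
+ */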
+
+/**
+ * struct drm_mode_create_blob - Create New blob property
+ *
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** @data: Pointer to data to copy. */
+ __u64 data;
+ /** @length: Length of data to copy. */
+ __u32 length;
+ /** @blob_id: Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * struct drm_mode_destroy_blob - Destroy user blob
+ * @blob_id: blob_id to destroy
+ *
+ * Destroy a user-created blob property.
+ *
+ * User-space can release blobs as soon as they do not need to refer to them by
+ * their blob object ID. For instance, if you are using a MODE_ID blob in an
+ * atomic commit and you will not make another commit re-using the same ID, you
+ * can destroy the blob as soon as the commit has been issued, without waiting
+ * for it to complete.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
+
+/**
+ * struct drm_mode_create_lease - Create lease
+ *
+ * Lease mode resources, creating another drm_master.
+ */
+struct drm_mode_create_lease {
+ /** @object_ids: Pointer to array of object ids (__u32) */
+ __u64 object_ids;
+ /** @object_count: Number of object ids */
+ __u32 object_count;
+ /** @flags: flags for new FD (O_CLOEXEC, etc) */
+ __u32 flags;
+
+ /** @lessee_id: Return: unique identifier for lessee. */
+ __u32 lessee_id;
+ /** @fd: Return: file descriptor to new drm_master file */
+ __u32 fd;
+};
+
+/**
+ * struct drm_mode_list_lessees - List lessees
+ *
+ * List lessees from a drm_master.
+ */
+struct drm_mode_list_lessees {
+ /**
+ * @count_lessees: Number of lessees.
+ *
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_lessees;
+ /** @pad: Padding. */
+ __u32 pad;
+
+ /**
+ * @lessees_ptr: Pointer to lessees.
+ *
+ * Pointer to __u64 array of lessee ids
+ */
+ __u64 lessees_ptr;
+};
+
+/**
+ * struct drm_mode_get_lease - Get Lease
+ *
+ * Get leased objects.
+ */
+struct drm_mode_get_lease {
+ /**
+ * @count_objects: Number of leased objects.
+ *
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_objects;
+ /** @pad: Padding. */
+ __u32 pad;
+
+ /**
+ * @objects_ptr: Pointer to objects.
+ *
+ * Pointer to __u32 array of object ids.
+ */
+ __u64 objects_ptr;
+};
+
+/**
+ * struct drm_mode_revoke_lease - Revoke lease
+ */
+struct drm_mode_revoke_lease {
+ /** @lessee_id: Unique ID of lessee */
+ __u32 lessee_id;
+};
+
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * The DRM subsystem uses struct drm_rect to manage rectangular areas
+ * internally; this structure exports it to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/drm_sarea.h b/include/libdrm/libdrm/drm_sarea.h
new file mode 100644
index 0000000..93025be
--- /dev/null
+++ b/include/libdrm/libdrm/drm_sarea.h
@@ -0,0 +1,92 @@
+/**
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* SAREA area needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX 0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX 0x4000U
+#elif defined(__ia64__)
+#define SAREA_MAX 0x10000U /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX 0x2000U
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES 256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
+
+/** SAREA drawable */
+struct drm_sarea_drawable {
+ unsigned int stamp;
+ unsigned int flags;
+};
+
+/** SAREA frame */
+struct drm_sarea_frame {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fullscreen;
+};
+
+/** SAREA */
+struct drm_sarea {
+ /** first thing is always the DRM locking structure */
+ struct drm_hw_lock lock;
+ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
+ drm_context_t dummy_context;
+};
+
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DRM_SAREA_H_ */
diff --git a/include/libdrm/libdrm/i915_drm.h b/include/libdrm/libdrm/i915_drm.h
new file mode 100644
index 0000000..1de0433
--- /dev/null
+++ b/include/libdrm/libdrm/i915_drm.h
@@ -0,0 +1,3724 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ * event from the gpu l3 cache. Additional information supplied is ROW,
+ * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ * track of these events and if a specific cache-line seems to have a
+ * persistent error remap it with the l3 remapping tool supplied in
+ * intel-gpu-tools. The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ * hangcheck. The error detection event is a good indicator of when things
+ * began to go badly. The value supplied with the event is a 1 upon error
+ * detection, and a 0 upon reset completion, signifying no more error
+ * exists. NOTE: Disabling hangcheck or reset via module parameter will
+ * cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ * GPU. The value supplied with the event is always 1. NOTE: Disabling
+ * reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT "ERROR"
+#define I915_RESET_UEVENT "RESET"
+
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
+ */
+struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
+ __u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
+ __u32 name;
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
+};
+
+/*
+ * MOCS indexes used for GPU surfaces, defining the cacheability of the
+ * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
+ */
+enum i915_mocs_table_index {
+ /*
+ * Not cached anywhere, coherency between CPU and GPU accesses is
+ * guaranteed.
+ */
+ I915_MOCS_UNCACHED,
+ /*
+ * Cacheability and coherency controlled by the kernel automatically
+ * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
+ * usage of the surface (used for display scanout or not).
+ */
+ I915_MOCS_PTE,
+ /*
+ * Cached in all GPU caches available on the platform.
+ * Coherency between CPU and GPU accesses to the surface is not
+ * guaranteed without extra synchronization.
+ */
+ I915_MOCS_CACHED,
+};
+
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
+ */
+enum drm_i915_gem_engine_class {
+ /**
+ * @I915_ENGINE_CLASS_RENDER:
+ *
+ * Render engines support instructions used for 3D, Compute (GPGPU),
+ * and programmable media workloads. These instructions fetch data and
+ * dispatch individual work items to threads that operate in parallel.
+ * The threads run small programs (called "kernels" or "shaders") on
+ * the GPU's execution units (EUs).
+ */
+ I915_ENGINE_CLASS_RENDER = 0,
+
+ /**
+ * @I915_ENGINE_CLASS_COPY:
+ *
+ * Copy engines (also referred to as "blitters") support instructions
+ * that move blocks of data from one location in memory to another,
+ * or that fill a specified location of memory with fixed data.
+ * Copy engines can perform pre-defined logical or bitwise operations
+ * on the source, destination, or pattern data.
+ */
+ I915_ENGINE_CLASS_COPY = 1,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO:
+ *
+ * Video engines (also referred to as "bit stream decode" (BSD) or
+ * "vdbox") support instructions that perform fixed-function media
+ * decode and encode.
+ */
+ I915_ENGINE_CLASS_VIDEO = 2,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ *
+ * Video enhancement engines (also referred to as "vebox") support
+ * instructions related to image enhancement.
+ */
+ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+ /**
+ * @I915_ENGINE_CLASS_COMPUTE:
+ *
+ * Compute engines support a subset of the instructions available
+ * on render engines: compute engines support Compute (GPGPU) and
+ * programmable media workloads, but do not support the 3D pipeline.
+ */
+ I915_ENGINE_CLASS_COMPUTE = 4,
+
+ /* Values in this enum should be kept compact. */
+
+ /**
+ * @I915_ENGINE_CLASS_INVALID:
+ *
+ * Placeholder value to represent an invalid engine class assignment.
+ */
+ I915_ENGINE_CLASS_INVALID = -1
+};
+
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
+ * There may be more than one engine fulfilling any role within the system.
+ * Each engine of a class is given a unique instance number and therefore
+ * any engine can be specified by its class:instance tuple. APIs that allow
+ * access to any engine in the system will use struct i915_engine_class_instance
+ * for this identification.
+ */
+struct i915_engine_class_instance {
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)
+
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
+
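+/*
+ * A sketch of how these macros pack into a perf event config, assuming the
+ * i915 PMU type id `i915_pmu_type` (hypothetical variable) has been read
+ * from sysfs; the event would then be opened with perf_event_open(2):
+ *
+ * .. code-block:: C
+ *
+ *   struct perf_event_attr attr = {
+ *       .type = i915_pmu_type,
+ *       // class COPY (1) << 12 | instance 0 << 4 | I915_SAMPLE_BUSY (0)
+ *       .config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_COPY, 0), // 0x1000
+ *   };
+ */
+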
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
+
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
+ */
+ __u32 front_bo_handle;
+ __u32 back_bo_handle;
+ __u32 unused_bo_handle;
+ __u32 depth_bo_handle;
+
+} drm_i915_sarea_t;
+
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
+ * i.e. [0x40, 0xa0) with 0xa0 excluded. The numbers below are defined as
+ * offsets against DRM_COMMAND_BASE and should be within [0x0, 0x60).
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
+#define DRM_I915_GET_RESET_STATS 0x32
+#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
+#define DRM_I915_PERF_OPEN 0x36
+#define DRM_I915_PERF_ADD_CONFIG 0x37
+#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
+#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
+#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+ int *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+#define I915_PARAM_HAS_WT 27
+#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_GPU_RESET 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
+#define I915_PARAM_HAS_EXEC_SOFTPIN 37
+#define I915_PARAM_HAS_POOLED_EU 38
+#define I915_PARAM_MIN_EU_IN_POOL 39
+#define I915_PARAM_MMAP_GTT_VERSION 40
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+ * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
+#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
+#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
+#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)
+
+#define I915_PARAM_HUC_STATUS 42
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
+ * synchronisation with implicit fencing on individual objects.
+ * See EXEC_OBJECT_ASYNC.
+ */
+#define I915_PARAM_HAS_EXEC_ASYNC 43
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
+ * both being able to pass in a sync_file fd to wait upon before executing,
+ * and being able to return a new sync_file fd that is signaled when the
+ * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE 44
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
+ * user specified buffers for post-mortem debugging of GPU hangs. See
+ * EXEC_OBJECT_CAPTURE.
+ */
+#define I915_PARAM_HAS_EXEC_CAPTURE 45
+
+#define I915_PARAM_SLICE_MASK 46
+
+/* Assuming it's uniform for each slice, this queries the mask of subslices
+ * per-slice for this system.
+ */
+#define I915_PARAM_SUBSLICE_MASK 47
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
+ * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
+ */
+#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports the support of context isolation for individual engines by
+ * returning a bitmask of each engine class set to true if that class supports
+ * isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform but from CNL onwards, this
+ * might vary depending on the parts.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION 54
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
+ * I915_EXEC_USE_EXTENSIONS.
+ */
+#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
+/* Must be kept compact -- no holes and well documented */
+
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+ /** @param: Driver parameter to query. */
+ __s32 param;
+
+ /**
+ * @value: Address of memory where queried value should be put.
+ *
+ * WARNING: Using pointers instead of fixed-size u64 means we need to write
+ * compat32 code. Don't repeat this mistake.
+ */
+ int *value;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
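+
+/*
+ * A minimal query sketch, assuming an open i915 DRM fd `fd`; here it probes
+ * the scheduler capability bitmask defined above:
+ *
+ * .. code-block:: C
+ *
+ *   int caps = 0;
+ *   drm_i915_getparam_t gp = {
+ *       .param = I915_PARAM_HAS_SCHEDULER,
+ *       .value = &caps,
+ *   };
+ *
+ *   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 &&
+ *       (caps & I915_SCHEDULER_CAP_PRIORITY)) {
+ *       // priority-based scheduling is available
+ *   }
+ */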
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
+/* Must be kept compact -- no holes */
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+typedef struct drm_i915_hws_addr {
+ __u64 addr;
+} drm_i915_hws_addr_t;
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+};
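+
+/*
+ * A minimal allocation sketch, assuming an open i915 DRM fd `fd`:
+ *
+ * .. code-block:: C
+ *
+ *   struct drm_i915_gem_create create = { .size = 4096 };
+ *
+ *   drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ *   // create.handle now names a page-aligned 4 KiB object;
+ *   // create.size holds the actual allocated size
+ */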
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to read from */
+ __u64 offset;
+ /** Length of data to read */
+ __u64 size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to write to */
+ __u64 offset;
+ /** Length of data to write */
+ __u64 size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
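+
+/*
+ * A sketch of uploading CPU data into an object via pwrite, assuming an
+ * open i915 DRM fd `fd` and a GEM handle `handle`:
+ *
+ * .. code-block:: C
+ *
+ *   const char data[] = "batch payload";
+ *   struct drm_i915_gem_pwrite pw = {
+ *       .handle = handle,
+ *       .offset = 0,
+ *       .size = sizeof(data),
+ *       .data_ptr = (__u64)(uintptr_t)data,
+ *   };
+ *
+ *   drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
+ */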
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset in the object to map. */
+ __u64 offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ __u64 size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
+ * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
+struct drm_i915_gem_mmap_offset {
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+ /** @pad: Must be zero */
+ __u32 pad;
+ /**
+ * @offset: The fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * @flags: Flags for extended behaviour.
+ *
+ * It is mandatory that one of the `MMAP_OFFSET` types
+ * be included:
+ *
+ * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+ * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+ * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+ * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
+ *
+ * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+ * type. On devices without local memory, this caching mode is invalid.
+ *
+ * When specifying `I915_MMAP_OFFSET_FIXED`, the caching mode used will be
+ * WC or WB, depending on the object's placement at creation. WB will be
+ * used when the object can only exist in system memory, WC otherwise.
+ */
+ __u64 flags;
+
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+#define I915_MMAP_OFFSET_FIXED 4
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
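+
+/*
+ * A mapping sketch, assuming an open i915 DRM fd `fd`, a GEM handle
+ * `handle` and its size `size`: first fetch the fake offset, then mmap(2)
+ * the DRM fd at that offset (mmap from <sys/mman.h>):
+ *
+ * .. code-block:: C
+ *
+ *   struct drm_i915_gem_mmap_offset mo = {
+ *       .handle = handle,
+ *       .flags = I915_MMAP_OFFSET_WB, // system-memory object assumed
+ *   };
+ *
+ *   drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mo);
+ *   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                    fd, mo.offset);
+ */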
+
+/**
+ * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain (if required), before then updating the object's domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ * - I915_GEM_DOMAIN_CPU: CPU cache domain
+ * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
+struct drm_i915_gem_set_domain {
+ /** @handle: Handle for the object. */
+ __u32 handle;
+
+ /** @read_domains: New read domains. */
+ __u32 read_domains;
+
+ /**
+ * @write_domain: New write domain.
+ *
+ * Note that having something in the write domain implies it's in the
+ * read domain, and only that read domain.
+ */
+ __u32 write_domain;
+};
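+
+/*
+ * A sketch of moving an object into the CPU domain before direct CPU
+ * access, assuming an open i915 DRM fd `fd` and a GEM handle `handle`
+ * (pre-DG1 platforms, per the rules above):
+ *
+ * .. code-block:: C
+ *
+ *   struct drm_i915_gem_set_domain sd = {
+ *       .handle = handle,
+ *       .read_domains = I915_GEM_DOMAIN_CPU,
+ *       .write_domain = I915_GEM_DOMAIN_CPU,
+ *   };
+ *
+ *   drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
+ */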
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ __u32 handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ __u32 target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ __u32 delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ __u64 offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ __u64 presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ __u32 read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ __u32 write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** WC domain - uncached access */
+#define I915_GEM_DOMAIN_WC 0x00000080
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+};
+
+/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+};
+
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * When the EXEC_OBJECT_PINNED flag is specified this is populated by
+ * the user with the GTT offset at which this object will be pinned.
+ *
+ * When the I915_EXEC_NO_RELOC flag is specified this must contain the
+ * presumed_offset of the object.
+ *
+ * During execbuffer2 the kernel populates it with the value of the
+ * current GTT offset of the object, for future presumed_offset writes.
+ *
+ * See struct drm_i915_gem_create_ext for the rules when dealing with
+ * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+ * minimum page sizes, like DG2.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define EXEC_OBJECT_PINNED (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
+/* The kernel implicitly tracks GPU activity on all GEM objects, and
+ * synchronises operations with outstanding rendering. This includes
+ * rendering on other devices if exported via dma-buf. However, sometimes
+ * this tracking is too coarse and the user knows better. For example,
+ * if the object is split into non-overlapping ranges shared between different
+ * clients or engines (i.e. suballocating objects), the implicit tracking
+ * by kernel assumes that each operation affects the whole object rather
+ * than an individual range, causing needless synchronisation between clients.
+ * The kernel will also forgo any CPU cache flushes prior to rendering from
+ * the object as the client is expected to be also handling such domain
+ * tracking.
+ *
+ * The kernel maintains the implicit tracking in order to manage resources
+ * used by the GPU - this flag only disables the synchronisation prior to
+ * rendering with this object in this execbuf.
+ *
+ * Opting out of implicit synchronisation requires the user to do its own
+ * explicit tracking to avoid rendering corruption. See, for example,
+ * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
+ */
+#define EXEC_OBJECT_ASYNC (1<<6)
+/* Request that the contents of this execobject be copied into the error
+ * state upon a GPU hang involving this batch for post-mortem debugging.
+ * These buffers are recorded in no particular order as "user" in
+ * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
+ * if the kernel supports this flag.
+ */
+#define EXEC_OBJECT_CAPTURE (1<<7)
+/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
+ __u64 flags;
+
+ union {
+ __u64 rsvd1;
+ __u64 pad_to_size;
+ };
+ __u64 rsvd2;
+};
+
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
+struct drm_i915_gem_exec_fence {
+ /** @handle: User's handle for a drm_syncobj to wait on or signal. */
+ __u32 handle;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_EXEC_FENCE_WAIT:
+ * Wait for the input fence before request submission.
+ *
+ * I915_EXEC_FENCE_SIGNAL:
+ * Return request completion fence as output
+ */
+ __u32 flags;
+#define I915_EXEC_FENCE_WAIT (1<<0)
+#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+};
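+
+/*
+ * A sketch of a fence array for execbuf, assuming two drm_syncobj handles
+ * `wait_handle` and `signal_handle` (hypothetical): the first is waited
+ * upon before submission, the second is signaled on completion.
+ *
+ * .. code-block:: C
+ *
+ *   struct drm_i915_gem_exec_fence fences[2] = {
+ *       { .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT   },
+ *       { .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
+ *   };
+ *   // pass via drm_i915_gem_execbuffer2: cliprects_ptr = fences,
+ *   // num_cliprects = 2, flags |= I915_EXEC_FENCE_ARRAY
+ */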
+
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
+ * This structure describes an array of drm_syncobj and associated points for
+ * timeline variants of drm_syncobj. It is invalid to append this structure to
+ * the execbuf if I915_EXEC_FENCE_ARRAY is set.
+ */
+struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /**
+ * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+ * arrays.
+ */
+ __u64 fence_count;
+
+ /**
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+ * of length @fence_count.
+ */
+ __u64 handles_ptr;
+
+ /**
+ * @values_ptr: Pointer to an array of u64 values of length
+ * @fence_count.
+ * Values must be 0 for a binary drm_syncobj. A value of 0 for a
+ * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+ * binary one.
+ */
+ __u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
+struct drm_i915_gem_execbuffer2 {
+ /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
+ __u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in @buffers_ptr array */
+ __u32 buffer_count;
+
+ /**
+ * @batch_start_offset: Offset in the batchbuffer to start execution
+ * from.
+ */
+ __u32 batch_start_offset;
+
+ /**
+ * @batch_len: Length in bytes of the batch buffer, starting from the
+ * @batch_start_offset. If 0, length is assumed to be the batch buffer
+ * object size.
+ */
+ __u32 batch_len;
+
+ /** @DR1: deprecated */
+ __u32 DR1;
+
+ /** @DR4: deprecated */
+ __u32 DR4;
+
+ /** @num_cliprects: See @cliprects_ptr */
+ __u32 num_cliprects;
+
+ /**
+ * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+ *
+ * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+ * I915_EXEC_USE_EXTENSIONS flags are not set.
+ *
+ * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
+ * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+ * array.
+ *
+ * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
+ * single &i915_user_extension and num_cliprects is 0.
+ */
+ __u64 cliprects_ptr;
+
+ /** @flags: Execbuf flags */
+ __u64 flags;
+#define I915_EXEC_RING_MASK (0x3f)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+#define I915_EXEC_VEBOX (4<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_SHIFT (13)
+#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
+
+/** Tell the kernel that the batchbuffer is processed by
+ * the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_IN (1<<16)
+
+/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
+ * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
+ * to the caller, and should be closed with close() after use. (The fd is a
+ * file descriptor and will be cleaned up on process termination. It holds
+ * a reference to the request, but nothing else.)
+ *
+ * The sync_file fd can be combined with other sync_file fds and passed either
+ * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
+ * will only occur after this request completes), or to other devices.
+ *
+ * Using I915_EXEC_FENCE_OUT requires use of
+ * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
+ * back to userspace. Failure to do so will cause the out-fence to always
+ * be reported as zero, and the real fence fd to be leaked.
+ */
+#define I915_EXEC_FENCE_OUT (1<<17)
+
+/*
+ * Traditionally the execbuf ioctl has only considered the final element in
+ * the execobject[] to be the executable batch. Often though, the client
+ * will know the batch object prior to construction and being able to place
+ * it into the execobject[] array first can simplify the relocation tracking.
+ * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
+ * execobject[] as the batch instead (the default is to use the last
+ * element).
+ */
+#define I915_EXEC_BATCH_FIRST (1<<18)
+
+/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
+ * define an array of i915_gem_exec_fence structures which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY (1<<19)
+
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+/*
+ * Setting I915_EXEC_USE_EXTENSIONS implies that
+ * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
+ * list of i915_user_extension. Each i915_user_extension node is the base of a
+ * larger structure. The list of supported structures are listed in the
+ * drm_i915_gem_execbuffer_ext enum.
+ */
+#define I915_EXEC_USE_EXTENSIONS (1 << 21)
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
+
+ /** @rsvd1: Context id */
+ __u64 rsvd1;
+
+ /**
+ * @rsvd2: in and out sync_file file descriptors.
+ *
+ * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+ * lower 32 bits of this field will have the in sync_file fd (input).
+ *
+ * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+ * field will have the out sync_file fd (output).
+ */
+ __u64 rsvd2;
+};
+
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
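+
+/*
+ * A minimal submission sketch, assuming an open i915 DRM fd `fd`, a batch
+ * buffer handle `batch_handle` already filled with `batch_len` bytes of
+ * commands, and a context id `ctx_id`:
+ *
+ * .. code-block:: C
+ *
+ *   struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
+ *   struct drm_i915_gem_execbuffer2 eb = {
+ *       .buffers_ptr = (__u64)(uintptr_t)&obj,
+ *       .buffer_count = 1, // last (and only) element is the batch
+ *       .batch_len = batch_len,
+ *       .flags = I915_EXEC_RENDER,
+ *   };
+ *
+ *   i915_execbuffer2_set_context_id(eb, ctx_id);
+ *   drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
+ */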
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ __u32 handle;
+ __u32 pad;
+
+ /** alignment required within the aperture */
+ __u64 alignment;
+
+ /** Returned GTT offset of the buffer. */
+ __u64 offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ __u32 handle;
+
+ /** Return busy status
+ *
+ * A return of 0 implies that the object is idle (after
+ * having flushed any pending activity), and a non-zero return that
+ * the object is still in-flight on the GPU. (The GPU has not yet
+ * signaled completion for all pending requests that reference the
+ * object.) An object is guaranteed to become idle eventually (so
+ * long as no new GPU commands are executed upon it). Due to the
+ * asynchronous nature of the hardware, an object reported
+ * as busy may become idle before the ioctl is completed.
+ *
+ * Furthermore, if the object is busy, which engine is busy is only
+ * provided as a guide and only indirectly by reporting its class
+ * (there may be more than one engine in each class). There are race
+ * conditions which prevent the report of which engines are busy from
+ * being always accurate. However, the converse is not true. If the
+ * object is idle, the result of the ioctl, that all engines are idle,
+ * is accurate.
+ *
+ * The returned dword is split into two fields to indicate both
+ * the engine classes on which the object is being read, and the
+ * engine class on which it is currently being written (if any).
+ *
+ * The low word (bits 0:15) indicates if the object is being written
+ * to by any engine (there can only be one, as the GEM implicit
+ * synchronisation rules force writes to be serialised). Only the
+ * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+ * 1 not 0 etc) for the last write is reported.
+ *
+ * The high word (bits 16:31) is a bitmask of which engine classes
+ * are currently reading from the object. Multiple engines may be
+ * reading from the object simultaneously.
+ *
+ * The value of each engine class is the same as specified in the
+ * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
+ * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
+ * Some hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are mapped to the same class identifier and so
+ * are not separately reported for busyness.
+ *
+ * Caveat emptor:
+ * Only the boolean result of this query is reliable; that is whether
+ * the object is idle or busy. The report of which engines are busy
+ * should be only used as a heuristic.
+ */
+ __u32 busy;
+};
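+
+/*
+ * A hedged decoding sketch (illustrative only) splitting the @busy dword into
+ * the write class and read class bitmask described above. `fd` and `handle`
+ * are assumed to exist; error handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_busy busy = { .handle = handle };
+ *
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ *
+ *     __u16 write_class = busy.busy & 0xffff; // engine class + 1, 0 if none
+ *     __u16 read_mask = busy.busy >> 16;      // bit N set: class N is reading
+ *
+ *     if (!busy.busy)
+ *             printf("idle\n");
+ *     else if (write_class)
+ *             printf("last written by class %u\n", write_class - 1);
+ */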
+
+/**
+ * struct drm_i915_gem_caching - Set or get the caching for given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms, starting from DG1, the
+ * set/get caching is no longer supported, and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) will become an immutable creation time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this, instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * need to then CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
+ */
+struct drm_i915_gem_caching {
+ /**
+ * @handle: Handle of the buffer to set/get the caching level.
+ */
+ __u32 handle;
+
+ /**
+ * @caching: The GTT caching level to apply or possible return value.
+ *
+ * The supported @caching values:
+ *
+ * I915_CACHING_NONE:
+ *
+ * GPU access is not coherent with CPU caches. Default for machines
+ * without an LLC. This means manual flushing might be needed, if we
+ * want GPU access to be coherent.
+ *
+ * I915_CACHING_CACHED:
+ *
+ * GPU access is coherent with CPU caches and furthermore the data is
+ * cached in last-level caches shared between CPU cores and the GPU GT.
+ *
+ * I915_CACHING_DISPLAY:
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no
+ * special cache mode (like write-through or gfdt flushing) is
+ * available. The kernel automatically sets this mode when using a
+ * buffer as a scanout target. Userspace can manually set this mode to
+ * avoid a costly stall and clflush in the hotpath of drawing the first
+ * frame.
+ */
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+#define I915_CACHING_DISPLAY 2
+ __u32 caching;
+};
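+
+/*
+ * Illustrative only: a minimal sketch of requesting cached (LLC) mappings for
+ * an object on pre-DG1 hardware, assuming an open DRM fd `fd` and an existing
+ * GEM `handle`; on discrete platforms this ioctl is expected to be rejected:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_caching arg = {
+ *             .handle = handle,
+ *             .caching = I915_CACHING_CACHED,
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
+ *             ... // e.g. rejected on DG1+ where caching is immutable
+ */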
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
+#define I915_TILING_LAST I915_TILING_Y
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ __u32 handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ __u32 stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ __u32 handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ __u64 aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ __u64 aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+ /** ID of CRTC being requested */
+ __u32 crtc_id;
+
+ /** pipe of requested CRTC */
+ __u32 pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+ * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
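+
+/*
+ * A minimal usage sketch (illustrative, not normative): marking a buffer as
+ * purgeable, then reclaiming it and checking whether the backing store
+ * survived. `fd` and `handle` are assumed; error handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_madvise madv = {
+ *             .handle = handle,
+ *             .madv = I915_MADV_DONTNEED, // may be discarded under pressure
+ *     };
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+ *
+ *     // ... later, before reusing the buffer ...
+ *     madv.madv = I915_MADV_WILLNEED;
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+ *     if (!madv.retained)
+ *             ... // contents were discarded; reallocate or reupload
+ */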
+
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+ __u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate for the scaling factors of partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
+
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait; returns time remaining. */
+ __s64 timeout_ns;
+};
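+
+/*
+ * An illustrative sketch of a bounded wait, assuming `fd` and a GEM `handle`;
+ * on timeout the ioctl is expected to fail with ETIME, and on success
+ * timeout_ns is updated with the time remaining:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_wait wait = {
+ *             .bo_handle = handle,
+ *             .timeout_ns = 1000000000, // wait up to ~1s
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
+ *             ... // errno == ETIME suggests the object is still busy
+ */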
+
+struct drm_i915_gem_context_create {
+ __u32 ctx_id; /* output: id of new context*/
+ __u32 pad;
+};
+
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
+struct drm_i915_gem_context_create_ext {
+ /** @ctx_id: Id of the created context (output) */
+ __u32 ctx_id;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+ *
+ * Extensions may be appended to this structure and driver must check
+ * for those. See @extensions.
+ *
+ * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
+ *
+ * Created context will have single timeline.
+ */
+ __u32 flags;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * I915_CONTEXT_CREATE_EXT_SETPARAM:
+ * Context parameter to set or query during context creation.
+ * See struct drm_i915_gem_context_create_ext_setparam.
+ *
+ * I915_CONTEXT_CREATE_EXT_CLONE:
+ * This extension has been removed. On the off chance someone somewhere
+ * has attempted to use it, never re-use this extension number.
+ */
+ __u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+};
+
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
+struct drm_i915_gem_context_param {
+ /** @ctx_id: Context id */
+ __u32 ctx_id;
+
+ /** @size: Size of the parameter @value */
+ __u32 size;
+
+ /** @param: Parameter to set or query */
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
+#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
+ /*
+ * When using the following param, value should be a pointer to
+ * drm_i915_gem_context_param_sseu.
+ */
+#define I915_CONTEXT_PARAM_SSEU 0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a zero
+ * sized argument, will revert back to default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is marked as not persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of it once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
+/* Must be kept compact -- no holes and well documented */
+
+ /** @value: Context parameter value to be set or queried */
+ __u64 value;
+};
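+
+/*
+ * A minimal sketch (illustrative, not from the original header) of raising a
+ * context's priority with DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM; `fd` and
+ * `ctx_id` are assumed, and the value must lie within the user priority range
+ * defined above:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_context_param p = {
+ *             .ctx_id = ctx_id,
+ *             .param = I915_CONTEXT_PARAM_PRIORITY,
+ *             .value = 512, // within I915_CONTEXT_MIN/MAX_USER_PRIORITY
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
+ *             ... // e.g. boosting may require additional privilege
+ */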
+
+/*
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reasons to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by programming the SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality, in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, the flexibility of possible SSEU configuration permutations varies
+ * between GPU generations and is subject to software-imposed limitations.
+ * Requesting an unsupported combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+ /*
+ * Engine class & instance to be configured or queried.
+ */
+ struct i915_engine_class_instance engine;
+
+ /*
+ * Unknown flags must be cleared to zero.
+ */
+ __u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
+
+ /*
+ * Mask of slices to enable for the context. Valid values are a subset
+ * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+ */
+ __u64 slice_mask;
+
+ /*
+ * Mask of subslices to enable for the context. Valid values are a
+ * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+ */
+ __u64 subslice_mask;
+
+ /*
+ * Minimum/Maximum number of EUs to enable per subslice for the
+ * context. min_eus_per_subslice must be less than or equal to
+ * max_eus_per_subslice.
+ */
+ __u16 min_eus_per_subslice;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 rsvd;
+};
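+
+/*
+ * An illustrative sketch of querying the current SSEU configuration of the
+ * render engine for a context; `fd` and `ctx_id` are assumed and error
+ * handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_context_param_sseu sseu = {
+ *             .engine = { I915_ENGINE_CLASS_RENDER, 0 },
+ *     };
+ *     struct drm_i915_gem_context_param p = {
+ *             .ctx_id = ctx_id,
+ *             .param = I915_CONTEXT_PARAM_SSEU,
+ *             .size = sizeof(sseu),
+ *             .value = (uintptr_t)&sseu,
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
+ *             ... // sseu.slice_mask etc. now describe the configuration
+ */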
+
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of the same
+ * engine class, like for example GT3+ Skylake parts with their two VCS engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ * .engine_index = 0, // Place this virtual engine into engine map slot 0
+ * .num_siblings = 2,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ * };
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ * .engines = { { I915_ENGINE_CLASS_INVALID,
+ * I915_ENGINE_CLASS_INVALID_NONE } },
+ * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // Now we have created a GEM context with its engine map containing a
+ * // single virtual engine. Submissions to this slot can go either to
+ * // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ * // the driver. The load balancing is dynamic from one batch buffer to
+ * // another and transparent to userspace.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
+ * used will proxy the execbuffer request onto one of the set of engines
+ * in such a way as to distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructed bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs, for any given master engine we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * To execute a request in parallel on the master engine and a sibling requires
+ * coordination with an I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Setup a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs, only N BBs can be
+ * submitted in each execbuf IOCTL, and this is implicit behavior; the user
+ * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also we only allow contexts of same engine class and these
+ * contexts must be in logically contiguous order. Examples of the placement
+ * behavior are described below. Lastly, the default is to not allow BBs to be
+ * preempted mid-batch; rather, coordinated preemption points are inserted on
+ * all hardware contexts between each set of BBs. Flags could be added in the
+ * future to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Examples syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
+ * versa. Same for CS[2] requires also using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero, currently not defined flags
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[];
+
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch-
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it created GEM contexts need to be configured with a list of engines
+ * the user is intending to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ * { I915_ENGINE_CLASS_COPY, 0 } }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // We have now created a GEM context with two engines in the map:
+ * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ * // will not be accessible from this context.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
+struct drm_i915_gem_context_create_ext_setparam {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /**
+ * @param: Context parameter to set or query.
+ * See struct drm_i915_gem_context_param.
+ */
+ struct drm_i915_gem_context_param param;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is setup upon creation.
+ *
+ * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @id.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by the @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @vm_id.
+ *
+ * No extensions or flags are allowed currently, and so must be zero.
+ */
+struct drm_i915_gem_vm_control {
+ /** @extensions: Zero-terminated chain of extensions. */
+ __u64 extensions;
+
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+
+ /** @vm_id: Id of the VM created or to be destroyed */
+ __u32 vm_id;
+};
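+
+/*
+ * An illustrative sketch (not normative) of creating a ppGTT and binding it
+ * to an existing context via I915_CONTEXT_PARAM_VM; `fd` and `ctx_id` are
+ * assumed and error handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_gem_vm_control vm = { 0 }; // no extensions, no flags
+ *
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm); // vm.vm_id now valid
+ *
+ *     struct drm_i915_gem_context_param p = {
+ *             .ctx_id = ctx_id,
+ *             .param = I915_CONTEXT_PARAM_VM,
+ *             .value = vm.vm_id,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
+ *
+ *     // ... once no context uses it any more ...
+ *     ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
+ */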
+
+struct drm_i915_reg_read {
+ /*
+ * Register offset.
+ * For 64bit wide registers where the upper 32bits don't immediately
+ * follow the lower 32bits, the offset of the lower 32bits must
+ * be specified
+ */
+ __u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
+ __u64 val; /* Return value */
+};
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ * single-instruction 8-byte read; in order to work around that, pass the
+ * flag I915_REG_READ_8B_WA in the offset field.
+ *
+ */
+
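+/*
+ * An illustrative sketch of reading the gen7+ render engine timestamp with
+ * the 8-byte-read workaround mentioned above; `fd` is assumed and error
+ * handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_reg_read reg = {
+ *             .offset = 0x2358 | I915_REG_READ_8B_WA,
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
+ *             ... // reg.val holds the timestamp
+ */
+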
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* All resets since boot/module reload, for all contexts */
+ __u32 reset_count;
+
+ /* Number of batches lost when active in GPU, for this context */
+ __u32 batch_active;
+
+ /* Number of batches lost pending for execution, for this context */
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
+struct drm_i915_gem_userptr {
+ /**
+ * @user_ptr: The pointer to the allocated memory.
+ *
+ * Needs to be aligned to PAGE_SIZE.
+ */
+ __u64 user_ptr;
+
+ /**
+ * @user_size:
+ *
+ * The size in bytes for the allocated memory. This will also become the
+ * object size.
+ *
+ * Needs to be aligned to PAGE_SIZE, and must be at least PAGE_SIZE
+ * in size.
+ */
+ __u64 user_size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * I915_USERPTR_READ_ONLY:
+ *
+ * Mark the object as readonly, this also means GPU access can only be
+ * readonly. This is only supported on HW which supports readonly access
+ * through the GTT. If the HW can't support readonly access, an error is
+ * returned.
+ *
+ * I915_USERPTR_PROBE:
+ *
+ * Probe the provided @user_ptr range and validate that the @user_ptr is
+ * indeed pointing to normal memory and that the range is also valid.
+ * For example, if some garbage address is given to the kernel, then the
+ * probe should fail.
+ *
+ * Returns -EFAULT if the probe failed.
+ *
+ * Note that this doesn't populate the backing pages, and also doesn't
+ * guarantee that the object will remain valid when the object is
+ * eventually used.
+ *
+ * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+ * returns a non-zero value.
+ *
+ * I915_USERPTR_UNSYNCHRONIZED:
+ *
+ * NOT USED. Setting this flag will result in an error.
+ */
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
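+
+/*
+ * A minimal creation sketch (illustrative): wrapping page-aligned anonymous
+ * memory in a GEM object, probing the range up front; `fd` is assumed, a 4KiB
+ * page size is assumed, and error handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     void *ptr;
+ *     size_t sz = 2 * 4096; // PAGE_SIZE aligned and sized
+ *
+ *     posix_memalign(&ptr, 4096, sz);
+ *
+ *     struct drm_i915_gem_userptr arg = {
+ *             .user_ptr = (uintptr_t)ptr,
+ *             .user_size = sz,
+ *             .flags = I915_USERPTR_PROBE, // -EFAULT on a bogus range
+ *     };
+ *
+ *     if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
+ *             ... // arg.handle is a regular (nonzero) GEM handle
+ */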
+
+enum drm_i915_oa_format {
+ I915_OA_FORMAT_A13 = 1, /* HSW only */
+ I915_OA_FORMAT_A29, /* HSW only */
+ I915_OA_FORMAT_A13_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8, /* HSW only */
+ I915_OA_FORMAT_A45_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8_A16, /* HSW only */
+ I915_OA_FORMAT_C4_B8, /* HSW+ */
+
+ /* Gen8+ */
+ I915_OA_FORMAT_A12,
+ I915_OA_FORMAT_A12_B8_C8,
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+
+ I915_OA_FORMAT_MAX /* non-ABI */
+};
+
+enum drm_i915_perf_property_id {
+ /**
+ * Open the stream for a specific context handle (as used with
+ * execbuffer2). A stream opened for a specific context this way
+ * won't typically require root privileges.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_CTX_HANDLE = 1,
+
+ /**
+ * A value of 1 requests the inclusion of raw OA unit reports as
+ * part of stream samples.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_SAMPLE_OA,
+
+ /**
+ * The value specifies which set of OA unit metrics should be
+ * configured, defining the contents of any OA unit reports.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_METRICS_SET,
+
+ /**
+ * The value specifies the size and layout of OA unit reports.
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_FORMAT,
+
+ /**
+ * Specifying this property implicitly requests periodic OA unit
+ * sampling and (at least on Haswell) the sampling frequency is derived
+ * from this exponent as follows:
+ *
+ * 80ns * 2^(period_exponent + 1)
+ *
+ * This property is available in perf revision 1.
+ */
+ DRM_I915_PERF_PROP_OA_EXPONENT,
+
+ /**
+ * Specifying this property is only valid when specifying a context to
+ * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+ * will hold preemption of the particular context we want to gather
+ * performance data about. The execbuf2 submissions must include a
+ * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+ *
+ * This property is available in perf revision 3.
+ */
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
+ /**
+ * Specifying this pins all contexts to the specified SSEU power
+ * configuration for the duration of the recording.
+ *
+ * This parameter's value is a pointer to a struct
+ * drm_i915_gem_context_param_sseu.
+ *
+ * This property is available in perf revision 4.
+ */
+ DRM_I915_PERF_PROP_GLOBAL_SSEU,
+
+ /**
+ * This optional parameter specifies the timer interval in nanoseconds
+ * at which the i915 driver will check the OA buffer for available data.
+ * Minimum allowed value is 100 microseconds. A default value is used by
+ * the driver if this parameter is not specified. Note that larger timer
+ * values will reduce CPU consumption during OA perf captures. However,
+ * excessively large values would potentially result in OA buffer
+ * overwrites as captures reach the end of the OA buffer.
+ *
+ * This property is available in perf revision 5.
+ */
+ DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+
+ DRM_I915_PERF_PROP_MAX /* non-ABI */
+};
+
+struct drm_i915_perf_open_param {
+ __u32 flags;
+#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
+#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
+#define I915_PERF_FLAG_DISABLED (1<<2)
+
+ /** The number of u64 (id, value) pairs */
+ __u32 num_properties;
+
+ /**
+ * Pointer to array of u64 (id, value) pairs configuring the stream
+ * to open.
+ */
+ __u64 properties_ptr;
+};
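+
+/*
+ * An illustrative sketch of opening an OA stream (not normative). The
+ * `metrics_set_id` below is an assumed value obtained elsewhere (from the
+ * kernel's exported metric set configurations); `fd` is assumed and error
+ * handling is omitted:
+ *
+ * .. code-block:: C
+ *
+ *     __u64 props[] = {
+ *             DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+ *             DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
+ *             DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+ *             DRM_I915_PERF_PROP_OA_EXPONENT, 16, // period = 80ns * 2^17
+ *     };
+ *     struct drm_i915_perf_open_param param = {
+ *             .flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
+ *             .num_properties = sizeof(props) / (2 * sizeof(__u64)),
+ *             .properties_ptr = (uintptr_t)props,
+ *     };
+ *
+ *     int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ *     // enable later with ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0),
+ *     // see the ioctls defined below
+ */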
+
+/*
+ * Enable data capture for a stream that was either opened in a disabled state
+ * via I915_PERF_FLAG_DISABLED or was later disabled via
+ * I915_PERF_IOCTL_DISABLE.
+ *
+ * It is intended to be cheaper to disable and enable a stream than it may be
+ * to close and re-open a stream with the same configuration.
+ *
+ * It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
+ */
+#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
+
+/*
+ * Disable data capture for a stream.
+ *
+ * It is an error to try and read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
+ */
+#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
+
+/*
+ * Change metrics_set captured by a stream.
+ *
+ * If the stream is bound to a specific context, the configuration change
+ * will be performed inline with that context such that it takes effect before
+ * the next execbuf submission.
+ *
+ * Returns the previously bound metrics set id, or a negative error code.
+ *
+ * This ioctl is available in perf revision 2.
+ */
+#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
+
+/*
+ * Common to all i915 perf records
+ */
+struct drm_i915_perf_record_header {
+ __u32 type;
+ __u16 pad;
+ __u16 size;
+};
+
+enum drm_i915_perf_record_type {
+
+ /**
+ * Samples are the work horse record type whose contents are extensible
+ * and defined when opening an i915 perf stream based on the given
+ * properties.
+ *
+ * Boolean properties following the naming convention
+ * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
+ * every sample.
+ *
+ * The order of these sample properties given by userspace has no
+ * effect on the ordering of data within a sample. The order is
+ * documented here.
+ *
+ * struct {
+ * struct drm_i915_perf_record_header header;
+ *
+ * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
+ * };
+ */
+ DRM_I915_PERF_RECORD_SAMPLE = 1,
+
+ /*
+ * Indicates that one or more OA reports were not written by the
+ * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
+ * command collides with periodic sampling - which would be more likely
+ * at higher sampling frequencies.
+ */
+ DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
+
+ /**
+ * An error occurred that resulted in all pending OA reports being lost.
+ */
+ DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
+
+ DRM_I915_PERF_RECORD_MAX /* non-ABI */
+};
+
+/**
+ * struct drm_i915_perf_oa_config
+ *
+ * Structure to upload perf dynamic configuration into the kernel.
+ */
+struct drm_i915_perf_oa_config {
+ /**
+ * @uuid:
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
+ __u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
+ __u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
+ __u32 n_flex_regs;
+
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
+ */
+ __u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for boolean
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
+ __u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for flex
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
+ __u64 flex_regs_ptr;
+};
+
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr points to also depends on the specific @query_id.
+ */
+struct drm_i915_query_item {
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ */
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
+/* Must be kept compact -- no holes and well documented */
+
+ /**
+ * @length:
+ *
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the @data_ptr pointer. The kernel sets this
+ * value to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /**
+ * @flags:
+ *
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following:
+ *
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
+ * a struct i915_engine_class_instance that references a render engine.
+ */
+ __u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
+
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ * kernel will then fill in the size, in bytes, which tells userspace how
+ * much memory it needs to allocate for the blob (say, for an array of properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ *
+ */
+struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
+ __u32 num_items;
+
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
+ */
+ __u64 items_ptr;
+};
+
+/**
+ * struct drm_i915_query_topology_info
+ *
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
+ */
+struct drm_i915_query_topology_info {
+ /**
+ * @flags:
+ *
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
+ __u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
+ __u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
+ __u16 max_eus_per_subslice;
+
+ /**
+ * @subslice_offset:
+ *
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /**
+ * @subslice_stride:
+ *
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /**
+ * @eu_offset:
+ *
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /**
+ * @eu_stride:
+ *
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ /**
+ * @data:
+ *
+ * Contains 3 pieces of information :
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+ * following formula :
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+ * describes as a "dual-subslice". The availability of subslice Y
+ * in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+ * subslice Y in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
+ __u8 data[];
+};
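+
+/*
+ * Hedged helper sketches (illustrative only) implementing the formulas
+ * documented above for @data; `info` is assumed to be a successfully queried
+ * struct drm_i915_query_topology_info pointer:
+ *
+ * .. code-block:: C
+ *
+ *     static bool slice_available(const struct drm_i915_query_topology_info *info,
+ *                                 int s)
+ *     {
+ *             return (info->data[s / 8] >> (s % 8)) & 1;
+ *     }
+ *
+ *     static bool subslice_available(const struct drm_i915_query_topology_info *info,
+ *                                    int s, int ss)
+ *     {
+ *             return (info->data[info->subslice_offset +
+ *                                s * info->subslice_stride + ss / 8] >> (ss % 8)) & 1;
+ *     }
+ *
+ *     static bool eu_available(const struct drm_i915_query_topology_info *info,
+ *                              int s, int ss, int eu)
+ *     {
+ *             return (info->data[info->eu_offset +
+ *                                (s * info->max_subslices + ss) * info->eu_stride +
+ *                                eu / 8] >> (eu % 8)) & 1;
+ *     }
+ */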
+
+/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface came starting with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of a same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_engine_info *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of engines. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * //
+ * // Alternatively a large buffer can be allocated straight away enabling
+ * // querying in one pass, in which case item.length should contain the
+ * // length of the provided buffer.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with info on all engines.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each engine in the array
+ * for (i = 0; i < info->num_engines; i++) {
+ * struct drm_i915_engine_info einfo = info->engines[i];
+ * u16 class = einfo.engine.class;
+ * u16 instance = einfo.engine.instance;
+ * ....
+ * }
+ *
+ * free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes to its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** @engine: Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** @rsvd0: Reserved field. */
+ __u32 rsvd0;
+
+ /** @flags: Engine flags. */
+ __u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
+
+ /** @capabilities: Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
+ /** @rsvd1: Reserved fields. */
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @engines: Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
+ */
+struct drm_i915_query_perf_config {
+ union {
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+ * the number of configurations available.
+ */
+ __u64 n_configs;
+
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
+ */
+ __u64 config;
+
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+ };
+
+ /**
+ * @flags:
+ *
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /**
+ * @data:
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * or %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
+ * drm_i915_perf_oa_config. If the following fields of struct
+ * drm_i915_perf_oa_config are not set to 0, i915 will write into the
+ * associated pointers the values submitted when the configuration was
+ * created:
+ *
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
+ */
+ __u8 data[];
+};
+
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /**
+ * @probed_size: Memory probed by the driver
+ *
+ * Note that it should never be possible to encounter a zero value
+ * here. Also note that no current region type will ever return -1
+ * here, although future region types might. The same applies to the
+ * other size fields.
+ */
+ __u64 probed_size;
+
+ /**
+ * @unallocated_size: Estimate of memory remaining
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+ * Without this (or if this is an older kernel) the value here will
+ * always equal the @probed_size. Note this is only currently tracked
+ * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+ * will always equal the @probed_size).
+ */
+ __u64 unallocated_size;
+
+ union {
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+ struct {
+ /**
+ * @probed_cpu_visible_size: Memory probed by the driver
+ * that is CPU accessible.
+ *
+ * This will always be <= @probed_size, and the
+ * remainder (if there is any) will not be CPU
+ * accessible.
+ *
+ * On systems without small BAR, the @probed_size will
+ * always equal the @probed_cpu_visible_size, since all
+ * of it will be CPU accessible.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the @probed_size).
+ *
+ * Note that if the value returned here is zero, then
+ * this must be an old kernel which lacks the relevant
+ * small-bar uAPI support (including
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+ * such systems we should never actually end up with a
+ * small BAR configuration, assuming we are able to load
+ * the kernel module. Hence it should be safe to treat
+ * this the same as when @probed_cpu_visible_size ==
+ * @probed_size.
+ */
+ __u64 probed_cpu_visible_size;
+
+ /**
+ * @unallocated_cpu_visible_size: Estimate of CPU
+ * visible memory remaining.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the
+ * @probed_cpu_visible_size).
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always
+ * equal the @probed_cpu_visible_size.
+ *
+ * If this is an older kernel the value here will be
+ * zero, see also @probed_cpu_visible_size.
+ */
+ __u64 unallocated_cpu_visible_size;
+ };
+ };
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * __u16 class = mr.region.memory_class;
+ * __u16 instance = mr.region.memory_instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters, however this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * DG2 64K min page size implications:
+ *
+ * On discrete platforms, starting from DG2, we have to contend with GTT
+ * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
+ * objects. Specifically the hardware only supports 64K or larger GTT
+ * page sizes for such memory. The kernel will already ensure that all
+ * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
+ * sizes underneath.
+ *
+ * Note that the returned size here will always reflect any required
+ * rounding up done by the kernel, i.e. 4K will now become 64K on devices
+ * such as DG2. The kernel will always select the largest minimum
+ * page-size for the set of possible placements as the value to use when
+ * rounding up the @size.
+ *
+ * Special DG2 GTT address alignment requirement:
+ *
+ * The GTT alignment will also need to be at least 2M for such objects.
+ *
+ * Note that due to how the hardware implements 64K GTT page support, we
+ * have some further complications:
+ *
+ * 1) The entire PDE (which covers a 2MB virtual address range), must
+ * contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+ * PDE is forbidden by the hardware.
+ *
+ * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+ * objects.
+ *
+ * To keep things simple for userland, we mandate that any GTT mappings
+ * must be aligned to and rounded up to 2MB. The kernel will internally
+ * pad them out to the next 2MB boundary. As this only wastes virtual
+ * address space and avoids userland having to copy any needlessly
+ * complicated PDE sharing scheme (coloring) and only affects DG2, this
+ * is deemed to be a good compromise.
+ */
+ __u64 size;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: Optional flags.
+ *
+ * Supported values:
+ *
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+ * the object will need to be accessed via the CPU.
+ *
+ * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+ * strictly required on configurations where some subset of the device
+ * memory is directly visible/mappable through the CPU (which we also
+ * call small BAR), like on some DG2+ systems. Note that this is quite
+ * undesirable, but due to various factors like the client CPU, BIOS, etc.,
+ * it's something we can expect to see in the wild. See
+ * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+ * determine if this system applies.
+ *
+ * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+ * ensure the kernel can always spill the allocation to system memory,
+ * if the object can't be allocated in the mappable part of
+ * I915_MEMORY_CLASS_DEVICE.
+ *
+ * Also note that since the kernel only supports flat-CCS on objects
+ * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+ * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+ * flat-CCS.
+ *
+ * Without this hint, the kernel will assume that non-mappable
+ * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+ * kernel can still migrate the object to the mappable part, as a last
+ * resort, if userspace ever CPU faults this object, but this might be
+ * expensive, and so ideally should be avoided.
+ *
+ * On older kernels which lack the relevant small-bar uAPI support (see
+ * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+ * usage of the flag will result in an error, but it should NEVER be
+ * possible to end up with a small BAR configuration, assuming we can
+ * also successfully load the i915 kernel module. In such cases the
+ * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+ * such there are zero restrictions on where the object can be placed.
+ */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
+ __u32 flags;
+
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported for objects residing
+ * in I915_MEMORY_CLASS_DEVICE. If such a compressed object also has other
+ * memory classes in @regions and is migrated (by i915, due to memory
+ * constraints) to a non I915_MEMORY_CLASS_DEVICE region, i915 would need
+ * to decompress the content, but i915 lacks the information required to
+ * decompress userspace compressed objects.
+ *
+ * Hence i915 supports Flat-CCS only on objects which can reside solely in
+ * I915_MEMORY_CLASS_DEVICE regions.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled, on all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _I915_DRM_H_ */
diff --git a/include/libdrm/libdrm/intel_aub.h b/include/libdrm/libdrm/intel_aub.h
new file mode 100644
index 0000000..5f0aba8
--- /dev/null
+++ b/include/libdrm/libdrm/intel_aub.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_aub.h
+ *
+ * The AUB file is a file format used by Intel's internal simulation
+ * and other validation tools. It can be used at various levels by a
+ * driver to input state to the simulated hardware or a replaying
+ * debugger.
+ *
+ * We choose to dump AUB files using the trace block format for ease
+ * of implementation -- dump out the blocks of memory as plain blobs
+ * and insert ring commands to execute the batchbuffer blob.
+ */
+
+#ifndef _INTEL_AUB_H
+#define _INTEL_AUB_H
+
+#define AUB_MI_NOOP (0)
+#define AUB_MI_BATCH_BUFFER_START (0x31 << 23)
+#define AUB_PIPE_CONTROL (0x7a000002)
+
+/* DW0: instruction type. */
+
+#define CMD_AUB (7 << 29)
+
+#define CMD_AUB_HEADER (CMD_AUB | (1 << 23) | (0x05 << 16))
+/* DW1 */
+# define AUB_HEADER_MAJOR_SHIFT 24
+# define AUB_HEADER_MINOR_SHIFT 16
+
+#define CMD_AUB_TRACE_HEADER_BLOCK (CMD_AUB | (1 << 23) | (0x41 << 16))
+#define CMD_AUB_DUMP_BMP (CMD_AUB | (1 << 23) | (0x9e << 16))
+
+/* DW1 */
+#define AUB_TRACE_OPERATION_MASK 0x000000ff
+#define AUB_TRACE_OP_COMMENT 0x00000000
+#define AUB_TRACE_OP_DATA_WRITE 0x00000001
+#define AUB_TRACE_OP_COMMAND_WRITE 0x00000002
+#define AUB_TRACE_OP_MMIO_WRITE 0x00000003
+// operation = TRACE_DATA_WRITE, Type
+#define AUB_TRACE_TYPE_MASK 0x0000ff00
+#define AUB_TRACE_TYPE_NOTYPE (0 << 8)
+#define AUB_TRACE_TYPE_BATCH (1 << 8)
+#define AUB_TRACE_TYPE_VERTEX_BUFFER (5 << 8)
+#define AUB_TRACE_TYPE_2D_MAP (6 << 8)
+#define AUB_TRACE_TYPE_CUBE_MAP (7 << 8)
+#define AUB_TRACE_TYPE_VOLUME_MAP (9 << 8)
+#define AUB_TRACE_TYPE_1D_MAP (10 << 8)
+#define AUB_TRACE_TYPE_CONSTANT_BUFFER (11 << 8)
+#define AUB_TRACE_TYPE_CONSTANT_URB (12 << 8)
+#define AUB_TRACE_TYPE_INDEX_BUFFER (13 << 8)
+#define AUB_TRACE_TYPE_GENERAL (14 << 8)
+#define AUB_TRACE_TYPE_SURFACE (15 << 8)
+
+
+// operation = TRACE_COMMAND_WRITE, Type =
+#define AUB_TRACE_TYPE_RING_HWB (1 << 8)
+#define AUB_TRACE_TYPE_RING_PRB0 (2 << 8)
+#define AUB_TRACE_TYPE_RING_PRB1 (3 << 8)
+#define AUB_TRACE_TYPE_RING_PRB2 (4 << 8)
+
+// Address space
+#define AUB_TRACE_ADDRESS_SPACE_MASK 0x00ff0000
+#define AUB_TRACE_MEMTYPE_GTT (0 << 16)
+#define AUB_TRACE_MEMTYPE_LOCAL (1 << 16)
+#define AUB_TRACE_MEMTYPE_NONLOCAL (2 << 16)
+#define AUB_TRACE_MEMTYPE_PCI (3 << 16)
+#define AUB_TRACE_MEMTYPE_GTT_ENTRY (4 << 16)
+
+/* DW2 */
+
+/**
+ * aub_state_struct_type enum values are encoded with the top 16 bits
+ * representing the type to be delivered to the .aub file, and the bottom 16
+ * bits representing the subtype. This macro performs the encoding.
+ */
+#define ENCODE_SS_TYPE(type, subtype) (((type) << 16) | (subtype))
+
+enum aub_state_struct_type {
+ AUB_TRACE_VS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1),
+ AUB_TRACE_GS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 2),
+ AUB_TRACE_CLIP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 3),
+ AUB_TRACE_SF_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 4),
+ AUB_TRACE_WM_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 5),
+ AUB_TRACE_CC_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 6),
+ AUB_TRACE_CLIP_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 7),
+ AUB_TRACE_SF_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 8),
+ AUB_TRACE_CC_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x9),
+ AUB_TRACE_SAMPLER_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xa),
+ AUB_TRACE_KERNEL_INSTRUCTIONS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xb),
+ AUB_TRACE_SCRATCH_SPACE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xc),
+ AUB_TRACE_SAMPLER_DEFAULT_COLOR = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xd),
+
+ AUB_TRACE_SCISSOR_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x15),
+ AUB_TRACE_BLEND_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x16),
+ AUB_TRACE_DEPTH_STENCIL_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x17),
+
+ AUB_TRACE_VERTEX_BUFFER = ENCODE_SS_TYPE(AUB_TRACE_TYPE_VERTEX_BUFFER, 0),
+ AUB_TRACE_BINDING_TABLE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x100),
+ AUB_TRACE_SURFACE_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200),
+ AUB_TRACE_VS_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 0),
+ AUB_TRACE_WM_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 1),
+};
+
+#undef ENCODE_SS_TYPE
+
+/**
+ * Decode an aub_state_struct_type value to determine the type that should be
+ * stored in the .aub file.
+ */
+static inline uint32_t AUB_TRACE_TYPE(enum aub_state_struct_type ss_type)
+{
+ return (ss_type & 0xFFFF0000) >> 16;
+}
+
+/**
+ * Decode an aub_state_struct_type value to determine the subtype that should be
+ * stored in the .aub file.
+ */
+static inline uint32_t AUB_TRACE_SUBTYPE(enum aub_state_struct_type ss_type)
+{
+ return ss_type & 0xFFFF;
+}
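+
+/* Worked example (for illustration): AUB_TRACE_VS_STATE encodes
+ * AUB_TRACE_TYPE_GENERAL (14 << 8 == 0x0e00) in the upper 16 bits and
+ * subtype 1 in the lower 16 bits, i.e. 0x0e000001. AUB_TRACE_TYPE()
+ * then recovers 0x0e00 and AUB_TRACE_SUBTYPE() recovers 1.
+ */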
+
+/* DW3: address */
+/* DW4: len */
+
+#endif /* _INTEL_AUB_H */
diff --git a/include/libdrm/libdrm/intel_bufmgr.h b/include/libdrm/libdrm/intel_bufmgr.h
new file mode 100644
index 0000000..693472a
--- /dev/null
+++ b/include/libdrm/libdrm/intel_bufmgr.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
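+ *
+ * A minimal usage sketch (assuming a valid DRM fd; error handling and
+ * the <string.h> include for memset are elided):
+ *
+ *   drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ *   drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ *
+ *   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 8192, 4096);
+ *   drm_intel_bo_map(bo, 1);            // map writable
+ *   memset(bo->virtual, 0, bo->size);   // CPU access while mapped
+ *   drm_intel_bo_unmap(bo);
+ *
+ *   drm_intel_bo_unreference(bo);
+ *   drm_intel_bufmgr_destroy(bufmgr);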
+ */
+
+#ifndef INTEL_BUFMGR_H
+#define INTEL_BUFMGR_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct drm_clip_rect;
+
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_context drm_intel_context;
+typedef struct _drm_intel_bo drm_intel_bo;
+
+struct _drm_intel_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+
+ /**
+ * Alignment requirement for object
+ *
+ * Used for GTT mapping & pinning the object.
+ */
+ unsigned long align;
+
+ /**
+ * Deprecated field containing (possibly the low 32-bits of) the last
+ * seen virtual card address. Use offset64 instead.
+ */
+ unsigned long offset;
+
+ /**
+ * Virtual address for accessing the buffer data. Only valid while
+ * mapped.
+ */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual;
+#endif
+
+ /** Buffer manager context associated with this buffer object */
+ drm_intel_bufmgr *bufmgr;
+
+ /**
+ * MM-specific handle for accessing object
+ */
+ int handle;
+
+ /**
+ * Last seen card virtual address (offset from the beginning of the
+ * aperture) for the object. This should be used to fill relocation
+ * entries when calling drm_intel_bo_emit_reloc()
+ */
+ uint64_t offset64;
+};
+
+enum aub_dump_bmp_format {
+ AUB_DUMP_BMP_FORMAT_8BIT = 1,
+ AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
+ AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
+ AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
+};
+
+typedef struct _drm_intel_aub_annotation {
+ uint32_t type;
+ uint32_t subtype;
+ uint32_t ending_offset;
+} drm_intel_aub_annotation;
+
+#define BO_ALLOC_FOR_RENDER (1<<0)
+
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr, uint32_t tiling_mode,
+ uint32_t stride, unsigned long size,
+ unsigned long flags);
+drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ int x, int y, int cpp,
+ uint32_t *tiling_mode,
+ unsigned long *pitch,
+ unsigned long flags);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
+
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
+
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
+int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
+ struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
+ unsigned int flags);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
+
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
+int drm_intel_bo_unpin(drm_intel_bo *bo);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
+int drm_intel_bo_busy(drm_intel_bo *bo);
+int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
+int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable);
+int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
+
+int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
+int drm_intel_bo_is_reusable(drm_intel_bo *bo);
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+/* drm_intel_bufmgr_gem.c */
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit);
+int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
+
+#define HAVE_DRM_INTEL_GEM_BO_DISABLE_IMPLICIT_SYNC 1
+int drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr);
+void drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo);
+void drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo);
+
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
+
+int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
+void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
+void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
+
+void
+drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+ const char *filename);
+void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
+void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+ int x1, int y1, int width, int height,
+ enum aub_dump_bmp_format format,
+ int pitch, int offset);
+void
+drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+ drm_intel_aub_annotation *annotations,
+ unsigned count);
+
+int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
+
+int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
+int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
+
+drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_context_get_id(drm_intel_context *ctx,
+ uint32_t *ctx_id);
+void drm_intel_gem_context_destroy(drm_intel_context *ctx);
+int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+ int used, unsigned int flags);
+int drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
+ drm_intel_context *ctx,
+ int used,
+ int in_fence,
+ int *out_fence,
+ unsigned int flags);
+
+int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
+drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
+ int prime_fd, int size);
+
+/* drm_intel_bufmgr_fake.c */
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch);
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec) (drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv);
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit) (void *priv),
+ void (*wait) (unsigned int fence,
+ void *priv),
+ void *priv);
+drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset,
+ unsigned long size, void *virt);
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb) (drm_intel_bo
+ * bo,
+ void *ptr),
+ void *ptr);
+
+void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+
+struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
+void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
+void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
+ void *data, uint32_t hw_offset,
+ int count);
+void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
+ int dump_past_end);
+void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
+ uint32_t head, uint32_t tail);
+void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
+void drm_intel_decode(struct drm_intel_decode *ctx);
+
+int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
+ uint32_t offset,
+ uint64_t *result);
+
+int drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending);
+
+int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
+int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
+
+int drm_intel_get_pooled_eu(int fd);
+int drm_intel_get_min_eu_in_pool(int fd);
+
+/** @{ Compatibility defines to keep old code building despite the symbol rename
+ * from dri_* to drm_intel_*
+ */
+#define dri_bo drm_intel_bo
+#define dri_bufmgr drm_intel_bufmgr
+#define dri_bo_alloc drm_intel_bo_alloc
+#define dri_bo_reference drm_intel_bo_reference
+#define dri_bo_unreference drm_intel_bo_unreference
+#define dri_bo_map drm_intel_bo_map
+#define dri_bo_unmap drm_intel_bo_unmap
+#define dri_bo_subdata drm_intel_bo_subdata
+#define dri_bo_get_subdata drm_intel_bo_get_subdata
+#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
+#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
+#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
+#define dri_bo_exec drm_intel_bo_exec
+#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
+#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
+ reloc_offset, target_bo) \
+ drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
+ target_bo, target_offset, \
+ read, write)
+#define dri_bo_pin drm_intel_bo_pin
+#define dri_bo_unpin drm_intel_bo_unpin
+#define dri_bo_get_tiling drm_intel_bo_get_tiling
+#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
+#define dri_bo_flink drm_intel_bo_flink
+#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
+#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
+#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
+#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
+#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
+#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
+#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
+#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
+#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
+#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
+#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
+
+/** @} */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* INTEL_BUFMGR_H */
diff --git a/include/libdrm/libdrm/intel_debug.h b/include/libdrm/libdrm/intel_debug.h
new file mode 100644
index 0000000..fa0737c
--- /dev/null
+++ b/include/libdrm/libdrm/intel_debug.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#ifndef INTEL_DEBUG_H
+#define INTEL_DEBUG_H
+
+#include <stdint.h>
+
+#define SHADER_DEBUG_SOCKET "/var/run/gen_debug"
+#define DEBUG_HANDSHAKE_VERSION 0x3
+#define DEBUG_HANDSHAKE_ACK "okay"
+
+/* First byte must always be the 1 byte version */
+struct intel_debug_handshake {
+ uint32_t version;
+ int flink_handle;
+ uint32_t per_thread_scratch;
+} __attribute__((packed));
+
+#endif
diff --git a/include/libdrm/libdrm/mach64_drm.h b/include/libdrm/libdrm/mach64_drm.h
new file mode 100644
index 0000000..1f5fd84
--- /dev/null
+++ b/include/libdrm/libdrm/mach64_drm.h
@@ -0,0 +1,256 @@
+/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
+ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 Gareth Hughes
+ * Copyright 2002 Frank C. Earl
+ * Copyright 2002-2003 Leif Delgass
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Frank C. Earl <fearl@airmail.net>
+ * Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+#ifndef __MACH64_DRM_H__
+#define __MACH64_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mach64_sarea.h)
+ */
+#ifndef __MACH64_SAREA_DEFINES__
+#define __MACH64_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ * GH: We're going to be pedantic about this. We want the card to do as
+ * little as possible, so let's avoid having it fetch a whole bunch of
+ * register values that don't change all that often, if at all.
+ */
+#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001
+#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002
+#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004
+#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008
+#define MACH64_UPLOAD_DP_FOG_CLR 0x0010
+#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020
+#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040
+#define MACH64_UPLOAD_SETUP_CNTL 0x0080
+#define MACH64_UPLOAD_MISC 0x0100
+#define MACH64_UPLOAD_TEXTURE 0x0200
+#define MACH64_UPLOAD_TEX0IMAGE 0x0400
+#define MACH64_UPLOAD_TEX1IMAGE 0x0800
+#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
+#define MACH64_UPLOAD_CONTEXT 0x00ff
+#define MACH64_UPLOAD_ALL 0x1fff
+
+/* DMA buffer size
+ */
+#define MACH64_BUFFER_SIZE 16384
+
+/* Max number of swaps allowed on the ring
+ * before the client must wait
+ */
+#define MACH64_MAX_QUEUED_FRAMES 3U
+
+/* Byte offsets for host blit buffer data
+ */
+#define MACH64_HOSTDATA_BLIT_OFFSET 104
+
+/* Keep these small for testing.
+ */
+#define MACH64_NR_SAREA_CLIPRECTS 8
+
+#define MACH64_CARD_HEAP 0
+#define MACH64_AGP_HEAP 1
+#define MACH64_NR_TEX_HEAPS 2
+#define MACH64_NR_TEX_REGIONS 64
+#define MACH64_LOG_TEX_GRANULARITY 16
+
+#define MACH64_TEX_MAXLEVELS 1
+
+#define MACH64_NR_CONTEXT_REGS 15
+#define MACH64_NR_TEXTURE_REGS 4
+
+#endif /* __MACH64_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int dst_off_pitch;
+
+ unsigned int z_off_pitch;
+ unsigned int z_cntl;
+ unsigned int alpha_tst_cntl;
+
+ unsigned int scale_3d_cntl;
+
+ unsigned int sc_left_right;
+ unsigned int sc_top_bottom;
+
+ unsigned int dp_fog_clr;
+ unsigned int dp_write_mask;
+ unsigned int dp_pix_width;
+ unsigned int dp_mix;
+ unsigned int dp_src;
+
+ unsigned int clr_cmp_cntl;
+ unsigned int gui_traj_cntl;
+
+ unsigned int setup_cntl;
+
+ unsigned int tex_size_pitch;
+ unsigned int tex_cntl;
+ unsigned int secondary_tex_off;
+ unsigned int tex_offset;
+} drm_mach64_context_regs_t;
+
+typedef struct drm_mach64_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ drm_mach64_context_regs_t context_state;
+ unsigned int dirty;
+ unsigned int vertsize;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int frames_queued;
+
+ /* Texture memory LRU.
+ */
+ struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
+ 1];
+ unsigned int tex_age[MACH64_NR_TEX_HEAPS];
+ int ctx_owner;
+} drm_mach64_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mach64_common.h)
+ */
+
+/* Mach64 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+
+#define DRM_MACH64_INIT 0x00
+#define DRM_MACH64_IDLE 0x01
+#define DRM_MACH64_RESET 0x02
+#define DRM_MACH64_SWAP 0x03
+#define DRM_MACH64_CLEAR 0x04
+#define DRM_MACH64_VERTEX 0x05
+#define DRM_MACH64_BLIT 0x06
+#define DRM_MACH64_FLUSH 0x07
+#define DRM_MACH64_GETPARAM 0x08
+
+#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
+#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE )
+#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET )
+#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP )
+#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
+#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
+#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
+#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
+#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
+
+/* Buffer flags for clears
+ */
+#define MACH64_FRONT 0x1
+#define MACH64_BACK 0x2
+#define MACH64_DEPTH 0x4
+
+/* Primitive types for vertex buffers
+ */
+#define MACH64_PRIM_POINTS 0x00000000
+#define MACH64_PRIM_LINES 0x00000001
+#define MACH64_PRIM_LINE_LOOP 0x00000002
+#define MACH64_PRIM_LINE_STRIP 0x00000003
+#define MACH64_PRIM_TRIANGLES 0x00000004
+#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005
+#define MACH64_PRIM_TRIANGLE_FAN 0x00000006
+#define MACH64_PRIM_QUADS 0x00000007
+#define MACH64_PRIM_QUAD_STRIP 0x00000008
+#define MACH64_PRIM_POLYGON 0x00000009
+
+typedef enum _drm_mach64_dma_mode_t {
+ MACH64_MODE_DMA_ASYNC,
+ MACH64_MODE_DMA_SYNC,
+ MACH64_MODE_MMIO
+} drm_mach64_dma_mode_t;
+
+typedef struct drm_mach64_init {
+ enum {
+ DRM_MACH64_INIT_DMA = 0x01,
+ DRM_MACH64_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ drm_mach64_dma_mode_t dma_mode;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+} drm_mach64_init_t;
+
+typedef struct drm_mach64_clear {
+ unsigned int flags;
+ int x, y, w, h;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+} drm_mach64_clear_t;
+
+typedef struct drm_mach64_vertex {
+ int prim;
+ void *buf; /* Address of vertex buffer */
+ unsigned long used; /* Number of bytes in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_mach64_vertex_t;
+
+typedef struct drm_mach64_blit {
+ void *buf;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_mach64_blit_t;
+
+typedef struct drm_mach64_getparam {
+ enum {
+ MACH64_PARAM_FRAMES_QUEUED = 0x01,
+ MACH64_PARAM_IRQ_NR = 0x02
+ } param;
+ void *value;
+} drm_mach64_getparam_t;
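+
+/* Example: querying the number of queued frames (a sketch; assumes an
+ * open mach64 DRM fd, with error handling elided):
+ *
+ *   int frames = 0;
+ *   drm_mach64_getparam_t gp = {
+ *       .param = MACH64_PARAM_FRAMES_QUEUED,
+ *       .value = &frames,
+ *   };
+ *   if (ioctl(fd, DRM_IOCTL_MACH64_GETPARAM, &gp) == 0)
+ *       ... // frames now holds the current queue depth
+ */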
+
+#endif
diff --git a/include/libdrm/libdrm/mga_drm.h b/include/libdrm/libdrm/mga_drm.h
new file mode 100644
index 0000000..7930011
--- /dev/null
+++ b/include/libdrm/libdrm/mga_drm.h
@@ -0,0 +1,427 @@
+/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
+ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __MGA_DRM_H__
+#define __MGA_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mga_sarea.h)
+ */
+
+#ifndef __MGA_SAREA_DEFINES__
+#define __MGA_SAREA_DEFINES__
+
+/* WARP pipe flags
+ */
+#define MGA_F 0x1 /* fog */
+#define MGA_A 0x2 /* alpha */
+#define MGA_S 0x4 /* specular */
+#define MGA_T2 0x8 /* multitexture */
+
+#define MGA_WARP_TGZ 0
+#define MGA_WARP_TGZF (MGA_F)
+#define MGA_WARP_TGZA (MGA_A)
+#define MGA_WARP_TGZAF (MGA_F|MGA_A)
+#define MGA_WARP_TGZS (MGA_S)
+#define MGA_WARP_TGZSF (MGA_S|MGA_F)
+#define MGA_WARP_TGZSA (MGA_S|MGA_A)
+#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
+#define MGA_WARP_T2GZ (MGA_T2)
+#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
+#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
+#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
+#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
+#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
+#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
+#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
+
+#define MGA_MAX_G200_PIPES 8 /* no multitex */
+#define MGA_MAX_G400_PIPES 16
+#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
+#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
+
+#define MGA_CARD_TYPE_G200 1
+#define MGA_CARD_TYPE_G400 2
+#define MGA_CARD_TYPE_G450 3 /* not currently used */
+#define MGA_CARD_TYPE_G550 4
+
+#define MGA_FRONT 0x1
+#define MGA_BACK 0x2
+#define MGA_DEPTH 0x4
+
+/* What needs to be changed for the current vertex dma buffer?
+ */
+#define MGA_UPLOAD_CONTEXT 0x1
+#define MGA_UPLOAD_TEX0 0x2
+#define MGA_UPLOAD_TEX1 0x4
+#define MGA_UPLOAD_PIPE 0x8
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
+#define MGA_UPLOAD_2D 0x40
+#define MGA_WAIT_AGE 0x80 /* handled client-side */
+#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
+#if 0
+#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
+ quiescent */
+#endif
+
+/* 128 buffers of 64k each, total 8 meg.
+ */
+#define MGA_BUFFER_SIZE (1 << 16)
+#define MGA_NUM_BUFFERS 128
+
+/* Keep these small for testing.
+ */
+#define MGA_NR_SAREA_CLIPRECTS 8
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define MGA_CARD_HEAP 0
+#define MGA_AGP_HEAP 1
+#define MGA_NR_TEX_HEAPS 2
+#define MGA_NR_TEX_REGIONS 16
+#define MGA_LOG_MIN_TEX_REGION_SIZE 16
+
+#define DRM_MGA_IDLE_RETRY 2048
+
+#endif /* __MGA_SAREA_DEFINES__ */
+
+/* Setup registers for 3D context
+ */
+typedef struct {
+ unsigned int dstorg;
+ unsigned int maccess;
+ unsigned int plnwt;
+ unsigned int dwgctl;
+ unsigned int alphactrl;
+ unsigned int fogcolor;
+ unsigned int wflag;
+ unsigned int tdualstage0;
+ unsigned int tdualstage1;
+ unsigned int fcol;
+ unsigned int stencil;
+ unsigned int stencilctl;
+} drm_mga_context_regs_t;
+
+/* Setup registers for 2D, X server
+ */
+typedef struct {
+ unsigned int pitch;
+} drm_mga_server_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int texctl;
+ unsigned int texctl2;
+ unsigned int texfilter;
+ unsigned int texbordercol;
+ unsigned int texorg;
+ unsigned int texwidth;
+ unsigned int texheight;
+ unsigned int texorg1;
+ unsigned int texorg2;
+ unsigned int texorg3;
+ unsigned int texorg4;
+} drm_mga_texture_regs_t;
+
+/* General aging mechanism
+ */
+typedef struct {
+ unsigned int head; /* Position of head pointer */
+ unsigned int wrap; /* Primary DMA wrap count */
+} drm_mga_age_t;
+
+typedef struct _drm_mga_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ drm_mga_context_regs_t context_state;
+ drm_mga_server_regs_t server_state;
+ drm_mga_texture_regs_t tex_state[2];
+ unsigned int warp_pipe;
+ unsigned int dirty;
+ unsigned int vertsize;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Information about the most recently used 3d drawable. The
+ * client fills in the req_* fields, the server fills in the
+ * exported_ fields and puts the cliprects into boxes, above.
+ *
+ * The client clears the exported_drawable field before
+ * clobbering the boxes data.
+ */
+ unsigned int req_drawable; /* the X drawable id */
+ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
+
+ unsigned int exported_drawable;
+ unsigned int exported_index;
+ unsigned int exported_stamp;
+ unsigned int exported_buffers;
+ unsigned int exported_nfront;
+ unsigned int exported_nback;
+ int exported_back_x, exported_front_x, exported_w;
+ int exported_back_y, exported_front_y, exported_h;
+ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+
+ /* Counters for aging textures and for client-side throttling.
+ */
+ unsigned int status[4];
+ unsigned int last_wrap;
+
+ drm_mga_age_t last_frame;
+ unsigned int last_enqueue; /* last time a buffer was enqueued */
+ unsigned int last_dispatch; /* age of the most recently dispatched buffer */
+ unsigned int last_quiescent; /* */
+
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+ unsigned int texAge[MGA_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_mga_sarea_t;
+
+/* MGA specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_MGA_INIT 0x00
+#define DRM_MGA_FLUSH 0x01
+#define DRM_MGA_RESET 0x02
+#define DRM_MGA_SWAP 0x03
+#define DRM_MGA_CLEAR 0x04
+#define DRM_MGA_VERTEX 0x05
+#define DRM_MGA_INDICES 0x06
+#define DRM_MGA_ILOAD 0x07
+#define DRM_MGA_BLIT 0x08
+#define DRM_MGA_GETPARAM 0x09
+
+/* 3.2:
+ * ioctls for operating on fences.
+ */
+#define DRM_MGA_SET_FENCE 0x0a
+#define DRM_MGA_WAIT_FENCE 0x0b
+#define DRM_MGA_DMA_BOOTSTRAP 0x0c
+
+#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
+#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
+#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
+#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
+#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
+#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
+#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
+
+typedef struct _drm_mga_warp_index {
+ int installed;
+ unsigned long phys_addr;
+ int size;
+} drm_mga_warp_index_t;
+
+typedef struct drm_mga_init {
+ enum {
+ MGA_INIT_DMA = 0x01,
+ MGA_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+
+ int chipset;
+ int sgram;
+
+ unsigned int maccess;
+
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long status_offset;
+ unsigned long warp_offset;
+ unsigned long primary_offset;
+ unsigned long buffers_offset;
+} drm_mga_init_t;
+
+typedef struct drm_mga_dma_bootstrap {
+ /**
+ * \name AGP texture region
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
+ * be filled in with the actual AGP texture settings.
+ *
+ * \warning
+ * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
+ * is zero, it means that PCI memory (most likely through the use of
+ * an IOMMU) is being used for "AGP" textures.
+ */
+ /*@{ */
+ unsigned long texture_handle; /**< Handle used to map AGP textures. */
+ __u32 texture_size; /**< Size of the AGP texture region. */
+ /*@} */
+
+ /**
+ * Requested size of the primary DMA region.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual size of the primary DMA region allocated.
+ */
+ __u32 primary_size;
+
+ /**
+ * Requested number of secondary DMA buffers.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual number of secondary DMA buffers
+ * allocated. Particularly when PCI DMA is used, this may be
+ * (subtantially) less than the number requested.
+ */
+ __u32 secondary_bin_count;
+
+ /**
+ * Requested size of each secondary DMA buffer.
+ *
+ * While the kernel \b is free to reduce
+ * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
+ * to reduce dma_mga_dma_bootstrap::secondary_bin_size.
+ */
+ __u32 secondary_bin_size;
+
+ /**
+ * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
+ * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
+ * zero, it means that PCI DMA should be used, even if AGP is
+ * possible.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual AGP mode. If AGP was not available
+ * (i.e., PCI DMA was used), this value will be zero.
+ */
+ __u32 agp_mode;
+
+ /**
+ * Desired AGP GART size, measured in megabytes.
+ */
+ __u8 agp_size;
+} drm_mga_dma_bootstrap_t;
+
+typedef struct drm_mga_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_mga_clear_t;
+
+typedef struct drm_mga_vertex {
+ int idx; /* buffer to queue */
+ int used; /* bytes in use */
+ int discard; /* client finished with buffer? */
+} drm_mga_vertex_t;
+
+typedef struct drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
+
+typedef struct drm_mga_iload {
+ int idx;
+ unsigned int dstorg;
+ unsigned int length;
+} drm_mga_iload_t;
+
+typedef struct _drm_mga_blit {
+ unsigned int planemask;
+ unsigned int srcorg;
+ unsigned int dstorg;
+ int src_pitch, dst_pitch;
+ int delta_sx, delta_sy;
+ int delta_dx, delta_dy;
+ int height, ydir; /* flip image vertically */
+ int source_pitch, dest_pitch;
+} drm_mga_blit_t;
+
+/* 3.1: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define MGA_PARAM_IRQ_NR 1
+
+/* 3.2: Query the actual card type. The DDX only distinguishes between
+ * G200 chips and non-G200 chips, which it calls G400. It turns out that
+ * there are some very subtle differences between the G4x0 chips and the G550
+ * chips. Using this parameter query, a client-side driver can detect the
+ * difference between a G4x0 and a G550.
+ */
+#define MGA_PARAM_CARD_TYPE 2
+
+typedef struct drm_mga_getparam {
+ int param;
+ void *value;
+} drm_mga_getparam_t;
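+
+/* Usage sketch (editor's illustration, not part of this header): one way a
+ * client might query the card type through libdrm's drmCommandWriteRead().
+ * The function name and fd handling are assumptions; DRM_MGA_GETPARAM is
+ * defined earlier in this header.
+ */
+#if 0
+#include <xf86drm.h>
+
+static int mga_get_card_type(int fd, int *card_type)
+{
+	drm_mga_getparam_t gp;
+
+	gp.param = MGA_PARAM_CARD_TYPE;
+	gp.value = card_type;	/* kernel writes the result here */
+	return drmCommandWriteRead(fd, DRM_MGA_GETPARAM, &gp, sizeof(gp));
+}
+#endif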
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/msm_drm.h b/include/libdrm/libdrm/msm_drm.h
new file mode 100644
index 0000000..c06d0a5
--- /dev/null
+++ b/include/libdrm/libdrm/msm_drm.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MSM_DRM_H__
+#define __MSM_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ * user/kernel compatibility
+ * 2) Keep fields aligned to their size
+ * 3) Because of how drm_ioctl() works, we can add new fields at
+ * the end of an ioctl if some care is taken: drm_ioctl() will
+ * zero out the new fields at the tail of the ioctl, so a zero
+ * value should have a backwards compatible meaning. And for
+ * output params, userspace won't see the newly added output
+ * fields.. so that has to be somehow ok.
+ */
+
+#define MSM_PIPE_NONE 0x00
+#define MSM_PIPE_2D0 0x01
+#define MSM_PIPE_2D1 0x02
+#define MSM_PIPE_3D0 0x10
+
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK 0xffff
+#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
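+
+/* Illustration (editor's, not part of the uAPI): how a pipe id and submit
+ * flags combine into the single 'flags' word used by drm_msm_gem_submit
+ * further down; MSM_SUBMIT_* is also defined later in this header.
+ */
+#if 0
+__u32 flags = MSM_PIPE_3D0 | MSM_SUBMIT_FENCE_FD_OUT;
+/* MSM_PIPE_ID(flags)    == MSM_PIPE_3D0 */
+/* MSM_PIPE_FLAGS(flags) == MSM_SUBMIT_FENCE_FD_OUT */
+#endif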
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls). The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_msm_timespec {
+ __s64 tv_sec; /* seconds */
+ __s64 tv_nsec; /* nanoseconds */
+};
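+
+/* Sketch (editor's illustration; the helper name is an assumption):
+ * converting a relative timeout into the absolute CLOCK_MONOTONIC value
+ * this struct expects.
+ */
+#if 0
+#include <time.h>
+
+static struct drm_msm_timespec msm_abs_timeout(int64_t rel_ns)
+{
+	struct timespec now;
+	struct drm_msm_timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &now);
+	ts.tv_sec  = now.tv_sec  + rel_ns / 1000000000;
+	ts.tv_nsec = now.tv_nsec + rel_ns % 1000000000;
+	if (ts.tv_nsec >= 1000000000) {	/* carry into seconds */
+		ts.tv_sec++;
+		ts.tv_nsec -= 1000000000;
+	}
+	return ts;
+}
+#endif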
+
+#define MSM_PARAM_GPU_ID 0x01
+#define MSM_PARAM_GMEM_SIZE 0x02
+#define MSM_PARAM_CHIP_ID 0x03
+#define MSM_PARAM_MAX_FREQ 0x04
+#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
+#define MSM_PARAM_NR_RINGS 0x07
+
+struct drm_msm_param {
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 param; /* in, MSM_PARAM_x */
+ __u64 value; /* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
+#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define MSM_BO_CACHED 0x00010000
+#define MSM_BO_WC 0x00020000
+#define MSM_BO_UNCACHED 0x00040000
+
+#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
+ MSM_BO_GPU_READONLY | \
+ MSM_BO_CACHED | \
+ MSM_BO_WC | \
+ MSM_BO_UNCACHED)
+
+struct drm_msm_gem_new {
+ __u64 size; /* in */
+ __u32 flags; /* in, mask of MSM_BO_x */
+ __u32 handle; /* out */
+};
+
+#define MSM_INFO_IOVA 0x01
+
+#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+
+struct drm_msm_gem_info {
+ __u32 handle; /* in */
+ __u32 flags; /* in - combination of MSM_INFO_* flags */
+ __u64 offset; /* out, mmap() offset or iova */
+};
+
+#define MSM_PREP_READ 0x01
+#define MSM_PREP_WRITE 0x02
+#define MSM_PREP_NOSYNC 0x04
+
+#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
+
+struct drm_msm_gem_cpu_prep {
+ __u32 handle; /* in */
+ __u32 op; /* in, mask of MSM_PREP_x */
+ struct drm_msm_timespec timeout; /* in */
+};
+
+struct drm_msm_gem_cpu_fini {
+ __u32 handle; /* in */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ *
+ * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
+ *
+ * When we have GPUs with >32-bit pointers, it should be possible to deal
+ * with this by emitting two reloc entries with appropriate shift
+ * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
+ *
+ * NOTE that reloc's must be sorted by order of increasing submit_offset,
+ * otherwise EINVAL.
+ */
+struct drm_msm_gem_submit_reloc {
+ __u32 submit_offset; /* in, offset from submit_bo */
+ __u32 or; /* in, value OR'd with result */
+ __s32 shift; /* in, amount of left shift (can be negative) */
+ __u32 reloc_idx; /* in, index of reloc_bo buffer */
+ __u64 reloc_offset; /* in, offset from start of reloc_bo */
+};
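+
+/* Worked example (hypothetical numbers, editor's illustration): if bos[1]
+ * ends up at GPU address 0x1000000 and the cmdstream needs a pointer 0x100
+ * bytes into it, with 0x3 OR'd into the low bits, the kernel writes
+ * ((0x1000000 + 0x100) << 0) | 0x3 == 0x1000103 at submit_offset.
+ */
+#if 0
+struct drm_msm_gem_submit_reloc reloc = {
+	.submit_offset = 4 * 100,	/* patch dword 100 of the cmdstream */
+	.or            = 0x3,
+	.shift         = 0,
+	.reloc_idx     = 1,		/* index into submit->bos[] */
+	.reloc_offset  = 0x100,
+};
+#endif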
+
+/* submit-types:
+ * BUF - this cmd buffer is executed normally.
+ * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
+ * processed normally, but the kernel does not setup an IB to
+ * this buffer in the first-level ringbuffer
+ * CTX_RESTORE_BUF - only executed if there has been a GPU context
+ * switch since the last SUBMIT ioctl
+ */
+#define MSM_SUBMIT_CMD_BUF 0x0001
+#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
+#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
+struct drm_msm_gem_submit_cmd {
+ __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
+ __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
+ __u32 submit_offset; /* in, offset into submit_bo */
+ __u32 size; /* in, cmdstream size */
+ __u32 pad;
+ __u32 nr_relocs; /* in, number of submit_reloc's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As an optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field. If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped. This can
+ * avoid the kernel needing to map/access the cmdstream bo in the
+ * common case.
+ */
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
+struct drm_msm_gem_submit_bo {
+ __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
+ __u32 handle; /* in, GEM handle */
+ __u64 presumed; /* in/out, presumed buffer address */
+};
+
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
+#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
+#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
+#define MSM_SUBMIT_FLAGS ( \
+ MSM_SUBMIT_NO_IMPLICIT | \
+ MSM_SUBMIT_FENCE_FD_IN | \
+ MSM_SUBMIT_FENCE_FD_OUT | \
+ MSM_SUBMIT_SUDO | \
+ 0)
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers. This allows for conditional execution
+ * (context-restore), and IB buffers needed for per tile/bin draw cmds.
+ */
+struct drm_msm_gem_submit {
+ __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
+ __u32 fence; /* out */
+ __u32 nr_bos; /* in, number of submit_bo's */
+ __u32 nr_cmds; /* in, number of submit_cmd's */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 cmds; /* in, ptr to array of submit_cmd's */
+ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
+ __u32 queueid; /* in, submitqueue id */
+};
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
+ * handle the required synchronization under the hood). This ioctl
+ * mainly just exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+struct drm_msm_wait_fence {
+ __u32 fence; /* in */
+ __u32 pad;
+ struct drm_msm_timespec timeout; /* in */
+ __u32 queueid; /* in, submitqueue id */
+};
+
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * bo cache where we want to optimistically hold on to the buffer allocation
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
+#define __MSM_MADV_PURGED 2 /* internal state */
+
+struct drm_msm_gem_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, MSM_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
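+
+/* Sketch (editor's illustration; names are assumptions): recycling a buffer
+ * out of a userspace BO cache, checking whether its pages were purged while
+ * it sat marked DONTNEED. DRM_IOCTL_MSM_GEM_MADVISE is defined below.
+ */
+#if 0
+#include <errno.h>
+#include <xf86drm.h>
+
+static int bo_cache_recycle(int fd, __u32 handle)
+{
+	struct drm_msm_gem_madvise req = {
+		.handle = handle,
+		.madv = MSM_MADV_WILLNEED,
+	};
+	int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
+	if (ret)
+		return ret;
+	return req.retained ? 0 : -ENOMEM;	/* contents were purged */
+}
+#endif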
+
+/*
+ * Draw queues allow the user to set specific submission parameters. Command
+ * submissions specify a specific submitqueue to use. ID 0 is reserved for
+ * backwards compatibility as a "default" submitqueue.
+ */
+
+#define MSM_SUBMITQUEUE_FLAGS (0)
+
+struct drm_msm_submitqueue {
+ __u32 flags; /* in, MSM_SUBMITQUEUE_x */
+ __u32 prio; /* in, Priority level */
+ __u32 id; /* out, identifier */
+};
+
+#define DRM_MSM_GET_PARAM 0x00
+/* placeholder:
+#define DRM_MSM_SET_PARAM 0x01
+ */
+#define DRM_MSM_GEM_NEW 0x02
+#define DRM_MSM_GEM_INFO 0x03
+#define DRM_MSM_GEM_CPU_PREP 0x04
+#define DRM_MSM_GEM_CPU_FINI 0x05
+#define DRM_MSM_GEM_SUBMIT 0x06
+#define DRM_MSM_WAIT_FENCE 0x07
+#define DRM_MSM_GEM_MADVISE 0x08
+/* placeholder:
+#define DRM_MSM_GEM_SVM_NEW 0x09
+ */
+#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
+#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+
+#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
+#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
+#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
+#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
+#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
+#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
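+
+/* Sketch (editor's illustration; names are assumptions): allocating a small
+ * write-combined buffer and fetching its mmap offset with the two ioctls
+ * defined above.
+ */
+#if 0
+#include <xf86drm.h>
+
+static int msm_bo_new(int fd, __u32 *handle, __u64 *offset)
+{
+	struct drm_msm_gem_new new_req = {
+		.size  = 4096,
+		.flags = MSM_BO_WC,
+	};
+	struct drm_msm_gem_info info_req = { 0 };
+	int ret;
+
+	ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &new_req);
+	if (ret)
+		return ret;
+
+	info_req.handle = new_req.handle;	/* flags == 0: mmap offset */
+	ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info_req);
+	if (ret)
+		return ret;
+
+	*handle = new_req.handle;
+	*offset = info_req.offset;	/* pass to mmap() on the drm fd */
+	return 0;
+}
+#endif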
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __MSM_DRM_H__ */
diff --git a/include/libdrm/libdrm/nouveau/nouveau.h b/include/libdrm/libdrm/nouveau/nouveau.h
new file mode 100644
index 0000000..0c632fe
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nouveau.h
@@ -0,0 +1,280 @@
+#ifndef __NOUVEAU_H__
+#define __NOUVEAU_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Supported class information, provided by the kernel */
+struct nouveau_sclass {
+ int32_t oclass;
+ int minver;
+ int maxver;
+};
+
+/* Client-provided array describing class versions that are desired.
+ *
+ * These are used to match against the kernel's list of supported classes.
+ */
+struct nouveau_mclass {
+ int32_t oclass;
+ int version;
+ void *data;
+};
+
+struct nouveau_object {
+ struct nouveau_object *parent;
+ uint64_t handle;
+ uint32_t oclass;
+ uint32_t length; /* deprecated */
+ void *data; /* deprecated */
+};
+
+int nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
+ uint32_t oclass, void *data, uint32_t length,
+ struct nouveau_object **);
+void nouveau_object_del(struct nouveau_object **);
+int nouveau_object_mthd(struct nouveau_object *, uint32_t mthd,
+ void *data, uint32_t size);
+int nouveau_object_sclass_get(struct nouveau_object *,
+ struct nouveau_sclass **);
+void nouveau_object_sclass_put(struct nouveau_sclass **);
+int nouveau_object_mclass(struct nouveau_object *,
+ const struct nouveau_mclass *);
+
+struct nouveau_drm {
+ struct nouveau_object client;
+ int fd;
+ uint32_t version;
+ bool nvif;
+};
+
+static inline struct nouveau_drm *
+nouveau_drm(struct nouveau_object *obj)
+{
+ while (obj && obj->parent)
+ obj = obj->parent;
+ return (struct nouveau_drm *)obj;
+}
+
+int nouveau_drm_new(int fd, struct nouveau_drm **);
+void nouveau_drm_del(struct nouveau_drm **);
+
+struct nouveau_device {
+ struct nouveau_object object;
+ int fd; /* deprecated */
+ uint32_t lib_version; /* deprecated */
+ uint32_t drm_version; /* deprecated */
+ uint32_t chipset;
+ uint64_t vram_size;
+ uint64_t gart_size;
+ uint64_t vram_limit;
+ uint64_t gart_limit;
+};
+
+int nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
+ void *data, uint32_t size, struct nouveau_device **);
+void nouveau_device_del(struct nouveau_device **);
+
+int nouveau_getparam(struct nouveau_device *, uint64_t param, uint64_t *value);
+int nouveau_setparam(struct nouveau_device *, uint64_t param, uint64_t value);
+
+/* deprecated */
+int nouveau_device_wrap(int fd, int close, struct nouveau_device **);
+int nouveau_device_open(const char *busid, struct nouveau_device **);
+
+struct nouveau_client {
+ struct nouveau_device *device;
+ int id;
+};
+
+int nouveau_client_new(struct nouveau_device *, struct nouveau_client **);
+void nouveau_client_del(struct nouveau_client **);
+
+union nouveau_bo_config {
+ struct {
+#define NV04_BO_16BPP 0x00000001
+#define NV04_BO_32BPP 0x00000002
+#define NV04_BO_ZETA 0x00000004
+ uint32_t surf_flags;
+ uint32_t surf_pitch;
+ } nv04;
+ struct {
+ uint32_t memtype;
+ uint32_t tile_mode;
+ } nv50;
+ struct {
+ uint32_t memtype;
+ uint32_t tile_mode;
+ } nvc0;
+ uint32_t data[8];
+};
+
+#define NOUVEAU_BO_VRAM 0x00000001
+#define NOUVEAU_BO_GART 0x00000002
+#define NOUVEAU_BO_APER (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)
+#define NOUVEAU_BO_RD 0x00000100
+#define NOUVEAU_BO_WR 0x00000200
+#define NOUVEAU_BO_RDWR (NOUVEAU_BO_RD | NOUVEAU_BO_WR)
+#define NOUVEAU_BO_NOBLOCK 0x00000400
+#define NOUVEAU_BO_LOW 0x00001000
+#define NOUVEAU_BO_HIGH 0x00002000
+#define NOUVEAU_BO_OR 0x00004000
+#define NOUVEAU_BO_MAP 0x80000000
+#define NOUVEAU_BO_CONTIG 0x40000000
+#define NOUVEAU_BO_NOSNOOP 0x20000000
+#define NOUVEAU_BO_COHERENT 0x10000000
+
+struct nouveau_bo {
+ struct nouveau_device *device;
+ uint32_t handle;
+ uint64_t size;
+ uint32_t flags;
+ uint64_t offset;
+ void *map;
+ union nouveau_bo_config config;
+};
+
+int nouveau_bo_new(struct nouveau_device *, uint32_t flags, uint32_t align,
+ uint64_t size, union nouveau_bo_config *,
+ struct nouveau_bo **);
+int nouveau_bo_wrap(struct nouveau_device *, uint32_t handle,
+ struct nouveau_bo **);
+int nouveau_bo_name_ref(struct nouveau_device *v, uint32_t name,
+ struct nouveau_bo **);
+int nouveau_bo_name_get(struct nouveau_bo *, uint32_t *name);
+void nouveau_bo_ref(struct nouveau_bo *, struct nouveau_bo **);
+int nouveau_bo_map(struct nouveau_bo *, uint32_t access,
+ struct nouveau_client *);
+int nouveau_bo_wait(struct nouveau_bo *, uint32_t access,
+ struct nouveau_client *);
+int nouveau_bo_prime_handle_ref(struct nouveau_device *, int prime_fd,
+ struct nouveau_bo **);
+int nouveau_bo_set_prime(struct nouveau_bo *, int *prime_fd);
+
+struct nouveau_list {
+ struct nouveau_list *prev;
+ struct nouveau_list *next;
+};
+
+struct nouveau_bufref {
+ struct nouveau_list thead;
+ struct nouveau_bo *bo;
+ uint32_t packet;
+ uint32_t flags;
+ uint32_t data;
+ uint32_t vor;
+ uint32_t tor;
+ uint32_t priv_data;
+ void *priv;
+};
+
+struct nouveau_bufctx {
+ struct nouveau_client *client;
+ struct nouveau_list head;
+ struct nouveau_list pending;
+ struct nouveau_list current;
+ int relocs;
+};
+
+int nouveau_bufctx_new(struct nouveau_client *, int bins,
+ struct nouveau_bufctx **);
+void nouveau_bufctx_del(struct nouveau_bufctx **);
+struct nouveau_bufref *
+nouveau_bufctx_refn(struct nouveau_bufctx *, int bin,
+ struct nouveau_bo *, uint32_t flags);
+struct nouveau_bufref *
+nouveau_bufctx_mthd(struct nouveau_bufctx *, int bin, uint32_t packet,
+ struct nouveau_bo *, uint64_t data, uint32_t flags,
+ uint32_t vor, uint32_t tor);
+void nouveau_bufctx_reset(struct nouveau_bufctx *, int bin);
+
+struct nouveau_pushbuf_krec;
+struct nouveau_pushbuf {
+ struct nouveau_client *client;
+ struct nouveau_object *channel;
+ struct nouveau_bufctx *bufctx;
+ void (*kick_notify)(struct nouveau_pushbuf *);
+ void *user_priv;
+ uint32_t rsvd_kick;
+ uint32_t flags;
+ uint32_t *cur;
+ uint32_t *end;
+};
+
+struct nouveau_pushbuf_refn {
+ struct nouveau_bo *bo;
+ uint32_t flags;
+};
+
+int nouveau_pushbuf_new(struct nouveau_client *, struct nouveau_object *chan,
+ int nr, uint32_t size, bool immediate,
+ struct nouveau_pushbuf **);
+void nouveau_pushbuf_del(struct nouveau_pushbuf **);
+int nouveau_pushbuf_space(struct nouveau_pushbuf *, uint32_t dwords,
+ uint32_t relocs, uint32_t pushes);
+void nouveau_pushbuf_data(struct nouveau_pushbuf *, struct nouveau_bo *,
+ uint64_t offset, uint64_t length);
+int nouveau_pushbuf_refn(struct nouveau_pushbuf *,
+ struct nouveau_pushbuf_refn *, int nr);
+/* Emits a reloc into the push buffer at the current position, you *must*
+ * have previously added the referenced buffer to a buffer context, and
+ * validated it against the current push buffer.
+ */
+void nouveau_pushbuf_reloc(struct nouveau_pushbuf *, struct nouveau_bo *,
+ uint32_t data, uint32_t flags,
+ uint32_t vor, uint32_t tor);
+int nouveau_pushbuf_validate(struct nouveau_pushbuf *);
+uint32_t nouveau_pushbuf_refd(struct nouveau_pushbuf *, struct nouveau_bo *);
+int nouveau_pushbuf_kick(struct nouveau_pushbuf *, struct nouveau_object *chan);
+struct nouveau_bufctx *
+nouveau_pushbuf_bufctx(struct nouveau_pushbuf *, struct nouveau_bufctx *);
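+
+/* Sketch (editor's illustration): the typical submission flow with the
+ * pushbuf/bufctx APIs declared above -- attach a bufctx, reference the
+ * BOs the commands touch, validate, emit, kick. The method header value
+ * is hypothetical and error handling is elided.
+ */
+#if 0
+static void example_submit(struct nouveau_pushbuf *push,
+			   struct nouveau_bufctx *bufctx,
+			   struct nouveau_bo *bo)
+{
+	nouveau_pushbuf_bufctx(push, bufctx);
+	nouveau_bufctx_refn(bufctx, 0, bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+	nouveau_pushbuf_validate(push);
+	nouveau_pushbuf_space(push, 8, 0, 0);	/* room for 8 dwords */
+	*push->cur++ = 0x20010000;	/* hypothetical method header */
+	nouveau_pushbuf_kick(push, push->channel);
+}
+#endif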
+
+#define NOUVEAU_DEVICE_CLASS 0x80000000
+#define NOUVEAU_FIFO_CHANNEL_CLASS 0x80000001
+#define NOUVEAU_NOTIFIER_CLASS 0x80000002
+
+struct nouveau_fifo {
+ struct nouveau_object *object;
+ uint32_t channel;
+ uint32_t pushbuf;
+ uint64_t unused1[3];
+};
+
+struct nv04_fifo {
+ struct nouveau_fifo base;
+ uint32_t vram;
+ uint32_t gart;
+ uint32_t notify;
+};
+
+struct nvc0_fifo {
+ struct nouveau_fifo base;
+ uint32_t notify;
+};
+
+#define NVE0_FIFO_ENGINE_GR 0x00000001
+#define NVE0_FIFO_ENGINE_VP 0x00000002
+#define NVE0_FIFO_ENGINE_PPP 0x00000004
+#define NVE0_FIFO_ENGINE_BSP 0x00000008
+#define NVE0_FIFO_ENGINE_CE0 0x00000010
+#define NVE0_FIFO_ENGINE_CE1 0x00000020
+#define NVE0_FIFO_ENGINE_ENC 0x00000040
+
+struct nve0_fifo {
+ struct {
+ struct nouveau_fifo base;
+ uint32_t notify;
+ };
+ uint32_t engine;
+};
+
+struct nv04_notify {
+ struct nouveau_object *object;
+ uint32_t offset;
+ uint32_t length;
+};
+
+bool
+nouveau_check_dead_channel(struct nouveau_drm *, struct nouveau_object *chan);
+
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/cl0080.h b/include/libdrm/libdrm/nouveau/nvif/cl0080.h
new file mode 100644
index 0000000..331620a
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/cl0080.h
@@ -0,0 +1,45 @@
+#ifndef __NVIF_CL0080_H__
+#define __NVIF_CL0080_H__
+
+struct nv_device_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 device; /* device identifier, ~0 for client default */
+};
+
+#define NV_DEVICE_V0_INFO 0x00
+#define NV_DEVICE_V0_TIME 0x01
+
+struct nv_device_info_v0 {
+ __u8 version;
+#define NV_DEVICE_INFO_V0_IGP 0x00
+#define NV_DEVICE_INFO_V0_PCI 0x01
+#define NV_DEVICE_INFO_V0_AGP 0x02
+#define NV_DEVICE_INFO_V0_PCIE 0x03
+#define NV_DEVICE_INFO_V0_SOC 0x04
+ __u8 platform;
+ __u16 chipset; /* from NV_PMC_BOOT_0 */
+ __u8 revision; /* from NV_PMC_BOOT_0 */
+#define NV_DEVICE_INFO_V0_TNT 0x01
+#define NV_DEVICE_INFO_V0_CELSIUS 0x02
+#define NV_DEVICE_INFO_V0_KELVIN 0x03
+#define NV_DEVICE_INFO_V0_RANKINE 0x04
+#define NV_DEVICE_INFO_V0_CURIE 0x05
+#define NV_DEVICE_INFO_V0_TESLA 0x06
+#define NV_DEVICE_INFO_V0_FERMI 0x07
+#define NV_DEVICE_INFO_V0_KEPLER 0x08
+#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+ __u8 family;
+ __u8 pad06[2];
+ __u64 ram_size;
+ __u64 ram_user;
+ char chip[16];
+ char name[64];
+};
+
+struct nv_device_time_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 time;
+};
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/cl9097.h b/include/libdrm/libdrm/nouveau/nvif/cl9097.h
new file mode 100644
index 0000000..4057676
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/cl9097.h
@@ -0,0 +1,44 @@
+#ifndef __NVIF_CL9097_H__
+#define __NVIF_CL9097_H__
+
+#define FERMI_A_ZBC_COLOR 0x00
+#define FERMI_A_ZBC_DEPTH 0x01
+
+struct fermi_a_zbc_color_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
+#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
+#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
+#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
+#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
+#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
+#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
+#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
+#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds[4];
+ __u32 l2[4];
+};
+
+struct fermi_a_zbc_depth_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds;
+ __u32 l2;
+};
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/class.h b/include/libdrm/libdrm/nouveau/nvif/class.h
new file mode 100644
index 0000000..4179cd6
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/class.h
@@ -0,0 +1,141 @@
+#ifndef __NVIF_CLASS_H__
+#define __NVIF_CLASS_H__
+
+/* these class numbers are made up by us, and not nvidia-assigned */
+#define NVIF_CLASS_CONTROL /* if0001.h */ -1
+#define NVIF_CLASS_PERFMON /* if0002.h */ -2
+#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
+#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
+#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
+#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
+#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
+
+/* the below match nvidia-assigned (either in hw, or sw) class numbers */
+#define NV_DEVICE /* cl0080.h */ 0x00000080
+
+#define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
+#define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
+#define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
+
+#define FERMI_TWOD_A 0x0000902d
+
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
+
+#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
+#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+
+#define NV04_DISP /* cl0046.h */ 0x00000046
+
+#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
+#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
+#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
+#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
+#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
+#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
+
+#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
+#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
+#define FERMI_CHANNEL_GPFIFO /* cl906f.h */ 0x0000906f
+#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
+#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
+
+#define NV50_DISP /* cl5070.h */ 0x00005070
+#define G82_DISP /* cl5070.h */ 0x00008270
+#define GT200_DISP /* cl5070.h */ 0x00008370
+#define GT214_DISP /* cl5070.h */ 0x00008570
+#define GT206_DISP /* cl5070.h */ 0x00008870
+#define GF110_DISP /* cl5070.h */ 0x00009070
+#define GK104_DISP /* cl5070.h */ 0x00009170
+#define GK110_DISP /* cl5070.h */ 0x00009270
+#define GM107_DISP /* cl5070.h */ 0x00009470
+#define GM204_DISP /* cl5070.h */ 0x00009570
+
+#define NV31_MPEG 0x00003174
+#define G82_MPEG 0x00008274
+
+#define NV74_VP2 0x00007476
+
+#define NV50_DISP_CURSOR /* cl507a.h */ 0x0000507a
+#define G82_DISP_CURSOR /* cl507a.h */ 0x0000827a
+#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a
+#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
+#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
+
+#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
+#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
+#define GT214_DISP_OVERLAY /* cl507b.h */ 0x0000857b
+#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b
+#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
+
+#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
+#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
+#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c
+#define GT214_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000857c
+#define GF110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000907c
+#define GK104_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000917c
+#define GK110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000927c
+
+#define NV50_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000507d
+#define G82_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000827d
+#define GT200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000837d
+#define GT214_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000857d
+#define GT206_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000887d
+#define GF110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000907d
+#define GK104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000917d
+#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
+#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+
+#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
+#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
+#define GT200_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000837e
+#define GT214_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000857e
+#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
+#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
+
+#define FERMI_A /* cl9097.h */ 0x00009097
+#define FERMI_B /* cl9097.h */ 0x00009197
+#define FERMI_C /* cl9097.h */ 0x00009297
+
+#define KEPLER_A /* cl9097.h */ 0x0000a097
+#define KEPLER_B /* cl9097.h */ 0x0000a197
+#define KEPLER_C /* cl9097.h */ 0x0000a297
+
+#define MAXWELL_A /* cl9097.h */ 0x0000b097
+#define MAXWELL_B /* cl9097.h */ 0x0000b197
+
+#define NV74_BSP 0x000074b0
+
+#define GT212_MSVLD 0x000085b1
+#define IGT21A_MSVLD 0x000086b1
+#define G98_MSVLD 0x000088b1
+#define GF100_MSVLD 0x000090b1
+#define GK104_MSVLD 0x000095b1
+
+#define GT212_MSPDEC 0x000085b2
+#define G98_MSPDEC 0x000088b2
+#define GF100_MSPDEC 0x000090b2
+#define GK104_MSPDEC 0x000095b2
+
+#define GT212_MSPPP 0x000085b3
+#define G98_MSPPP 0x000088b3
+#define GF100_MSPPP 0x000090b3
+
+#define G98_SEC 0x000088b4
+
+#define GT212_DMA 0x000085b5
+#define FERMI_DMA 0x000090b5
+#define KEPLER_DMA_COPY_A 0x0000a0b5
+#define MAXWELL_DMA_COPY_A 0x0000b0b5
+
+#define FERMI_DECOMPRESS 0x000090b8
+
+#define FERMI_COMPUTE_A 0x000090c0
+#define FERMI_COMPUTE_B 0x000091c0
+#define KEPLER_COMPUTE_A 0x0000a0c0
+#define KEPLER_COMPUTE_B 0x0000a1c0
+#define MAXWELL_COMPUTE_A 0x0000b0c0
+#define MAXWELL_COMPUTE_B 0x0000b1c0
+
+#define NV74_CIPHER 0x000074c1
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/if0002.h b/include/libdrm/libdrm/nouveau/nvif/if0002.h
new file mode 100644
index 0000000..c04c91d
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/if0002.h
@@ -0,0 +1,38 @@
+#ifndef __NVIF_IF0002_H__
+#define __NVIF_IF0002_H__
+
+#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
+
+struct nvif_perfmon_query_domain_v0 {
+ __u8 version;
+ __u8 id;
+ __u8 counter_nr;
+ __u8 iter;
+ __u16 signal_nr;
+ __u8 pad05[2];
+ char name[64];
+};
+
+struct nvif_perfmon_query_signal_v0 {
+ __u8 version;
+ __u8 domain;
+ __u16 iter;
+ __u8 signal;
+ __u8 source_nr;
+ __u8 pad05[2];
+ char name[64];
+};
+
+struct nvif_perfmon_query_source_v0 {
+ __u8 version;
+ __u8 domain;
+ __u8 signal;
+ __u8 iter;
+ __u8 pad04[4];
+ __u32 source;
+ __u32 mask;
+ char name[64];
+};
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/if0003.h b/include/libdrm/libdrm/nouveau/nvif/if0003.h
new file mode 100644
index 0000000..0cd03ef
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/if0003.h
@@ -0,0 +1,33 @@
+#ifndef __NVIF_IF0003_H__
+#define __NVIF_IF0003_H__
+
+struct nvif_perfdom_v0 {
+ __u8 version;
+ __u8 domain;
+ __u8 mode;
+ __u8 pad03[1];
+ struct {
+ __u8 signal[4];
+ __u64 source[4][8];
+ __u16 logic_op;
+ } ctr[4];
+};
+
+#define NVIF_PERFDOM_V0_INIT 0x00
+#define NVIF_PERFDOM_V0_SAMPLE 0x01
+#define NVIF_PERFDOM_V0_READ 0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u32 ctr[4];
+ __u32 clk;
+ __u8 pad04[4];
+};
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/ioctl.h b/include/libdrm/libdrm/nouveau/nvif/ioctl.h
new file mode 100644
index 0000000..c5f5eb8
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/ioctl.h
@@ -0,0 +1,132 @@
+#ifndef __NVIF_IOCTL_H__
+#define __NVIF_IOCTL_H__
+
+#define NVIF_VERSION_LATEST 0x0000000000000000ULL
+
+struct nvif_ioctl_v0 {
+ __u8 version;
+#define NVIF_IOCTL_V0_NOP 0x00
+#define NVIF_IOCTL_V0_SCLASS 0x01
+#define NVIF_IOCTL_V0_NEW 0x02
+#define NVIF_IOCTL_V0_DEL 0x03
+#define NVIF_IOCTL_V0_MTHD 0x04
+#define NVIF_IOCTL_V0_RD 0x05
+#define NVIF_IOCTL_V0_WR 0x06
+#define NVIF_IOCTL_V0_MAP 0x07
+#define NVIF_IOCTL_V0_UNMAP 0x08
+#define NVIF_IOCTL_V0_NTFY_NEW 0x09
+#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
+#define NVIF_IOCTL_V0_NTFY_GET 0x0b
+#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
+ __u8 type;
+ __u8 pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
+#define NVIF_IOCTL_V0_OWNER_ANY 0xff
+ __u8 owner;
+#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
+#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ __u8 data[]; /* ioctl data (below) */
+};
+
+struct nvif_ioctl_nop_v0 {
+ __u64 version;
+};
+
+struct nvif_ioctl_sclass_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ struct nvif_ioctl_sclass_oclass_v0 {
+ __s32 oclass;
+ __s16 minver;
+ __s16 maxver;
+ } oclass[];
+};
+
+struct nvif_ioctl_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ __u32 handle;
+ __s32 oclass;
+ __u8 data[]; /* class data (class.h) */
+};
+
+struct nvif_ioctl_del {
+};
+
+struct nvif_ioctl_rd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_wr_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_map_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[3];
+ __u32 length;
+ __u64 handle;
+};
+
+struct nvif_ioctl_unmap {
+};
+
+struct nvif_ioctl_ntfy_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 event;
+ __u8 index;
+ __u8 pad03[5];
+ __u8 data[]; /* event request data (event.h) */
+};
+
+struct nvif_ioctl_ntfy_del_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_get_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_put_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_mthd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ __u8 data[]; /* method data (class.h) */
+};
+
+#endif
diff --git a/include/libdrm/libdrm/nouveau/nvif/unpack.h b/include/libdrm/libdrm/nouveau/nvif/unpack.h
new file mode 100644
index 0000000..751bcf4
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau/nvif/unpack.h
@@ -0,0 +1,28 @@
+#ifndef __NVIF_UNPACK_H__
+#define __NVIF_UNPACK_H__
+
+#define nvif_unvers(r,d,s,m) ({ \
+ void **_data = (d); __u32 *_size = (s); int _ret = (r); \
+ if (_ret == -ENOSYS && *_size == sizeof(m)) { \
+ *_data = NULL; \
+ *_size = _ret = 0; \
+ } \
+ _ret; \
+})
+
+#define nvif_unpack(r,d,s,m,vl,vh,x) ({ \
+ void **_data = (d); __u32 *_size = (s); \
+ int _ret = (r), _vl = (vl), _vh = (vh); \
+ if (_ret == -ENOSYS && *_size >= sizeof(m) && \
+ (m).version >= _vl && (m).version <= _vh) { \
+ *_data = (__u8 *)*_data + sizeof(m); \
+ *_size = *_size - sizeof(m); \
+ if (_ret = 0, !(x)) { \
+ _ret = *_size ? -E2BIG : 0; \
+ *_data = NULL; \
+ *_size = 0; \
+ } \
+ } \
+ _ret; \
+})
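+
+/* Usage pattern (editor's illustration, modelled on kernel-side nouveau
+ * code): attempt to unpack a versioned argument struct such as nv_device_v0
+ * from cl0080.h; on success the remaining data/size describe any trailing
+ * payload.
+ */
+#if 0
+union {
+	struct nv_device_v0 v0;
+} *args = data;
+int ret = -ENOSYS;
+
+if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	/* args->v0 is valid here */
+}
+#endif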
+#endif
diff --git a/include/libdrm/libdrm/nouveau_drm.h b/include/libdrm/libdrm/nouveau_drm.h
new file mode 100644
index 0000000..4f94148
--- /dev/null
+++ b/include/libdrm/libdrm/nouveau_drm.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+ uint32_t pushbuf_domains;
+
+ /* Notifier memory */
+ uint32_t notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ uint32_t handle;
+ uint32_t grclass;
+ } subchan[8];
+ uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
+#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
+#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+
+#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
+#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
+#define NOUVEAU_GEM_TILE_16BPP 0x00000001
+#define NOUVEAU_GEM_TILE_32BPP 0x00000002
+#define NOUVEAU_GEM_TILE_ZETA 0x00000004
+#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
+
+struct drm_nouveau_gem_info {
+ __u32 handle;
+ __u32 domain;
+ __u64 size;
+ __u64 offset;
+ __u64 map_handle;
+ __u32 tile_mode;
+ __u32 tile_flags;
+};
+
+struct drm_nouveau_gem_new {
+ struct drm_nouveau_gem_info info;
+ __u32 channel_hint;
+ __u32 align;
+};
+
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+struct drm_nouveau_gem_pushbuf_bo_presumed {
+ __u32 valid;
+ __u32 domain;
+ __u64 offset;
+};
+
+struct drm_nouveau_gem_pushbuf_bo {
+ __u64 user_priv;
+ __u32 handle;
+ __u32 read_domains;
+ __u32 write_domains;
+ __u32 valid_domains;
+ struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
+};
+
+#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
+#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
+#define NOUVEAU_GEM_RELOC_OR (1 << 2)
+#define NOUVEAU_GEM_MAX_RELOCS 1024
+struct drm_nouveau_gem_pushbuf_reloc {
+ __u32 reloc_bo_index;
+ __u32 reloc_bo_offset;
+ __u32 bo_index;
+ __u32 flags;
+ __u32 data;
+ __u32 vor;
+ __u32 tor;
+};
+
+#define NOUVEAU_GEM_MAX_PUSH 512
+struct drm_nouveau_gem_pushbuf_push {
+ __u32 bo_index;
+ __u32 pad;
+ __u64 offset;
+ __u64 length;
+};
+
+struct drm_nouveau_gem_pushbuf {
+ __u32 channel;
+ __u32 nr_buffers;
+ __u64 buffers;
+ __u32 nr_relocs;
+ __u32 nr_push;
+ __u64 relocs;
+ __u64 push;
+ __u32 suffix0;
+ __u32 suffix1;
+#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
+ __u64 vram_available;
+ __u64 gart_available;
+};
+
+#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
+#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
+struct drm_nouveau_gem_cpu_prep {
+ __u32 handle;
+ __u32 flags;
+};
+
+struct drm_nouveau_gem_cpu_fini {
+ __u32 handle;
+};
+
+#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
+#define DRM_NOUVEAU_NVIF 0x07
+#define DRM_NOUVEAU_SVM_INIT 0x08
+#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_GEM_NEW 0x40
+#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
+#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
+#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
+#define DRM_NOUVEAU_GEM_INFO 0x44
+
+struct drm_nouveau_svm_init {
+ __u64 unmanaged_addr;
+ __u64 unmanaged_size;
+};
+
+struct drm_nouveau_svm_bind {
+ __u64 header;
+ __u64 va_start;
+ __u64 va_end;
+ __u64 npages;
+ __u64 stride;
+ __u64 result;
+ __u64 reserved0;
+ __u64 reserved1;
+};
+
+#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
+#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
+#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
+#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
+#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
+#define NOUVEAU_SVM_BIND_TARGET_BITS 32
+#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
+
+/*
+ * The mask below is used to validate the ioctl argument; userspace can also
+ * use it to make sure that no bits are set beyond the fields known for a
+ * given kernel version.
+ */
+#define NOUVEAU_SVM_BIND_VALID_BITS 48
+#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
+
+
+/*
+ * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migration to the target
+ * memory. result: number of pages successfully migrated to the target memory.
+ */
+#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
+
+/*
+ * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
+ */
+#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
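+
+/* Illustration (editor's, not part of the uAPI): packing the
+ * drm_nouveau_svm_bind.header field from the shift/mask layout above --
+ * command in bits 0..7, priority in bits 8..15, target in bits 16..47.
+ */
+#if 0
+__u64 header =
+	((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
+		<< NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
+	((__u64)0 << NOUVEAU_SVM_BIND_PRIORITY_SHIFT) |	/* priority 0 */
+	((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
+		<< NOUVEAU_SVM_BIND_TARGET_SHIFT);
+#endif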
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/libdrm/libdrm/qxl_drm.h b/include/libdrm/libdrm/qxl_drm.h
new file mode 100644
index 0000000..880999d
--- /dev/null
+++ b/include/libdrm/libdrm/qxl_drm.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef QXL_DRM_H
+#define QXL_DRM_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define QXL_GEM_DOMAIN_CPU 0
+#define QXL_GEM_DOMAIN_VRAM 1
+#define QXL_GEM_DOMAIN_SURFACE 2
+
+#define DRM_QXL_ALLOC 0x00
+#define DRM_QXL_MAP 0x01
+#define DRM_QXL_EXECBUFFER 0x02
+#define DRM_QXL_UPDATE_AREA 0x03
+#define DRM_QXL_GETPARAM 0x04
+#define DRM_QXL_CLIENTCAP 0x05
+
+#define DRM_QXL_ALLOC_SURF 0x06
+
+struct drm_qxl_alloc {
+ __u32 size;
+ __u32 handle; /* 0 is an invalid handle */
+};
+
+struct drm_qxl_map {
+ __u64 offset; /* use for mmap system call */
+ __u32 handle;
+ __u32 pad;
+};
+
+/*
+ * dest is the bo we are writing the relocation into
+ * src is the bo we are relocating.
+ * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
+ * src_offset)
+ */
+#define QXL_RELOC_TYPE_BO 1
+#define QXL_RELOC_TYPE_SURF 2
+
+struct drm_qxl_reloc {
+ __u64 src_offset; /* offset into src_handle or src buffer */
+ __u64 dst_offset; /* offset in dest handle */
+ __u32 src_handle; /* dest handle to compute address from */
+ __u32 dst_handle; /* 0 if to command buffer */
+ __u32 reloc_type;
+ __u32 pad;
+};
+
+struct drm_qxl_command {
+ __u64 command; /* void* */
+ __u64 relocs; /* struct drm_qxl_reloc* */
+ __u32 type;
+ __u32 command_size;
+ __u32 relocs_num;
+ __u32 pad;
+};
+
+struct drm_qxl_execbuffer {
+ __u32 flags; /* for future use */
+ __u32 commands_num;
+ __u64 commands; /* struct drm_qxl_command* */
+};
+
+struct drm_qxl_update_area {
+ __u32 handle;
+ __u32 top;
+ __u32 left;
+ __u32 bottom;
+ __u32 right;
+ __u32 pad;
+};
+
+#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
+#define QXL_PARAM_MAX_RELOCS 2
+struct drm_qxl_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+/* these are one-bit values */
+struct drm_qxl_clientcap {
+ __u32 index;
+ __u32 pad;
+};
+
+struct drm_qxl_alloc_surf {
+ __u32 format;
+ __u32 width;
+ __u32 height;
+ __s32 stride;
+ __u32 handle;
+ __u32 pad;
+};
+
+#define DRM_IOCTL_QXL_ALLOC \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
+
+#define DRM_IOCTL_QXL_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
+
+#define DRM_IOCTL_QXL_EXECBUFFER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
+ struct drm_qxl_execbuffer)
+
+#define DRM_IOCTL_QXL_UPDATE_AREA \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
+ struct drm_qxl_update_area)
+
+#define DRM_IOCTL_QXL_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
+ struct drm_qxl_getparam)
+
+#define DRM_IOCTL_QXL_CLIENTCAP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
+ struct drm_qxl_clientcap)
+
+#define DRM_IOCTL_QXL_ALLOC_SURF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
+ struct drm_qxl_alloc_surf)
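+
+/* Sketch (editor's illustration; names are assumptions): querying how many
+ * surfaces the device supports via DRM_IOCTL_QXL_GETPARAM.
+ */
+#if 0
+#include <xf86drm.h>
+
+static int qxl_num_surfaces(int fd, __u64 *num)
+{
+	struct drm_qxl_getparam gp = {
+		.param = QXL_PARAM_NUM_SURFACES,
+	};
+	int ret = drmIoctl(fd, DRM_IOCTL_QXL_GETPARAM, &gp);
+	if (ret == 0)
+		*num = gp.value;
+	return ret;
+}
+#endif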
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/r128_drm.h b/include/libdrm/libdrm/r128_drm.h
new file mode 100644
index 0000000..bf431a0
--- /dev/null
+++ b/include/libdrm/libdrm/r128_drm.h
@@ -0,0 +1,336 @@
+/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#ifndef __R128_DRM_H__
+#define __R128_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (r128_sarea.h)
+ */
+#ifndef __R128_SAREA_DEFINES__
+#define __R128_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define R128_UPLOAD_CONTEXT 0x001
+#define R128_UPLOAD_SETUP 0x002
+#define R128_UPLOAD_TEX0 0x004
+#define R128_UPLOAD_TEX1 0x008
+#define R128_UPLOAD_TEX0IMAGES 0x010
+#define R128_UPLOAD_TEX1IMAGES 0x020
+#define R128_UPLOAD_CORE 0x040
+#define R128_UPLOAD_MASKS 0x080
+#define R128_UPLOAD_WINDOW 0x100
+#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
+#define R128_REQUIRE_QUIESCENCE 0x400
+#define R128_UPLOAD_ALL 0x7ff
+
+#define R128_FRONT 0x1
+#define R128_BACK 0x2
+#define R128_DEPTH 0x4
+
+/* Primitive types
+ */
+#define R128_POINTS 0x1
+#define R128_LINES 0x2
+#define R128_LINE_STRIP 0x3
+#define R128_TRIANGLES 0x4
+#define R128_TRIANGLE_FAN 0x5
+#define R128_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define R128_BUFFER_SIZE 16384
+
+/* Byte offsets for indirect buffer data
+ */
+#define R128_INDEX_PRIM_OFFSET 20
+#define R128_HOSTDATA_BLIT_OFFSET 32
+
+/* Keep these small for testing.
+ */
+#define R128_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/AGP). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+#define R128_NR_CONTEXT_REGS 12
+
+#define R128_MAX_TEXTURE_LEVELS 11
+#define R128_MAX_TEXTURE_UNITS 2
+
+#endif /* __R128_SAREA_DEFINES__ */
+
+typedef struct {
+ /* Context state - can be written in one large chunk */
+ unsigned int dst_pitch_offset_c;
+ unsigned int dp_gui_master_cntl_c;
+ unsigned int sc_top_left_c;
+ unsigned int sc_bottom_right_c;
+ unsigned int z_offset_c;
+ unsigned int z_pitch_c;
+ unsigned int z_sten_cntl_c;
+ unsigned int tex_cntl_c;
+ unsigned int misc_3d_state_cntl_reg;
+ unsigned int texture_clr_cmp_clr_c;
+ unsigned int texture_clr_cmp_msk_c;
+ unsigned int fog_color_c;
+
+ /* Texture state */
+ unsigned int tex_size_pitch_c;
+ unsigned int constant_color_c;
+
+ /* Setup state */
+ unsigned int pm4_vc_fpu_setup;
+ unsigned int setup_cntl;
+
+ /* Mask state */
+ unsigned int dp_write_mask;
+ unsigned int sten_ref_mask_c;
+ unsigned int plane_3d_mask_c;
+
+ /* Window state */
+ unsigned int window_xy_offset;
+
+ /* Core state */
+ unsigned int scale_3d_cntl;
+} drm_r128_context_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int tex_cntl;
+ unsigned int tex_combine_cntl;
+ unsigned int tex_size_pitch;
+ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
+ unsigned int tex_border_color;
+} drm_r128_texture_regs_t;
+
+typedef struct drm_r128_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex buffer.
+ */
+ drm_r128_context_regs_t context_state;
+ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+
+ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+ unsigned int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+ int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+} drm_r128_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmR128.h)
+ */
+
+/* Rage 128 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_R128_INIT 0x00
+#define DRM_R128_CCE_START 0x01
+#define DRM_R128_CCE_STOP 0x02
+#define DRM_R128_CCE_RESET 0x03
+#define DRM_R128_CCE_IDLE 0x04
+/* 0x05 not used */
+#define DRM_R128_RESET 0x06
+#define DRM_R128_SWAP 0x07
+#define DRM_R128_CLEAR 0x08
+#define DRM_R128_VERTEX 0x09
+#define DRM_R128_INDICES 0x0a
+#define DRM_R128_BLIT 0x0b
+#define DRM_R128_DEPTH 0x0c
+#define DRM_R128_STIPPLE 0x0d
+/* 0x0e not used */
+#define DRM_R128_INDIRECT 0x0f
+#define DRM_R128_FULLSCREEN 0x10
+#define DRM_R128_CLEAR2 0x11
+#define DRM_R128_GETPARAM 0x12
+#define DRM_R128_FLIP 0x13
+
+#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
+#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
+#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
+/* 0x05 not used */
+#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
+#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
+#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
+/* 0x0e not used */
+#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
+#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
+#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
+#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
+#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
+
+typedef struct drm_r128_init {
+ enum {
+ R128_INIT_CCE = 0x01,
+ R128_CLEANUP_CCE = 0x02
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cce_mode;
+ int cce_secure;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int span_offset;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+} drm_r128_init_t;
+
+typedef struct drm_r128_cce_stop {
+ int flush;
+ int idle;
+} drm_r128_cce_stop_t;
+
+typedef struct drm_r128_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_r128_clear_t;
+
+typedef struct drm_r128_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_r128_vertex_t;
+
+typedef struct drm_r128_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_r128_indices_t;
+
+typedef struct drm_r128_blit {
+ int idx;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_r128_blit_t;
+
+typedef struct drm_r128_depth {
+ enum {
+ R128_WRITE_SPAN = 0x01,
+ R128_WRITE_PIXELS = 0x02,
+ R128_READ_SPAN = 0x03,
+ R128_READ_PIXELS = 0x04
+ } func;
+ int n;
+ int *x;
+ int *y;
+ unsigned int *buffer;
+ unsigned char *mask;
+} drm_r128_depth_t;
+
+typedef struct drm_r128_stipple {
+ unsigned int *mask;
+} drm_r128_stipple_t;
+
+typedef struct drm_r128_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_r128_indirect_t;
+
+typedef struct drm_r128_fullscreen {
+ enum {
+ R128_INIT_FULLSCREEN = 0x01,
+ R128_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_r128_fullscreen_t;
+
+/* 2.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define R128_PARAM_IRQ_NR 1
+
+typedef struct drm_r128_getparam {
+ int param;
+ void *value;
+} drm_r128_getparam_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/r600_pci_ids.h b/include/libdrm/libdrm/r600_pci_ids.h
new file mode 100644
index 0000000..a3b2eac
--- /dev/null
+++ b/include/libdrm/libdrm/r600_pci_ids.h
@@ -0,0 +1,487 @@
+CHIPSET(0x9400, R600_9400, R600)
+CHIPSET(0x9401, R600_9401, R600)
+CHIPSET(0x9402, R600_9402, R600)
+CHIPSET(0x9403, R600_9403, R600)
+CHIPSET(0x9405, R600_9405, R600)
+CHIPSET(0x940A, R600_940A, R600)
+CHIPSET(0x940B, R600_940B, R600)
+CHIPSET(0x940F, R600_940F, R600)
+
+CHIPSET(0x94C0, RV610_94C0, RV610)
+CHIPSET(0x94C1, RV610_94C1, RV610)
+CHIPSET(0x94C3, RV610_94C3, RV610)
+CHIPSET(0x94C4, RV610_94C4, RV610)
+CHIPSET(0x94C5, RV610_94C5, RV610)
+CHIPSET(0x94C6, RV610_94C6, RV610)
+CHIPSET(0x94C7, RV610_94C7, RV610)
+CHIPSET(0x94C8, RV610_94C8, RV610)
+CHIPSET(0x94C9, RV610_94C9, RV610)
+CHIPSET(0x94CB, RV610_94CB, RV610)
+CHIPSET(0x94CC, RV610_94CC, RV610)
+CHIPSET(0x94CD, RV610_94CD, RV610)
+
+CHIPSET(0x9580, RV630_9580, RV630)
+CHIPSET(0x9581, RV630_9581, RV630)
+CHIPSET(0x9583, RV630_9583, RV630)
+CHIPSET(0x9586, RV630_9586, RV630)
+CHIPSET(0x9587, RV630_9587, RV630)
+CHIPSET(0x9588, RV630_9588, RV630)
+CHIPSET(0x9589, RV630_9589, RV630)
+CHIPSET(0x958A, RV630_958A, RV630)
+CHIPSET(0x958B, RV630_958B, RV630)
+CHIPSET(0x958C, RV630_958C, RV630)
+CHIPSET(0x958D, RV630_958D, RV630)
+CHIPSET(0x958E, RV630_958E, RV630)
+CHIPSET(0x958F, RV630_958F, RV630)
+
+CHIPSET(0x9500, RV670_9500, RV670)
+CHIPSET(0x9501, RV670_9501, RV670)
+CHIPSET(0x9504, RV670_9504, RV670)
+CHIPSET(0x9505, RV670_9505, RV670)
+CHIPSET(0x9506, RV670_9506, RV670)
+CHIPSET(0x9507, RV670_9507, RV670)
+CHIPSET(0x9508, RV670_9508, RV670)
+CHIPSET(0x9509, RV670_9509, RV670)
+CHIPSET(0x950F, RV670_950F, RV670)
+CHIPSET(0x9511, RV670_9511, RV670)
+CHIPSET(0x9515, RV670_9515, RV670)
+CHIPSET(0x9517, RV670_9517, RV670)
+CHIPSET(0x9519, RV670_9519, RV670)
+
+CHIPSET(0x95C0, RV620_95C0, RV620)
+CHIPSET(0x95C2, RV620_95C2, RV620)
+CHIPSET(0x95C4, RV620_95C4, RV620)
+CHIPSET(0x95C5, RV620_95C5, RV620)
+CHIPSET(0x95C6, RV620_95C6, RV620)
+CHIPSET(0x95C7, RV620_95C7, RV620)
+CHIPSET(0x95C9, RV620_95C9, RV620)
+CHIPSET(0x95CC, RV620_95CC, RV620)
+CHIPSET(0x95CD, RV620_95CD, RV620)
+CHIPSET(0x95CE, RV620_95CE, RV620)
+CHIPSET(0x95CF, RV620_95CF, RV620)
+
+CHIPSET(0x9590, RV635_9590, RV635)
+CHIPSET(0x9591, RV635_9591, RV635)
+CHIPSET(0x9593, RV635_9593, RV635)
+CHIPSET(0x9595, RV635_9595, RV635)
+CHIPSET(0x9596, RV635_9596, RV635)
+CHIPSET(0x9597, RV635_9597, RV635)
+CHIPSET(0x9598, RV635_9598, RV635)
+CHIPSET(0x9599, RV635_9599, RV635)
+CHIPSET(0x959B, RV635_959B, RV635)
+
+CHIPSET(0x9610, RS780_9610, RS780)
+CHIPSET(0x9611, RS780_9611, RS780)
+CHIPSET(0x9612, RS780_9612, RS780)
+CHIPSET(0x9613, RS780_9613, RS780)
+CHIPSET(0x9614, RS780_9614, RS780)
+CHIPSET(0x9615, RS780_9615, RS780)
+CHIPSET(0x9616, RS780_9616, RS780)
+
+CHIPSET(0x9710, RS880_9710, RS880)
+CHIPSET(0x9711, RS880_9711, RS880)
+CHIPSET(0x9712, RS880_9712, RS880)
+CHIPSET(0x9713, RS880_9713, RS880)
+CHIPSET(0x9714, RS880_9714, RS880)
+CHIPSET(0x9715, RS880_9715, RS880)
+
+CHIPSET(0x9440, RV770_9440, RV770)
+CHIPSET(0x9441, RV770_9441, RV770)
+CHIPSET(0x9442, RV770_9442, RV770)
+CHIPSET(0x9443, RV770_9443, RV770)
+CHIPSET(0x9444, RV770_9444, RV770)
+CHIPSET(0x9446, RV770_9446, RV770)
+CHIPSET(0x944A, RV770_944A, RV770)
+CHIPSET(0x944B, RV770_944B, RV770)
+CHIPSET(0x944C, RV770_944C, RV770)
+CHIPSET(0x944E, RV770_944E, RV770)
+CHIPSET(0x9450, RV770_9450, RV770)
+CHIPSET(0x9452, RV770_9452, RV770)
+CHIPSET(0x9456, RV770_9456, RV770)
+CHIPSET(0x945A, RV770_945A, RV770)
+CHIPSET(0x945B, RV770_945B, RV770)
+CHIPSET(0x945E, RV770_945E, RV770)
+CHIPSET(0x9460, RV790_9460, RV770)
+CHIPSET(0x9462, RV790_9462, RV770)
+CHIPSET(0x946A, RV770_946A, RV770)
+CHIPSET(0x946B, RV770_946B, RV770)
+CHIPSET(0x947A, RV770_947A, RV770)
+CHIPSET(0x947B, RV770_947B, RV770)
+
+CHIPSET(0x9480, RV730_9480, RV730)
+CHIPSET(0x9487, RV730_9487, RV730)
+CHIPSET(0x9488, RV730_9488, RV730)
+CHIPSET(0x9489, RV730_9489, RV730)
+CHIPSET(0x948A, RV730_948A, RV730)
+CHIPSET(0x948F, RV730_948F, RV730)
+CHIPSET(0x9490, RV730_9490, RV730)
+CHIPSET(0x9491, RV730_9491, RV730)
+CHIPSET(0x9495, RV730_9495, RV730)
+CHIPSET(0x9498, RV730_9498, RV730)
+CHIPSET(0x949C, RV730_949C, RV730)
+CHIPSET(0x949E, RV730_949E, RV730)
+CHIPSET(0x949F, RV730_949F, RV730)
+
+CHIPSET(0x9540, RV710_9540, RV710)
+CHIPSET(0x9541, RV710_9541, RV710)
+CHIPSET(0x9542, RV710_9542, RV710)
+CHIPSET(0x954E, RV710_954E, RV710)
+CHIPSET(0x954F, RV710_954F, RV710)
+CHIPSET(0x9552, RV710_9552, RV710)
+CHIPSET(0x9553, RV710_9553, RV710)
+CHIPSET(0x9555, RV710_9555, RV710)
+CHIPSET(0x9557, RV710_9557, RV710)
+CHIPSET(0x955F, RV710_955F, RV710)
+
+CHIPSET(0x94A0, RV740_94A0, RV740)
+CHIPSET(0x94A1, RV740_94A1, RV740)
+CHIPSET(0x94A3, RV740_94A3, RV740)
+CHIPSET(0x94B1, RV740_94B1, RV740)
+CHIPSET(0x94B3, RV740_94B3, RV740)
+CHIPSET(0x94B4, RV740_94B4, RV740)
+CHIPSET(0x94B5, RV740_94B5, RV740)
+CHIPSET(0x94B9, RV740_94B9, RV740)
+
+CHIPSET(0x68E0, CEDAR_68E0, CEDAR)
+CHIPSET(0x68E1, CEDAR_68E1, CEDAR)
+CHIPSET(0x68E4, CEDAR_68E4, CEDAR)
+CHIPSET(0x68E5, CEDAR_68E5, CEDAR)
+CHIPSET(0x68E8, CEDAR_68E8, CEDAR)
+CHIPSET(0x68E9, CEDAR_68E9, CEDAR)
+CHIPSET(0x68F1, CEDAR_68F1, CEDAR)
+CHIPSET(0x68F2, CEDAR_68F2, CEDAR)
+CHIPSET(0x68F8, CEDAR_68F8, CEDAR)
+CHIPSET(0x68F9, CEDAR_68F9, CEDAR)
+CHIPSET(0x68FA, CEDAR_68FA, CEDAR)
+CHIPSET(0x68FE, CEDAR_68FE, CEDAR)
+
+CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
+CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
+CHIPSET(0x68C7, REDWOOD_68C7, REDWOOD)
+CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
+CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
+CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
+CHIPSET(0x68D9, REDWOOD_68D9, REDWOOD)
+CHIPSET(0x68DA, REDWOOD_68DA, REDWOOD)
+CHIPSET(0x68DE, REDWOOD_68DE, REDWOOD)
+
+CHIPSET(0x68A0, JUNIPER_68A0, JUNIPER)
+CHIPSET(0x68A1, JUNIPER_68A1, JUNIPER)
+CHIPSET(0x68A8, JUNIPER_68A8, JUNIPER)
+CHIPSET(0x68A9, JUNIPER_68A9, JUNIPER)
+CHIPSET(0x68B0, JUNIPER_68B0, JUNIPER)
+CHIPSET(0x68B8, JUNIPER_68B8, JUNIPER)
+CHIPSET(0x68B9, JUNIPER_68B9, JUNIPER)
+CHIPSET(0x68BA, JUNIPER_68BA, JUNIPER)
+CHIPSET(0x68BE, JUNIPER_68BE, JUNIPER)
+CHIPSET(0x68BF, JUNIPER_68BF, JUNIPER)
+
+CHIPSET(0x6880, CYPRESS_6880, CYPRESS)
+CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
+CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
+CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
+CHIPSET(0x688C, CYPRESS_688C, CYPRESS)
+CHIPSET(0x688D, CYPRESS_688D, CYPRESS)
+CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
+CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
+CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
+CHIPSET(0x689E, CYPRESS_689E, CYPRESS)
+
+CHIPSET(0x689C, HEMLOCK_689C, HEMLOCK)
+CHIPSET(0x689D, HEMLOCK_689D, HEMLOCK)
+
+CHIPSET(0x9802, PALM_9802, PALM)
+CHIPSET(0x9803, PALM_9803, PALM)
+CHIPSET(0x9804, PALM_9804, PALM)
+CHIPSET(0x9805, PALM_9805, PALM)
+CHIPSET(0x9806, PALM_9806, PALM)
+CHIPSET(0x9807, PALM_9807, PALM)
+CHIPSET(0x9808, PALM_9808, PALM)
+CHIPSET(0x9809, PALM_9809, PALM)
+CHIPSET(0x980A, PALM_980A, PALM)
+
+CHIPSET(0x9640, SUMO_9640, SUMO)
+CHIPSET(0x9641, SUMO_9641, SUMO)
+CHIPSET(0x9642, SUMO2_9642, SUMO2)
+CHIPSET(0x9643, SUMO2_9643, SUMO2)
+CHIPSET(0x9644, SUMO2_9644, SUMO2)
+CHIPSET(0x9645, SUMO2_9645, SUMO2)
+CHIPSET(0x9647, SUMO_9647, SUMO)
+CHIPSET(0x9648, SUMO_9648, SUMO)
+CHIPSET(0x9649, SUMO2_9649, SUMO2)
+CHIPSET(0x964a, SUMO_964A, SUMO)
+CHIPSET(0x964b, SUMO_964B, SUMO)
+CHIPSET(0x964c, SUMO_964C, SUMO)
+CHIPSET(0x964e, SUMO_964E, SUMO)
+CHIPSET(0x964f, SUMO_964F, SUMO)
+
+CHIPSET(0x6700, CAYMAN_6700, CAYMAN)
+CHIPSET(0x6701, CAYMAN_6701, CAYMAN)
+CHIPSET(0x6702, CAYMAN_6702, CAYMAN)
+CHIPSET(0x6703, CAYMAN_6703, CAYMAN)
+CHIPSET(0x6704, CAYMAN_6704, CAYMAN)
+CHIPSET(0x6705, CAYMAN_6705, CAYMAN)
+CHIPSET(0x6706, CAYMAN_6706, CAYMAN)
+CHIPSET(0x6707, CAYMAN_6707, CAYMAN)
+CHIPSET(0x6708, CAYMAN_6708, CAYMAN)
+CHIPSET(0x6709, CAYMAN_6709, CAYMAN)
+CHIPSET(0x6718, CAYMAN_6718, CAYMAN)
+CHIPSET(0x6719, CAYMAN_6719, CAYMAN)
+CHIPSET(0x671C, CAYMAN_671C, CAYMAN)
+CHIPSET(0x671D, CAYMAN_671D, CAYMAN)
+CHIPSET(0x671F, CAYMAN_671F, CAYMAN)
+
+CHIPSET(0x6720, BARTS_6720, BARTS)
+CHIPSET(0x6721, BARTS_6721, BARTS)
+CHIPSET(0x6722, BARTS_6722, BARTS)
+CHIPSET(0x6723, BARTS_6723, BARTS)
+CHIPSET(0x6724, BARTS_6724, BARTS)
+CHIPSET(0x6725, BARTS_6725, BARTS)
+CHIPSET(0x6726, BARTS_6726, BARTS)
+CHIPSET(0x6727, BARTS_6727, BARTS)
+CHIPSET(0x6728, BARTS_6728, BARTS)
+CHIPSET(0x6729, BARTS_6729, BARTS)
+CHIPSET(0x6738, BARTS_6738, BARTS)
+CHIPSET(0x6739, BARTS_6739, BARTS)
+CHIPSET(0x673E, BARTS_673E, BARTS)
+
+CHIPSET(0x6740, TURKS_6740, TURKS)
+CHIPSET(0x6741, TURKS_6741, TURKS)
+CHIPSET(0x6742, TURKS_6742, TURKS)
+CHIPSET(0x6743, TURKS_6743, TURKS)
+CHIPSET(0x6744, TURKS_6744, TURKS)
+CHIPSET(0x6745, TURKS_6745, TURKS)
+CHIPSET(0x6746, TURKS_6746, TURKS)
+CHIPSET(0x6747, TURKS_6747, TURKS)
+CHIPSET(0x6748, TURKS_6748, TURKS)
+CHIPSET(0x6749, TURKS_6749, TURKS)
+CHIPSET(0x674A, TURKS_674A, TURKS)
+CHIPSET(0x6750, TURKS_6750, TURKS)
+CHIPSET(0x6751, TURKS_6751, TURKS)
+CHIPSET(0x6758, TURKS_6758, TURKS)
+CHIPSET(0x6759, TURKS_6759, TURKS)
+CHIPSET(0x675B, TURKS_675B, TURKS)
+CHIPSET(0x675D, TURKS_675D, TURKS)
+CHIPSET(0x675F, TURKS_675F, TURKS)
+CHIPSET(0x6840, TURKS_6840, TURKS)
+CHIPSET(0x6841, TURKS_6841, TURKS)
+CHIPSET(0x6842, TURKS_6842, TURKS)
+CHIPSET(0x6843, TURKS_6843, TURKS)
+CHIPSET(0x6849, TURKS_6849, TURKS)
+CHIPSET(0x6850, TURKS_6850, TURKS)
+CHIPSET(0x6858, TURKS_6858, TURKS)
+CHIPSET(0x6859, TURKS_6859, TURKS)
+
+CHIPSET(0x6760, CAICOS_6760, CAICOS)
+CHIPSET(0x6761, CAICOS_6761, CAICOS)
+CHIPSET(0x6762, CAICOS_6762, CAICOS)
+CHIPSET(0x6763, CAICOS_6763, CAICOS)
+CHIPSET(0x6764, CAICOS_6764, CAICOS)
+CHIPSET(0x6765, CAICOS_6765, CAICOS)
+CHIPSET(0x6766, CAICOS_6766, CAICOS)
+CHIPSET(0x6767, CAICOS_6767, CAICOS)
+CHIPSET(0x6768, CAICOS_6768, CAICOS)
+CHIPSET(0x6770, CAICOS_6770, CAICOS)
+CHIPSET(0x6771, CAICOS_6771, CAICOS)
+CHIPSET(0x6772, CAICOS_6772, CAICOS)
+CHIPSET(0x6778, CAICOS_6778, CAICOS)
+CHIPSET(0x6779, CAICOS_6779, CAICOS)
+CHIPSET(0x677B, CAICOS_677B, CAICOS)
+
+CHIPSET(0x9900, ARUBA_9900, ARUBA)
+CHIPSET(0x9901, ARUBA_9901, ARUBA)
+CHIPSET(0x9903, ARUBA_9903, ARUBA)
+CHIPSET(0x9904, ARUBA_9904, ARUBA)
+CHIPSET(0x9905, ARUBA_9905, ARUBA)
+CHIPSET(0x9906, ARUBA_9906, ARUBA)
+CHIPSET(0x9907, ARUBA_9907, ARUBA)
+CHIPSET(0x9908, ARUBA_9908, ARUBA)
+CHIPSET(0x9909, ARUBA_9909, ARUBA)
+CHIPSET(0x990A, ARUBA_990A, ARUBA)
+CHIPSET(0x990B, ARUBA_990B, ARUBA)
+CHIPSET(0x990C, ARUBA_990C, ARUBA)
+CHIPSET(0x990D, ARUBA_990D, ARUBA)
+CHIPSET(0x990E, ARUBA_990E, ARUBA)
+CHIPSET(0x990F, ARUBA_990F, ARUBA)
+CHIPSET(0x9910, ARUBA_9910, ARUBA)
+CHIPSET(0x9913, ARUBA_9913, ARUBA)
+CHIPSET(0x9917, ARUBA_9917, ARUBA)
+CHIPSET(0x9918, ARUBA_9918, ARUBA)
+CHIPSET(0x9919, ARUBA_9919, ARUBA)
+CHIPSET(0x9990, ARUBA_9990, ARUBA)
+CHIPSET(0x9991, ARUBA_9991, ARUBA)
+CHIPSET(0x9992, ARUBA_9992, ARUBA)
+CHIPSET(0x9993, ARUBA_9993, ARUBA)
+CHIPSET(0x9994, ARUBA_9994, ARUBA)
+CHIPSET(0x9995, ARUBA_9995, ARUBA)
+CHIPSET(0x9996, ARUBA_9996, ARUBA)
+CHIPSET(0x9997, ARUBA_9997, ARUBA)
+CHIPSET(0x9998, ARUBA_9998, ARUBA)
+CHIPSET(0x9999, ARUBA_9999, ARUBA)
+CHIPSET(0x999A, ARUBA_999A, ARUBA)
+CHIPSET(0x999B, ARUBA_999B, ARUBA)
+CHIPSET(0x999C, ARUBA_999C, ARUBA)
+CHIPSET(0x999D, ARUBA_999D, ARUBA)
+CHIPSET(0x99A0, ARUBA_99A0, ARUBA)
+CHIPSET(0x99A2, ARUBA_99A2, ARUBA)
+CHIPSET(0x99A4, ARUBA_99A4, ARUBA)
+
+CHIPSET(0x6780, TAHITI_6780, TAHITI)
+CHIPSET(0x6784, TAHITI_6784, TAHITI)
+CHIPSET(0x6788, TAHITI_6788, TAHITI)
+CHIPSET(0x678A, TAHITI_678A, TAHITI)
+CHIPSET(0x6790, TAHITI_6790, TAHITI)
+CHIPSET(0x6791, TAHITI_6791, TAHITI)
+CHIPSET(0x6792, TAHITI_6792, TAHITI)
+CHIPSET(0x6798, TAHITI_6798, TAHITI)
+CHIPSET(0x6799, TAHITI_6799, TAHITI)
+CHIPSET(0x679A, TAHITI_679A, TAHITI)
+CHIPSET(0x679B, TAHITI_679B, TAHITI)
+CHIPSET(0x679E, TAHITI_679E, TAHITI)
+CHIPSET(0x679F, TAHITI_679F, TAHITI)
+
+CHIPSET(0x6800, PITCAIRN_6800, PITCAIRN)
+CHIPSET(0x6801, PITCAIRN_6801, PITCAIRN)
+CHIPSET(0x6802, PITCAIRN_6802, PITCAIRN)
+CHIPSET(0x6806, PITCAIRN_6806, PITCAIRN)
+CHIPSET(0x6808, PITCAIRN_6808, PITCAIRN)
+CHIPSET(0x6809, PITCAIRN_6809, PITCAIRN)
+CHIPSET(0x6810, PITCAIRN_6810, PITCAIRN)
+CHIPSET(0x6811, PITCAIRN_6811, PITCAIRN)
+CHIPSET(0x6816, PITCAIRN_6816, PITCAIRN)
+CHIPSET(0x6817, PITCAIRN_6817, PITCAIRN)
+CHIPSET(0x6818, PITCAIRN_6818, PITCAIRN)
+CHIPSET(0x6819, PITCAIRN_6819, PITCAIRN)
+CHIPSET(0x684C, PITCAIRN_684C, PITCAIRN)
+
+CHIPSET(0x6820, VERDE_6820, VERDE)
+CHIPSET(0x6821, VERDE_6821, VERDE)
+CHIPSET(0x6822, VERDE_6822, VERDE)
+CHIPSET(0x6823, VERDE_6823, VERDE)
+CHIPSET(0x6824, VERDE_6824, VERDE)
+CHIPSET(0x6825, VERDE_6825, VERDE)
+CHIPSET(0x6826, VERDE_6826, VERDE)
+CHIPSET(0x6827, VERDE_6827, VERDE)
+CHIPSET(0x6828, VERDE_6828, VERDE)
+CHIPSET(0x6829, VERDE_6829, VERDE)
+CHIPSET(0x682A, VERDE_682A, VERDE)
+CHIPSET(0x682B, VERDE_682B, VERDE)
+CHIPSET(0x682C, VERDE_682C, VERDE)
+CHIPSET(0x682D, VERDE_682D, VERDE)
+CHIPSET(0x682F, VERDE_682F, VERDE)
+CHIPSET(0x6830, VERDE_6830, VERDE)
+CHIPSET(0x6831, VERDE_6831, VERDE)
+CHIPSET(0x6835, VERDE_6835, VERDE)
+CHIPSET(0x6837, VERDE_6837, VERDE)
+CHIPSET(0x6838, VERDE_6838, VERDE)
+CHIPSET(0x6839, VERDE_6839, VERDE)
+CHIPSET(0x683B, VERDE_683B, VERDE)
+CHIPSET(0x683D, VERDE_683D, VERDE)
+CHIPSET(0x683F, VERDE_683F, VERDE)
+
+CHIPSET(0x6600, OLAND_6600, OLAND)
+CHIPSET(0x6601, OLAND_6601, OLAND)
+CHIPSET(0x6602, OLAND_6602, OLAND)
+CHIPSET(0x6603, OLAND_6603, OLAND)
+CHIPSET(0x6604, OLAND_6604, OLAND)
+CHIPSET(0x6605, OLAND_6605, OLAND)
+CHIPSET(0x6606, OLAND_6606, OLAND)
+CHIPSET(0x6607, OLAND_6607, OLAND)
+CHIPSET(0x6608, OLAND_6608, OLAND)
+CHIPSET(0x6610, OLAND_6610, OLAND)
+CHIPSET(0x6611, OLAND_6611, OLAND)
+CHIPSET(0x6613, OLAND_6613, OLAND)
+CHIPSET(0x6617, OLAND_6617, OLAND)
+CHIPSET(0x6620, OLAND_6620, OLAND)
+CHIPSET(0x6621, OLAND_6621, OLAND)
+CHIPSET(0x6623, OLAND_6623, OLAND)
+CHIPSET(0x6631, OLAND_6631, OLAND)
+
+CHIPSET(0x6660, HAINAN_6660, HAINAN)
+CHIPSET(0x6663, HAINAN_6663, HAINAN)
+CHIPSET(0x6664, HAINAN_6664, HAINAN)
+CHIPSET(0x6665, HAINAN_6665, HAINAN)
+CHIPSET(0x6667, HAINAN_6667, HAINAN)
+CHIPSET(0x666F, HAINAN_666F, HAINAN)
+
+CHIPSET(0x6640, BONAIRE_6640, BONAIRE)
+CHIPSET(0x6641, BONAIRE_6641, BONAIRE)
+CHIPSET(0x6646, BONAIRE_6646, BONAIRE)
+CHIPSET(0x6647, BONAIRE_6647, BONAIRE)
+CHIPSET(0x6649, BONAIRE_6649, BONAIRE)
+CHIPSET(0x6650, BONAIRE_6650, BONAIRE)
+CHIPSET(0x6651, BONAIRE_6651, BONAIRE)
+CHIPSET(0x6658, BONAIRE_6658, BONAIRE)
+CHIPSET(0x665C, BONAIRE_665C, BONAIRE)
+CHIPSET(0x665D, BONAIRE_665D, BONAIRE)
+CHIPSET(0x665F, BONAIRE_665F, BONAIRE)
+
+CHIPSET(0x9830, KABINI_9830, KABINI)
+CHIPSET(0x9831, KABINI_9831, KABINI)
+CHIPSET(0x9832, KABINI_9832, KABINI)
+CHIPSET(0x9833, KABINI_9833, KABINI)
+CHIPSET(0x9834, KABINI_9834, KABINI)
+CHIPSET(0x9835, KABINI_9835, KABINI)
+CHIPSET(0x9836, KABINI_9836, KABINI)
+CHIPSET(0x9837, KABINI_9837, KABINI)
+CHIPSET(0x9838, KABINI_9838, KABINI)
+CHIPSET(0x9839, KABINI_9839, KABINI)
+CHIPSET(0x983A, KABINI_983A, KABINI)
+CHIPSET(0x983B, KABINI_983B, KABINI)
+CHIPSET(0x983C, KABINI_983C, KABINI)
+CHIPSET(0x983D, KABINI_983D, KABINI)
+CHIPSET(0x983E, KABINI_983E, KABINI)
+CHIPSET(0x983F, KABINI_983F, KABINI)
+
+CHIPSET(0x9850, MULLINS_9850, MULLINS)
+CHIPSET(0x9851, MULLINS_9851, MULLINS)
+CHIPSET(0x9852, MULLINS_9852, MULLINS)
+CHIPSET(0x9853, MULLINS_9853, MULLINS)
+CHIPSET(0x9854, MULLINS_9854, MULLINS)
+CHIPSET(0x9855, MULLINS_9855, MULLINS)
+CHIPSET(0x9856, MULLINS_9856, MULLINS)
+CHIPSET(0x9857, MULLINS_9857, MULLINS)
+CHIPSET(0x9858, MULLINS_9858, MULLINS)
+CHIPSET(0x9859, MULLINS_9859, MULLINS)
+CHIPSET(0x985A, MULLINS_985A, MULLINS)
+CHIPSET(0x985B, MULLINS_985B, MULLINS)
+CHIPSET(0x985C, MULLINS_985C, MULLINS)
+CHIPSET(0x985D, MULLINS_985D, MULLINS)
+CHIPSET(0x985E, MULLINS_985E, MULLINS)
+CHIPSET(0x985F, MULLINS_985F, MULLINS)
+
+CHIPSET(0x1304, KAVERI_1304, KAVERI)
+CHIPSET(0x1305, KAVERI_1305, KAVERI)
+CHIPSET(0x1306, KAVERI_1306, KAVERI)
+CHIPSET(0x1307, KAVERI_1307, KAVERI)
+CHIPSET(0x1309, KAVERI_1309, KAVERI)
+CHIPSET(0x130A, KAVERI_130A, KAVERI)
+CHIPSET(0x130B, KAVERI_130B, KAVERI)
+CHIPSET(0x130C, KAVERI_130C, KAVERI)
+CHIPSET(0x130D, KAVERI_130D, KAVERI)
+CHIPSET(0x130E, KAVERI_130E, KAVERI)
+CHIPSET(0x130F, KAVERI_130F, KAVERI)
+CHIPSET(0x1310, KAVERI_1310, KAVERI)
+CHIPSET(0x1311, KAVERI_1311, KAVERI)
+CHIPSET(0x1312, KAVERI_1312, KAVERI)
+CHIPSET(0x1313, KAVERI_1313, KAVERI)
+CHIPSET(0x1315, KAVERI_1315, KAVERI)
+CHIPSET(0x1316, KAVERI_1316, KAVERI)
+CHIPSET(0x1317, KAVERI_1317, KAVERI)
+CHIPSET(0x1318, KAVERI_1318, KAVERI)
+CHIPSET(0x131B, KAVERI_131B, KAVERI)
+CHIPSET(0x131C, KAVERI_131C, KAVERI)
+CHIPSET(0x131D, KAVERI_131D, KAVERI)
+
+CHIPSET(0x67A0, HAWAII_67A0, HAWAII)
+CHIPSET(0x67A1, HAWAII_67A1, HAWAII)
+CHIPSET(0x67A2, HAWAII_67A2, HAWAII)
+CHIPSET(0x67A8, HAWAII_67A8, HAWAII)
+CHIPSET(0x67A9, HAWAII_67A9, HAWAII)
+CHIPSET(0x67AA, HAWAII_67AA, HAWAII)
+CHIPSET(0x67B0, HAWAII_67B0, HAWAII)
+CHIPSET(0x67B1, HAWAII_67B1, HAWAII)
+CHIPSET(0x67B8, HAWAII_67B8, HAWAII)
+CHIPSET(0x67B9, HAWAII_67B9, HAWAII)
+CHIPSET(0x67BA, HAWAII_67BA, HAWAII)
+CHIPSET(0x67BE, HAWAII_67BE, HAWAII)
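r600_pci_ids.h is an X-macro table: it deliberately ships no include guard and no definition of CHIPSET, so each consumer defines CHIPSET to the expansion it needs, includes the file, and undefines it again. A minimal sketch of a PCI-ID-to-family lookup built this way (the function name is illustrative, not part of the header):

/* Map a PCI device ID to its family name, e.g. 0x9400 -> "R600". */
static const char *r600_family_for_pci_id(unsigned int pci_id)
{
	switch (pci_id) {
#define CHIPSET(id, name, family) case id: return #family;
#include "r600_pci_ids.h"
#undef CHIPSET
	default:
		return NULL;	/* not an R600-class device */
	}
}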
diff --git a/include/libdrm/libdrm/radeon_bo.h b/include/libdrm/libdrm/radeon_bo.h
new file mode 100644
index 0000000..6e20c6c
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_bo.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_H
+#define RADEON_BO_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+/* bo object */
+#define RADEON_BO_FLAGS_MACRO_TILE 1
+#define RADEON_BO_FLAGS_MICRO_TILE 2
+#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
+
+struct radeon_bo_manager;
+struct radeon_cs;
+
+struct radeon_bo {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+};
+
+void radeon_bo_debug(struct radeon_bo *bo, const char *op);
+
+struct radeon_bo *radeon_bo_open(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+
+void radeon_bo_ref(struct radeon_bo *bo);
+struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo);
+int radeon_bo_map(struct radeon_bo *bo, int write);
+int radeon_bo_unmap(struct radeon_bo *bo);
+int radeon_bo_wait(struct radeon_bo *bo);
+int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain);
+int radeon_bo_set_tiling(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch);
+int radeon_bo_get_tiling(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch);
+int radeon_bo_is_static(struct radeon_bo *bo);
+int radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs);
+uint32_t radeon_bo_get_handle(struct radeon_bo *bo);
+uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo);
+#endif
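The buffer-object API above is reference-counted and manager-backed. A minimal lifecycle sketch, assuming the GEM manager constructor declared in radeon_bo_gem.h (next file) and that a handle of 0 asks the backend for a fresh allocation; RADEON_GEM_DOMAIN_GTT comes from radeon_drm.h:

#include <string.h>
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_gem.h"

/* Allocate a 64 KiB GTT buffer, zero it through the CPU mapping, release it. */
static int radeon_bo_roundtrip(int fd)
{
	struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
	struct radeon_bo *bo;

	if (!bom)
		return -1;
	bo = radeon_bo_open(bom, 0, 64 * 1024, 4096, RADEON_GEM_DOMAIN_GTT, 0);
	if (bo && radeon_bo_map(bo, 1 /* write */) == 0) {
		memset(bo->ptr, 0, bo->size);	/* ptr is only valid while mapped */
		radeon_bo_unmap(bo);
	}
	if (bo)
		radeon_bo_unref(bo);	/* frees the BO once the refcount drops to 0 */
	radeon_bo_manager_gem_dtor(bom);
	return 0;
}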
diff --git a/include/libdrm/libdrm/radeon_bo_gem.h b/include/libdrm/libdrm/radeon_bo_gem.h
new file mode 100644
index 0000000..08965f3
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_bo_gem.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_GEM_H
+#define RADEON_BO_GEM_H
+
+#include "radeon_bo.h"
+
+struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd);
+void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom);
+
+uint32_t radeon_gem_name_bo(struct radeon_bo *bo);
+void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo);
+int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
+int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name);
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle);
+struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
+ int fd_handle,
+ uint32_t size);
+#endif
diff --git a/include/libdrm/libdrm/radeon_bo_int.h b/include/libdrm/libdrm/radeon_bo_int.h
new file mode 100644
index 0000000..de981b0
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_bo_int.h
@@ -0,0 +1,45 @@
+#ifndef RADEON_BO_INT
+#define RADEON_BO_INT
+
+struct radeon_bo_manager {
+ const struct radeon_bo_funcs *funcs;
+ int fd;
+};
+
+struct radeon_bo_int {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+ /* private members */
+ uint32_t alignment;
+ uint32_t domains;
+ unsigned cref;
+ struct radeon_bo_manager *bom;
+ uint32_t space_accounted;
+ uint32_t referenced_in_cs;
+};
+
+/* bo functions */
+struct radeon_bo_funcs {
+ struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+ void (*bo_ref)(struct radeon_bo_int *bo);
+ struct radeon_bo *(*bo_unref)(struct radeon_bo_int *bo);
+ int (*bo_map)(struct radeon_bo_int *bo, int write);
+ int (*bo_unmap)(struct radeon_bo_int *bo);
+ int (*bo_wait)(struct radeon_bo_int *bo);
+ int (*bo_is_static)(struct radeon_bo_int *bo);
+ int (*bo_set_tiling)(struct radeon_bo_int *bo, uint32_t tiling_flags,
+ uint32_t pitch);
+ int (*bo_get_tiling)(struct radeon_bo_int *bo, uint32_t *tiling_flags,
+ uint32_t *pitch);
+ int (*bo_is_busy)(struct radeon_bo_int *bo, uint32_t *domain);
+ int (*bo_is_referenced_by_cs)(struct radeon_bo_int *bo, struct radeon_cs *cs);
+};
+
+#endif
diff --git a/include/libdrm/libdrm/radeon_cs.h b/include/libdrm/libdrm/radeon_cs.h
new file mode 100644
index 0000000..f68a624
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_cs.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_H
+#define RADEON_CS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_bo.h"
+
+struct radeon_cs_reloc {
+ struct radeon_bo *bo;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+#define RADEON_CS_SPACE_OK 0
+#define RADEON_CS_SPACE_OP_TO_BIG 1
+#define RADEON_CS_SPACE_FLUSH 2
+
+struct radeon_cs {
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+};
+
+#define MAX_SPACE_BOS (32)
+
+struct radeon_cs_manager;
+
+extern struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+
+extern int radeon_cs_begin(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func, int line);
+extern int radeon_cs_end(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line);
+extern int radeon_cs_emit(struct radeon_cs *cs);
+extern int radeon_cs_destroy(struct radeon_cs *cs);
+extern int radeon_cs_erase(struct radeon_cs *cs);
+extern int radeon_cs_need_flush(struct radeon_cs *cs);
+extern void radeon_cs_print(struct radeon_cs *cs, FILE *file);
+extern void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit);
+extern void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data);
+extern int radeon_cs_write_reloc(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+extern uint32_t radeon_cs_get_id(struct radeon_cs *cs);
+/*
+ * Add a persistent BO to the list.
+ * A persistent BO is one that will be referenced across flushes,
+ * e.g. a colorbuffer or texture.
+ * Persistent BOs get reset when a new "operation" happens, where an
+ * operation is a state emission (color/textures etc.) followed by a
+ * batch of vertices.
+ */
+void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+/* reset the persistent BO list */
+void radeon_cs_space_reset_bos(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list */
+int radeon_cs_space_check(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list and a temporary BO
+ * a temporary BO is like a DMA buffer, which gets flushed with the
+ * command buffer */
+int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+static inline void radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
+{
+ cs->packets[cs->cdw++] = dword;
+ if (cs->section_ndw) {
+ cs->section_cdw++;
+ }
+}
+
+static inline void radeon_cs_write_qword(struct radeon_cs *cs, uint64_t qword)
+{
+ memcpy(cs->packets + cs->cdw, &qword, sizeof(uint64_t));
+ cs->cdw += 2;
+ if (cs->section_ndw) {
+ cs->section_cdw += 2;
+ }
+}
+
+static inline void radeon_cs_write_table(struct radeon_cs *cs,
+ const void *data, uint32_t size)
+{
+ memcpy(cs->packets + cs->cdw, data, size * 4);
+ cs->cdw += size;
+ if (cs->section_ndw) {
+ cs->section_cdw += size;
+ }
+}
+#endif
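Tying the declarations together: a command stream comes from a manager, dwords and relocations are written between begin/end markers (which record file/function/line for overflow debugging), and radeon_cs_emit fires the ioctl. A minimal sketch, assuming the GEM manager constructor declared in radeon_cs_gem.h (next file); the packet contents are illustrative placeholders, not a real packet:

#include "radeon_cs.h"
#include "radeon_cs_gem.h"

static int cs_roundtrip(int fd, struct radeon_bo *bo)
{
	struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(fd);
	struct radeon_cs *cs = csm ? radeon_cs_create(csm, 1024) : NULL;
	int r = -1;

	if (!cs)
		goto out;
	/* reserve generously: write_reloc also appends dwords to the stream */
	radeon_cs_begin(cs, 8, __FILE__, __func__, __LINE__);
	radeon_cs_write_dword(cs, 0x00000000);	/* placeholder packet header */
	radeon_cs_write_dword(cs, 0x00000000);	/* placeholder payload */
	radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_GTT, 0, 0);
	radeon_cs_end(cs, __FILE__, __func__, __LINE__);
	r = radeon_cs_emit(cs);		/* submits via the CS ioctl */
	radeon_cs_erase(cs);		/* resets the stream for reuse */
out:
	if (cs)
		radeon_cs_destroy(cs);
	if (csm)
		radeon_cs_manager_gem_dtor(csm);
	return r;
}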
diff --git a/include/libdrm/libdrm/radeon_cs_gem.h b/include/libdrm/libdrm/radeon_cs_gem.h
new file mode 100644
index 0000000..5dea38a
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_cs_gem.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_GEM_H
+#define RADEON_CS_GEM_H
+
+#include "radeon_cs.h"
+
+struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd);
+void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm);
+
+#endif
diff --git a/include/libdrm/libdrm/radeon_cs_int.h b/include/libdrm/libdrm/radeon_cs_int.h
new file mode 100644
index 0000000..d906ad4
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_cs_int.h
@@ -0,0 +1,67 @@
+
+#ifndef _RADEON_CS_INT_H_
+#define _RADEON_CS_INT_H_
+
+struct radeon_cs_space_check {
+ struct radeon_bo_int *bo;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t new_accounted;
+};
+
+struct radeon_cs_int {
+ /* keep first two in same place */
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+ /* private members */
+ struct radeon_cs_manager *csm;
+ void *relocs;
+ unsigned crelocs;
+ unsigned relocs_total_size;
+ const char *section_file;
+ const char *section_func;
+ int section_line;
+ struct radeon_cs_space_check bos[MAX_SPACE_BOS];
+ int bo_count;
+ void (*space_flush_fn)(void *);
+ void *space_flush_data;
+ uint32_t id;
+};
+
+/* cs functions */
+struct radeon_cs_funcs {
+ struct radeon_cs_int *(*cs_create)(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+ int (*cs_write_reloc)(struct radeon_cs_int *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+ int (*cs_begin)(struct radeon_cs_int *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line);
+ int (*cs_end)(struct radeon_cs_int *cs,
+ const char *file, const char *func,
+ int line);
+
+ int (*cs_emit)(struct radeon_cs_int *cs);
+ int (*cs_destroy)(struct radeon_cs_int *cs);
+ int (*cs_erase)(struct radeon_cs_int *cs);
+ int (*cs_need_flush)(struct radeon_cs_int *cs);
+ void (*cs_print)(struct radeon_cs_int *cs, FILE *file);
+};
+
+struct radeon_cs_manager {
+ const struct radeon_cs_funcs *funcs;
+ int fd;
+ int32_t vram_limit, gart_limit;
+ int32_t vram_write_used, gart_write_used;
+ int32_t read_used;
+};
+#endif
diff --git a/include/libdrm/libdrm/radeon_drm.h b/include/libdrm/libdrm/radeon_drm.h
new file mode 100644
index 0000000..a1e385d
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_drm.h
@@ -0,0 +1,1079 @@
+/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/* Old style state flags, required for sarea interface (1.1 and 1.2
+ * clears) and 1.2 drm_vertex2 ioctl.
+ */
+#define RADEON_UPLOAD_CONTEXT 0x00000001
+#define RADEON_UPLOAD_VERTFMT 0x00000002
+#define RADEON_UPLOAD_LINE 0x00000004
+#define RADEON_UPLOAD_BUMPMAP 0x00000008
+#define RADEON_UPLOAD_MASKS 0x00000010
+#define RADEON_UPLOAD_VIEWPORT 0x00000020
+#define RADEON_UPLOAD_SETUP 0x00000040
+#define RADEON_UPLOAD_TCL 0x00000080
+#define RADEON_UPLOAD_MISC 0x00000100
+#define RADEON_UPLOAD_TEX0 0x00000200
+#define RADEON_UPLOAD_TEX1 0x00000400
+#define RADEON_UPLOAD_TEX2 0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
+#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE 0x00010000
+#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
+#define RADEON_UPLOAD_ALL 0x003effff
+#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
+
+/* New style per-packet identifiers for use in cmd_buffer ioctl with
+ * the RADEON_EMIT_PACKET command. Comments relate new packets to old
+ * state bits and the packet size:
+ */
+#define RADEON_EMIT_PP_MISC 0 /* context/7 */
+#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
+#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
+#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
+#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
+#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
+#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
+#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
+#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
+#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
+#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
+#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
+#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
+#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
+#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
+#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
+#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
+#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
+#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
+#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
+#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
+#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
+#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
+#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
+#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
+#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
+#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
+#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
+#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
+#define R200_EMIT_VAP_CTL 32 /* vap/1 */
+#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
+#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
+#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
+#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
+#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
+#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
+#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
+#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
+#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
+#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
+#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
+#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
+#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
+#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
+#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
+#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
+#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
+#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
+#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
+#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
+#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
+#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
+#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
+#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
+#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
+#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
+#define R200_EMIT_PP_CUBIC_FACES_0 61
+#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
+#define R200_EMIT_PP_CUBIC_FACES_1 63
+#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
+#define R200_EMIT_PP_CUBIC_FACES_2 65
+#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
+#define R200_EMIT_PP_CUBIC_FACES_3 67
+#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
+#define R200_EMIT_PP_CUBIC_FACES_4 69
+#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
+#define R200_EMIT_PP_CUBIC_FACES_5 71
+#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
+#define RADEON_EMIT_PP_TEX_SIZE_0 73
+#define RADEON_EMIT_PP_TEX_SIZE_1 74
+#define RADEON_EMIT_PP_TEX_SIZE_2 75
+#define R200_EMIT_RB3D_BLENDCOLOR 76
+#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
+#define RADEON_EMIT_PP_CUBIC_FACES_0 78
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
+#define RADEON_EMIT_PP_CUBIC_FACES_1 80
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
+#define RADEON_EMIT_PP_CUBIC_FACES_2 82
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
+#define R200_EMIT_PP_TRI_PERF_CNTL 84
+#define R200_EMIT_PP_AFS_0 85
+#define R200_EMIT_PP_AFS_1 86
+#define R200_EMIT_ATF_TFACTOR 87
+#define R200_EMIT_PP_TXCTLALL_0 88
+#define R200_EMIT_PP_TXCTLALL_1 89
+#define R200_EMIT_PP_TXCTLALL_2 90
+#define R200_EMIT_PP_TXCTLALL_3 91
+#define R200_EMIT_PP_TXCTLALL_4 92
+#define R200_EMIT_PP_TXCTLALL_5 93
+#define R200_EMIT_VAP_PVS_CNTL 94
+#define RADEON_MAX_STATE_PACKETS 95
+
+/* Commands understood by cmd_buffer ioctl. More can be added but
+ * obviously these can't be removed or changed:
+ */
+#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
+#define RADEON_CMD_SCALARS 2 /* emit scalar data */
+#define RADEON_CMD_VECTORS 3 /* emit vector data */
+#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
+#define RADEON_CMD_PACKET3 5 /* emit hw packet */
+#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
+#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
+#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
+ * doesn't make the cpu wait, just
+ * the graphics hardware */
+#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
+
+typedef union {
+ int i;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, packet_id, pad0, pad1;
+ } packet;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } scalars;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } vectors;
+ struct {
+ unsigned char cmd_type, addr_lo, addr_hi, count;
+ } veclinear;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+} drm_radeon_cmd_header_t;
+
+#define RADEON_WAIT_2D 0x1
+#define RADEON_WAIT_3D 0x2
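Each command in the DRM_RADEON_CMDBUF stream starts with one of these 32-bit headers; clients fill the union view that matches cmd_type and store the i member into the buffer. A minimal encoding sketch:

/* Encode a state packet (SE_CNTL, "setup/2" = 2 payload dwords) and a wait. */
static void radeon_encode_example(int *out)
{
	drm_radeon_cmd_header_t h;

	h.i = 0;
	h.packet.cmd_type = RADEON_CMD_PACKET;
	h.packet.packet_id = RADEON_EMIT_SE_CNTL;
	out[0] = h.i;
	/* out[1] and out[2] would carry the two SE_CNTL register values */

	h.i = 0;
	h.wait.cmd_type = RADEON_CMD_WAIT;
	h.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;
	out[3] = h.i;
}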
+
+/* Allowed parameters for R300_CMD_PACKET3
+ */
+#define R300_CMD_PACKET3_CLEAR 0
+#define R300_CMD_PACKET3_RAW 1
+
+/* Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * or reordered before it is stabilized.
+ */
+#define R300_CMD_PACKET0 1
+#define R300_CMD_VPU 2 /* emit vertex program upload */
+#define R300_CMD_PACKET3 3 /* emit a packet3 */
+#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
+#define R300_CMD_CP_DELAY 5
+#define R300_CMD_DMA_DISCARD 6
+#define R300_CMD_WAIT 7
+# define R300_WAIT_2D 0x1
+# define R300_WAIT_3D 0x2
+/* these two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is kept for backwards compatibility;
+ * new code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS.
+ */
+# define R300_WAIT_2D_CLEAN 0x3
+# define R300_WAIT_3D_CLEAN 0x4
+
+# define R300_NEW_WAIT_2D_3D 0x3
+# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
+# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
+# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
+
+#define R300_CMD_SCRATCH 8
+#define R300_CMD_R500FP 9
+
+typedef union {
+ unsigned int u;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, count, reglo, reghi;
+ } packet0;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi;
+ } vpu;
+ struct {
+ unsigned char cmd_type, packet, pad0, pad1;
+ } packet3;
+ struct {
+ unsigned char cmd_type, packet;
+ unsigned short count; /* amount of packet2 to emit */
+ } delay;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+ struct {
+ unsigned char cmd_type, reg, n_bufs, flags;
+ } scratch;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi_flags;
+ } r500fp;
+} drm_r300_cmd_header_t;
+
+#define RADEON_FRONT 0x1
+#define RADEON_BACK 0x2
+#define RADEON_DEPTH 0x4
+#define RADEON_STENCIL 0x8
+#define RADEON_CLEAR_FASTZ 0x80000000
+#define RADEON_USE_HIERZ 0x40000000
+#define RADEON_USE_COMP_ZBUF 0x20000000
+
+#define R500FP_CONSTANT_TYPE (1 << 1)
+#define R500FP_CONSTANT_CLAMP (1 << 2)
+
+/* Primitive types
+ */
+#define RADEON_POINTS 0x1
+#define RADEON_LINES 0x2
+#define RADEON_LINE_STRIP 0x3
+#define RADEON_TRIANGLES 0x4
+#define RADEON_TRIANGLE_FAN 0x5
+#define RADEON_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define RADEON_BUFFER_SIZE 65536
+
+/* Byte offsets for indirect buffer data
+ */
+#define RADEON_INDEX_PRIM_OFFSET 20
+
+#define RADEON_SCRATCH_REG_OFFSET 32
+
+#define R600_SCRATCH_REG_OFFSET 256
+
+#define RADEON_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/GART). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP 0
+#define RADEON_GART_TEX_HEAP 1
+#define RADEON_NR_TEX_HEAPS 2
+#define RADEON_NR_TEX_REGIONS 64
+#define RADEON_LOG_TEX_GRANULARITY 16
+
+#define RADEON_MAX_TEXTURE_LEVELS 12
+#define RADEON_MAX_TEXTURE_UNITS 3
+
+#define RADEON_MAX_SURFACES 8
+
+/* Blits have strict offset rules. All blit offsets must be aligned on
+ * a 1K-byte boundary.
+ */
+#define RADEON_OFFSET_SHIFT 10
+#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
+#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
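So a client rounds any byte offset up to the next legal blit offset with the usual mask arithmetic; a hypothetical helper:

/* e.g. 1234 -> 2048, 2048 -> 2048 */
static unsigned int radeon_align_blit_offset(unsigned int offset)
{
	return (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
}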
+
+#endif /* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int red;
+ unsigned int green;
+ unsigned int blue;
+ unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+ /* Context state */
+ unsigned int pp_misc; /* 0x1c14 */
+ unsigned int pp_fog_color;
+ unsigned int re_solid_color;
+ unsigned int rb3d_blendcntl;
+ unsigned int rb3d_depthoffset;
+ unsigned int rb3d_depthpitch;
+ unsigned int rb3d_zstencilcntl;
+
+ unsigned int pp_cntl; /* 0x1c38 */
+ unsigned int rb3d_cntl;
+ unsigned int rb3d_coloroffset;
+ unsigned int re_width_height;
+ unsigned int rb3d_colorpitch;
+ unsigned int se_cntl;
+
+ /* Vertex format state */
+ unsigned int se_coord_fmt; /* 0x1c50 */
+
+ /* Line state */
+ unsigned int re_line_pattern; /* 0x1cd0 */
+ unsigned int re_line_state;
+
+ unsigned int se_line_width; /* 0x1db8 */
+
+ /* Bumpmap state */
+ unsigned int pp_lum_matrix; /* 0x1d00 */
+
+ unsigned int pp_rot_matrix_0; /* 0x1d58 */
+ unsigned int pp_rot_matrix_1;
+
+ /* Mask state */
+ unsigned int rb3d_stencilrefmask; /* 0x1d7c */
+ unsigned int rb3d_ropcntl;
+ unsigned int rb3d_planemask;
+
+ /* Viewport state */
+ unsigned int se_vport_xscale; /* 0x1d98 */
+ unsigned int se_vport_xoffset;
+ unsigned int se_vport_yscale;
+ unsigned int se_vport_yoffset;
+ unsigned int se_vport_zscale;
+ unsigned int se_vport_zoffset;
+
+ /* Setup state */
+ unsigned int se_cntl_status; /* 0x2140 */
+
+ /* Misc state */
+ unsigned int re_top_left; /* 0x26c0 */
+ unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+typedef struct {
+ /* Zbias state */
+ unsigned int se_zbias_factor; /* 0x1dac */
+ unsigned int se_zbias_constant;
+} drm_radeon_context2_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int pp_txfilter;
+ unsigned int pp_txformat;
+ unsigned int pp_txoffset;
+ unsigned int pp_txcblend;
+ unsigned int pp_txablend;
+ unsigned int pp_tfactor;
+ unsigned int pp_border_color;
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim:8;
+ unsigned int stateidx:8;
+ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
+ unsigned int vc_format; /* vertex format */
+} drm_radeon_prim_t;
+
+typedef struct {
+ drm_radeon_context_regs_t context;
+ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
+ drm_radeon_context2_regs_t context2;
+ unsigned int dirty;
+} drm_radeon_state_t;
+
+typedef struct {
+ /* The channel for communication of state information to the
+ * kernel on firing a vertex buffer with either of the
+ * obsoleted vertex/index ioctls.
+ */
+ drm_radeon_context_regs_t context_state;
+ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+ unsigned int last_clear;
+
+ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+ 1];
+ unsigned int tex_age[RADEON_NR_TEX_HEAPS];
+ int ctx_owner;
+	int pfState;		/* number of 3d windows (0, 1, 2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+ int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
+} drm_radeon_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ *
+ * KW: actually it's illegal to change any of this (backwards compatibility).
+ */
+
+/* Radeon specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_RADEON_CP_INIT 0x00
+#define DRM_RADEON_CP_START 0x01
+#define DRM_RADEON_CP_STOP 0x02
+#define DRM_RADEON_CP_RESET 0x03
+#define DRM_RADEON_CP_IDLE 0x04
+#define DRM_RADEON_RESET 0x05
+#define DRM_RADEON_FULLSCREEN 0x06
+#define DRM_RADEON_SWAP 0x07
+#define DRM_RADEON_CLEAR 0x08
+#define DRM_RADEON_VERTEX 0x09
+#define DRM_RADEON_INDICES 0x0A
+#define DRM_RADEON_NOT_USED		/* 0x0b: retired request number */
+#define DRM_RADEON_STIPPLE 0x0C
+#define DRM_RADEON_INDIRECT 0x0D
+#define DRM_RADEON_TEXTURE 0x0E
+#define DRM_RADEON_VERTEX2 0x0F
+#define DRM_RADEON_CMDBUF 0x10
+#define DRM_RADEON_GETPARAM 0x11
+#define DRM_RADEON_FLIP 0x12
+#define DRM_RADEON_ALLOC 0x13
+#define DRM_RADEON_FREE 0x14
+#define DRM_RADEON_INIT_HEAP 0x15
+#define DRM_RADEON_IRQ_EMIT 0x16
+#define DRM_RADEON_IRQ_WAIT 0x17
+#define DRM_RADEON_CP_RESUME 0x18
+#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO 0x1c
+#define DRM_RADEON_GEM_CREATE 0x1d
+#define DRM_RADEON_GEM_MMAP 0x1e
+#define DRM_RADEON_GEM_PREAD 0x21
+#define DRM_RADEON_GEM_PWRITE 0x22
+#define DRM_RADEON_GEM_SET_DOMAIN 0x23
+#define DRM_RADEON_GEM_WAIT_IDLE 0x24
+#define DRM_RADEON_CS 0x26
+#define DRM_RADEON_INFO 0x27
+#define DRM_RADEON_GEM_SET_TILING 0x28
+#define DRM_RADEON_GEM_GET_TILING 0x29
+#define DRM_RADEON_GEM_BUSY 0x2a
+#define DRM_RADEON_GEM_VA 0x2b
+#define DRM_RADEON_GEM_OP 0x2c
+#define DRM_RADEON_GEM_USERPTR 0x2d
+
+#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
+#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
+#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
+#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
+#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
+#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
+#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
+#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
+#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
+#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
+#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
+#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
+
+typedef struct drm_radeon_init {
+ enum {
+ RADEON_INIT_CP = 0x01,
+ RADEON_CLEANUP_CP = 0x02,
+ RADEON_INIT_R200_CP = 0x03,
+ RADEON_INIT_R300_CP = 0x04,
+ RADEON_INIT_R600_CP = 0x05
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+ int flush;
+ int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+ enum {
+ RADEON_INIT_FULLSCREEN = 0x01,
+ RADEON_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1 0
+#define CLEAR_Y1 1
+#define CLEAR_X2 2
+#define CLEAR_Y2 3
+#define CLEAR_DEPTH 4
+
+typedef union drm_radeon_clear_rect {
+ float f[5];
+ unsigned int ui[5];
+} drm_radeon_clear_rect_t;
+
+typedef struct drm_radeon_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ drm_radeon_clear_rect_t *depth_boxes;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
+ * - allows multiple primitives and state changes in a single ioctl
+ * - supports driver change to emit native primitives
+ */
+typedef struct drm_radeon_vertex2 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ drm_radeon_state_t *state;
+ int nr_prims;
+ drm_radeon_prim_t *prim;
+} drm_radeon_vertex2_t;
+
+/* v1.3 - obsoletes drm_radeon_vertex2
+ * - allows arbitrarily large cliprect list
+ * - allows updating of tcl packet, vector and scalar state
+ * - allows memory-efficient description of state updates
+ * - allows state to be emitted without a primitive
+ * (for clears, ctx switches)
+ * - allows more than one dma buffer to be referenced per ioctl
+ * - supports tcl driver
+ * - may be extended in future versions with new cmd types, packets
+ */
+typedef struct drm_radeon_cmd_buffer {
+ int bufsz;
+ char *buf;
+ int nbox;
+ struct drm_clip_rect *boxes;
+} drm_radeon_cmd_buffer_t;
+
+typedef struct drm_radeon_tex_image {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ const void *data;
+} drm_radeon_tex_image_t;
+
+typedef struct drm_radeon_texture {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ drm_radeon_tex_image_t *image;
+} drm_radeon_texture_t;
+
+typedef struct drm_radeon_stipple {
+ unsigned int *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_radeon_indirect_t;
+
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
+/* 1.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
+#define RADEON_PARAM_LAST_FRAME 2
+#define RADEON_PARAM_LAST_DISPATCH 3
+#define RADEON_PARAM_LAST_CLEAR 4
+/* Added with DRM version 1.6. */
+#define RADEON_PARAM_IRQ_NR 5
+#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
+/* Added with DRM version 1.8. */
+#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE 8
+#define RADEON_PARAM_SAREA_HANDLE 9
+#define RADEON_PARAM_GART_TEX_HANDLE 10
+#define RADEON_PARAM_SCRATCH_OFFSET 11
+#define RADEON_PARAM_CARD_TYPE 12
+#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
+#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID 16
+#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
+
+typedef struct drm_radeon_getparam {
+ int param;
+ void *value;
+} drm_radeon_getparam_t;
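+/* A minimal illustrative sketch (not part of the UAPI): querying a driver
+ * parameter with this struct, assuming an open DRM fd, libdrm's
+ * drmCommandWriteRead() from xf86drm.h, and the DRM_RADEON_GETPARAM
+ * command index defined earlier in this header. The kernel writes the
+ * result through the user pointer in `value`:
+ *
+ *	int device_id = 0;
+ *	drm_radeon_getparam_t gp = {
+ *		.param = RADEON_PARAM_DEVICE_ID,
+ *		.value = &device_id,
+ *	};
+ *	int ret = drmCommandWriteRead(fd, DRM_RADEON_GETPARAM,
+ *				      &gp, sizeof(gp));
+ */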
+
+/* 1.6: Set up a memory manager for regions of shared memory:
+ */
+#define RADEON_MEM_REGION_GART 1
+#define RADEON_MEM_REGION_FB 2
+
+typedef struct drm_radeon_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc_t;
+
+typedef struct drm_radeon_mem_free {
+ int region;
+ int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_radeon_mem_init_heap_t;
+
+/* 1.6: Userspace can request & wait on irq's:
+ */
+typedef struct drm_radeon_irq_emit {
+ int *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+ int irq_seq;
+} drm_radeon_irq_wait_t;
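+/* Illustrative sketch: emitting an IRQ and then waiting on the returned
+ * sequence number, assuming libdrm's drmCommandWriteRead()/drmCommandWrite()
+ * and the DRM_RADEON_IRQ_EMIT/DRM_RADEON_IRQ_WAIT command indices defined
+ * earlier in this header:
+ *
+ *	int seq = 0;
+ *	drm_radeon_irq_emit_t emit = { .irq_seq = &seq };
+ *	drmCommandWriteRead(fd, DRM_RADEON_IRQ_EMIT, &emit, sizeof(emit));
+ *
+ *	drm_radeon_irq_wait_t wait = { .irq_seq = seq };
+ *	drmCommandWrite(fd, DRM_RADEON_IRQ_WAIT, &wait, sizeof(wait));
+ */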
+
+/* 1.10: Clients tell the DRM where they think the framebuffer is located in
+ * the card's address space, via a new generic ioctl to set parameters
+ */
+
+typedef struct drm_radeon_setparam {
+ unsigned int param;
+ __s64 value;
+} drm_radeon_setparam_t;
+
+#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
+#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
+#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
+#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
+
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
+
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU 0x1
+#define RADEON_GEM_DOMAIN_GTT 0x2
+#define RADEON_GEM_DOMAIN_VRAM 0x4
+
+struct drm_radeon_gem_info {
+ __u64 gart_size;
+ __u64 vram_size;
+ __u64 vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
+#define RADEON_GEM_GTT_UC (1 << 1)
+#define RADEON_GEM_GTT_WC (1 << 2)
+/* BO is expected to be accessed by the CPU */
+#define RADEON_GEM_CPU_ACCESS (1 << 3)
+/* CPU access is not expected to work for this BO */
+#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
+
+struct drm_radeon_gem_create {
+ __u64 size;
+ __u64 alignment;
+ __u32 handle;
+ __u32 initial_domain;
+ __u32 flags;
+};
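+/* Illustrative sketch: allocating a 1 MiB buffer object in VRAM, assuming
+ * libdrm's drmCommandWriteRead() and the DRM_RADEON_GEM_CREATE command
+ * index defined earlier in this header. On success, args.handle names the
+ * new BO:
+ *
+ *	struct drm_radeon_gem_create args = {
+ *		.size = 1024 * 1024,
+ *		.alignment = 4096,
+ *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
+ *	};
+ *	int ret = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
+ *				      &args, sizeof(args));
+ */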
+
+/*
+ * This is not a reliable API: expect it to fail for any number of
+ * reasons, and keep a fallback path that does not use userptr to
+ * perform any operation.
+ */
+#define RADEON_GEM_USERPTR_READONLY (1 << 0)
+#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
+#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
+#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
+
+struct drm_radeon_gem_userptr {
+ __u64 addr;
+ __u64 size;
+ __u32 flags;
+ __u32 handle;
+};
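+/* Illustrative sketch: registering an anonymous user allocation and falling
+ * back as the note above advises. Assumes the DRM_RADEON_GEM_USERPTR command
+ * index defined earlier in this header; `ptr` and `len` stand in for a
+ * page-aligned caller allocation, and use_copy_fallback() is a hypothetical
+ * application-side fallback:
+ *
+ *	struct drm_radeon_gem_userptr up = {
+ *		.addr = (uintptr_t)ptr,
+ *		.size = len,
+ *		.flags = RADEON_GEM_USERPTR_ANONONLY |
+ *			 RADEON_GEM_USERPTR_VALIDATE |
+ *			 RADEON_GEM_USERPTR_REGISTER,
+ *	};
+ *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
+ *				&up, sizeof(up)) != 0)
+ *		use_copy_fallback(ptr, len);
+ */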
+
+#define RADEON_TILING_MACRO 0x1
+#define RADEON_TILING_MICRO 0x2
+#define RADEON_TILING_SWAP_16BIT 0x4
+#define RADEON_TILING_R600_NO_SCANOUT RADEON_TILING_SWAP_16BIT
+#define RADEON_TILING_SWAP_32BIT 0x8
+/* this object requires a surface when mapped - i.e. front buffer */
+#define RADEON_TILING_SURFACE 0x10
+#define RADEON_TILING_MICRO_SQUARE 0x20
+#define RADEON_TILING_EG_BANKW_SHIFT 8
+#define RADEON_TILING_EG_BANKW_MASK 0xf
+#define RADEON_TILING_EG_BANKH_SHIFT 12
+#define RADEON_TILING_EG_BANKH_MASK 0xf
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
+#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
+#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
+
+struct drm_radeon_gem_set_tiling {
+ __u32 handle;
+ __u32 tiling_flags;
+ __u32 pitch;
+};
+
+struct drm_radeon_gem_get_tiling {
+ __u32 handle;
+ __u32 tiling_flags;
+ __u32 pitch;
+};
+
+struct drm_radeon_gem_mmap {
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+ __u64 size;
+ __u64 addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+ __u32 handle;
+ __u32 read_domains;
+ __u32 write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_radeon_gem_busy {
+ __u32 handle;
+ __u32 domain;
+};
+
+struct drm_radeon_gem_pread {
+ /** Handle for the object being read. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to read from */
+ __u64 offset;
+ /** Length of data to read */
+ __u64 size;
+ /** Pointer to write the data into. */
+ /* void *, but pointers are not 32/64 compatible */
+ __u64 data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+ /** Handle for the object being written to. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to write to */
+ __u64 offset;
+ /** Length of data to write */
+ __u64 size;
+ /** Pointer to read the data from. */
+ /* void *, but pointers are not 32/64 compatible */
+ __u64 data_ptr;
+};
+
+/* Sets or returns a value associated with a buffer. */
+struct drm_radeon_gem_op {
+ __u32 handle; /* buffer */
+ __u32 op; /* RADEON_GEM_OP_* */
+ __u64 value; /* input or return value */
+};
+
+#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
+#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1
+
+#define RADEON_VA_MAP 1
+#define RADEON_VA_UNMAP 2
+
+#define RADEON_VA_RESULT_OK 0
+#define RADEON_VA_RESULT_ERROR 1
+#define RADEON_VA_RESULT_VA_EXIST 2
+
+#define RADEON_VM_PAGE_VALID (1 << 0)
+#define RADEON_VM_PAGE_READABLE (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED (1 << 4)
+
+struct drm_radeon_gem_va {
+ __u32 handle;
+ __u32 operation;
+ __u32 vm_id;
+ __u32 flags;
+ __u64 offset;
+};
+
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB 0x02
+#define RADEON_CHUNK_ID_FLAGS 0x03
+#define RADEON_CHUNK_ID_CONST_IB 0x04
+
+/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
+#define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM 0x02
+#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX 0
+#define RADEON_CS_RING_COMPUTE 1
+#define RADEON_CS_RING_DMA 2
+#define RADEON_CS_RING_UVD 3
+#define RADEON_CS_RING_VCE 4
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
+
+struct drm_radeon_cs_chunk {
+ __u32 chunk_id;
+ __u32 length_dw;
+ __u64 chunk_data;
+};
+
+/* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_PRIO_MASK (0xf << 0)
+
+struct drm_radeon_cs_reloc {
+ __u32 handle;
+ __u32 read_domains;
+ __u32 write_domain;
+ __u32 flags;
+};
+
+struct drm_radeon_cs {
+ __u32 num_chunks;
+ __u32 cs_id;
+	/* this points to an array of __u64s, each of which points to a cs chunk */
+ __u64 chunks;
+ /* updates to the limits after this CS ioctl */
+ __u64 gart_limit;
+ __u64 vram_limit;
+};
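+/* Illustrative sketch: submitting a single IB chunk. `chunks` holds a user
+ * pointer to an array of __u64s, each of which is itself a user pointer to
+ * one struct drm_radeon_cs_chunk. A real submission typically also passes a
+ * RADEON_CHUNK_ID_RELOCS chunk; `ib` and `ndw` stand in for the caller's
+ * command stream. Assumes the DRM_RADEON_CS command index defined earlier
+ * in this header:
+ *
+ *	struct drm_radeon_cs_chunk ib_chunk = {
+ *		.chunk_id = RADEON_CHUNK_ID_IB,
+ *		.length_dw = ndw,
+ *		.chunk_data = (uintptr_t)ib,
+ *	};
+ *	__u64 chunk_array[1] = { (uintptr_t)&ib_chunk };
+ *	struct drm_radeon_cs cs = {
+ *		.num_chunks = 1,
+ *		.chunks = (uintptr_t)chunk_array,
+ *	};
+ *	int ret = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
+ */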
+
+#define RADEON_INFO_DEVICE_ID 0x00
+#define RADEON_INFO_NUM_GB_PIPES 0x01
+#define RADEON_INFO_NUM_Z_PIPES 0x02
+#define RADEON_INFO_ACCEL_WORKING 0x03
+#define RADEON_INFO_CRTC_FROM_ID 0x04
+#define RADEON_INFO_ACCEL_WORKING2 0x05
+#define RADEON_INFO_TILING_CONFIG 0x06
+#define RADEON_INFO_WANT_HYPERZ 0x07
+#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
+#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
+#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
+#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START 0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
+/* max pipes - needed for compute shaders */
+#define RADEON_INFO_MAX_PIPES 0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP 0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE 0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE 0x13
+/* fast fb access is enabled */
+#define RADEON_INFO_FASTFB_WORKING 0x14
+/* query if a RADEON_CS_RING_* submission is supported */
+#define RADEON_INFO_RING_WORKING 0x15
+/* SI tile mode array */
+#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+/* query if CP DMA is supported on the compute ring */
+#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
+/* CIK macrotile mode array */
+#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
+/* max engine clock - needed for OpenCL */
+#define RADEON_INFO_MAX_SCLK 0x1a
+/* version of VCE firmware */
+#define RADEON_INFO_VCE_FW_VERSION 0x1b
+/* version of VCE feedback */
+#define RADEON_INFO_VCE_FB_VERSION 0x1c
+#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
+#define RADEON_INFO_VRAM_USAGE 0x1e
+#define RADEON_INFO_GTT_USAGE 0x1f
+#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
+#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
+#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
+#define RADEON_INFO_READ_REG 0x24
+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
+#define RADEON_INFO_GPU_RESET_COUNTER 0x26
+
+struct drm_radeon_info {
+ __u32 request;
+ __u32 pad;
+ __u64 value;
+};
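+/* Illustrative sketch: `value` carries a user pointer cast to __u64; the
+ * kernel writes the requested datum back through it. Assumes the
+ * DRM_RADEON_INFO command index defined earlier in this header:
+ *
+ *	uint32_t num_backends = 0;
+ *	struct drm_radeon_info info = {
+ *		.request = RADEON_INFO_NUM_BACKENDS,
+ *		.value = (uintptr_t)&num_backends,
+ *	};
+ *	int ret = drmCommandWriteRead(fd, DRM_RADEON_INFO,
+ *				      &info, sizeof(info));
+ */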
+
+/* These correspond to the tile index to use; they make explicit the API
+ * that is implicitly defined by the tile mode array.
+ */
+#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
+#define SI_TILE_MODE_COLOR_1D 13
+#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
+#define SI_TILE_MODE_COLOR_2D_8BPP 14
+#define SI_TILE_MODE_COLOR_2D_16BPP 15
+#define SI_TILE_MODE_COLOR_2D_32BPP 16
+#define SI_TILE_MODE_COLOR_2D_64BPP 17
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
+#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
+#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
+#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
+#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
+
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/radeon_surface.h b/include/libdrm/libdrm/radeon_surface.h
new file mode 100644
index 0000000..af7cab6
--- /dev/null
+++ b/include/libdrm/libdrm/radeon_surface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#ifndef RADEON_SURFACE_H
+#define RADEON_SURFACE_H
+
+/* Note:
+ *
+ * For texture arrays, the n layers are stored one after the other within
+ * each mipmap level. A value of 0 is always valid for any field that can
+ * serve as a hint.
+ */
+
+#define RADEON_SURF_MAX_LEVEL 32
+
+#define RADEON_SURF_TYPE_MASK 0xFF
+#define RADEON_SURF_TYPE_SHIFT 0
+#define RADEON_SURF_TYPE_1D 0
+#define RADEON_SURF_TYPE_2D 1
+#define RADEON_SURF_TYPE_3D 2
+#define RADEON_SURF_TYPE_CUBEMAP 3
+#define RADEON_SURF_TYPE_1D_ARRAY 4
+#define RADEON_SURF_TYPE_2D_ARRAY 5
+#define RADEON_SURF_MODE_MASK 0xFF
+#define RADEON_SURF_MODE_SHIFT 8
+#define RADEON_SURF_MODE_LINEAR 0
+#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
+#define RADEON_SURF_MODE_1D 2
+#define RADEON_SURF_MODE_2D 3
+#define RADEON_SURF_SCANOUT (1 << 16)
+#define RADEON_SURF_ZBUFFER (1 << 17)
+#define RADEON_SURF_SBUFFER (1 << 18)
+#define RADEON_SURF_Z_OR_SBUFFER (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)
+#define RADEON_SURF_HAS_SBUFFER_MIPTREE (1 << 19)
+#define RADEON_SURF_HAS_TILE_MODE_INDEX (1 << 20)
+#define RADEON_SURF_FMASK (1 << 21)
+
+#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
+#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
+#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
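+/* Illustrative sketch: composing and inspecting a surface flags word with
+ * the accessors above:
+ *
+ *	uint32_t flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *			 RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE) |
+ *			 RADEON_SURF_SCANOUT;
+ *	if (RADEON_SURF_GET(flags, MODE) == RADEON_SURF_MODE_2D)
+ *		flags = RADEON_SURF_CLR(flags, MODE) |
+ *			RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ */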
+
+/* The first fields, up to mode, need to match the r6 struct so that we can
+ * reuse the same function for linear & linear-aligned surfaces.
+ */
+struct radeon_surface_level {
+ uint64_t offset;
+ uint64_t slice_size;
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t nblk_x;
+ uint32_t nblk_y;
+ uint32_t nblk_z;
+ uint32_t pitch_bytes;
+ uint32_t mode;
+};
+
+enum si_tiling_mode {
+ SI_TILING_AUTO = 0,
+
+ SI_TILING_COLOR_1D,
+ SI_TILING_COLOR_1D_SCANOUT,
+ SI_TILING_COLOR_2D_8BPP,
+ SI_TILING_COLOR_2D_16BPP,
+ SI_TILING_COLOR_2D_32BPP,
+ SI_TILING_COLOR_2D_64BPP,
+ SI_TILING_COLOR_2D_SCANOUT_16BPP,
+ SI_TILING_COLOR_2D_SCANOUT_32BPP,
+ SI_TILING_COLOR_LINEAR,
+
+ SI_TILING_STENCIL_1D,
+ SI_TILING_STENCIL_2D,
+ SI_TILING_STENCIL_2D_2AA,
+ SI_TILING_STENCIL_2D_4AA,
+ SI_TILING_STENCIL_2D_8AA,
+
+ SI_TILING_DEPTH_1D,
+ SI_TILING_DEPTH_2D,
+ SI_TILING_DEPTH_2D_2AA,
+ SI_TILING_DEPTH_2D_4AA,
+ SI_TILING_DEPTH_2D_8AA,
+
+ SI_TILING_LAST_MODE,
+};
+
+struct radeon_surface {
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t blk_w;
+ uint32_t blk_h;
+ uint32_t blk_d;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t bpe;
+ uint32_t nsamples;
+ uint32_t flags;
+	/* The following fields are updated/filled by the allocator. Callers
+	 * may set some of the values, but they are used as hints and can
+	 * be overridden (things like bankw/bankh on evergreen, for
+	 * instance).
+ */
+ uint64_t bo_size;
+ uint64_t bo_alignment;
+ /* apply to eg */
+ uint32_t bankw;
+ uint32_t bankh;
+ uint32_t mtilea;
+ uint32_t tile_split;
+ uint32_t stencil_tile_split;
+ uint64_t stencil_offset;
+ struct radeon_surface_level level[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level stencil_level[RADEON_SURF_MAX_LEVEL];
+ uint32_t tiling_index[RADEON_SURF_MAX_LEVEL];
+ uint32_t stencil_tiling_index[RADEON_SURF_MAX_LEVEL];
+};
+
+struct radeon_surface_manager *radeon_surface_manager_new(int fd);
+void radeon_surface_manager_free(struct radeon_surface_manager *surf_man);
+int radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+int radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
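+/* A plausible allocation flow with the functions above (a sketch, not the
+ * canonical usage): the caller fills the dimension/format fields, lets the
+ * manager pick and compute the layout, then allocates a BO of surf.bo_size
+ * bytes aligned to surf.bo_alignment. allocate_bo() is a hypothetical
+ * application-side helper:
+ *
+ *	struct radeon_surface_manager *sm = radeon_surface_manager_new(fd);
+ *	struct radeon_surface surf = {
+ *		.npix_x = width, .npix_y = height, .npix_z = 1,
+ *		.blk_w = 1, .blk_h = 1, .blk_d = 1,
+ *		.array_size = 1, .last_level = 0,
+ *		.bpe = 4, .nsamples = 1,
+ *		.flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *			 RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE),
+ *	};
+ *	if (radeon_surface_best(sm, &surf) == 0 &&
+ *	    radeon_surface_init(sm, &surf) == 0)
+ *		allocate_bo(surf.bo_size, surf.bo_alignment);
+ *	radeon_surface_manager_free(sm);
+ */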
+
+#endif
diff --git a/include/libdrm/libdrm/savage_drm.h b/include/libdrm/libdrm/savage_drm.h
new file mode 100644
index 0000000..1a91234
--- /dev/null
+++ b/include/libdrm/libdrm/savage_drm.h
@@ -0,0 +1,220 @@
+/* savage_drm.h -- Public header for the savage driver
+ *
+ * Copyright 2004 Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRM_H__
+#define __SAVAGE_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef __SAVAGE_SAREA_DEFINES__
+#define __SAVAGE_SAREA_DEFINES__
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define SAVAGE_CARD_HEAP 0
+#define SAVAGE_AGP_HEAP 1
+#define SAVAGE_NR_TEX_HEAPS 2
+#define SAVAGE_NR_TEX_REGIONS 16
+#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
+
+#endif /* __SAVAGE_SAREA_DEFINES__ */
+
+typedef struct _drm_savage_sarea {
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
+ 1];
+ unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_savage_sarea_t, *drm_savage_sarea_ptr;
+
+/* Savage-specific ioctls
+ */
+#define DRM_SAVAGE_BCI_INIT 0x00
+#define DRM_SAVAGE_BCI_CMDBUF 0x01
+#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
+#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
+
+#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+
+#define SAVAGE_DMA_PCI 1
+#define SAVAGE_DMA_AGP 3
+typedef struct drm_savage_init {
+ enum {
+ SAVAGE_INIT_BCI = 1,
+ SAVAGE_CLEANUP_BCI = 2
+ } func;
+ unsigned int sarea_priv_offset;
+
+ /* some parameters */
+ unsigned int cob_size;
+ unsigned int bci_threshold_lo, bci_threshold_hi;
+ unsigned int dma_type;
+
+ /* frame buffer layout */
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ /* local textures */
+ unsigned int texture_offset;
+ unsigned int texture_size;
+
+ /* physical locations of non-permanent maps */
+ unsigned long status_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+ unsigned long cmd_dma_offset;
+} drm_savage_init_t;
+
+typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
+typedef struct drm_savage_cmdbuf {
+ /* command buffer in client's address space */
+ drm_savage_cmd_header_t *cmd_addr;
+ unsigned int size; /* size of the command buffer in 64bit units */
+
+ unsigned int dma_idx; /* DMA buffer index to use */
+ int discard; /* discard DMA buffer when done */
+ /* vertex buffer in client's address space */
+ unsigned int *vb_addr;
+ unsigned int vb_size; /* size of client vertex buffer in bytes */
+ unsigned int vb_stride; /* stride of vertices in 32bit words */
+ /* boxes in client's address space */
+ struct drm_clip_rect *box_addr;
+ unsigned int nbox; /* number of clipping boxes */
+} drm_savage_cmdbuf_t;
+
+#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
+#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
+#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
+typedef struct drm_savage_event {
+ unsigned int count;
+ unsigned int flags;
+} drm_savage_event_emit_t, drm_savage_event_wait_t;
+
+/* Commands for the cmdbuf ioctl
+ */
+#define SAVAGE_CMD_STATE 0 /* a range of state registers */
+#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
+#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
+#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
+#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
+#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
+#define SAVAGE_CMD_SWAP 6 /* swap buffers */
+
+/* Primitive types
+ */
+#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
+#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
+#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
+#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
+ * shading on s3d */
+
+/* Skip flags (vertex format)
+ */
+#define SAVAGE_SKIP_Z 0x01
+#define SAVAGE_SKIP_W 0x02
+#define SAVAGE_SKIP_C0 0x04
+#define SAVAGE_SKIP_C1 0x08
+#define SAVAGE_SKIP_S0 0x10
+#define SAVAGE_SKIP_T0 0x20
+#define SAVAGE_SKIP_ST0 0x30
+#define SAVAGE_SKIP_S1 0x40
+#define SAVAGE_SKIP_T1 0x80
+#define SAVAGE_SKIP_ST1 0xc0
+#define SAVAGE_SKIP_ALL_S3D 0x3f
+#define SAVAGE_SKIP_ALL_S4 0xff
+
+/* Buffer names for clear command
+ */
+#define SAVAGE_FRONT 0x1
+#define SAVAGE_BACK 0x2
+#define SAVAGE_DEPTH 0x4
+
+/* 64-bit command header
+ */
+union drm_savage_cmd_header {
+ struct {
+ unsigned char cmd; /* command */
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned short pad2;
+ unsigned short pad3;
+ } cmd; /* generic */
+ struct {
+ unsigned char cmd;
+ unsigned char global; /* need idle engine? */
+ unsigned short count; /* number of consecutive registers */
+ unsigned short start; /* first register */
+ unsigned short pad3;
+ } state; /* SAVAGE_CMD_STATE */
+ struct {
+ unsigned char cmd;
+ unsigned char prim; /* primitive type */
+ unsigned short skip; /* vertex format (skip flags) */
+ unsigned short count; /* number of vertices */
+ unsigned short start; /* first vertex in DMA/vertex buffer */
+ } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
+ struct {
+ unsigned char cmd;
+ unsigned char prim;
+ unsigned short skip;
+ unsigned short count; /* number of indices that follow */
+ unsigned short pad3;
+ } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
+ struct {
+ unsigned char cmd;
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned int flags;
+ } clear0; /* SAVAGE_CMD_CLEAR */
+ struct {
+ unsigned int mask;
+ unsigned int value;
+ } clear1; /* SAVAGE_CMD_CLEAR data */
+};
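+/* Illustrative sketch: encoding a vertex-buffer primitive command with the
+ * union above, e.g. as the first 64-bit word of a command buffer submitted
+ * via drm_savage_cmdbuf_t; num_vertices stands in for the caller's count:
+ *
+ *	union drm_savage_cmd_header h = {
+ *		.prim = {
+ *			.cmd = SAVAGE_CMD_VB_PRIM,
+ *			.prim = SAVAGE_PRIM_TRILIST,
+ *			.skip = 0,
+ *			.count = num_vertices,
+ *			.start = 0,
+ *		},
+ *	};
+ */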
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/sis_drm.h b/include/libdrm/libdrm/sis_drm.h
new file mode 100644
index 0000000..8e51bb9
--- /dev/null
+++ b/include/libdrm/libdrm/sis_drm.h
@@ -0,0 +1,77 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 2005 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __SIS_DRM_H__
+#define __SIS_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* SiS specific ioctls */
+#define NOT_USED_0_3
+#define DRM_SIS_FB_ALLOC 0x04
+#define DRM_SIS_FB_FREE 0x05
+#define NOT_USED_6_12
+#define DRM_SIS_AGP_INIT 0x13
+#define DRM_SIS_AGP_ALLOC 0x14
+#define DRM_SIS_AGP_FREE 0x15
+#define DRM_SIS_FB_INIT 0x16
+
+#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
+#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
+/*
+#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
+#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
+#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
+*/
+
+typedef struct {
+ int context;
+ unsigned int offset;
+ unsigned int size;
+ unsigned long free;
+} drm_sis_mem_t;
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_agp_t;
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_fb_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __SIS_DRM_H__ */
diff --git a/include/libdrm/libdrm/tegra_drm.h b/include/libdrm/libdrm/tegra_drm.h
new file mode 100644
index 0000000..94cfc30
--- /dev/null
+++ b/include/libdrm/libdrm/tegra_drm.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (c) 2012-2020 NVIDIA Corporation */
+
+#ifndef _UAPI_TEGRA_DRM_H_
+#define _UAPI_TEGRA_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Tegra DRM legacy UAPI. Only enabled with STAGING */
+
+#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
+/**
+ * struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
+ */
+struct drm_tegra_gem_create {
+ /**
+ * @size:
+ *
+ * The size, in bytes, of the buffer object to be created.
+ */
+ __u64 size;
+
+ /**
+ * @flags:
+ *
+ * A bitmask of flags that influence the creation of GEM objects:
+ *
+ * DRM_TEGRA_GEM_CREATE_TILED
+ * Use the 16x16 tiling format for this buffer.
+ *
+ * DRM_TEGRA_GEM_CREATE_BOTTOM_UP
+ * The buffer has a bottom-up layout.
+ */
+ __u32 flags;
+
+ /**
+ * @handle:
+ *
+ * The handle of the created GEM object. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
+ */
+struct drm_tegra_gem_mmap {
+ /**
+ * @handle:
+ *
+ * Handle of the GEM object to obtain an mmap offset for.
+ */
+ __u32 handle;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @offset:
+ *
+ * The mmap offset for the given GEM object. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
+ __u64 offset;
+};
+
+/**
+ * struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
+ */
+struct drm_tegra_syncpt_read {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to read the current value from.
+ */
+ __u32 id;
+
+ /**
+ * @value:
+ *
+ * The current syncpoint value. Set by the kernel upon successful
+ * completion of the IOCTL.
+ */
+ __u32 value;
+};
+
+/**
+ * struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
+ */
+struct drm_tegra_syncpt_incr {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to increment.
+ */
+ __u32 id;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
+ */
+struct drm_tegra_syncpt_wait {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to wait on.
+ */
+ __u32 id;
+
+ /**
+ * @thresh:
+ *
+ * Threshold value for which to wait.
+ */
+ __u32 thresh;
+
+ /**
+ * @timeout:
+ *
+ * Timeout, in milliseconds, to wait.
+ */
+ __u32 timeout;
+
+ /**
+ * @value:
+ *
+ * The new syncpoint value after the wait. Set by the kernel upon
+ * successful completion of the IOCTL.
+ */
+ __u32 value;
+};
+
+#define DRM_TEGRA_NO_TIMEOUT (0xffffffff)
+
+/**
+ * struct drm_tegra_open_channel - parameters for the open channel IOCTL
+ */
+struct drm_tegra_open_channel {
+ /**
+ * @client:
+ *
+ * The client ID for this channel.
+ */
+ __u32 client;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @context:
+ *
+ * The application context of this channel. Set by the kernel upon
+	 * to the DRM_TEGRA_CLOSE_CHANNEL or the DRM_TEGRA_SUBMIT IOCTLs.
+ * to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
+ */
+ __u64 context;
+};
+
+/**
+ * struct drm_tegra_close_channel - parameters for the close channel IOCTL
+ */
+struct drm_tegra_close_channel {
+ /**
+ * @context:
+ *
+ * The application context of this channel. This is obtained from the
+ * DRM_TEGRA_OPEN_CHANNEL IOCTL.
+ */
+ __u64 context;
+};
+
+/**
+ * struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
+ */
+struct drm_tegra_get_syncpt {
+ /**
+ * @context:
+ *
+ * The application context identifying the channel for which to obtain
+ * the syncpoint ID.
+ */
+ __u64 context;
+
+ /**
+ * @index:
+ *
+ * Index of the client syncpoint for which to obtain the ID.
+ */
+ __u32 index;
+
+ /**
+ * @id:
+ *
+ * The ID of the given syncpoint. Set by the kernel upon successful
+ * completion of the IOCTL.
+ */
+ __u32 id;
+};
+
+/**
+ * struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
+ */
+struct drm_tegra_get_syncpt_base {
+ /**
+ * @context:
+ *
+ * The application context identifying for which channel to obtain the
+ * wait base.
+ */
+ __u64 context;
+
+ /**
+ * @syncpt:
+ *
+ * ID of the syncpoint for which to obtain the wait base.
+ */
+ __u32 syncpt;
+
+ /**
+ * @id:
+ *
+ * The ID of the wait base corresponding to the client syncpoint. Set
+ * by the kernel upon successful completion of the IOCTL.
+ */
+ __u32 id;
+};
+
+/**
+ * struct drm_tegra_syncpt - syncpoint increment operation
+ */
+struct drm_tegra_syncpt {
+ /**
+ * @id:
+ *
+ * ID of the syncpoint to operate on.
+ */
+ __u32 id;
+
+ /**
+ * @incrs:
+ *
+ * Number of increments to perform for the syncpoint.
+ */
+ __u32 incrs;
+};
+
+/**
+ * struct drm_tegra_cmdbuf - structure describing a command buffer
+ */
+struct drm_tegra_cmdbuf {
+ /**
+ * @handle:
+ *
+ * Handle to a GEM object containing the command buffer.
+ */
+ __u32 handle;
+
+ /**
+ * @offset:
+ *
+ * Offset, in bytes, into the GEM object identified by @handle at
+ * which the command buffer starts.
+ */
+ __u32 offset;
+
+ /**
+ * @words:
+ *
+ * Number of 32-bit words in this command buffer.
+ */
+ __u32 words;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_tegra_reloc - GEM object relocation structure
+ */
+struct drm_tegra_reloc {
+ struct {
+ /**
+ * @cmdbuf.handle:
+ *
+ * Handle to the GEM object containing the command buffer for
+ * which to perform this GEM object relocation.
+ */
+ __u32 handle;
+
+ /**
+ * @cmdbuf.offset:
+ *
+ * Offset, in bytes, into the command buffer at which to
+ * insert the relocated address.
+ */
+ __u32 offset;
+ } cmdbuf;
+ struct {
+ /**
+ * @target.handle:
+ *
+ * Handle to the GEM object to be relocated.
+ */
+ __u32 handle;
+
+ /**
+ * @target.offset:
+ *
+ * Offset, in bytes, into the target GEM object at which the
+ * relocated data starts.
+ */
+ __u32 offset;
+ } target;
+
+ /**
+ * @shift:
+ *
+ * The number of bits by which to shift relocated addresses.
+ */
+ __u32 shift;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_tegra_waitchk - wait check structure
+ */
+struct drm_tegra_waitchk {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object containing a command stream on which to
+ * perform the wait check.
+ */
+ __u32 handle;
+
+ /**
+ * @offset:
+ *
+ * Offset, in bytes, of the location in the command stream to perform
+ * the wait check on.
+ */
+ __u32 offset;
+
+ /**
+ * @syncpt:
+ *
+ * ID of the syncpoint to wait check.
+ */
+ __u32 syncpt;
+
+ /**
+ * @thresh:
+ *
+ * Threshold value for which to check.
+ */
+ __u32 thresh;
+};
+
+/**
+ * struct drm_tegra_submit - job submission structure
+ */
+struct drm_tegra_submit {
+ /**
+ * @context:
+ *
+ * The application context identifying the channel to use for the
+ * execution of this job.
+ */
+ __u64 context;
+
+ /**
+ * @num_syncpts:
+ *
+ * The number of syncpoints operated on by this job. This defines the
+ * length of the array pointed to by @syncpts.
+ */
+ __u32 num_syncpts;
+
+ /**
+ * @num_cmdbufs:
+ *
+ * The number of command buffers to execute as part of this job. This
+ * defines the length of the array pointed to by @cmdbufs.
+ */
+ __u32 num_cmdbufs;
+
+ /**
+ * @num_relocs:
+ *
+ * The number of relocations to perform before executing this job.
+ * This defines the length of the array pointed to by @relocs.
+ */
+ __u32 num_relocs;
+
+ /**
+ * @num_waitchks:
+ *
+ * The number of wait checks to perform as part of this job. This
+ * defines the length of the array pointed to by @waitchks.
+ */
+ __u32 num_waitchks;
+
+ /**
+ * @waitchk_mask:
+ *
+ * Bitmask of valid wait checks.
+ */
+ __u32 waitchk_mask;
+
+ /**
+ * @timeout:
+ *
+ * Timeout, in milliseconds, before this job is cancelled.
+ */
+ __u32 timeout;
+
+ /**
+ * @syncpts:
+ *
+ * A pointer to an array of &struct drm_tegra_syncpt structures that
+ * specify the syncpoint operations performed as part of this job.
+ * The number of elements in the array must be equal to the value
+ * given by @num_syncpts.
+ */
+ __u64 syncpts;
+
+ /**
+ * @cmdbufs:
+ *
+ * A pointer to an array of &struct drm_tegra_cmdbuf structures that
+ * define the command buffers to execute as part of this job. The
+ * number of elements in the array must be equal to the value given
+	 * by @num_cmdbufs.
+ */
+ __u64 cmdbufs;
+
+ /**
+ * @relocs:
+ *
+ * A pointer to an array of &struct drm_tegra_reloc structures that
+ * specify the relocations that need to be performed before executing
+ * this job. The number of elements in the array must be equal to the
+ * value given by @num_relocs.
+ */
+ __u64 relocs;
+
+ /**
+ * @waitchks:
+ *
+ * A pointer to an array of &struct drm_tegra_waitchk structures that
+ * specify the wait checks to be performed while executing this job.
+ * The number of elements in the array must be equal to the value
+ * given by @num_waitchks.
+ */
+ __u64 waitchks;
+
+ /**
+ * @fence:
+ *
+ * The threshold of the syncpoint associated with this job after it
+ * has been completed. Set by the kernel upon successful completion of
+ * the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
+ * wait for this job to be finished.
+ */
+ __u32 fence;
+
+ /**
+ * @reserved:
+ *
+ * This field is reserved for future use. Must be 0.
+ */
+ __u32 reserved[5];
+};
+
+#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
+#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
+#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
+
+/**
+ * struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
+ */
+struct drm_tegra_gem_set_tiling {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to set the tiling parameters.
+ */
+ __u32 handle;
+
+ /**
+ * @mode:
+ *
+ * The tiling mode to set. Must be one of:
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_PITCH
+ * pitch linear format
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_TILED
+ * 16x16 tiling format
+ *
+ * DRM_TEGRA_GEM_TILING_MODE_BLOCK
+ * 16Bx2 tiling format
+ */
+ __u32 mode;
+
+ /**
+ * @value:
+ *
+ * The value to set for the tiling mode parameter.
+ */
+ __u32 value;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
+ */
+struct drm_tegra_gem_get_tiling {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to query the tiling parameters.
+ */
+ __u32 handle;
+
+ /**
+ * @mode:
+ *
+ * The tiling mode currently associated with the GEM object. Set by
+ * the kernel upon successful completion of the IOCTL.
+ */
+ __u32 mode;
+
+ /**
+ * @value:
+ *
+ * The tiling mode parameter currently associated with the GEM object.
+ * Set by the kernel upon successful completion of the IOCTL.
+ */
+ __u32 value;
+
+ /**
+ * @pad:
+ *
+ * Structure padding that may be used in the future. Must be 0.
+ */
+ __u32 pad;
+};
+
+#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
+#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
+
+/**
+ * struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
+ */
+struct drm_tegra_gem_set_flags {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to set the flags.
+ */
+ __u32 handle;
+
+ /**
+ * @flags:
+ *
+ * The flags to set for the GEM object.
+ */
+ __u32 flags;
+};
+
+/**
+ * struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
+ */
+struct drm_tegra_gem_get_flags {
+ /**
+ * @handle:
+ *
+ * Handle to the GEM object for which to query the flags.
+ */
+ __u32 handle;
+
+ /**
+ * @flags:
+ *
+ * The flags currently associated with the GEM object. Set by the
+ * kernel upon successful completion of the IOCTL.
+ */
+ __u32 flags;
+};
+
+#define DRM_TEGRA_GEM_CREATE 0x00
+#define DRM_TEGRA_GEM_MMAP 0x01
+#define DRM_TEGRA_SYNCPT_READ 0x02
+#define DRM_TEGRA_SYNCPT_INCR 0x03
+#define DRM_TEGRA_SYNCPT_WAIT 0x04
+#define DRM_TEGRA_OPEN_CHANNEL 0x05
+#define DRM_TEGRA_CLOSE_CHANNEL 0x06
+#define DRM_TEGRA_GET_SYNCPT 0x07
+#define DRM_TEGRA_SUBMIT 0x08
+#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
+#define DRM_TEGRA_GEM_SET_TILING 0x0a
+#define DRM_TEGRA_GEM_GET_TILING 0x0b
+#define DRM_TEGRA_GEM_SET_FLAGS 0x0c
+#define DRM_TEGRA_GEM_GET_FLAGS 0x0d
+
+#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
+#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
+#define DRM_IOCTL_TEGRA_SYNCPT_READ DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_READ, struct drm_tegra_syncpt_read)
+#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
+#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
+#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
+#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
+#define DRM_IOCTL_TEGRA_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_TILING, struct drm_tegra_gem_set_tiling)
+#define DRM_IOCTL_TEGRA_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_TILING, struct drm_tegra_gem_get_tiling)
+#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
+#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
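+/*
+ * Illustrative sketch: creating a GEM object and mapping it with the legacy
+ * UAPI above, assuming libdrm's drmIoctl() from xf86drm.h and mmap() from
+ * sys/mman.h (error handling omitted):
+ *
+ *	struct drm_tegra_gem_create create = { .size = 4096 };
+ *	drmIoctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create);
+ *
+ *	struct drm_tegra_gem_mmap map = { .handle = create.handle };
+ *	drmIoctl(fd, DRM_IOCTL_TEGRA_GEM_MMAP, &map);
+ *	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, map.offset);
+ */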
+
+/* New Tegra DRM UAPI */
+
+/*
+ * Reported by the driver in the `capabilities` field.
+ *
+ * DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent
+ * with regard to the system memory.
+ */
+#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0)
+
+struct drm_tegra_channel_open {
+ /**
+ * @host1x_class: [in]
+ *
+ * Host1x class of the engine that will be programmed using this
+ * channel.
+ */
+ __u32 host1x_class;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @context: [out]
+ *
+ * Opaque identifier corresponding to the opened channel.
+ */
+ __u32 context;
+
+ /**
+ * @version: [out]
+ *
+ * Version of the engine hardware. This can be used by userspace
+ * to determine how the engine needs to be programmed.
+ */
+ __u32 version;
+
+ /**
+ * @capabilities: [out]
+ *
+ * Flags describing the hardware capabilities.
+ */
+ __u32 capabilities;
+ __u32 padding;
+};
+
+struct drm_tegra_channel_close {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to close.
+ */
+ __u32 context;
+ __u32 padding;
+};
+
+/*
+ * Mapping flags that can be used to influence how the mapping is created.
+ *
+ * DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access
+ * DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access
+ */
+#define DRM_TEGRA_CHANNEL_MAP_READ (1 << 0)
+#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1)
+#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \
+ DRM_TEGRA_CHANNEL_MAP_WRITE)
+
+struct drm_tegra_channel_map {
+ /**
+ * @context: [in]
+ *
+	 * Identifier of the channel to make the memory available to.
+ */
+ __u32 context;
+
+ /**
+ * @handle: [in]
+ *
+ * GEM handle of the memory to map.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @mapping: [out]
+ *
+ * Identifier corresponding to the mapping, to be used for
+ * relocations or unmapping later.
+ */
+ __u32 mapping;
+};
+
+struct drm_tegra_channel_unmap {
+ /**
+ * @context: [in]
+ *
+ * Channel identifier of the channel to unmap memory from.
+ */
+ __u32 context;
+
+ /**
+ * @mapping: [in]
+ *
+ * Mapping identifier of the memory mapping to unmap.
+ */
+ __u32 mapping;
+};
+
+/* Submission */
+
+/**
+ * Specify that bit 39 of the patched-in address should be set to switch
+ * swizzling between Tegra and non-Tegra sector layout on systems that store
+ * surfaces in system memory in non-Tegra sector layout.
+ */
+#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0)
+
+struct drm_tegra_submit_buf {
+ /**
+ * @mapping: [in]
+ *
+ * Identifier of the mapping to use in the submission.
+ */
+ __u32 mapping;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * Information for relocation patching.
+ */
+ struct {
+ /**
+ * @target_offset: [in]
+ *
+ * Offset from the start of the mapping of the data whose
+ * address is to be patched into the gather.
+ */
+ __u64 target_offset;
+
+ /**
+ * @gather_offset_words: [in]
+ *
+ * Offset in words from the start of the gather data to
+ * where the address should be patched into.
+ */
+ __u32 gather_offset_words;
+
+ /**
+ * @shift: [in]
+ *
+ * Number of bits the address should be shifted right before
+ * patching in.
+ */
+ __u32 shift;
+ } reloc;
+};
+
+/**
+ * Execute `words` words of Host1x opcodes specified in the `gather_data_ptr`
+ * buffer. Each GATHER_UPTR command uses successive words from the buffer.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR 0
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT 1
+/**
+ * Wait for a syncpoint to reach a value before continuing with further
+ * commands. The threshold is calculated relative to the start of the job.
+ */
+#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE 2
+
+struct drm_tegra_submit_cmd_gather_uptr {
+ __u32 words;
+ __u32 reserved[3];
+};
+
+struct drm_tegra_submit_cmd_wait_syncpt {
+ __u32 id;
+ __u32 value;
+ __u32 reserved[2];
+};
+
+struct drm_tegra_submit_cmd {
+ /**
+ * @type: [in]
+ *
+ * Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD*
+ * defines.
+ */
+ __u32 type;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ union {
+ struct drm_tegra_submit_cmd_gather_uptr gather_uptr;
+ struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt;
+ __u32 reserved[4];
+ };
+};
+
+struct drm_tegra_submit_syncpt {
+ /**
+ * @id: [in]
+ *
+ * ID of the syncpoint that the job will increment.
+ */
+ __u32 id;
+
+ /**
+ * @flags: [in]
+ *
+ * Flags.
+ */
+ __u32 flags;
+
+ /**
+ * @increments: [in]
+ *
+ * Number of times the job will increment this syncpoint.
+ */
+ __u32 increments;
+
+ /**
+ * @value: [out]
+ *
+ * Value the syncpoint will have once the job has completed all
+ * its specified syncpoint increments.
+ *
+ * Note that the kernel may increment the syncpoint before or after
+ * the job. These increments are not reflected in this field.
+ *
+ * If the job hangs or times out, not all of the increments may
+ * get executed.
+ */
+ __u32 value;
+};
+
+struct drm_tegra_channel_submit {
+ /**
+ * @context: [in]
+ *
+ * Identifier of the channel to submit this job to.
+ */
+ __u32 context;
+
+ /**
+ * @num_bufs: [in]
+ *
+ * Number of elements in the `bufs_ptr` array.
+ */
+ __u32 num_bufs;
+
+ /**
+ * @num_cmds: [in]
+ *
+ * Number of elements in the `cmds_ptr` array.
+ */
+ __u32 num_cmds;
+
+ /**
+ * @gather_data_words: [in]
+ *
+ * Number of 32-bit words in the `gather_data_ptr` array.
+ */
+ __u32 gather_data_words;
+
+ /**
+ * @bufs_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_buf structures.
+ */
+ __u64 bufs_ptr;
+
+ /**
+ * @cmds_ptr: [in]
+ *
+ * Pointer to an array of drm_tegra_submit_cmd structures.
+ */
+ __u64 cmds_ptr;
+
+ /**
+ * @gather_data_ptr: [in]
+ *
+ * Pointer to an array of Host1x opcodes to be used by GATHER_UPTR
+ * commands.
+ */
+ __u64 gather_data_ptr;
+
+ /**
+ * @syncobj_in: [in]
+ *
+ * Handle for DRM syncobj that will be waited before submission.
+ * Ignored if zero.
+ */
+ __u32 syncobj_in;
+
+ /**
+ * @syncobj_out: [in]
+ *
+ * Handle for DRM syncobj that will have its fence replaced with
+ * the job's completion fence. Ignored if zero.
+ */
+ __u32 syncobj_out;
+
+ /**
+	 * @syncpt: [in,out]
+ *
+ * Information about the syncpoint the job will increment.
+ */
+ struct drm_tegra_submit_syncpt syncpt;
+};
+
+struct drm_tegra_syncpoint_allocate {
+ /**
+ * @id: [out]
+ *
+ * ID of allocated syncpoint.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_free {
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to free.
+ */
+ __u32 id;
+ __u32 padding;
+};
+
+struct drm_tegra_syncpoint_wait {
+ /**
+	 * @timeout_ns: [in]
+ *
+ * Absolute timestamp at which the wait will time out.
+ */
+ __s64 timeout_ns;
+
+ /**
+ * @id: [in]
+ *
+ * ID of syncpoint to wait on.
+ */
+ __u32 id;
+
+ /**
+ * @threshold: [in]
+ *
+ * Threshold to wait for.
+ */
+ __u32 threshold;
+
+ /**
+ * @value: [out]
+ *
+ * Value of the syncpoint upon wait completion.
+ */
+ __u32 value;
+
+ __u32 padding;
+};
+
+#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open)
+#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close)
+#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map)
+#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap)
+#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit)
+
+#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free)
+#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait)
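+/*
+ * Illustrative sketch: opening a channel with the new UAPI, mapping a GEM
+ * object into it, and closing it again, assuming libdrm's drmIoctl();
+ * host1x_class and bo_handle stand in for caller-provided values (error
+ * handling omitted):
+ *
+ *	struct drm_tegra_channel_open open_args = {
+ *		.host1x_class = host1x_class,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &open_args);
+ *
+ *	struct drm_tegra_channel_map map_args = {
+ *		.context = open_args.context,
+ *		.handle = bo_handle,
+ *		.flags = DRM_TEGRA_CHANNEL_MAP_READ_WRITE,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_TEGRA_CHANNEL_MAP, &map_args);
+ *
+ *	struct drm_tegra_channel_close close_args = {
+ *		.context = open_args.context,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_TEGRA_CHANNEL_CLOSE, &close_args);
+ */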
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/vc4_drm.h b/include/libdrm/libdrm/vc4_drm.h
new file mode 100644
index 0000000..31f50de
--- /dev/null
+++ b/include/libdrm/libdrm/vc4_drm.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _VC4_DRM_H_
+#define _VC4_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_VC4_SUBMIT_CL 0x00
+#define DRM_VC4_WAIT_SEQNO 0x01
+#define DRM_VC4_WAIT_BO 0x02
+#define DRM_VC4_CREATE_BO 0x03
+#define DRM_VC4_MMAP_BO 0x04
+#define DRM_VC4_CREATE_SHADER_BO 0x05
+#define DRM_VC4_GET_HANG_STATE 0x06
+#define DRM_VC4_GET_PARAM 0x07
+#define DRM_VC4_SET_TILING 0x08
+#define DRM_VC4_GET_TILING 0x09
+#define DRM_VC4_LABEL_BO 0x0a
+#define DRM_VC4_GEM_MADVISE 0x0b
+#define DRM_VC4_PERFMON_CREATE 0x0c
+#define DRM_VC4_PERFMON_DESTROY 0x0d
+#define DRM_VC4_PERFMON_GET_VALUES 0x0e
+
+#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
+#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
+#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
+#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
+#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
+#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
+#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
+#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
+#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
+#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
+#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
+#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
+#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
+#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
+
+struct drm_vc4_submit_rcl_surface {
+ __u32 hindex; /* Handle index, or ~0 if not present. */
+ __u32 offset; /* Offset to start of buffer. */
+ /*
+ * Bits for either render config (color_write) or load/store packet.
+ * Bits should all be 0 for MSAA load/stores.
+ */
+ __u16 bits;
+
+#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
+ __u16 flags;
+};
+
+/**
+ * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * Drivers typically use GPU BOs to store batchbuffers / command lists and
+ * their associated state. However, because the VC4 lacks an MMU, we have to
+ * do validation of memory accesses by the GPU commands. If we were to store
+ * our commands in BOs, we'd need to do uncached readback from them to do the
+ * validation process, which is too expensive. Instead, userspace accumulates
+ * commands and associated state in plain memory, then the kernel copies the
+ * data to its own address space, and then validates and stores it in a GPU
+ * BO.
+ */
+struct drm_vc4_submit_cl {
+ /* Pointer to the binner command list.
+ *
+ * This is the first set of commands executed, which runs the
+ * coordinate shader to determine where primitives land on the screen,
+ * then writes out the state updates and draw calls necessary per tile
+ * to the tile allocation BO.
+ */
+ __u64 bin_cl;
+
+ /* Pointer to the shader records.
+ *
+ * Shader records are the structures read by the hardware that contain
+ * pointers to uniforms, shaders, and vertex attributes. The
+ * reference to the shader record has enough information to determine
+ * how many pointers are necessary (fixed number for shaders/uniforms,
+ * and an attribute count), so those BO indices into bo_handles are
+ * just stored as __u32s before each shader record passed in.
+ */
+ __u64 shader_rec;
+
+ /* Pointer to uniform data and texture handles for the textures
+ * referenced by the shader.
+ *
+ * For each shader state record, there is a set of uniform data in the
+ * order referenced by the record (FS, VS, then CS). Each set of
+ * uniform data has a __u32 index into bo_handles per texture
+ * sample operation, in the order the QPU_W_TMUn_S writes appear in
+ * the program. Following the texture BO handle indices is the actual
+ * uniform data.
+ *
+ * The individual uniform state blocks don't have sizes passed in,
+ * because the kernel has to determine the sizes anyway during shader
+ * code validation.
+ */
+ __u64 uniforms;
+ __u64 bo_handles;
+
+ /* Size in bytes of the binner command list. */
+ __u32 bin_cl_size;
+ /* Size in bytes of the set of shader records. */
+ __u32 shader_rec_size;
+ /* Number of shader records.
+ *
+ * This could just be computed from the contents of shader_records and
+ * the address bits of references to them from the bin CL, but it
+ * keeps the kernel from having to resize some allocations it makes.
+ */
+ __u32 shader_rec_count;
+ /* Size in bytes of the uniform state. */
+ __u32 uniforms_size;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /* RCL setup: */
+ __u16 width;
+ __u16 height;
+ __u8 min_x_tile;
+ __u8 min_y_tile;
+ __u8 max_x_tile;
+ __u8 max_y_tile;
+ struct drm_vc4_submit_rcl_surface color_read;
+ struct drm_vc4_submit_rcl_surface color_write;
+ struct drm_vc4_submit_rcl_surface zs_read;
+ struct drm_vc4_submit_rcl_surface zs_write;
+ struct drm_vc4_submit_rcl_surface msaa_color_write;
+ struct drm_vc4_submit_rcl_surface msaa_zs_write;
+ __u32 clear_color[2];
+ __u32 clear_z;
+ __u8 clear_s;
+
+ __u32 pad:24;
+
+#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
+/* By default, the kernel gets to choose the order that the tiles are
+ * rendered in. If this is set, then the tiles will be rendered in a
+ * raster order, with the right-to-left vs left-to-right and
+ * top-to-bottom vs bottom-to-top dictated by
+ * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
+ * blits to be implemented using the 3D engine.
+ */
+#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
+#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
+#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
+ __u32 flags;
+
+ /* Returned value of the seqno of this render job (for the
+ * wait ioctl).
+ */
+ __u64 seqno;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmonid;
+
+ /* Syncobj handle to wait on. If set, processing of this render job
+ * will not start until the syncobj is signaled. 0 means ignore.
+ */
+ __u32 in_sync;
+
+ /* Syncobj handle to export fence to. If set, the fence in the syncobj
+ * will be replaced with a fence that signals upon completion of this
+ * render job. 0 means ignore.
+ */
+ __u32 out_sync;
+
+ __u32 pad2;
+};
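+
+/* Example usage (a minimal sketch, not part of the original header;
+ * it assumes the DRM_IOCTL_VC4_SUBMIT_CL request macro defined
+ * earlier in this file and argument buffers already built in user
+ * memory):
+ *
+ *    struct drm_vc4_submit_cl submit;
+ *
+ *    memset(&submit, 0, sizeof(submit));
+ *    submit.bin_cl = (uintptr_t)bin_cl;
+ *    submit.bin_cl_size = bin_cl_size;
+ *    submit.shader_rec = (uintptr_t)shader_recs;
+ *    submit.shader_rec_size = shader_rec_size;
+ *    submit.shader_rec_count = shader_rec_count;
+ *    submit.uniforms = (uintptr_t)uniforms;
+ *    submit.uniforms_size = uniforms_size;
+ *    submit.bo_handles = (uintptr_t)bo_handles;
+ *    submit.bo_handle_count = bo_handle_count;
+ *    // plus width/height, tile bounds and the RCL surfaces
+ *
+ *    if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0)
+ *        last_seqno = submit.seqno;  // input to the wait ioctls
+ */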
+
+/**
+ * struct drm_vc4_wait_seqno - ioctl argument for waiting for
+ * DRM_VC4_SUBMIT_CL completion using its returned seqno.
+ *
+ * timeout_ns is the timeout in nanoseconds, where "0" means "don't
+ * block, just return the status."
+ */
+struct drm_vc4_wait_seqno {
+ __u64 seqno;
+ __u64 timeout_ns;
+};
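+
+/* Example usage (a sketch, not part of the original header; assumes
+ * the DRM_IOCTL_VC4_WAIT_SEQNO request macro defined earlier in this
+ * file): block for at most one second on the job submitted above:
+ *
+ *    struct drm_vc4_wait_seqno wait = {
+ *        .seqno = last_seqno,
+ *        .timeout_ns = 1000000000ull,
+ *    };
+ *
+ *    ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
+ *    // ret == -1 with errno == ETIME if the timeout expired first
+ */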
+
+/**
+ * struct drm_vc4_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_VC4_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_vc4_wait_bo {
+ __u32 handle;
+ __u32 pad;
+ __u64 timeout_ns;
+};
+
+/**
+ * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_create_bo {
+ __u32 size;
+ __u32 flags;
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/**
+ * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_vc4_mmap_bo {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
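+
+/* Example of the two-step mapping described above (a sketch, not part
+ * of the original header; assumes the DRM_IOCTL_VC4_MMAP_BO request
+ * macro defined earlier in this file):
+ *
+ *    struct drm_vc4_mmap_bo map = { .handle = bo_handle };
+ *    void *ptr = MAP_FAILED;
+ *
+ *    if (drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) == 0)
+ *        ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
+ *                   MAP_SHARED, fd, map.offset);
+ */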
+
+/**
+ * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
+ * shader BOs.
+ *
+ * Since allowing a shader to be overwritten while it's also being
+ * executed from would allow privilege escalation, shaders must be
+ * created using this ioctl, and they can't be mmapped later.
+ */
+struct drm_vc4_create_shader_bo {
+ /* Size of the data argument. */
+ __u32 size;
+ /* Flags, currently must be 0. */
+ __u32 flags;
+
+ /* Pointer to the data. */
+ __u64 data;
+
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ /* Pad, must be 0. */
+ __u32 pad;
+};
+
+struct drm_vc4_get_hang_state_bo {
+ __u32 handle;
+ __u32 paddr;
+ __u32 size;
+ __u32 pad;
+};
+
+/**
+ * struct drm_vc4_get_hang_state - ioctl argument for collecting state
+ * from a GPU hang for analysis.
+ */
+struct drm_vc4_get_hang_state {
+ /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
+ __u64 bo;
+ /**
+ * On input, the size of the bo array. Output is the number
+ * of bos to be returned.
+ */
+ __u32 bo_count;
+
+ __u32 start_bin, start_render;
+
+ __u32 ct0ca, ct0ea;
+ __u32 ct1ca, ct1ea;
+ __u32 ct0cs, ct1cs;
+ __u32 ct0ra0, ct1ra0;
+
+ __u32 bpca, bpcs;
+ __u32 bpoa, bpos;
+
+ __u32 vpmbase;
+
+ __u32 dbge;
+ __u32 fdbgo;
+ __u32 fdbgb;
+ __u32 fdbgr;
+ __u32 fdbgs;
+ __u32 errstat;
+
+ /* Pad that we may save more registers into in the future. */
+ __u32 pad[16];
+};
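+
+/* Example of the usual two-call pattern for the in/out bo_count field
+ * (a sketch, not part of the original header; assumes the
+ * DRM_IOCTL_VC4_GET_HANG_STATE request macro defined earlier in this
+ * file and that the kernel copies no BO entries when bo_count is too
+ * small):
+ *
+ *    struct drm_vc4_get_hang_state get;
+ *
+ *    memset(&get, 0, sizeof(get));
+ *    drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);  // learn count
+ *    get.bo = (uintptr_t)calloc(get.bo_count,
+ *                               sizeof(struct drm_vc4_get_hang_state_bo));
+ *    drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);  // fetch BOs
+ */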
+
+#define DRM_VC4_PARAM_V3D_IDENT0 0
+#define DRM_VC4_PARAM_V3D_IDENT1 1
+#define DRM_VC4_PARAM_V3D_IDENT2 2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
+#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
+#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
+#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
+#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
+
+struct drm_vc4_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
+struct drm_vc4_get_tiling {
+ __u32 handle;
+ __u32 flags;
+ __u64 modifier;
+};
+
+struct drm_vc4_set_tiling {
+ __u32 handle;
+ __u32 flags;
+ __u64 modifier;
+};
+
+/**
+ * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
+ */
+struct drm_vc4_label_bo {
+ __u32 handle;
+ __u32 len;
+ __u64 name;
+};
+
+/*
+ * States prefixed with '__' are internal states and cannot be passed to the
+ * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
+ */
+#define VC4_MADV_WILLNEED 0
+#define VC4_MADV_DONTNEED 1
+#define __VC4_MADV_PURGED 2
+#define __VC4_MADV_NOTSUPP 3
+
+struct drm_vc4_gem_madvise {
+ __u32 handle;
+ __u32 madv;
+ __u32 retained;
+ __u32 pad;
+};
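+
+/* Example usage (a sketch, not part of the original header; assumes
+ * the DRM_IOCTL_VC4_GEM_MADVISE request macro defined earlier in this
+ * file): reclaim a BO that was previously marked purgeable:
+ *
+ *    struct drm_vc4_gem_madvise arg = {
+ *        .handle = bo_handle,
+ *        .madv = VC4_MADV_WILLNEED,
+ *    };
+ *
+ *    drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
+ *    reusable = arg.retained;  // 0 means the BO was purged meanwhile
+ */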
+
+enum {
+ VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
+ VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
+ VC4_PERFCNT_FEP_CLIPPED_QUADS,
+ VC4_PERFCNT_FEP_VALID_QUADS,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
+ VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
+ VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
+ VC4_PERFCNT_PSE_PRIMS_REVERSED,
+ VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
+ VC4_PERFCNT_NUM_EVENTS,
+};
+
+#define DRM_VC4_MAX_PERF_COUNTERS 16
+
+struct drm_vc4_perfmon_create {
+ __u32 id;
+ __u32 ncounters;
+ __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
+};
+
+struct drm_vc4_perfmon_destroy {
+ __u32 id;
+};
+
+/*
+ * Returns the values of the performance counters tracked by this
+ * perfmon (as an array of ncounters u64 values).
+ *
+ * No implicit synchronization is performed, so the user has to
+ * guarantee that any jobs using this perfmon have already been
+ * completed (probably by blocking on the seqno returned by the
+ * last exec that used the perfmon).
+ */
+struct drm_vc4_perfmon_get_values {
+ __u32 id;
+ __u64 values_ptr;
+};
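+
+/* Example usage (a sketch, not part of the original header; assumes
+ * the DRM_IOCTL_VC4_PERFMON_CREATE and _GET_VALUES request macros
+ * defined earlier in this file):
+ *
+ *    struct drm_vc4_perfmon_create create = {
+ *        .ncounters = 2,
+ *        .events = { VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
+ *                    VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT },
+ *    };
+ *    struct drm_vc4_perfmon_get_values get;
+ *    __u64 values[2];
+ *
+ *    drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
+ *    // submit with perfmonid = create.id, then wait on the seqno
+ *    get.id = create.id;
+ *    get.values_ptr = (uintptr_t)values;
+ *    drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
+ */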
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _VC4_DRM_H_ */
diff --git a/include/libdrm/libdrm/via_drm.h b/include/libdrm/libdrm/via_drm.h
new file mode 100644
index 0000000..8b69e81
--- /dev/null
+++ b/include/libdrm/libdrm/via_drm.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_DRM_H_
+#define _VIA_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _VIA_DEFINES_
+#define _VIA_DEFINES_
+
+#include "via_drmclient.h"
+
+#define VIA_NR_SAREA_CLIPRECTS 8
+#define VIA_NR_XVMC_PORTS 10
+#define VIA_NR_XVMC_LOCKS 5
+#define VIA_MAX_CACHELINE_SIZE 64
+#define XVMCLOCKPTR(saPriv,lockNo) \
+ ((__volatile__ struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+ (VIA_MAX_CACHELINE_SIZE - 1)) & \
+ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
+ VIA_MAX_CACHELINE_SIZE*(lockNo)))
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define VIA_NR_TEX_REGIONS 64
+#define VIA_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define VIA_UPLOAD_CTX 0x4
+#define VIA_UPLOAD_BUFFERS 0x8
+#define VIA_UPLOAD_TEX0 0x10
+#define VIA_UPLOAD_TEX1 0x20
+#define VIA_UPLOAD_CLIPRECTS 0x40
+#define VIA_UPLOAD_ALL 0xff
+
+/* VIA specific ioctls */
+#define DRM_VIA_ALLOCMEM 0x00
+#define DRM_VIA_FREEMEM 0x01
+#define DRM_VIA_AGP_INIT 0x02
+#define DRM_VIA_FB_INIT 0x03
+#define DRM_VIA_MAP_INIT 0x04
+#define DRM_VIA_DEC_FUTEX 0x05
+#define NOT_USED
+#define DRM_VIA_DMA_INIT 0x07
+#define DRM_VIA_CMDBUFFER 0x08
+#define DRM_VIA_FLUSH 0x09
+#define DRM_VIA_PCICMD 0x0a
+#define DRM_VIA_CMDBUF_SIZE 0x0b
+#define NOT_USED
+#define DRM_VIA_WAIT_IRQ 0x0d
+#define DRM_VIA_DMA_BLIT 0x0e
+#define DRM_VIA_BLIT_SYNC 0x0f
+
+#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
+#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
+#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
+#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
+#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
+#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
+#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
+ drm_via_cmdbuf_size_t)
+#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+#define VIA_TEX_SETUP_SIZE 8
+
+/* Flags for clear ioctl
+ */
+#define VIA_FRONT 0x1
+#define VIA_BACK 0x2
+#define VIA_DEPTH 0x4
+#define VIA_STENCIL 0x8
+#define VIA_MEM_VIDEO 0 /* matches drm constant */
+#define VIA_MEM_AGP 1 /* matches drm constant */
+#define VIA_MEM_SYSTEM 2
+#define VIA_MEM_MIXED 3
+#define VIA_MEM_UNKNOWN 4
+
+typedef struct {
+ __u32 offset;
+ __u32 size;
+} drm_via_agp_t;
+
+typedef struct {
+ __u32 offset;
+ __u32 size;
+} drm_via_fb_t;
+
+typedef struct {
+ __u32 context;
+ __u32 type;
+ __u32 size;
+ unsigned long index;
+ unsigned long offset;
+} drm_via_mem_t;
+
+typedef struct _drm_via_init {
+ enum {
+ VIA_INIT_MAP = 0x01,
+ VIA_CLEANUP_MAP = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long agpAddr;
+} drm_via_init_t;
+
+typedef struct _drm_via_futex {
+ enum {
+ VIA_FUTEX_WAIT = 0x00,
+		VIA_FUTEX_WAKE = 0x01
+ } func;
+ __u32 ms;
+ __u32 lock;
+ __u32 val;
+} drm_via_futex_t;
+
+typedef struct _drm_via_dma_init {
+ enum {
+ VIA_INIT_DMA = 0x01,
+ VIA_CLEANUP_DMA = 0x02,
+ VIA_DMA_INITIALIZED = 0x03
+ } func;
+
+ unsigned long offset;
+ unsigned long size;
+ unsigned long reg_pause_addr;
+} drm_via_dma_init_t;
+
+typedef struct _drm_via_cmdbuffer {
+ char *buf;
+ unsigned long size;
+} drm_via_cmdbuffer_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_via_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char inUse; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_via_tex_region_t;
+
+typedef struct _drm_via_sarea {
+ unsigned int dirty;
+ unsigned int nbox;
+ struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
+ drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
+ int texAge; /* last time texture was uploaded */
+ int ctxOwner; /* last context to upload state */
+ int vertexPrim;
+
+ /*
+ * Below is for XvMC.
+ * We want the lock integers alone on, and aligned to, a cache line.
+ * Therefore this somewhat strange construct.
+ */
+
+ char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
+
+ unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
+
+ /* Used by the 3d driver only at this point, for pageflipping:
+ */
+ unsigned int pfCurrentOffset;
+} drm_via_sarea_t;
+
+typedef struct _drm_via_cmdbuf_size {
+ enum {
+ VIA_CMDBUF_SPACE = 0x01,
+ VIA_CMDBUF_LAG = 0x02
+ } func;
+ int wait;
+ __u32 size;
+} drm_via_cmdbuf_size_t;
+
+typedef enum {
+ VIA_IRQ_ABSOLUTE = 0x0,
+ VIA_IRQ_RELATIVE = 0x1,
+ VIA_IRQ_SIGNAL = 0x10000000,
+ VIA_IRQ_FORCE_SEQUENCE = 0x20000000
+} via_irq_seq_type_t;
+
+#define VIA_IRQ_FLAGS_MASK 0xF0000000
+
+enum drm_via_irqs {
+ drm_via_irq_hqv0 = 0,
+ drm_via_irq_hqv1,
+ drm_via_irq_dma0_dd,
+ drm_via_irq_dma0_td,
+ drm_via_irq_dma1_dd,
+ drm_via_irq_dma1_td,
+ drm_via_irq_num
+};
+
+struct drm_via_wait_irq_request {
+ unsigned irq;
+ via_irq_seq_type_t type;
+ __u32 sequence;
+ __u32 signal;
+};
+
+typedef union drm_via_irqwait {
+ struct drm_via_wait_irq_request request;
+ struct drm_wait_vblank_reply reply;
+} drm_via_irqwait_t;
+
+typedef struct drm_via_blitsync {
+ __u32 sync_handle;
+ unsigned engine;
+} drm_via_blitsync_t;
+
+/* Below, "flags" is currently unused but will be used for possible future
+ * extensions like kernel space bounce buffers for bad alignments and
+ * blit engine busy-wait polling for better latency in the absence of
+ * interrupts.
+ */
+
+typedef struct drm_via_dmablit {
+ __u32 num_lines;
+ __u32 line_length;
+
+ __u32 fb_addr;
+ __u32 fb_stride;
+
+ unsigned char *mem_addr;
+ __u32 mem_stride;
+
+ __u32 flags;
+ int to_fb;
+
+ drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _VIA_DRM_H_ */
diff --git a/include/libdrm/libdrm/virtgpu_drm.h b/include/libdrm/libdrm/virtgpu_drm.h
new file mode 100644
index 0000000..f06a789
--- /dev/null
+++ b/include/libdrm/libdrm/virtgpu_drm.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
+struct drm_virtgpu_map {
+ __u64 offset; /* use for mmap system call */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_virtgpu_execbuffer {
+ __u32 flags;
+ __u32 size;
+ __u64 command; /* void* */
+ __u64 bo_handles;
+ __u32 num_bo_handles;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+
+struct drm_virtgpu_getparam {
+ __u64 param;
+ __u64 value;
+};
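+
+/* Example usage (a sketch, not part of the original header), probing
+ * for 3D support with the DRM_IOCTL_VIRTGPU_GETPARAM request macro
+ * defined below; @value carries a user-space pointer that the kernel
+ * writes the result through:
+ *
+ *    int has_3d = 0;
+ *    struct drm_virtgpu_getparam gp = {
+ *        .param = VIRTGPU_PARAM_3D_FEATURES,
+ *        .value = (uintptr_t)&has_3d,
+ *    };
+ *
+ *    drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
+ */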
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+ __u32 target;
+ __u32 format;
+ __u32 bind;
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 array_size;
+ __u32 last_level;
+ __u32 nr_samples;
+ __u32 flags;
+ __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
+ __u32 res_handle; /* returned by kernel */
+ __u32 size; /* validate transfer in the host */
+ __u32 stride; /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u32 size;
+ __u32 stride;
+};
+
+struct drm_virtgpu_3d_box {
+ __u32 x;
+ __u32 y;
+ __u32 z;
+ __u32 w;
+ __u32 h;
+ __u32 d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
+struct drm_virtgpu_3d_wait {
+ __u32 handle; /* 0 is an invalid handle */
+ __u32 flags;
+};
+
+struct drm_virtgpu_get_caps {
+ __u32 cap_set_id;
+ __u32 cap_set_ver;
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+ struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
+ struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+ struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+ struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+ struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
+ struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+ struct drm_virtgpu_get_caps)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libdrm/vmwgfx_drm.h b/include/libdrm/libdrm/vmwgfx_drm.h
new file mode 100644
index 0000000..2b8d47e
--- /dev/null
+++ b/include/libdrm/libdrm/vmwgfx_drm.h
@@ -0,0 +1,1128 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef __VMWGFX_DRM_H__
+#define __VMWGFX_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_VMW_MAX_SURFACE_FACES 6
+#define DRM_VMW_MAX_MIP_LEVELS 24
+
+
+#define DRM_VMW_GET_PARAM 0
+#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_UNREF_DMABUF 2
+#define DRM_VMW_HANDLE_CLOSE 2
+#define DRM_VMW_CURSOR_BYPASS 3
+/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
+#define DRM_VMW_CONTROL_STREAM 4
+#define DRM_VMW_CLAIM_STREAM 5
+#define DRM_VMW_UNREF_STREAM 6
+/* guarded by DRM_VMW_PARAM_3D == 1 */
+#define DRM_VMW_CREATE_CONTEXT 7
+#define DRM_VMW_UNREF_CONTEXT 8
+#define DRM_VMW_CREATE_SURFACE 9
+#define DRM_VMW_UNREF_SURFACE 10
+#define DRM_VMW_REF_SURFACE 11
+#define DRM_VMW_EXECBUF 12
+#define DRM_VMW_GET_3D_CAP 13
+#define DRM_VMW_FENCE_WAIT 14
+#define DRM_VMW_FENCE_SIGNALED 15
+#define DRM_VMW_FENCE_UNREF 16
+#define DRM_VMW_FENCE_EVENT 17
+#define DRM_VMW_PRESENT 18
+#define DRM_VMW_PRESENT_READBACK 19
+#define DRM_VMW_UPDATE_LAYOUT 20
+#define DRM_VMW_CREATE_SHADER 21
+#define DRM_VMW_UNREF_SHADER 22
+#define DRM_VMW_GB_SURFACE_CREATE 23
+#define DRM_VMW_GB_SURFACE_REF 24
+#define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_PARAM - get device information.
+ *
+ * DRM_VMW_PARAM_FIFO_OFFSET:
+ * Offset to use to map the first page of the FIFO read-only.
+ * The fifo is mapped using the mmap() system call on the drm device.
+ *
+ * DRM_VMW_PARAM_OVERLAY_IOCTL:
+ * Does the driver support the overlay ioctl.
+ */
+
+#define DRM_VMW_PARAM_NUM_STREAMS 0
+#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
+#define DRM_VMW_PARAM_3D 2
+#define DRM_VMW_PARAM_HW_CAPS 3
+#define DRM_VMW_PARAM_FIFO_CAPS 4
+#define DRM_VMW_PARAM_MAX_FB_SIZE 5
+#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
+#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
+#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_DX 12
+
+/**
+ * enum drm_vmw_handle_type - handle type for ref ioctls
+ *
+ */
+enum drm_vmw_handle_type {
+ DRM_VMW_HANDLE_LEGACY = 0,
+ DRM_VMW_HANDLE_PRIME = 1
+};
+
+/**
+ * struct drm_vmw_getparam_arg
+ *
+ * @value: Returned value. //Out
+ * @param: Parameter to query. //In.
+ *
+ * Argument to the DRM_VMW_GET_PARAM Ioctl.
+ */
+
+struct drm_vmw_getparam_arg {
+ __u64 value;
+ __u32 param;
+ __u32 pad64;
+};
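+
+/* Example usage (a sketch, not part of the original header), issued
+ * through libdrm's drmCommandWriteRead() helper since this header
+ * only defines the command offsets:
+ *
+ *    struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D };
+ *
+ *    if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
+ *                            &gp, sizeof(gp)) == 0)
+ *        has_3d = (gp.value == 1);
+ */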
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+
+/**
+ * struct drm_vmw_context_arg
+ *
+ * @cid: Device unique context ID.
+ *
+ * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
+ * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
+ */
+
+struct drm_vmw_context_arg {
+ __s32 cid;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_CONTEXT - Free a host context.
+ *
+ * Frees a global context id, and queues a destroy host command for the host.
+ * Does not wait for host completion. The context ID can be used directly
+ * in the command stream and shows up as the same context ID on the host.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SURFACE - Create a host surface.
+ *
+ * Allocates a device unique surface id, and queues a create surface command
+ * for the host. Does not wait for host completion. The surface ID can be
+ * used directly in the command stream and shows up as the same surface
+ * ID on the host.
+ */
+
+/**
+ * struct drm_vmw_surface_create_req
+ *
+ * @flags: Surface flags as understood by the host.
+ * @format: Surface format as understood by the host.
+ * @mip_levels: Number of mip levels for each face.
+ * An unused face should have 0 encoded.
+ * @size_addr: Address of a user-space array of struct drm_vmw_size
+ * cast to an __u64 for 32-64 bit compatibility.
+ * The size of the array should equal the total number of mipmap levels.
+ * @shareable: Boolean whether other clients (as identified by file descriptors)
+ * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
+ *
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Output data from the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_create_req {
+ __u32 flags;
+ __u32 format;
+ __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ __u64 size_addr;
+ __s32 shareable;
+ __s32 scanout;
+};
+
+/**
+ * struct drm_vmw_surface_arg
+ *
+ * @sid: Surface id of created surface or surface to destroy or reference.
+ * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
+ *
+ * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_arg {
+ __s32 sid;
+ enum drm_vmw_handle_type handle_type;
+};
+
+/**
+ * struct drm_vmw_size
+ *
+ * @width - mip level width
+ * @height - mip level height
+ * @depth - mip level depth
+ *
+ * Description of a mip level.
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+struct drm_vmw_size {
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 pad64;
+};
+
+/**
+ * union drm_vmw_surface_create_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_create_arg {
+ struct drm_vmw_surface_arg rep;
+ struct drm_vmw_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_REF_SURFACE - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given sid, as previously
+ * returned by the DRM_VMW_CREATE_SURFACE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface ID in the command
+ * stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * in the DRM_VMW_CREATE_SURFACE ioctl.
+ */
+
+/**
+ * union drm_vmw_surface_reference_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_reference_arg {
+ struct drm_vmw_surface_create_req rep;
+ struct drm_vmw_surface_arg req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
+ *
+ * Clear a reference previously put on a host surface.
+ * When all references are gone, including the one implicitly placed
+ * on creation,
+ * a destroy surface command will be queued for the host.
+ * Does not wait for completion.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_EXECBUF
+ *
+ * Submit a command buffer for execution on the host, and return a
+ * fence seqno that when signaled, indicates that the command buffer has
+ * executed.
+ */
+
+/**
+ * struct drm_vmw_execbuf_arg
+ *
+ * @commands: User-space address of a command buffer cast to an __u64.
+ * @command_size: Size in bytes of the command buffer.
+ * @throttle_us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
+ * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
+ * __u64.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags.
+ * @imported_fence_fd: FD for a fence imported from another device
+ *
+ * Argument to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+#define DRM_VMW_EXECBUF_VERSION 2
+
+#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
+#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
+
+struct drm_vmw_execbuf_arg {
+ __u64 commands;
+ __u32 command_size;
+ __u32 throttle_us;
+ __u64 fence_rep;
+ __u32 version;
+ __u32 flags;
+ __u32 context_handle;
+ __s32 imported_fence_fd;
+};
+
+/**
+ * struct drm_vmw_fence_rep
+ *
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32 bits.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
+ * @fd: FD associated with the fence, -1 if not exported
+ * @error: This member should've been set to -EFAULT on submission.
+ * The following actions should be taken on completion:
+ * error == -EFAULT: Fence communication failed. The host is synchronized.
+ * Use the last fence id read from the FIFO fence register.
+ * error != 0 && error != -EFAULT:
+ * Fence submission failed. The host is synchronized. Use the fence_seq member.
+ * error == 0: All is OK, The host may not be synchronized.
+ * Use the fence_seq member.
+ *
+ * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+struct drm_vmw_fence_rep {
+ __u32 handle;
+ __u32 mask;
+ __u32 seqno;
+ __u32 passed_seqno;
+ __s32 fd;
+ __s32 error;
+};
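+
+/* Example of acting on @error as described above (a sketch, not part
+ * of the original header; read_fifo_fence_register() is a
+ * hypothetical helper):
+ *
+ *    struct drm_vmw_fence_rep rep;  // filled in by DRM_VMW_EXECBUF
+ *
+ *    if (rep.error == -EFAULT)
+ *        seqno = read_fifo_fence_register();  // hypothetical
+ *    else
+ *        seqno = rep.seqno;  // host synchronized, or all OK
+ */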
+
+/*************************************************************************/
+/**
+ * DRM_VMW_ALLOC_DMABUF
+ *
+ * Allocate a DMA buffer that is also visible to the host.
+ * NOTE: The buffer is
+ * identified by a handle and an offset, which are private to the guest, but
+ * usable in the command stream. The guest kernel may translate these
+ * and patch up the command stream accordingly. In the future, the offset may
+ * be zero at all times, or it may disappear from the interface before it is
+ * fixed.
+ *
+ * The DMA buffer may stay user-space mapped in the guest at all times,
+ * and is thus suitable for sub-allocation.
+ *
+ * DMA buffers are mapped using the mmap() syscall on the drm device.
+ */
+
+/**
+ * struct drm_vmw_alloc_dmabuf_req
+ *
+ * @size: Required minimum size of the buffer.
+ *
+ * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_alloc_dmabuf_req {
+ __u32 size;
+ __u32 pad64;
+};
+
+/**
+ * struct drm_vmw_dmabuf_rep
+ *
+ * @map_handle: Offset to use in the mmap() call used to map the buffer.
+ * @handle: Handle unique to this buffer. Used for unreferencing.
+ * @cur_gmr_id: GMR id to use in the command stream when this buffer is
+ * referenced. See note above.
+ * @cur_gmr_offset: Offset to use in the command stream when this buffer is
+ * referenced. See note above.
+ *
+ * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_dmabuf_rep {
+ __u64 map_handle;
+ __u32 handle;
+ __u32 cur_gmr_id;
+ __u32 cur_gmr_offset;
+ __u32 pad64;
+};
+
+/**
+ * union drm_vmw_dmabuf_arg
+ *
+ * @req: Input data as described above.
+ * @rep: Output data as described above.
+ *
+ * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+union drm_vmw_alloc_dmabuf_arg {
+ struct drm_vmw_alloc_dmabuf_req req;
+ struct drm_vmw_dmabuf_rep rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
+ *
+ */
+
+/**
+ * struct drm_vmw_unref_dmabuf_arg
+ *
+ * @handle: Handle indicating what buffer to free. Obtained from the
+ * DRM_VMW_ALLOC_DMABUF Ioctl.
+ *
+ * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
+ */
+
+struct drm_vmw_unref_dmabuf_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
+ *
+ * This IOCTL controls the overlay units of the svga device.
+ * The SVGA overlay units do not work like regular hardware units in
+ * that they do not automatically read back the contents of the given dma
+ * buffer. Instead they only read back on each call to this ioctl, and
+ * at any point between this call being made and a following call that
+ * either changes the buffer or disables the stream.
+ */
+
+/**
+ * struct drm_vmw_rect
+ *
+ * Defines a rectangle. Used in the overlay ioctl to define
+ * source and destination rectangle.
+ */
+
+struct drm_vmw_rect {
+ __s32 x;
+ __s32 y;
+ __u32 w;
+ __u32 h;
+};
+
+/**
+ * struct drm_vmw_control_stream_arg
+ *
+ * @stream_id: Stream to control.
+ * @enabled: If false all following arguments are ignored.
+ * @handle: Handle to buffer for getting data from.
+ * @format: Format of the overlay as understood by the host.
+ * @width: Width of the overlay.
+ * @height: Height of the overlay.
+ * @size: Size of the overlay in bytes.
+ * @pitch: Array of pitches, the last two are only used for YUV12 formats.
+ * @offset: Offset from start of dma buffer to overlay.
+ * @src: Source rect, must be within the defined area above.
+ * @dst: Destination rect, x and y may be negative.
+ *
+ * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
+ */
+
+struct drm_vmw_control_stream_arg {
+ __u32 stream_id;
+ __u32 enabled;
+
+ __u32 flags;
+ __u32 color_key;
+
+ __u32 handle;
+ __u32 offset;
+ __s32 format;
+ __u32 size;
+ __u32 width;
+ __u32 height;
+ __u32 pitch[3];
+
+ __u32 pad64;
+ struct drm_vmw_rect src;
+ struct drm_vmw_rect dst;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
+ *
+ */
+
+#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
+#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
+
+/**
+ * struct drm_vmw_cursor_bypass_arg
+ *
+ * @flags: Flags.
+ * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
+ * @xpos: X position of cursor.
+ * @ypos: Y position of cursor.
+ * @xhot: X hotspot.
+ * @yhot: Y hotspot.
+ *
+ * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
+ */
+
+struct drm_vmw_cursor_bypass_arg {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 xpos;
+ __s32 ypos;
+ __s32 xhot;
+ __s32 yhot;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CLAIM_STREAM - Claim a single stream.
+ */
+
+/**
+ * struct drm_vmw_stream_arg
+ *
+ * @stream_id: Device unique stream ID.
+ *
+ * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
+ * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
+ */
+
+struct drm_vmw_stream_arg {
+ __u32 stream_id;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_STREAM - Unclaim a stream.
+ *
+ * Return a single stream that was claimed by this process. Also makes
+ * sure that the stream has been stopped.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_3D_CAP
+ *
+ * Read 3D capabilities from the FIFO
+ *
+ */
+
+/**
+ * struct drm_vmw_get_3d_cap_arg
+ *
+ * @buffer: Pointer to a buffer for capability data, cast to an __u64
+ * @max_size: Max size to copy
+ *
+ * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
+ */
+
+struct drm_vmw_get_3d_cap_arg {
+ __u64 buffer;
+ __u32 max_size;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the wait. The wait may time out,
+ * in which case the call returns -EBUSY. If the wait is restarted,
+ * that is restarting without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the
+ * command stream have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query
+ * finish commands in the buffer given to the EXECBUF ioctl returning
+ * the fence object handle are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fence wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+ __u32 handle;
+ __s32 cookie_valid;
+ __u64 kernel_cookie;
+ __u64 timeout_us;
+ __s32 lazy;
+ __s32 flags;
+ __s32 wait_options;
+ __s32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled.
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
+ * @signaled: Out: Whether the fence object is signaled.
+ * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ * @signaled_flags: Out: Flags signaled.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
+ * ioctls.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+ __u32 handle;
+ __u32 flags;
+ __s32 signaled;
+ __u32 passed_seqno;
+ __u32 signaled_flags;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
+ */
+
+struct drm_vmw_fence_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_EVENT
+ *
+ * Queues an event on a fence to be delivered on the drm character device
+ * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
+ * Optionally the approximate time when the fence signaled is
+ * given by the event.
+ */
+
+/*
+ * The event type
+ */
+#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
+
+struct drm_vmw_event_fence {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+};
+
+/*
+ * Flags that may be given to the command.
+ */
+/* Request fence signaled time on the event. */
+#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
+
+/**
+ * struct drm_vmw_fence_event_arg
+ *
+ * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
+ * the fence is not supposed to be referenced by user-space.
+ * @user_data: Info to be delivered with the event.
+ * @handle: Attach the event to this fence only.
+ * @flags: A set of flags as defined above.
+ */
+struct drm_vmw_fence_event_arg {
+ __u64 fence_rep;
+ __u64 user_data;
+ __u32 handle;
+ __u32 flags;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT
+ *
+ * Executes an SVGA present on a given fb for a given surface. The surface
+ * is placed on the framebuffer. Cliprects are given relative to the given
+ * point (the point designated by dest_{x|y}).
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: framebuffer id to present / read back from.
+ * @sid: Surface id to present from.
+ * @dest_x: X placement coordinate for surface.
+ * @dest_y: Y placement coordinate for surface.
+ * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
+ * @num_clips: Number of cliprects given relative to the framebuffer origin,
+ * in the same coordinate space as the frame buffer.
+ * @pad64: Unused 64-bit padding.
+ *
+ * Input argument to the DRM_VMW_PRESENT ioctl.
+ */
+
+struct drm_vmw_present_arg {
+ __u32 fb_id;
+ __u32 sid;
+ __s32 dest_x;
+ __s32 dest_y;
+ __u64 clips_ptr;
+ __u32 num_clips;
+ __u32 pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT_READBACK
+ *
+ * Executes an SVGA present readback from a given fb to the dma buffer
+ * currently bound as the fb. If there is no dma buffer bound to the fb,
+ * an error will be returned.
+ *
+ */
+
+/**
+ * struct drm_vmw_present_readback_arg
+ * @fb_id: fb_id to present / read back from.
+ * @num_clips: Number of cliprects.
+ * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
+ * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
+ * If this member is NULL, then the ioctl should not return a fence.
+ */
+
+struct drm_vmw_present_readback_arg {
+ __u32 fb_id;
+ __u32 num_clips;
+ __u64 clips_ptr;
+ __u64 fence_rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UPDATE_LAYOUT - Update layout
+ *
+ * Updates the preferred modes and connection status for connectors. The
+ * command consists of one drm_vmw_update_layout_arg pointing to an array
+ * of num_outputs drm_vmw_rect's.
+ */
+
+/**
+ * struct drm_vmw_update_layout_arg
+ *
+ * @num_outputs: number of active connectors
+ * @rects: pointer to array of drm_vmw_rect cast to an __u64
+ *
+ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
+ */
+struct drm_vmw_update_layout_arg {
+ __u32 num_outputs;
+ __u32 pad64;
+ __u64 rects;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SHADER - Create shader
+ *
+ * Creates a shader and optionally binds it to a dma buffer containing
+ * the shader byte-code.
+ */
+
+/**
+ * enum drm_vmw_shader_type - Shader types
+ */
+enum drm_vmw_shader_type {
+ drm_vmw_shader_type_vs = 0,
+ drm_vmw_shader_type_ps,
+};
+
+
+/**
+ * struct drm_vmw_shader_create_arg
+ *
+ * @shader_type: Shader type of the shader to create.
+ * @size: Size of the byte-code in bytes.
+ * @buffer_handle: Buffer handle identifying the buffer containing the
+ * shader byte-code.
+ * @shader_handle: On successful completion contains a handle that
+ * can be used to subsequently identify the shader.
+ * @offset: Offset in bytes into the buffer given by @buffer_handle,
+ * where the shader byte-code starts.
+ *
+ * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
+ */
+struct drm_vmw_shader_create_arg {
+ enum drm_vmw_shader_type shader_type;
+ __u32 size;
+ __u32 buffer_handle;
+ __u32 shader_handle;
+ __u64 offset;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SHADER - Unreferences a shader
+ *
+ * Drops a user-space reference to a shader, destroying the shader
+ * itself once the last reference is gone.
+ */
+
+/**
+ * struct drm_vmw_shader_arg
+ *
+ * @handle: Handle identifying the shader to destroy.
+ *
+ * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
+ */
+struct drm_vmw_shader_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ */
+
+/**
+ * enum drm_vmw_surface_flags
+ *
+ * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
+ * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
+ * surface.
+ * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
+ * given.
+ */
+enum drm_vmw_surface_flags {
+ drm_vmw_surface_flag_shareable = (1 << 0),
+ drm_vmw_surface_flag_scanout = (1 << 1),
+ drm_vmw_surface_flag_create_buffer = (1 << 2)
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_req
+ *
+ * @svga3d_flags: SVGA3d surface flags for the device.
+ * @format: SVGA3d format.
+ * @mip_levels: Number of mip levels for all faces.
+ * @drm_surface_flags: Flags as described above.
+ * @multisample_count: Future use. Set to 0.
+ * @autogen_filter: Future use. Set to 0.
+ * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
+ * if none.
+ * @base_size: Size of the base mip level for all faces.
+ * @array_size: Must be zero for non-DX hardware, and if non-zero
+ * svga3d_flags must have proper bind flags setup.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+struct drm_vmw_gb_surface_create_req {
+ __u32 svga3d_flags;
+ __u32 format;
+ __u32 mip_levels;
+ enum drm_vmw_surface_flags drm_surface_flags;
+ __u32 multisample_count;
+ __u32 autogen_filter;
+ __u32 buffer_handle;
+ __u32 array_size;
+ struct drm_vmw_size base_size;
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_rep
+ *
+ * @handle: Surface handle.
+ * @backup_size: Size of backup buffers for this surface.
+ * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
+ * @buffer_size: Actual size of the buffer identified by
+ * @buffer_handle
+ * @buffer_map_handle: Offset into device address space for the buffer
+ * identified by @buffer_handle.
+ *
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
+ * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+struct drm_vmw_gb_surface_create_rep {
+ __u32 handle;
+ __u32 backup_size;
+ __u32 buffer_handle;
+ __u32 buffer_size;
+ __u64 buffer_map_handle;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+union drm_vmw_gb_surface_create_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_reference_arg
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
+ */
+struct drm_vmw_gb_surface_ref_rep {
+ struct drm_vmw_gb_surface_create_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_arg {
+ struct drm_vmw_gb_surface_ref_rep rep;
+ struct drm_vmw_surface_arg req;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the
+ * buffer read-only.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+ drm_vmw_synccpu_read = (1 << 0),
+ drm_vmw_synccpu_write = (1 << 1),
+ drm_vmw_synccpu_dontblock = (1 << 2),
+ drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+ drm_vmw_synccpu_grab,
+ drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op: The synccpu operation as described above.
+ * @handle: Handle identifying the buffer object.
+ * @flags: Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+ enum drm_vmw_synccpu_op op;
+ enum drm_vmw_synccpu_flags flags;
+ __u32 handle;
+ __u32 pad64;
+};
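+
+/* Example of the grab/release pattern (a sketch, not part of the
+ * original header), issued through libdrm's drmCommandWrite() helper
+ * since this header only defines the command offsets:
+ *
+ *    struct drm_vmw_synccpu_arg sync = {
+ *        .op = drm_vmw_synccpu_grab,
+ *        .flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
+ *        .handle = bo_handle,
+ *    };
+ *
+ *    drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
+ *    // CPU reads/writes through the buffer mapping happen here
+ *    sync.op = drm_vmw_synccpu_release;
+ *    drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
+ */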
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+ drm_vmw_context_legacy,
+ drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+ enum drm_vmw_extended_context req;
+ struct drm_vmw_context_arg rep;
+};
+
+/*************************************************************************/
+/*
+ * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
+ * underlying resource.
+ *
+ * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
+ * The ioctl arguments therefore need to be identical in layout.
+ *
+ */
+
+/**
+ * struct drm_vmw_handle_close_arg
+ *
+ * @handle: Handle to close.
+ *
+ * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
+ */
+struct drm_vmw_handle_close_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/libsync.h b/include/libdrm/libsync.h
new file mode 100644
index 0000000..f1a2f96
--- /dev/null
+++ b/include/libdrm/libsync.h
@@ -0,0 +1,148 @@
+/*
+ * sync abstraction
+ * Copyright 2015-2016 Collabora Ltd.
+ *
+ * Based on the implementation from the Android Open Source Project,
+ *
+ * Copyright 2012 Google, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _LIBSYNC_H
+#define _LIBSYNC_H
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+#include <unistd.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef SYNC_IOC_MERGE
+/* duplicated from linux/sync_file.h to avoid build-time dependency
+ * on new (v4.7) kernel headers. Once distros are mostly using
+ * something newer than v4.7, drop this and #include <linux/sync_file.h>
+ * instead.
+ */
+struct sync_merge_data {
+ char name[32];
+ int32_t fd2;
+ int32_t fence;
+ uint32_t flags;
+ uint32_t pad;
+};
+#define SYNC_IOC_MAGIC '>'
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
+#endif
+
+
+static inline int sync_wait(int fd, int timeout)
+{
+ struct pollfd fds = {0};
+ int ret;
+
+ fds.fd = fd;
+ fds.events = POLLIN;
+
+ do {
+ ret = poll(&fds, 1, timeout);
+ if (ret > 0) {
+ if (fds.revents & (POLLERR | POLLNVAL)) {
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+ } else if (ret == 0) {
+ errno = ETIME;
+ return -1;
+ }
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ return ret;
+}
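+
+/* Example (a sketch, not part of the original file): wait up to 100ms
+ * on an explicit fence fd handed back by a driver:
+ *
+ *    ret = sync_wait(fence_fd, 100);
+ *    // ret == -1 with errno == ETIME if the fence did not signal
+ */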
+
+static inline int sync_merge(const char *name, int fd1, int fd2)
+{
+ struct sync_merge_data data = {0};
+ int ret;
+
+ data.fd2 = fd2;
+ strncpy(data.name, name, sizeof(data.name));
+
+ do {
+ ret = ioctl(fd1, SYNC_IOC_MERGE, &data);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ if (ret < 0)
+ return ret;
+
+ return data.fence;
+}
+
+/* accumulate fd2 into fd1. If *fd1 is not a valid fd then dup fd2,
+ * otherwise sync_merge() and close the old *fd1. This can be used
+ * to implement the pattern:
+ *
+ * init()
+ * {
+ * batch.fence_fd = -1;
+ * }
+ *
+ * // does *NOT* take ownership of fd
+ * server_sync(int fd)
+ * {
+ * if (sync_accumulate("foo", &batch.fence_fd, fd)) {
+ * ... error ...
+ * }
+ * }
+ */
+static inline int sync_accumulate(const char *name, int *fd1, int fd2)
+{
+ int ret;
+
+ assert(fd2 >= 0);
+
+ if (*fd1 < 0) {
+ *fd1 = dup(fd2);
+ return 0;
+ }
+
+ ret = sync_merge(name, *fd1, fd2);
+ if (ret < 0) {
+ /* leave *fd1 as it is */
+ return ret;
+ }
+
+ close(*fd1);
+ *fd1 = ret;
+
+ return 0;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/xf86drm.h b/include/libdrm/xf86drm.h
new file mode 100644
index 0000000..4badaae
--- /dev/null
+++ b/include/libdrm/xf86drm.h
@@ -0,0 +1,969 @@
+/**
+ * \file xf86drm.h
+ * OS-independent header for DRM user-level library interface.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _XF86DRM_H_
+#define _XF86DRM_H_
+
+#include <stdarg.h>
+#include <sys/types.h>
+#include <stdint.h>
+#include <drm.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef DRM_MAX_MINOR
+#define DRM_MAX_MINOR 16
+#endif
+
+#if defined(__linux__)
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#else /* One of the *BSDs */
+
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#endif
+
+ /* Defaults, if nothing set in xf86config */
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+/* Default /dev/dri directory permissions 0755 */
+#define DRM_DEV_DIRMODE \
+ (S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+
+#ifdef __OpenBSD__
+#define DRM_DIR_NAME "/dev"
+#define DRM_PRIMARY_MINOR_NAME "drm"
+#define DRM_CONTROL_MINOR_NAME "drmC"
+#define DRM_RENDER_MINOR_NAME "drmR"
+#else
+#define DRM_DIR_NAME "/dev/dri"
+#define DRM_PRIMARY_MINOR_NAME "card"
+#define DRM_CONTROL_MINOR_NAME "controlD"
+#define DRM_RENDER_MINOR_NAME "renderD"
+#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
+#endif
+
+#define DRM_DEV_NAME "%s/" DRM_PRIMARY_MINOR_NAME "%d"
+#define DRM_CONTROL_DEV_NAME "%s/" DRM_CONTROL_MINOR_NAME "%d"
+#define DRM_RENDER_DEV_NAME "%s/" DRM_RENDER_MINOR_NAME "%d"
+
+#define DRM_NODE_NAME_MAX \
+ (sizeof(DRM_DIR_NAME) + 1 /* slash */ \
+ + MAX3(sizeof(DRM_PRIMARY_MINOR_NAME), \
+ sizeof(DRM_CONTROL_MINOR_NAME), \
+ sizeof(DRM_RENDER_MINOR_NAME)) \
+ + sizeof("144") /* highest possible node number */ \
+ + 1) /* NULL-terminator */
+
+#define DRM_ERR_NO_DEVICE (-1001)
+#define DRM_ERR_NO_ACCESS (-1002)
+#define DRM_ERR_NOT_ROOT (-1003)
+#define DRM_ERR_INVALID (-1004)
+#define DRM_ERR_NO_FD (-1005)
+
+#define DRM_AGP_NO_HANDLE 0
+
+typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
+typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
+
+#if (__GNUC__ >= 3)
+#define DRM_PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define DRM_PRINTFLIKE(f, a)
+#endif
+
+typedef struct _drmServerInfo {
+ int (*debug_print)(const char *format, va_list ap) DRM_PRINTFLIKE(1,0);
+ int (*load_module)(const char *name);
+ void (*get_perms)(gid_t *, mode_t *);
+} drmServerInfo, *drmServerInfoPtr;
+
+typedef struct drmHashEntry {
+ int fd;
+ void (*f)(int, void *, void *);
+ void *tagTable;
+} drmHashEntry;
+
+extern int drmIoctl(int fd, unsigned long request, void *arg);
+extern void *drmGetHashTable(void);
+extern drmHashEntry *drmGetEntry(int fd);
+
+/**
+ * Driver version information.
+ *
+ * \sa drmGetVersion() and drmSetVersion().
+ */
+typedef struct _drmVersion {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ int name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ int date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ int desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+} drmVersion, *drmVersionPtr;
+
+typedef struct _drmStats {
+ unsigned long count; /**< Number of data */
+ struct {
+ unsigned long value; /**< Value from kernel */
+ const char *long_format; /**< Suggested format for long_name */
+ const char *long_name; /**< Long name for value */
+ const char *rate_format; /**< Suggested format for rate_name */
+ const char *rate_name; /**< Short name for value per second */
+ int isvalue; /**< True if value (vs. counter) */
+ const char *mult_names; /**< Multiplier names (e.g., "KGM") */
+ int mult; /**< Multiplier value (e.g., 1024) */
+ int verbose; /**< Suggest only in verbose output */
+ } data[15];
+} drmStatsT;
+
+
+ /* All of these enums *MUST* match with the
+ kernel implementation -- so do *NOT*
+ change them! (The drmlib implementation
+ will just copy the flags instead of
+ translating them.) */
+typedef enum {
+ DRM_FRAME_BUFFER = 0, /**< WC, no caching, no core dump */
+ DRM_REGISTERS = 1, /**< no caching, no core dump */
+ DRM_SHM = 2, /**< shared, cached */
+ DRM_AGP = 3, /**< AGP/GART */
+ DRM_SCATTER_GATHER = 4, /**< PCI scatter/gather */
+ DRM_CONSISTENT = 5 /**< PCI consistent */
+} drmMapType;
+
+typedef enum {
+ DRM_RESTRICTED = 0x0001, /**< Cannot be mapped to client-virtual */
+ DRM_READ_ONLY = 0x0002, /**< Read-only in client-virtual */
+ DRM_LOCKED = 0x0004, /**< Physical pages locked */
+ DRM_KERNEL = 0x0008, /**< Kernel requires access */
+ DRM_WRITE_COMBINING = 0x0010, /**< Use write-combining, if available */
+ DRM_CONTAINS_LOCK = 0x0020, /**< SHM page that contains lock */
+ DRM_REMOVABLE = 0x0040 /**< Removable mapping */
+} drmMapFlags;
+
+/**
+ * \warning These values *MUST* match drm.h
+ */
+typedef enum {
+ /** \name Flags for DMA buffer dispatch */
+ /*@{*/
+ DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note the buffer may not yet have been
+ * processed by the hardware -- getting a
+ * hardware lock with the hardware quiescent
+ * will ensure that the buffer has been
+ * processed.
+ */
+ DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+ /*@}*/
+
+ /** \name Flags for DMA buffer request */
+ /*@{*/
+ DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+ /*@}*/
+} drmDMAFlags;
+
+typedef enum {
+ DRM_PAGE_ALIGN = 0x01,
+ DRM_AGP_BUFFER = 0x02,
+ DRM_SG_BUFFER = 0x04,
+ DRM_FB_BUFFER = 0x08,
+ DRM_PCI_BUFFER_RO = 0x10
+} drmBufDescFlags;
+
+typedef enum {
+ DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+} drmLockFlags;
+
+typedef enum {
+ DRM_CONTEXT_PRESERVED = 0x01, /**< This context is preserved and
+ never swapped. */
+ DRM_CONTEXT_2DONLY = 0x02 /**< This context is for 2D rendering only. */
+} drm_context_tFlags, *drm_context_tFlagsPtr;
+
+typedef struct _drmBufDesc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+} drmBufDesc, *drmBufDescPtr;
+
+typedef struct _drmBufInfo {
+ int count; /**< Number of buffers described in list */
+ drmBufDescPtr list; /**< List of buffer descriptions */
+} drmBufInfo, *drmBufInfoPtr;
+
+typedef struct _drmBuf {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ drmAddress address; /**< Address */
+} drmBuf, *drmBufPtr;
+
+/**
+ * Buffer mapping information.
+ *
+ * Used by drmMapBufs() and drmUnmapBufs() to store information about the
+ * mapped buffers.
+ */
+typedef struct _drmBufMap {
+ int count; /**< Number of buffers mapped */
+ drmBufPtr list; /**< Buffers */
+} drmBufMap, *drmBufMapPtr;
+
+typedef struct _drmLock {
+ volatile unsigned int lock;
+ char padding[60];
+ /* This is big enough for most current (and future?) architectures:
+ DEC Alpha: 32 bytes
+ Intel Merced: ?
+ Intel P5/PPro/PII/PIII: 32 bytes
+ Intel StrongARM: 32 bytes
+ Intel i386/i486: 16 bytes
+ MIPS: 32 bytes (?)
+ Motorola 68k: 16 bytes
+ Motorola PowerPC: 32 bytes
+ Sun SPARC: 32 bytes
+ */
+} drmLock, *drmLockPtr;
+
+/**
+ * Indices here refer to the offset into
+ * list in drmBufInfo
+ */
+typedef struct _drmDMAReq {
+ drm_context_t context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_list; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send, in bytes */
+ drmDMAFlags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size of buffers requested */
+ int *request_list; /**< Buffer information */
+ int *request_sizes; /**< Minimum acceptable sizes */
+ int granted_count; /**< Number of buffers granted at this size */
+} drmDMAReq, *drmDMAReqPtr;
+
+typedef struct _drmRegion {
+ drm_handle_t handle;
+ unsigned int offset;
+ drmSize size;
+ drmAddress map;
+} drmRegion, *drmRegionPtr;
+
+typedef struct _drmTextureRegion {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding; /**< Explicitly pad this out */
+ unsigned int age;
+} drmTextureRegion, *drmTextureRegionPtr;
+
+
+typedef enum {
+ DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
+ DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
+} drmVBlankSeqType;
+#define DRM_VBLANK_HIGH_CRTC_SHIFT 1
+
+typedef struct _drmVBlankReq {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ unsigned long signal;
+} drmVBlankReq, *drmVBlankReqPtr;
+
+typedef struct _drmVBlankReply {
+ drmVBlankSeqType type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+} drmVBlankReply, *drmVBlankReplyPtr;
+
+typedef union _drmVBlank {
+ drmVBlankReq request;
+ drmVBlankReply reply;
+} drmVBlank, *drmVBlankPtr;
+
+typedef struct _drmSetVersion {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+} drmSetVersion, *drmSetVersionPtr;
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+
+#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+
+#if defined(__GNUC__) && (__GNUC__ >= 2)
+# if defined(__i386) || defined(__AMD64__) || defined(__x86_64__) || defined(__amd64__)
+ /* Reflect changes here to drmP.h */
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+#elif defined(__alpha__)
+
+#define DRM_CAS(lock, old, new, ret) \
+ do { \
+ int tmp, old32; \
+ __asm__ __volatile__( \
+ " addl $31, %5, %3\n" \
+ "1: ldl_l %0, %2\n" \
+ " cmpeq %0, %3, %1\n" \
+ " beq %1, 2f\n" \
+ " mov %4, %0\n" \
+ " stl_c %0, %2\n" \
+ " beq %0, 3f\n" \
+ " mb\n" \
+ "2: cmpeq %1, 0, %1\n" \
+ ".subsection 2\n" \
+ "3: br 1b\n" \
+ ".previous" \
+ : "=&r"(tmp), "=&r"(ret), \
+ "=m"(__drm_dummy_lock(lock)), \
+ "=&r"(old32) \
+ : "r"(new), "r"(old) \
+ : "memory"); \
+ } while (0)
+
+#elif defined(__sparc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+do { register unsigned int __old __asm("o0"); \
+ register unsigned int __new __asm("o1"); \
+ register volatile unsigned int *__lock __asm("o2"); \
+ __old = old; \
+ __new = new; \
+ __lock = (volatile unsigned int *)lock; \
+ __asm__ __volatile__( \
+ /*"cas [%2], %3, %0"*/ \
+ ".word 0xd3e29008\n\t" \
+ /*"membar #StoreStore | #StoreLoad"*/ \
+ ".word 0x8143e00a" \
+ : "=&r" (__new) \
+ : "0" (__new), \
+ "r" (__lock), \
+ "r" (__old) \
+ : "memory"); \
+ __ret = (__new != __old); \
+} while(0)
+
+#elif defined(__ia64__)
+
+#ifdef __INTEL_COMPILER
+/* this currently generates bad code (missing stop bits)... */
+#include <ia64intrin.h>
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned long __result, __old = (old) & 0xffffffff; \
+ __mf(); \
+ __result = _InterlockedCompareExchange_acq(&__drm_dummy_lock(lock), (new), __old);\
+ __ret = (__result) != (__old); \
+/* __ret = (__sync_val_compare_and_swap(&__drm_dummy_lock(lock), \
+ (old), (new)) \
+ != (old)); */\
+ } while (0)
+
+#else
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ unsigned int __result, __old = (old); \
+ __asm__ __volatile__( \
+ "mf\n" \
+ "mov ar.ccv=%2\n" \
+ ";;\n" \
+ "cmpxchg4.acq %0=%1,%3,ar.ccv" \
+ : "=r" (__result), "=m" (__drm_dummy_lock(lock)) \
+ : "r" ((unsigned long)__old), "r" (new) \
+ : "memory"); \
+ __ret = (__result) != (__old); \
+ } while (0)
+
+#endif
+
+#elif defined(__powerpc__)
+
+#define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ __asm__ __volatile__( \
+ "sync;" \
+ "0: lwarx %0,0,%1;" \
+ " xor. %0,%3,%0;" \
+ " bne 1f;" \
+ " stwcx. %2,0,%1;" \
+ " bne- 0b;" \
+ "1: " \
+ "sync;" \
+ : "=&r"(__ret) \
+ : "r"(lock), "r"(new), "r"(old) \
+ : "cr0", "memory"); \
+ } while (0)
+
+# elif defined (__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined (__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
+ || defined (__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) \
+ || defined (__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+ /* excluding ARMv4/ARMv5 and lower (lacking ldrex/strex support) */
+ #undef DRM_DEV_MODE
+ #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)
+
+ #define DRM_CAS(lock,old,new,__ret) \
+ do { \
+ __asm__ __volatile__ ( \
+ "1: ldrex %0, [%1]\n" \
+ " teq %0, %2\n" \
+ " ite eq\n" \
+ " strexeq %0, %3, [%1]\n" \
+ " movne %0, #1\n" \
+ : "=&r" (__ret) \
+ : "r" (lock), "r" (old), "r" (new) \
+ : "cc","memory"); \
+ } while (0)
+
+#endif /* architecture */
+#endif /* __GNUC__ >= 2 */
+
+#ifndef DRM_CAS
+#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
+#endif
+
+#if defined(__alpha__)
+#define DRM_CAS_RESULT(_result) long _result
+#elif defined(__powerpc__)
+#define DRM_CAS_RESULT(_result) int _result
+#else
+#define DRM_CAS_RESULT(_result) char _result
+#endif
+
+#define DRM_LIGHT_LOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ } while(0)
+
+ /* This one counts fast locks -- for
+ benchmarking only. */
+#define DRM_LIGHT_LOCK_COUNT(fd,lock,context,count) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
+ if (__ret) drmGetLock(fd,context,0); \
+ else ++count; \
+ } while(0)
+
+#define DRM_LOCK(fd,lock,context,flags) \
+ do { \
+ if (flags) drmGetLock(fd,context,flags); \
+ else DRM_LIGHT_LOCK(fd,lock,context); \
+ } while(0)
+
+#define DRM_UNLOCK(fd,lock,context) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ DRM_CAS(lock,DRM_LOCK_HELD|context,context,__ret); \
+ if (__ret) drmUnlock(fd,context); \
+ } while(0)
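+
+ /* Usage sketch: bracket a burst of hardware access with the
+    lock macros; a non-zero flags argument forces the drmGetLock()
+    ioctl path instead of the fast CAS path:
+
+        DRM_LOCK(fd, lock, context, 0);
+        ... touch hardware ...
+        DRM_UNLOCK(fd, lock, context);
+  */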
+
+ /* Simple spin locks */
+#define DRM_SPINLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ do { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) while ((spin)->lock); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_TAKE(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ int cur; \
+ do { \
+ cur = (*spin).lock; \
+ DRM_CAS(spin,cur,val,__ret); \
+ } while (__ret); \
+ } while(0)
+
+#define DRM_SPINLOCK_COUNT(spin,val,count,__ret) \
+ do { \
+ int __i; \
+ __ret = 1; \
+ for (__i = 0; __ret && __i < count; __i++) { \
+ DRM_CAS(spin,0,val,__ret); \
+ if (__ret) for (;__i < count && (spin)->lock; __i++); \
+ } \
+ } while(0)
+
+#define DRM_SPINUNLOCK(spin,val) \
+ do { \
+ DRM_CAS_RESULT(__ret); \
+ if ((*spin).lock == val) { /* else server stole lock */ \
+ do { \
+ DRM_CAS(spin,val,0,__ret); \
+ } while (__ret); \
+ } \
+ } while(0)
+
+
+
+/* General user-level programmer's API: unprivileged */
+extern int drmAvailable(void);
+extern int drmOpen(const char *name, const char *busid);
+
+#define DRM_NODE_PRIMARY 0
+#define DRM_NODE_CONTROL 1
+#define DRM_NODE_RENDER 2
+#define DRM_NODE_MAX 3
+
+extern int drmOpenWithType(const char *name, const char *busid,
+ int type);
+
+extern int drmOpenControl(int minor);
+extern int drmOpenRender(int minor);
+extern int drmClose(int fd);
+extern drmVersionPtr drmGetVersion(int fd);
+extern drmVersionPtr drmGetLibVersion(int fd);
+extern int drmGetCap(int fd, uint64_t capability, uint64_t *value);
+extern void drmFreeVersion(drmVersionPtr);
+extern int drmGetMagic(int fd, drm_magic_t * magic);
+extern char *drmGetBusid(int fd);
+extern int drmGetInterruptFromBusID(int fd, int busnum, int devnum,
+ int funcnum);
+extern int drmGetMap(int fd, int idx, drm_handle_t *offset,
+ drmSize *size, drmMapType *type,
+ drmMapFlags *flags, drm_handle_t *handle,
+ int *mtrr);
+extern int drmGetClient(int fd, int idx, int *auth, int *pid,
+ int *uid, unsigned long *magic,
+ unsigned long *iocs);
+extern int drmGetStats(int fd, drmStatsT *stats);
+extern int drmSetInterfaceVersion(int fd, drmSetVersion *version);
+extern int drmCommandNone(int fd, unsigned long drmCommandIndex);
+extern int drmCommandRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWrite(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+extern int drmCommandWriteRead(int fd, unsigned long drmCommandIndex,
+ void *data, unsigned long size);
+
+/* General user-level programmer's API: X server (root) only */
+extern void drmFreeBusid(const char *busid);
+extern int drmSetBusid(int fd, const char *busid);
+extern int drmAuthMagic(int fd, drm_magic_t magic);
+extern int drmAddMap(int fd,
+ drm_handle_t offset,
+ drmSize size,
+ drmMapType type,
+ drmMapFlags flags,
+ drm_handle_t * handle);
+extern int drmRmMap(int fd, drm_handle_t handle);
+extern int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t handle);
+
+extern int drmAddBufs(int fd, int count, int size,
+ drmBufDescFlags flags,
+ int agp_offset);
+extern int drmMarkBufs(int fd, double low, double high);
+extern int drmCreateContext(int fd, drm_context_t * handle);
+extern int drmSetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlags flags);
+extern int drmGetContextFlags(int fd, drm_context_t context,
+ drm_context_tFlagsPtr flags);
+extern int drmAddContextTag(int fd, drm_context_t context, void *tag);
+extern int drmDelContextTag(int fd, drm_context_t context);
+extern void *drmGetContextTag(int fd, drm_context_t context);
+extern drm_context_t * drmGetReservedContextList(int fd, int *count);
+extern void drmFreeReservedContextList(drm_context_t *);
+extern int drmSwitchToContext(int fd, drm_context_t context);
+extern int drmDestroyContext(int fd, drm_context_t handle);
+extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
+extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
+extern int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
+ drm_drawable_info_type_t type,
+ unsigned int num, void *data);
+extern int drmCtlInstHandler(int fd, int irq);
+extern int drmCtlUninstHandler(int fd);
+extern int drmSetClientCap(int fd, uint64_t capability,
+ uint64_t value);
+
+extern int drmCrtcGetSequence(int fd, uint32_t crtcId,
+ uint64_t *sequence, uint64_t *ns);
+extern int drmCrtcQueueSequence(int fd, uint32_t crtcId,
+ uint32_t flags, uint64_t sequence,
+ uint64_t *sequence_queued,
+ uint64_t user_data);
+/* General user-level programmer's API: authenticated client and/or X */
+extern int drmMap(int fd,
+ drm_handle_t handle,
+ drmSize size,
+ drmAddressPtr address);
+extern int drmUnmap(drmAddress address, drmSize size);
+extern drmBufInfoPtr drmGetBufInfo(int fd);
+extern drmBufMapPtr drmMapBufs(int fd);
+extern int drmUnmapBufs(drmBufMapPtr bufs);
+extern int drmDMA(int fd, drmDMAReqPtr request);
+extern int drmFreeBufs(int fd, int count, int *list);
+extern int drmGetLock(int fd,
+ drm_context_t context,
+ drmLockFlags flags);
+extern int drmUnlock(int fd, drm_context_t context);
+extern int drmFinish(int fd, int context, drmLockFlags flags);
+extern int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
+ drm_handle_t * handle);
+
+/* AGP/GART support: X server (root) only */
+extern int drmAgpAcquire(int fd);
+extern int drmAgpRelease(int fd);
+extern int drmAgpEnable(int fd, unsigned long mode);
+extern int drmAgpAlloc(int fd, unsigned long size,
+ unsigned long type, unsigned long *address,
+ drm_handle_t *handle);
+extern int drmAgpFree(int fd, drm_handle_t handle);
+extern int drmAgpBind(int fd, drm_handle_t handle,
+ unsigned long offset);
+extern int drmAgpUnbind(int fd, drm_handle_t handle);
+
+/* AGP/GART info: authenticated client and/or X */
+extern int drmAgpVersionMajor(int fd);
+extern int drmAgpVersionMinor(int fd);
+extern unsigned long drmAgpGetMode(int fd);
+extern unsigned long drmAgpBase(int fd); /* Physical location */
+extern unsigned long drmAgpSize(int fd); /* Bytes */
+extern unsigned long drmAgpMemoryUsed(int fd);
+extern unsigned long drmAgpMemoryAvail(int fd);
+extern unsigned int drmAgpVendorId(int fd);
+extern unsigned int drmAgpDeviceId(int fd);
+
+/* PCI scatter/gather support: X server (root) only */
+extern int drmScatterGatherAlloc(int fd, unsigned long size,
+ drm_handle_t *handle);
+extern int drmScatterGatherFree(int fd, drm_handle_t handle);
+
+extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
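+
+/* Example (sketch): block until the next vblank on the primary CRTC:
+ *
+ *     drmVBlank vbl;
+ *     vbl.request.type = DRM_VBLANK_RELATIVE;
+ *     vbl.request.sequence = 1;
+ *     drmWaitVBlank(fd, &vbl);
+ */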
+
+/* Support routines */
+extern void drmSetServerInfo(drmServerInfoPtr info);
+extern int drmError(int err, const char *label);
+extern void *drmMalloc(int size);
+extern void drmFree(void *pt);
+
+/* Hash table routines */
+extern void *drmHashCreate(void);
+extern int drmHashDestroy(void *t);
+extern int drmHashLookup(void *t, unsigned long key, void **value);
+extern int drmHashInsert(void *t, unsigned long key, void *value);
+extern int drmHashDelete(void *t, unsigned long key);
+extern int drmHashFirst(void *t, unsigned long *key, void **value);
+extern int drmHashNext(void *t, unsigned long *key, void **value);
+
+/* PRNG routines */
+extern void *drmRandomCreate(unsigned long seed);
+extern int drmRandomDestroy(void *state);
+extern unsigned long drmRandom(void *state);
+extern double drmRandomDouble(void *state);
+
+/* Skip list routines */
+
+extern void *drmSLCreate(void);
+extern int drmSLDestroy(void *l);
+extern int drmSLLookup(void *l, unsigned long key, void **value);
+extern int drmSLInsert(void *l, unsigned long key, void *value);
+extern int drmSLDelete(void *l, unsigned long key);
+extern int drmSLNext(void *l, unsigned long *key, void **value);
+extern int drmSLFirst(void *l, unsigned long *key, void **value);
+extern void drmSLDump(void *l);
+extern int drmSLLookupNeighbors(void *l, unsigned long key,
+ unsigned long *prev_key, void **prev_value,
+ unsigned long *next_key, void **next_value);
+
+extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
+extern int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type);
+extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2);
+
+extern int drmSetMaster(int fd);
+extern int drmDropMaster(int fd);
+extern int drmIsMaster(int fd);
+
+#define DRM_EVENT_CONTEXT_VERSION 4
+
+typedef struct _drmEventContext {
+
+ /* This struct is versioned so we can add more pointers if we
+ * add more events. */
+ int version;
+
+ void (*vblank_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+ void (*page_flip_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
+ void (*page_flip_handler2)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ unsigned int crtc_id,
+ void *user_data);
+
+ void (*sequence_handler)(int fd,
+ uint64_t sequence,
+ uint64_t ns,
+ uint64_t user_data);
+} drmEventContext, *drmEventContextPtr;
+
+extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
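+
+/* Example (sketch, with a hypothetical on_page_flip callback): dispatch
+ * events by polling the DRM fd; only handlers valid for the set version
+ * are invoked:
+ *
+ *     drmEventContext evctx = {
+ *         .version = DRM_EVENT_CONTEXT_VERSION,
+ *         .page_flip_handler = on_page_flip,
+ *     };
+ *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *     while (poll(&pfd, 1, -1) > 0)
+ *         drmHandleEvent(fd, &evctx);
+ */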
+
+extern char *drmGetDeviceNameFromFd(int fd);
+
+/* Improved version of drmGetDeviceNameFromFd which works for any type of
+ * device/node - card, control or renderD.
+ */
+extern char *drmGetDeviceNameFromFd2(int fd);
+extern int drmGetNodeTypeFromFd(int fd);
+
+/* Convert between GEM handles and DMA-BUF file descriptors.
+ *
+ * Warning: since GEM handles are not reference-counted and are unique per
+ * DRM file description, the caller is expected to perform its own reference
+ * counting. drmPrimeFDToHandle is guaranteed to return the same handle for
+ * different FDs if they reference the same underlying buffer object. This
+ * could even be a buffer object originally created on the same DRM FD.
+ *
+ * When sharing a DRM FD with an API such as EGL or GBM, the caller must not
+ * use drmPrimeHandleToFD nor drmPrimeFDToHandle. A single user-space
+ * reference-counting implementation is necessary to avoid double-closing GEM
+ * handles.
+ *
+ * Two processes can't share the same DRM FD and both use it to create or
+ * import GEM handles, even when using a single user-space reference-counting
+ * implementation like GBM, because GBM doesn't share its state between
+ * processes.
+ */
+extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
+extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
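+
+/* Example (sketch): export a GEM handle as a DMA-BUF fd, passing the
+ * DRM_CLOEXEC and DRM_RDWR flags from drm.h:
+ *
+ *     int prime_fd;
+ *     if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd))
+ *         ... error ...
+ */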
+
+extern int drmCloseBufferHandle(int fd, uint32_t handle);
+
+extern char *drmGetPrimaryDeviceNameFromFd(int fd);
+extern char *drmGetRenderDeviceNameFromFd(int fd);
+
+#define DRM_BUS_PCI 0
+#define DRM_BUS_USB 1
+#define DRM_BUS_PLATFORM 2
+#define DRM_BUS_HOST1X 3
+
+typedef struct _drmPciBusInfo {
+ uint16_t domain;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+} drmPciBusInfo, *drmPciBusInfoPtr;
+
+typedef struct _drmPciDeviceInfo {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subvendor_id;
+ uint16_t subdevice_id;
+ uint8_t revision_id;
+} drmPciDeviceInfo, *drmPciDeviceInfoPtr;
+
+typedef struct _drmUsbBusInfo {
+ uint8_t bus;
+ uint8_t dev;
+} drmUsbBusInfo, *drmUsbBusInfoPtr;
+
+typedef struct _drmUsbDeviceInfo {
+ uint16_t vendor;
+ uint16_t product;
+} drmUsbDeviceInfo, *drmUsbDeviceInfoPtr;
+
+#define DRM_PLATFORM_DEVICE_NAME_LEN 512
+
+typedef struct _drmPlatformBusInfo {
+ char fullname[DRM_PLATFORM_DEVICE_NAME_LEN];
+} drmPlatformBusInfo, *drmPlatformBusInfoPtr;
+
+typedef struct _drmPlatformDeviceInfo {
+ char **compatible; /* NULL terminated list of compatible strings */
+} drmPlatformDeviceInfo, *drmPlatformDeviceInfoPtr;
+
+#define DRM_HOST1X_DEVICE_NAME_LEN 512
+
+typedef struct _drmHost1xBusInfo {
+ char fullname[DRM_HOST1X_DEVICE_NAME_LEN];
+} drmHost1xBusInfo, *drmHost1xBusInfoPtr;
+
+typedef struct _drmHost1xDeviceInfo {
+ char **compatible; /* NULL terminated list of compatible strings */
+} drmHost1xDeviceInfo, *drmHost1xDeviceInfoPtr;
+
+typedef struct _drmDevice {
+ char **nodes; /* DRM_NODE_MAX sized array */
+ int available_nodes; /* DRM_NODE_* bitmask */
+ int bustype;
+ union {
+ drmPciBusInfoPtr pci;
+ drmUsbBusInfoPtr usb;
+ drmPlatformBusInfoPtr platform;
+ drmHost1xBusInfoPtr host1x;
+ } businfo;
+ union {
+ drmPciDeviceInfoPtr pci;
+ drmUsbDeviceInfoPtr usb;
+ drmPlatformDeviceInfoPtr platform;
+ drmHost1xDeviceInfoPtr host1x;
+ } deviceinfo;
+} drmDevice, *drmDevicePtr;
+
+extern int drmGetDevice(int fd, drmDevicePtr *device);
+extern void drmFreeDevice(drmDevicePtr *device);
+
+extern int drmGetDevices(drmDevicePtr devices[], int max_devices);
+extern void drmFreeDevices(drmDevicePtr devices[], int count);
+
+#define DRM_DEVICE_GET_PCI_REVISION (1 << 0)
+extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device);
+extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
+
+extern int drmGetDeviceFromDevId(dev_t dev_id, uint32_t flags, drmDevicePtr *device);
+
+extern int drmDevicesEqual(drmDevicePtr a, drmDevicePtr b);
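+
+/* Example (sketch): enumerate devices; a first call with a NULL array
+ * just returns the device count:
+ *
+ *     int n = drmGetDevices2(0, NULL, 0);
+ *     drmDevicePtr *devs = drmMalloc(n * sizeof(*devs));
+ *     n = drmGetDevices2(0, devs, n);
+ *     ... inspect devs[i]->bustype and devs[i]->nodes ...
+ *     drmFreeDevices(devs, n);
+ *     drmFree(devs);
+ */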
+
+extern int drmSyncobjCreate(int fd, uint32_t flags, uint32_t *handle);
+extern int drmSyncobjDestroy(int fd, uint32_t handle);
+extern int drmSyncobjHandleToFD(int fd, uint32_t handle, int *obj_fd);
+extern int drmSyncobjFDToHandle(int fd, int obj_fd, uint32_t *handle);
+
+extern int drmSyncobjImportSyncFile(int fd, uint32_t handle, int sync_file_fd);
+extern int drmSyncobjExportSyncFile(int fd, uint32_t handle, int *sync_file_fd);
+extern int drmSyncobjWait(int fd, uint32_t *handles, unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+extern int drmSyncobjReset(int fd, const uint32_t *handles, uint32_t handle_count);
+extern int drmSyncobjSignal(int fd, const uint32_t *handles, uint32_t handle_count);
+extern int drmSyncobjTimelineSignal(int fd, const uint32_t *handles,
+ uint64_t *points, uint32_t handle_count);
+extern int drmSyncobjTimelineWait(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+extern int drmSyncobjQuery(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t handle_count);
+extern int drmSyncobjQuery2(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t handle_count, uint32_t flags);
+extern int drmSyncobjTransfer(int fd,
+ uint32_t dst_handle, uint64_t dst_point,
+ uint32_t src_handle, uint64_t src_point,
+ uint32_t flags);
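+
+/* Example (sketch): export a syncobj's current fence as a sync_file and
+ * wait on it with libsync's sync_wait():
+ *
+ *     int sfd;
+ *     if (drmSyncobjExportSyncFile(fd, handle, &sfd) == 0) {
+ *         sync_wait(sfd, -1);
+ *         close(sfd);
+ *     }
+ */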
+
+extern char *
+drmGetFormatModifierVendor(uint64_t modifier);
+
+extern char *
+drmGetFormatModifierName(uint64_t modifier);
+
+extern char *
+drmGetFormatName(uint32_t format);
+
+#ifndef fourcc_mod_get_vendor
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/include/libdrm/xf86drmMode.h b/include/libdrm/xf86drmMode.h
new file mode 100644
index 0000000..d911c9a
--- /dev/null
+++ b/include/libdrm/xf86drmMode.h
@@ -0,0 +1,536 @@
+/*
+ * \file xf86drmMode.h
+ * Header for DRM modesetting interface.
+ *
+ * \author Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * \par Acknowledgements:
+ * Feb 2007, Dave Airlie <airlied@linux.ie>
+ */
+
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2007-2008 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _XF86DRMMODE_H_
+#define _XF86DRMMODE_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <drm.h>
+#include <drm_mode.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * This is the interface for modesetting for drm.
+ *
+ * It aims to provide a randr1.2-compatible interface for modesetting in the
+ * kernel; the interface is also meant to be used by libraries like EGL.
+ *
+ * More information can be found in randrproto.txt, available here:
+ * http://gitweb.freedesktop.org/?p=xorg/proto/randrproto.git
+ *
+ * There are some major differences to be noted. Unlike the randr1.2 proto,
+ * you need to create the memory object of the framebuffer yourself with the
+ * ttm buffer object interface. This object needs to be pinned.
+ */
+
+/*
+ * Feature defines
+ *
+ * Just because these are defined doesn't mean that the kernel
+ * can do that feature; it's just for new code vs. old libdrm.
+ */
+#define DRM_MODE_FEATURE_KMS 1
+#define DRM_MODE_FEATURE_DIRTYFB 1
+
+
+typedef struct _drmModeRes {
+
+ int count_fbs;
+ uint32_t *fbs;
+
+ int count_crtcs;
+ uint32_t *crtcs;
+
+ int count_connectors;
+ uint32_t *connectors;
+
+ int count_encoders;
+ uint32_t *encoders;
+
+ uint32_t min_width, max_width;
+ uint32_t min_height, max_height;
+} drmModeRes, *drmModeResPtr;
+
+typedef struct _drmModeModeInfo {
+ uint32_t clock;
+ uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
+ uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ uint32_t vrefresh;
+
+ uint32_t flags;
+ uint32_t type;
+ char name[DRM_DISPLAY_MODE_LEN];
+} drmModeModeInfo, *drmModeModeInfoPtr;
+
+typedef struct _drmModeFB {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pitch;
+ uint32_t bpp;
+ uint32_t depth;
+ /* driver specific handle */
+ uint32_t handle;
+} drmModeFB, *drmModeFBPtr;
+
+typedef struct _drmModeFB2 {
+ uint32_t fb_id;
+ uint32_t width, height;
+ uint32_t pixel_format; /* fourcc code from drm_fourcc.h */
+ uint64_t modifier; /* applies to all buffers */
+ uint32_t flags;
+
+ /* per-plane GEM handle; may be duplicate entries for multiple planes */
+ uint32_t handles[4];
+ uint32_t pitches[4]; /* bytes */
+ uint32_t offsets[4]; /* bytes */
+} drmModeFB2, *drmModeFB2Ptr;
+
+typedef struct drm_clip_rect drmModeClip, *drmModeClipPtr;
+
+typedef struct _drmModePropertyBlob {
+ uint32_t id;
+ uint32_t length;
+ void *data;
+} drmModePropertyBlobRes, *drmModePropertyBlobPtr;
+
+typedef struct _drmModeProperty {
+ uint32_t prop_id;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ int count_values;
+ uint64_t *values; /* store the blob lengths */
+ int count_enums;
+ struct drm_mode_property_enum *enums;
+ int count_blobs;
+ uint32_t *blob_ids; /* store the blob IDs */
+} drmModePropertyRes, *drmModePropertyPtr;
+
+static inline uint32_t drmModeGetPropertyType(const drmModePropertyRes *prop)
+{
+ return prop->flags & (DRM_MODE_PROP_LEGACY_TYPE | DRM_MODE_PROP_EXTENDED_TYPE);
+}
+
+static inline int drm_property_type_is(const drmModePropertyPtr property,
+ uint32_t type)
+{
+ return drmModeGetPropertyType(property) == type;
+}
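+
+/* Example (sketch): gate on the property type before touching the
+ * type-specific arrays:
+ *
+ *     if (drm_property_type_is(prop, DRM_MODE_PROP_ENUM))
+ *         ... read prop->enums[0 .. prop->count_enums) ...
+ */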
+
+typedef struct _drmModeCrtc {
+ uint32_t crtc_id;
+ uint32_t buffer_id; /**< FB id to connect to; 0 = disconnect */
+
+ uint32_t x, y; /**< Position on the framebuffer */
+ uint32_t width, height;
+ int mode_valid;
+ drmModeModeInfo mode;
+
+ int gamma_size; /**< Number of gamma stops */
+
+} drmModeCrtc, *drmModeCrtcPtr;
+
+typedef struct _drmModeEncoder {
+ uint32_t encoder_id;
+ uint32_t encoder_type;
+ uint32_t crtc_id;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+} drmModeEncoder, *drmModeEncoderPtr;
+
+/**
+ * Describes the connector status.
+ *
+ * DRM_MODE_CONNECTED means that the connector has a sink plugged in.
+ * DRM_MODE_DISCONNECTED means the contrary. DRM_MODE_UNKNOWNCONNECTION is used
+ * when it could be either.
+ *
+ * User-space should first try to enable DRM_MODE_CONNECTED connectors and
+ * ignore other connectors. If there are no DRM_MODE_CONNECTED connectors,
+ * user-space should then try to probe and enable DRM_MODE_UNKNOWNCONNECTION
+ * connectors.
+ */
+typedef enum {
+ DRM_MODE_CONNECTED = 1,
+ DRM_MODE_DISCONNECTED = 2,
+ DRM_MODE_UNKNOWNCONNECTION = 3
+} drmModeConnection;
+
+typedef enum {
+ DRM_MODE_SUBPIXEL_UNKNOWN = 1,
+ DRM_MODE_SUBPIXEL_HORIZONTAL_RGB = 2,
+ DRM_MODE_SUBPIXEL_HORIZONTAL_BGR = 3,
+ DRM_MODE_SUBPIXEL_VERTICAL_RGB = 4,
+ DRM_MODE_SUBPIXEL_VERTICAL_BGR = 5,
+ DRM_MODE_SUBPIXEL_NONE = 6
+} drmModeSubPixel;
+
+typedef struct _drmModeConnector {
+ uint32_t connector_id;
+ uint32_t encoder_id; /**< Encoder currently connected to */
+ uint32_t connector_type;
+ uint32_t connector_type_id;
+ drmModeConnection connection;
+ uint32_t mmWidth, mmHeight; /**< WxH in millimeters */
+ drmModeSubPixel subpixel;
+
+ int count_modes;
+ drmModeModeInfoPtr modes;
+
+ int count_props;
+ uint32_t *props; /**< List of property ids */
+ uint64_t *prop_values; /**< List of property values */
+
+ int count_encoders;
+ uint32_t *encoders; /**< List of encoder ids */
+} drmModeConnector, *drmModeConnectorPtr;
+
+#define DRM_PLANE_TYPE_OVERLAY 0
+#define DRM_PLANE_TYPE_PRIMARY 1
+#define DRM_PLANE_TYPE_CURSOR 2
+
+typedef struct _drmModeObjectProperties {
+ uint32_t count_props;
+ uint32_t *props;
+ uint64_t *prop_values;
+} drmModeObjectProperties, *drmModeObjectPropertiesPtr;
+
+typedef struct _drmModeFormatModifierIterator {
+ uint32_t fmt_idx, mod_idx;
+ uint32_t fmt;
+ uint64_t mod;
+} drmModeFormatModifierIterator;
+
+typedef struct _drmModePlane {
+ uint32_t count_formats;
+ uint32_t *formats;
+ uint32_t plane_id;
+
+ uint32_t crtc_id;
+ uint32_t fb_id;
+
+ uint32_t crtc_x, crtc_y;
+ uint32_t x, y;
+
+ uint32_t possible_crtcs;
+ uint32_t gamma_size;
+} drmModePlane, *drmModePlanePtr;
+
+typedef struct _drmModePlaneRes {
+ uint32_t count_planes;
+ uint32_t *planes;
+} drmModePlaneRes, *drmModePlaneResPtr;
+
+extern void drmModeFreeModeInfo( drmModeModeInfoPtr ptr );
+extern void drmModeFreeResources( drmModeResPtr ptr );
+extern void drmModeFreeFB( drmModeFBPtr ptr );
+extern void drmModeFreeFB2( drmModeFB2Ptr ptr );
+extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
+extern void drmModeFreeConnector( drmModeConnectorPtr ptr );
+extern void drmModeFreeEncoder( drmModeEncoderPtr ptr );
+extern void drmModeFreePlane( drmModePlanePtr ptr );
+extern void drmModeFreePlaneResources(drmModePlaneResPtr ptr);
+
+/**
+ * Check whether the DRM node supports Kernel Mode-Setting.
+ *
+ * Returns 1 if suitable for KMS, 0 otherwise.
+ */
+extern int drmIsKMS(int fd);
+
+/**
+ * Retrieves all of the resources associated with a card.
+ */
+extern drmModeResPtr drmModeGetResources(int fd);
+
+/*
+ * FrameBuffer manipulation.
+ */
+
+/**
+ * Retrieve information about framebuffer bufferId
+ */
+extern drmModeFBPtr drmModeGetFB(int fd, uint32_t bufferId);
+extern drmModeFB2Ptr drmModeGetFB2(int fd, uint32_t bufferId);
+
+/**
+ * Creates a new framebuffer with a buffer object as its scanout buffer.
+ */
+extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
+ uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
+ uint32_t *buf_id);
+/* ...with a specific pixel format */
+extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, const uint32_t bo_handles[4],
+ const uint32_t pitches[4], const uint32_t offsets[4],
+ uint32_t *buf_id, uint32_t flags);
+
+/* ...with format modifiers */
+int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, const uint32_t bo_handles[4],
+ const uint32_t pitches[4], const uint32_t offsets[4],
+ const uint64_t modifier[4], uint32_t *buf_id,
+ uint32_t flags);
+
+/**
+ * Destroys the given framebuffer.
+ */
+extern int drmModeRmFB(int fd, uint32_t bufferId);
+
+/**
+ * Mark a region of a framebuffer as dirty.
+ */
+extern int drmModeDirtyFB(int fd, uint32_t bufferId,
+ drmModeClipPtr clips, uint32_t num_clips);
+
+
+/*
+ * Crtc functions
+ */
+
+/**
+ * Retrieve information about the CRTC crtcId.
+ */
+extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
+
+/**
+ * Set the given mode on the CRTC crtcId, scanning out from bufferId at (x, y).
+ */
+int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
+ uint32_t x, uint32_t y, uint32_t *connectors, int count,
+ drmModeModeInfoPtr mode);
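+
+/* Example (sketch; conn is a probed drmModeConnectorPtr): a legacy modeset
+ * driving one connector with its first reported mode:
+ *
+ *     uint32_t conn_id = conn->connector_id;
+ *     drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0, &conn_id, 1,
+ *                    &conn->modes[0]);
+ */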
+
+/*
+ * Cursor functions
+ */
+
+/**
+ * Set the cursor on crtc
+ */
+int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height);
+
+int drmModeSetCursor2(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y);
+/**
+ * Move the cursor on crtc
+ */
+int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y);
+
+/**
+ * Encoder functions
+ */
+drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id);
+
+/*
+ * Connector manipulation
+ */
+
+/**
+ * Retrieve all information about the connector connectorId. This will do a
+ * forced probe on the connector to retrieve remote information such as EDIDs
+ * from the display device.
+ */
+extern drmModeConnectorPtr drmModeGetConnector(int fd,
+ uint32_t connectorId);
+
+/**
+ * Retrieve current information, i.e. the currently active mode and encoder,
+ * about the connector connectorId. This will not do any probing on the
+ * connector or remote device, and only reports what is currently known.
+ * For the complete set of modes and encoders associated with the connector
+ * use drmModeGetConnector() which will do a probe to determine any display
+ * link changes first.
+ */
+extern drmModeConnectorPtr drmModeGetConnectorCurrent(int fd,
+ uint32_t connector_id);
+
+/**
+ * Get a bitmask of CRTCs a connector is compatible with.
+ *
+ * The bits reference CRTC indices. If the n-th CRTC is compatible with the
+ * connector, the n-th bit will be set. The indices are taken from the array
+ * returned by drmModeGetResources(). The indices are different from the object
+ * IDs.
+ *
+ * Zero is returned on error.
+ */
+extern uint32_t drmModeConnectorGetPossibleCrtcs(int fd,
+ const drmModeConnector *connector);
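+
+/* Example (sketch; res from drmModeGetResources()): pick the first
+ * compatible CRTC id from the bitmask:
+ *
+ *     uint32_t mask = drmModeConnectorGetPossibleCrtcs(fd, conn);
+ *     for (int i = 0; i < res->count_crtcs; i++)
+ *         if (mask & (1 << i))
+ *             return res->crtcs[i];
+ */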
+
+/**
+ * Attaches the given mode to a connector.
+ */
+extern int drmModeAttachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
+
+/**
+ * Detaches the given mode from a connector. The mode must be unused.
+ */
+extern int drmModeDetachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
+
+extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
+extern void drmModeFreeProperty(drmModePropertyPtr ptr);
+
+extern drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id);
+extern bool drmModeFormatModifierBlobIterNext(const drmModePropertyBlobRes *blob,
+ drmModeFormatModifierIterator *iter);
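+
+/* Example (sketch): walk a format-modifier blob (e.g. a plane's IN_FORMATS
+ * property) with a zero-initialized iterator:
+ *
+ *     drmModeFormatModifierIterator iter = {0};
+ *     while (drmModeFormatModifierBlobIterNext(blob, &iter))
+ *         ... use iter.fmt and iter.mod ...
+ */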
+extern void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr);
+extern int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
+ uint64_t value);
+extern int drmCheckModesettingSupported(const char *busid);
+
+extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
+ uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data);
+extern int drmModePageFlipTarget(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data,
+ uint32_t target_vblank);
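+
+/* Example (sketch): queue a flip whose completion is delivered to the
+ * page_flip_handler registered with drmHandleEvent():
+ *
+ *     drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT,
+ *                     user_data);
+ */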
+
+extern drmModePlaneResPtr drmModeGetPlaneResources(int fd);
+extern drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id);
+extern int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
+ uint32_t fb_id, uint32_t flags,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+extern drmModeObjectPropertiesPtr drmModeObjectGetProperties(int fd,
+ uint32_t object_id,
+ uint32_t object_type);
+extern void drmModeFreeObjectProperties(drmModeObjectPropertiesPtr ptr);
+extern int drmModeObjectSetProperty(int fd, uint32_t object_id,
+ uint32_t object_type, uint32_t property_id,
+ uint64_t value);
+
+
+typedef struct _drmModeAtomicReq drmModeAtomicReq, *drmModeAtomicReqPtr;
+
+extern drmModeAtomicReqPtr drmModeAtomicAlloc(void);
+extern drmModeAtomicReqPtr drmModeAtomicDuplicate(const drmModeAtomicReqPtr req);
+extern int drmModeAtomicMerge(drmModeAtomicReqPtr base,
+ const drmModeAtomicReqPtr augment);
+extern void drmModeAtomicFree(drmModeAtomicReqPtr req);
+extern int drmModeAtomicGetCursor(const drmModeAtomicReqPtr req);
+extern void drmModeAtomicSetCursor(drmModeAtomicReqPtr req, int cursor);
+extern int drmModeAtomicAddProperty(drmModeAtomicReqPtr req,
+ uint32_t object_id,
+ uint32_t property_id,
+ uint64_t value);
+extern int drmModeAtomicCommit(int fd,
+ const drmModeAtomicReqPtr req,
+ uint32_t flags,
+ void *user_data);
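+
+/* Example (sketch; prop_fb_id looked up beforehand via
+ * drmModeObjectGetProperties()): flip a plane's framebuffer atomically:
+ *
+ *     drmModeAtomicReqPtr req = drmModeAtomicAlloc();
+ *     drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);
+ *     ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
+ *     drmModeAtomicFree(req);
+ */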
+
+extern int drmModeCreatePropertyBlob(int fd, const void *data, size_t size,
+ uint32_t *id);
+extern int drmModeDestroyPropertyBlob(int fd, uint32_t id);
+
+/*
+ * DRM mode lease APIs. These create and manage new drm_masters with
+ * access to a subset of the available DRM resources.
+ */
+
+extern int drmModeCreateLease(int fd, const uint32_t *objects, int num_objects, int flags, uint32_t *lessee_id);
+
+typedef struct drmModeLesseeList {
+ uint32_t count;
+ uint32_t lessees[];
+} drmModeLesseeListRes, *drmModeLesseeListPtr;
+
+extern drmModeLesseeListPtr drmModeListLessees(int fd);
+
+typedef struct drmModeObjectList {
+ uint32_t count;
+ uint32_t objects[];
+} drmModeObjectListRes, *drmModeObjectListPtr;
+
+extern drmModeObjectListPtr drmModeGetLease(int fd);
+
+extern int drmModeRevokeLease(int fd, uint32_t lessee_id);
+
+/**
+ * Get a string describing a connector type.
+ *
+ * NULL is returned if the connector type is unsupported. Callers should handle
+ * this gracefully, e.g. by falling back to "Unknown" or printing the raw value.
+ */
+extern const char *
+drmModeGetConnectorTypeName(uint32_t connector_type);
+
+/**
+ * Create a dumb buffer.
+ *
+ * Given a width, height and bits-per-pixel, the kernel will return a buffer
+ * handle, pitch and size. The flags must be zero.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+extern int
+drmModeCreateDumbBuffer(int fd, uint32_t width, uint32_t height, uint32_t bpp,
+ uint32_t flags, uint32_t *handle, uint32_t *pitch,
+ uint64_t *size);
+
+/**
+ * Destroy a dumb buffer.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+extern int
+drmModeDestroyDumbBuffer(int fd, uint32_t handle);
+
+/**
+ * Prepare a dumb buffer for mapping.
+ *
+ * The kernel returns an offset which can be used as an argument to mmap(2) on
+ * the DRM FD.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+extern int
+drmModeMapDumbBuffer(int fd, uint32_t handle, uint64_t *offset);
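+
+/* Example (sketch): allocate, map and draw into a dumb buffer:
+ *
+ *     uint32_t handle, pitch;
+ *     uint64_t size, offset;
+ *     drmModeCreateDumbBuffer(fd, w, h, 32, 0, &handle, &pitch, &size);
+ *     drmModeMapDumbBuffer(fd, handle, &offset);
+ *     void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                      fd, offset);
+ */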
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif