/drivers/include/asm/msr.h |
---|
File deleted |
/drivers/include/asm/cpumask.h |
---|
File deleted |
/drivers/include/asm/types.h |
---|
File deleted |
/drivers/include/asm/pgtable-2level.h |
---|
File deleted |
/drivers/include/asm/unaligned.h |
---|
File deleted |
/drivers/include/asm/spinlock_types.h |
---|
File deleted |
/drivers/include/asm/string.h |
---|
File deleted |
/drivers/include/asm/x86_init.h |
---|
File deleted |
/drivers/include/asm/irqflags.h |
---|
File deleted |
/drivers/include/asm/processor-flags.h |
---|
File deleted |
/drivers/include/asm/linkage.h |
---|
File deleted |
/drivers/include/asm/string_32.h |
---|
File deleted |
/drivers/include/asm/page.h |
---|
File deleted |
/drivers/include/asm/alternative.h |
---|
File deleted |
/drivers/include/asm/processor.h |
---|
File deleted |
/drivers/include/asm/nops.h |
---|
File deleted |
/drivers/include/asm/page_32.h |
---|
File deleted |
/drivers/include/asm/bitsperlong.h |
---|
File deleted |
/drivers/include/asm/cmpxchg.h |
---|
File deleted |
/drivers/include/asm/special_insns.h |
---|
File deleted |
/drivers/include/asm/atomic64_32.h |
---|
File deleted |
/drivers/include/asm/arch_hweight.h |
---|
File deleted |
/drivers/include/asm/div64.h |
---|
File deleted |
/drivers/include/asm/required-features.h |
---|
File deleted |
/drivers/include/asm/delay.h |
---|
File deleted |
/drivers/include/asm/percpu.h |
---|
File deleted |
/drivers/include/asm/bitops.h |
---|
File deleted |
/drivers/include/asm/scatterlist.h |
---|
File deleted |
/drivers/include/asm/pgtable_types.h |
---|
File deleted |
/drivers/include/asm/cpufeature.h |
---|
File deleted |
/drivers/include/asm/cache.h |
---|
File deleted |
/drivers/include/asm/cacheflush.h |
---|
File deleted |
/drivers/include/asm/pgtable_32_types.h |
---|
File deleted |
/drivers/include/asm/agp.h |
---|
File deleted |
/drivers/include/asm/rmwcc.h |
---|
File deleted |
/drivers/include/asm/atomic.h |
---|
File deleted |
/drivers/include/asm/pgtable.h |
---|
File deleted |
/drivers/include/asm/e820.h |
---|
File deleted |
/drivers/include/asm/desc_defs.h |
---|
File deleted |
/drivers/include/asm/asm.h |
---|
File deleted |
/drivers/include/asm/atomic_32.h |
---|
File deleted |
/drivers/include/asm/current.h |
---|
File deleted |
/drivers/include/asm/pgtable_32.h |
---|
File deleted |
/drivers/include/asm/disabled-features.h |
---|
File deleted |
/drivers/include/asm/posix_types.h |
---|
File deleted |
/drivers/include/asm/barrier.h |
---|
File deleted |
/drivers/include/asm/sigcontext.h |
---|
File deleted |
/drivers/include/asm/pgtable-2level_types.h |
---|
File deleted |
/drivers/include/asm/posix_types_32.h |
---|
File deleted |
/drivers/include/asm/swab.h |
---|
File deleted |
/drivers/include/asm/cmpxchg_32.h |
---|
File deleted |
/drivers/include/asm/byteorder.h |
---|
File deleted |
/drivers/include/asm/math_emu.h |
---|
File deleted |
/drivers/include/asm-generic/percpu.h |
---|
File deleted |
/drivers/include/asm-generic/ptrace.h |
---|
File deleted |
/drivers/include/asm-generic/delay.h |
---|
File deleted |
/drivers/include/asm-generic/pgtable-nopud.h |
---|
File deleted |
/drivers/include/asm-generic/types.h |
---|
File deleted |
/drivers/include/asm-generic/cacheflush.h |
---|
File deleted |
/drivers/include/asm-generic/memory_model.h |
---|
File deleted |
/drivers/include/asm-generic/bitsperlong.h |
---|
File deleted |
/drivers/include/asm-generic/int-ll64.h |
---|
File deleted |
/drivers/include/asm-generic/atomic-long.h |
---|
File deleted |
/drivers/include/asm-generic/pgtable-nopmd.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/fls64.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/hweight.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/find.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/le.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/ext2-non-atomic.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/ext2-atomic-setbit.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/minix.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/const_hweight.h |
---|
File deleted |
/drivers/include/asm-generic/bitops/sched.h |
---|
File deleted |
/drivers/include/asm-generic/getorder.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/int-ll64.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/ioctl.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/int-l64.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/errno-base.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/types.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/errno.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/posix_types.h |
---|
File deleted |
/drivers/include/uapi/asm-generic/bitsperlong.h |
---|
File deleted |
/drivers/include/uapi/linux/time.h |
---|
File deleted |
/drivers/include/uapi/linux/kernel.h |
---|
File deleted |
/drivers/include/uapi/linux/types.h |
---|
File deleted |
/drivers/include/uapi/linux/errno.h |
---|
File deleted |
/drivers/include/uapi/linux/string.h |
---|
File deleted |
/drivers/include/uapi/linux/const.h |
---|
File deleted |
/drivers/include/uapi/linux/stddef.h |
---|
File deleted |
/drivers/include/uapi/linux/personality.h |
---|
File deleted |
/drivers/include/uapi/linux/sysinfo.h |
---|
File deleted |
/drivers/include/uapi/linux/ioctl.h |
---|
File deleted |
/drivers/include/uapi/asm/ioctl.h |
---|
File deleted |
/drivers/include/uapi/asm/ptrace.h |
---|
File deleted |
/drivers/include/uapi/asm/e820.h |
---|
File deleted |
/drivers/include/uapi/asm/vm86.h |
---|
File deleted |
/drivers/include/uapi/asm/page_types.h |
---|
File deleted |
/drivers/include/uapi/asm/msr.h |
---|
File deleted |
/drivers/include/uapi/asm/segment.h |
---|
File deleted |
/drivers/include/uapi/asm/page_32_types.h |
---|
File deleted |
/drivers/include/uapi/asm/errno.h |
---|
File deleted |
/drivers/include/uapi/asm/posix_types.h |
---|
File deleted |
/drivers/include/uapi/asm/msr-index.h |
---|
File deleted |
/drivers/include/uapi/asm/sigcontext.h |
---|
File deleted |
/drivers/include/uapi/asm/processor-flags.h |
---|
File deleted |
/drivers/include/uapi/drm/drm.h |
---|
File deleted |
/drivers/include/uapi/drm/drm_fourcc.h |
---|
File deleted |
/drivers/include/uapi/drm/drm_mode.h |
---|
File deleted |
/drivers/include/uapi/drm/i915_drm.h |
---|
File deleted |
/drivers/include/uapi/drm/radeon_drm.h |
---|
File deleted |
/drivers/include/uapi/drm/drm_sarea.h |
---|
File deleted |
/drivers/include/uapi/drm/vmwgfx_drm.h |
---|
File deleted |
/drivers/include/drm/drm_atomic_helper.h |
---|
File deleted |
/drivers/include/drm/drm_agpsupport.h |
---|
File deleted |
/drivers/include/drm/drm_cache.h |
---|
File deleted |
/drivers/include/drm/drm_os_linux.h |
---|
File deleted |
/drivers/include/drm/drm_atomic.h |
---|
File deleted |
/drivers/include/drm/drm_legacy.h |
---|
File deleted |
/drivers/include/drm/drm_displayid.h |
---|
File deleted |
/drivers/include/drm/drm_gem.h |
---|
File deleted |
/drivers/include/drm/drmP.h |
---|
1,14 → 1,17 |
/** |
* \file drmP.h |
* Private header for Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Internal Header for the Direct Rendering Manager |
* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* Copyright (c) 2009-2010, Code Aurora Forum. |
* All rights reserved. |
* |
* Author: Rickard E. (Rik) Faith <faith@valinux.com> |
* Author: Gareth Hughes <gareth@valinux.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
32,69 → 35,91 |
#ifndef _DRM_P_H_ |
#define _DRM_P_H_ |
#define iowrite32(v, addr) writel((v), (addr)) |
#ifdef __KERNEL__ |
#ifdef __alpha__ |
/* add include of current.h so that "current" is defined |
* before static inline funcs in wait.h. Doing this so we |
* can build the DRM (part of PI DRI). 4/21/2000 S + B */ |
#include <asm/current.h> |
#endif /* __alpha__ */ |
#include <syscall.h> |
#include <linux/agp_backend.h> |
#include <linux/dma-mapping.h> |
#include <linux/file.h> |
#include <linux/fs.h> |
#include <linux/idr.h> |
#include <linux/jiffies.h> |
#include <linux/kernel.h> |
#include <linux/export.h> |
#include <linux/errno.h> |
#include <linux/kref.h> |
#include <linux/mm.h> |
#include <linux/spinlock.h> |
#include <linux/wait.h> |
#include <linux/bug.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
#include <linux/sched.h> |
#include <linux/firmware.h> |
#include <linux/err.h> |
#include <linux/fs.h> |
//#include <linux/init.h> |
#include <linux/file.h> |
#include <linux/pci.h> |
#include <linux/jiffies.h> |
#include <linux/dma-mapping.h> |
#include <linux/irqreturn.h> |
#include <linux/mutex.h> |
//#include <asm/io.h> |
#include <linux/slab.h> |
//#include <asm/uaccess.h> |
//#include <linux/workqueue.h> |
//#include <linux/poll.h> |
//#include <asm/pgalloc.h> |
#include <linux/types.h> |
#include <linux/vmalloc.h> |
#include <linux/workqueue.h> |
#include <uapi/drm/drm.h> |
#include <uapi/drm/drm_mode.h> |
#include <drm/drm_agpsupport.h> |
#include <drm/drm_crtc.h> |
#include <drm/drm_global.h> |
#include <drm/drm_hashtab.h> |
#include <drm/drm_mem_util.h> |
#include <drm/drm_mm.h> |
#include <drm/drm_os_linux.h> |
#include <drm/drm_sarea.h> |
#include <drm/drm.h> |
#include <drm/drm_vma_manager.h> |
#include <linux/idr.h> |
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) |
struct module; |
struct drm_file; |
struct drm_device; |
struct drm_agp_head; |
struct drm_local_map; |
struct drm_device_dma; |
struct drm_dma_handle; |
struct drm_gem_object; |
struct device_node; |
struct videomode; |
struct reservation_object; |
struct dma_buf_attachment; |
struct inode; |
struct poll_table_struct; |
struct drm_lock_data; |
struct sg_table; |
struct dma_buf; |
//#include <drm/drm_os_linux.h> |
#include <drm/drm_hashtab.h> |
#include <drm/drm_mm.h> |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
/* Flags and return codes for get_vblank_timestamp() driver function. */ |
#define DRM_CALLED_FROM_VBLIRQ 1 |
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) |
#define DRM_VBLANKTIME_INVBL (1 << 1) |
/* get_scanout_position() return flags */ |
#define DRM_SCANOUTPOS_VALID (1 << 0) |
#define DRM_SCANOUTPOS_INVBL (1 << 1) |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
/* |
* 4 debug categories are defined: |
* |
131,8 → 156,8 |
extern __printf(2, 3) |
void drm_ut_debug_printk(const char *function_name, |
const char *format, ...); |
extern __printf(1, 2) |
void drm_err(const char *format, ...); |
extern __printf(2, 3) |
int drm_err(const char *func, const char *format, ...); |
/***********************************************************************/ |
/** \name DRM template customization defaults */ |
150,7 → 175,25 |
#define DRIVER_PRIME 0x4000 |
#define DRIVER_RENDER 0x8000 |
#define DRIVER_BUS_PCI 0x1 |
#define DRIVER_BUS_PLATFORM 0x2 |
#define DRIVER_BUS_USB 0x3 |
#define DRIVER_BUS_HOST1X 0x4 |
/***********************************************************************/ |
/** \name Begin the DRM... */ |
/*@{*/ |
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
#define DRM_MAP_HASH_OFFSET 0x10000000 |
/*@}*/ |
/***********************************************************************/ |
/** \name Macros to make printk easier */ |
/*@{*/ |
161,7 → 204,7 |
* \param arg arguments |
*/ |
#define DRM_ERROR(fmt, ...) \ |
drm_err(fmt, ##__VA_ARGS__) |
drm_err(__func__, fmt, ##__VA_ARGS__) |
/** |
* Rate limited error output. Like DRM_ERROR() but won't flood the log. |
176,7 → 219,7 |
DEFAULT_RATELIMIT_BURST); \ |
\ |
if (__ratelimit(&_rs)) \ |
drm_err(fmt, ##__VA_ARGS__); \ |
drm_err(__func__, fmt, ##__VA_ARGS__); \ |
}) |
#define DRM_INFO(fmt, ...) \ |
222,9 → 265,28 |
/** \name Internal types and structures */ |
/*@{*/ |
#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) |
#define DRM_IF_VERSION(maj, min) (maj << 16 | min) |
/** |
* Test that the hardware lock is held by the caller, returning otherwise. |
* |
* \param dev DRM device. |
* \param filp file pointer of the caller. |
*/ |
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ |
do { \ |
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ |
_file_priv->master->lock.file_priv != _file_priv) { \ |
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ |
_file_priv->master->lock.file_priv, _file_priv); \ |
return -EINVAL; \ |
} \ |
} while (0) |
/** |
* Ioctl function type. |
* |
* \param inode device inode. |
264,6 → 326,83 |
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} |
#if 0 |
struct drm_magic_entry { |
struct list_head head; |
struct drm_hash_item hash_item; |
struct drm_file *priv; |
}; |
struct drm_vma_entry { |
struct list_head head; |
struct vm_area_struct *vma; |
pid_t pid; |
}; |
/** |
* DMA buffer. |
*/ |
struct drm_buf { |
int idx; /**< Index into master buflist */ |
int total; /**< Buffer size */ |
int order; /**< log-base-2(total) */ |
int used; /**< Amount of buffer in use (for DMA) */ |
unsigned long offset; /**< Byte offset (used internally) */ |
void *address; /**< Address of buffer */ |
unsigned long bus_address; /**< Bus address of buffer */ |
struct drm_buf *next; /**< Kernel-only: used for free list */ |
__volatile__ int waiting; /**< On kernel DMA queue */ |
__volatile__ int pending; /**< On hardware DMA queue */ |
struct drm_file *file_priv; /**< Private of holding file descr */ |
int context; /**< Kernel queue for this buffer */ |
int while_locked; /**< Dispatch this buffer while locked */ |
enum { |
DRM_LIST_NONE = 0, |
DRM_LIST_FREE = 1, |
DRM_LIST_WAIT = 2, |
DRM_LIST_PEND = 3, |
DRM_LIST_PRIO = 4, |
DRM_LIST_RECLAIM = 5 |
} list; /**< Which list we're on */ |
int dev_priv_size; /**< Size of buffer private storage */ |
void *dev_private; /**< Per-buffer private storage */ |
}; |
/** bufs is one longer than it has to be */ |
struct drm_waitlist { |
int count; /**< Number of possible buffers */ |
struct drm_buf **bufs; /**< List of pointers to buffers */ |
struct drm_buf **rp; /**< Read pointer */ |
struct drm_buf **wp; /**< Write pointer */ |
struct drm_buf **end; /**< End pointer */ |
spinlock_t read_lock; |
spinlock_t write_lock; |
}; |
#endif |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
size_t size; |
} drm_dma_handle_t; |
/** |
* Buffer entry. There is one of this for each buffer size order. |
*/ |
struct drm_buf_entry { |
int buf_size; /**< size */ |
int buf_count; /**< number of buffers */ |
struct drm_buf *buflist; /**< buffer list */ |
int seg_count; |
int page_order; |
struct drm_dma_handle **seglist; |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
}; |
/* Event queued up for userspace to read */ |
struct drm_pending_event { |
struct drm_event *event; |
318,6 → 457,7 |
int event_space; |
}; |
#if 0 |
/** |
* Lock data. |
*/ |
334,6 → 474,192 |
}; |
/** |
* DMA data. |
*/ |
struct drm_device_dma { |
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ |
int buf_count; /**< total number of buffers */ |
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ |
int seg_count; |
int page_count; /**< number of pages */ |
unsigned long *pagelist; /**< page list */ |
unsigned long byte_count; |
enum { |
_DRM_DMA_USE_AGP = 0x01, |
_DRM_DMA_USE_SG = 0x02, |
_DRM_DMA_USE_FB = 0x04, |
_DRM_DMA_USE_PCI_RO = 0x08 |
} flags; |
}; |
/** |
* AGP memory entry. Stored as a doubly linked list. |
*/ |
struct drm_agp_mem { |
unsigned long handle; /**< handle */ |
struct agp_memory *memory; |
unsigned long bound; /**< address */ |
int pages; |
struct list_head head; |
}; |
/** |
* AGP data. |
* |
* \sa drm_agp_init() and drm_device::agp. |
*/ |
struct drm_agp_head { |
struct agp_kern_info agp_info; /**< AGP device information */ |
struct list_head memory; |
unsigned long mode; /**< AGP mode */ |
struct agp_bridge_data *bridge; |
int enabled; /**< whether the AGP bus as been enabled */ |
int acquired; /**< whether the AGP device has been acquired */ |
unsigned long base; |
int agp_mtrr; |
int cant_use_aperture; |
unsigned long page_mask; |
}; |
/** |
* Scatter-gather memory. |
*/ |
struct drm_sg_mem { |
unsigned long handle; |
void *virtual; |
int pages; |
struct page **pagelist; |
dma_addr_t *busaddr; |
}; |
struct drm_sigdata { |
int context; |
struct drm_hw_lock *lock; |
}; |
#endif |
/** |
* Kernel side of a mapping |
*/ |
struct drm_local_map { |
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
}; |
typedef struct drm_local_map drm_local_map_t; |
/** |
* Mappings list |
*/ |
struct drm_map_list { |
struct list_head head; /**< list head */ |
struct drm_hash_item hash; |
struct drm_local_map *map; /**< mapping */ |
uint64_t user_token; |
struct drm_master *master; |
}; |
/* location of GART table */ |
#define DRM_ATI_GART_MAIN 1 |
#define DRM_ATI_GART_FB 2 |
#define DRM_ATI_GART_PCI 1 |
#define DRM_ATI_GART_PCIE 2 |
#define DRM_ATI_GART_IGP 3 |
struct drm_ati_pcigart_info { |
int gart_table_location; |
int gart_reg_if; |
void *addr; |
dma_addr_t bus_addr; |
dma_addr_t table_mask; |
struct drm_dma_handle *table_handle; |
struct drm_local_map mapping; |
int table_size; |
}; |
/** |
* This structure defines the drm_mm memory object, which will be used by the |
* DRM for its buffer objects. |
*/ |
struct drm_gem_object { |
/** Reference count of this object */ |
struct kref refcount; |
/** |
* handle_count - gem file_priv handle count of this object |
* |
* Each handle also holds a reference. Note that when the handle_count |
* drops to 0 any global names (e.g. the id in the flink namespace) will |
* be cleared. |
* |
* Protected by dev->object_name_lock. |
* */ |
unsigned handle_count; |
/** Related drm device */ |
struct drm_device *dev; |
/** File representing the shmem storage */ |
struct file *filp; |
/* Mapping info for this object */ |
struct drm_vma_offset_node vma_node; |
/** |
* Size of the object, in bytes. Immutable over the object's |
* lifetime. |
*/ |
size_t size; |
/** |
* Global name for this object, starts at 1. 0 means unnamed. |
* Access is covered by the object_name_lock in the related drm_device |
*/ |
int name; |
/** |
* Memory domains. These monitor which caches contain read/write data |
* related to the object. When transitioning from one set of domains |
* to another, the driver is called to ensure that caches are suitably |
* flushed and invalidated |
*/ |
uint32_t read_domains; |
uint32_t write_domain; |
/** |
* While validating an exec operation, the |
* new read/write domain values are computed here. |
* They will be transferred to the above values |
* at the point that any cache flushing occurs |
*/ |
uint32_t pending_read_domains; |
uint32_t pending_write_domain; |
/** |
* dma_buf - dma buf associated with this GEM object |
* |
* Pointer to the dma-buf associated with this gem object (either |
* through importing or exporting). We break the resulting reference |
* loop when the last gem handle for this object is released. |
* |
* Protected by obj->object_name_lock |
*/ |
struct dma_buf *dma_buf; |
}; |
#include <drm/drm_crtc.h> |
/** |
* struct drm_master - drm master structure |
* |
* @refcount: Refcount for this master object. |
340,6 → 666,7 |
* @minor: Link back to minor char device we are master for. Immutable. |
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. |
* @unique_len: Length of unique field. Protected by drm_global_mutex. |
* @unique_size: Amount allocated. Protected by drm_global_mutex. |
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex. |
* @magicfree: List of used authentication tokens. Protected by struct_mutex. |
* @lock: DRI lock information. |
350,9 → 677,10 |
struct drm_minor *minor; |
char *unique; |
int unique_len; |
struct drm_open_hash magiclist; |
struct list_head magicfree; |
struct drm_lock_data lock; |
int unique_size; |
// struct drm_open_hash magiclist; |
// struct list_head magicfree; |
// struct drm_lock_data lock; |
void *driver_priv; |
}; |
364,13 → 692,17 |
/* Flags and return codes for get_vblank_timestamp() driver function. */ |
#define DRM_CALLED_FROM_VBLIRQ 1 |
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) |
#define DRM_VBLANKTIME_IN_VBLANK (1 << 1) |
#define DRM_VBLANKTIME_INVBL (1 << 1) |
/* get_scanout_position() return flags */ |
#define DRM_SCANOUTPOS_VALID (1 << 0) |
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) |
#define DRM_SCANOUTPOS_INVBL (1 << 1) |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
struct drm_bus { |
int (*set_busid)(struct drm_device *dev, struct drm_master *master); |
}; |
/** |
* DRM driver structure. This structure represent the common code for |
* a family of cards. There will one drm_device for each card present |
562,28 → 894,7 |
}; |
struct drm_pending_vblank_event { |
struct drm_pending_event base; |
int pipe; |
struct drm_event_vblank event; |
}; |
struct drm_vblank_crtc { |
struct drm_device *dev; /* pointer to the drm_device */ |
wait_queue_head_t queue; /**< VBLANK wait queue */ |
struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */ |
struct timer_list disable_timer; /* delayed disable timer */ |
atomic_t count; /**< number of VBLANK interrupts */ |
atomic_t refcount; /* number of users of vblank interruptsper crtc */ |
u32 last; /* protected by dev->vbl_lock, used */ |
/* for wraparound handling */ |
u32 last_wait; /* Last vblank seqno waited per CRTC */ |
unsigned int inmodeset; /* Display driver is setting mode */ |
int crtc; /* crtc index */ |
bool enabled; /* so we don't call enable more than |
once per disable */ |
}; |
/** |
* DRM device structure. This structure represent a complete card that |
* may contain multiple heads. |
592,9 → 903,6 |
struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ |
int if_version; /**< Highest interface version set */ |
/** \name Lifetime Management */ |
/*@{ */ |
struct kref ref; /**< Object ref-count */ |
struct device *dev; /**< Device structure of bus-device */ |
struct drm_driver *driver; /**< DRM driver managing the device */ |
void *dev_private; /**< DRM driver private data */ |
656,16 → 964,6 |
*/ |
bool vblank_disable_allowed; |
/* |
* If true, vblank interrupt will be disabled immediately when the |
* refcount drops to zero, as opposed to via the vblank disable |
* timer. |
* This can be set to true it the hardware has a working vblank |
* counter and the driver uses drm_vblank_on() and drm_vblank_off() |
* appropriately. |
*/ |
bool vblank_disable_immediate; |
/* array of size num_crtcs */ |
struct drm_vblank_crtc *vblank; |
688,10 → 986,6 |
unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
struct { |
int context; |
struct drm_hw_lock *lock; |
} sigdata; |
struct drm_mode_config mode_config; /**< Current mode config */ |
738,9 → 1032,11 |
unsigned int cmd, unsigned long arg); |
extern long drm_compat_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern int drm_lastclose(struct drm_device *dev); |
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); |
/* Device support (drm_fops.h) */ |
extern struct mutex drm_global_mutex; |
extern int drm_open(struct inode *inode, struct file *filp); |
extern ssize_t drm_read(struct file *filp, char __user *buffer, |
size_t count, loff_t *offset); |
747,23 → 1043,101 |
extern int drm_release(struct inode *inode, struct file *filp); |
/* Mapping support (drm_vm.h) */ |
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma); |
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma); |
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
/* Misc. IOCTL support (drm_ioctl.c) */ |
int drm_noop(struct drm_device *dev, void *data, |
/* Memory management support (drm_memory.h) */ |
#include <drm/drm_memory.h> |
/* Misc. IOCTL support (drm_ioctl.h) */ |
extern int drm_irq_by_busid(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getmap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getclient(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getstats(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getcap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setclientcap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setversion(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Authentication IOCTL support (drm_auth.h) */ |
extern int drm_getmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_authmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic); |
/* Cache management (drm_cache.c) */ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
void drm_clflush_sg(struct sg_table *st); |
void drm_clflush_virt_range(void *addr, unsigned long length); |
/* Locking IOCTL support (drm_lock.h) */ |
extern int drm_lock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_unlock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); |
extern void drm_idlelock_take(struct drm_lock_data *lock_data); |
extern void drm_idlelock_release(struct drm_lock_data *lock_data); |
/* |
* These are exported to drivers so that they can implement fencing using |
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock. |
*/ |
extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); |
/* Buffer management support (drm_bufs.h) */ |
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addmap(struct drm_device *dev, resource_size_t offset, |
unsigned int size, enum drm_map_type type, |
enum drm_map_flags flags, struct drm_local_map **map_ptr); |
extern int drm_addmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_infobufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_markbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_freebufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mapbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dma_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* DMA support (drm_dma.h) */ |
extern int drm_legacy_dma_setup(struct drm_device *dev); |
extern void drm_legacy_dma_takedown(struct drm_device *dev); |
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); |
extern void drm_core_reclaim_buffers(struct drm_device *dev, |
struct drm_file *filp); |
/* IRQ support (drm_irq.h) */ |
extern int drm_control(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_irq_install(struct drm_device *dev, int irq); |
extern int drm_irq_uninstall(struct drm_device *dev); |
780,8 → 1154,6 |
extern void drm_vblank_put(struct drm_device *dev, int crtc); |
extern int drm_crtc_vblank_get(struct drm_crtc *crtc); |
extern void drm_crtc_vblank_put(struct drm_crtc *crtc); |
extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); |
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); |
extern void drm_vblank_off(struct drm_device *dev, int crtc); |
extern void drm_vblank_on(struct drm_device *dev, int crtc); |
extern void drm_crtc_vblank_off(struct drm_crtc *crtc); |
788,6 → 1160,8 |
extern void drm_crtc_vblank_on(struct drm_crtc *crtc); |
extern void drm_vblank_cleanup(struct drm_device *dev); |
extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, |
struct timeval *tvblank, unsigned flags); |
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, |
int crtc, int *max_error, |
struct timeval *vblank_time, |
797,23 → 1171,21 |
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, |
const struct drm_display_mode *mode); |
/** |
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC |
* @crtc: which CRTC's vblank waitqueue to retrieve |
* |
* This function returns a pointer to the vblank waitqueue for the CRTC. |
* Drivers can use this to implement vblank waits using wait_event() & co. |
*/ |
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) |
{ |
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; |
} |
/* Modesetting support */ |
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
extern int drm_modeset_ctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* AGP/GART support (drm_agpsupport.h) */ |
/* Stub support (drm_stub.h) */ |
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
struct drm_master *drm_master_create(struct drm_minor *minor); |
extern struct drm_master *drm_master_get(struct drm_master *master); |
extern void drm_master_put(struct drm_master **master); |
821,14 → 1193,34 |
extern void drm_unplug_dev(struct drm_device *dev); |
extern unsigned int drm_debug; |
#if 0 |
extern unsigned int drm_vblank_offdelay; |
extern unsigned int drm_timestamp_precision; |
extern unsigned int drm_timestamp_monotonic; |
extern struct class *drm_class; |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
#endif |
/* Debugfs support */ |
#if defined(CONFIG_DEBUG_FS) |
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root); |
extern int drm_debugfs_create_files(const struct drm_info_list *files, |
int count, struct dentry *root, |
struct drm_minor *minor); |
extern int drm_debugfs_remove_files(const struct drm_info_list *files, |
int count, struct drm_minor *minor); |
extern int drm_debugfs_cleanup(struct drm_minor *minor); |
extern int drm_debugfs_connector_add(struct drm_connector *connector); |
extern void drm_debugfs_connector_remove(struct drm_connector *connector); |
#else |
/* Stub for builds without CONFIG_DEBUG_FS: report success, do nothing. */
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
				   struct dentry *root)
{
	(void)minor;
	(void)minor_id;
	(void)root;
	return 0;
}
/*
 * Stub for builds without CONFIG_DEBUG_FS: pretend the debugfs files were
 * created successfully so callers need no #ifdefs.
 *
 * NOTE: a stray diff hunk marker ("841,44 → 1233,164") had been pasted
 * between the signature and the body, breaking compilation; removed.
 */
static inline int drm_debugfs_create_files(const struct drm_info_list *files,
					   int count, struct dentry *root,
					   struct drm_minor *minor)
{
	(void)files;	/* unused in the stub build */
	(void)count;
	(void)root;
	(void)minor;
	return 0;
}
/* Stub for builds without CONFIG_DEBUG_FS: nothing was created, so
 * cleanup trivially succeeds. */
static inline int drm_debugfs_cleanup(struct drm_minor *minor)
{
	(void)minor;
	return 0;
}
/* Stub for builds without CONFIG_DEBUG_FS: no per-connector debugfs
 * directory is created; report success. */
static inline int drm_debugfs_connector_add(struct drm_connector *connector)
{
	(void)connector;
	return 0;
}
/* Stub for builds without CONFIG_DEBUG_FS: nothing to tear down. */
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
	(void)connector;
}
#endif |
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
struct drm_gem_object *obj, int flags); |
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
struct drm_file *file_priv, uint32_t handle, uint32_t flags, |
int *prime_fd); |
extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, |
struct dma_buf *dma_buf); |
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
struct drm_file *file_priv, int prime_fd, uint32_t *handle); |
extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); |
/* Info file support */ |
extern int drm_name_info(struct seq_file *m, void *data); |
extern int drm_vm_info(struct seq_file *m, void *data); |
extern int drm_bufs_info(struct seq_file *m, void *data); |
extern int drm_vblank_info(struct seq_file *m, void *data); |
extern int drm_clients_info(struct seq_file *m, void* data); |
extern int drm_gem_name_info(struct seq_file *m, void *data); |
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, |
dma_addr_t *addrs, int max_pages); |
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); |
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); |
#if DRM_DEBUG_CODE |
extern int drm_vma_info(struct seq_file *m, void *data); |
#endif |
/* Scatter Gather Support (drm_scatter.h) */ |
extern void drm_legacy_sg_cleanup(struct drm_device *dev); |
extern int drm_sg_alloc(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_sg_free(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, |
/* ATI PCIGART support (ati_pcigart.h) */ |
extern int drm_ati_pcigart_init(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, |
size_t align); |
extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); |
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
#if 0 |
/* sysfs support (drm_sysfs.c) */ |
struct drm_sysfs_class; |
extern struct class *drm_sysfs_create(struct module *owner, char *name); |
extern void drm_sysfs_destroy(void); |
extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); |
extern void drm_sysfs_hotplug_event(struct drm_device *dev); |
extern int drm_sysfs_connector_add(struct drm_connector *connector); |
extern void drm_sysfs_connector_remove(struct drm_connector *connector); |
#endif |
/* Graphics Execution Manager library functions (drm_gem.c) */ |
int drm_gem_init(struct drm_device *dev); |
void drm_gem_destroy(struct drm_device *dev); |
void drm_gem_object_release(struct drm_gem_object *obj); |
void drm_gem_object_free(struct kref *kref); |
int drm_gem_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_private_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_vm_open(struct vm_area_struct *vma); |
void drm_gem_vm_close(struct vm_area_struct *vma); |
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, |
struct vm_area_struct *vma); |
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
#include <drm/drm_global.h> |
static inline void |
drm_gem_object_reference(struct drm_gem_object *obj) |
{ |
kref_get(&obj->refcount); |
} |
static inline void |
drm_gem_object_unreference(struct drm_gem_object *obj) |
{ |
if (obj != NULL) |
kref_put(&obj->refcount, drm_gem_object_free); |
} |
/* Drop a reference on @obj without the caller holding struct_mutex.
 *
 * Fast path: atomic_add_unless() decrements the refcount unless it is 1,
 * i.e. unless this would be the final reference; in that case it fails
 * and we fall into the slow path.  The free callback must run under
 * dev->struct_mutex, so the slow path takes the lock first and then
 * re-checks with atomic_dec_and_test() — another thread may have taken
 * a new reference between the two atomic operations. */
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
		struct drm_device *dev = obj->dev;

		mutex_lock(&dev->struct_mutex);
		/* likely(): losing the race to a concurrent ref is rare */
		if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
			drm_gem_object_free(&obj->refcount);
		mutex_unlock(&dev->struct_mutex);
	}
}
int drm_gem_handle_create_tail(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
int drm_gem_handle_create(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
int drm_gem_handle_delete(struct drm_file *filp, u32 handle); |
void drm_gem_free_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); |
struct page **drm_gem_get_pages(struct drm_gem_object *obj); |
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, |
bool dirty, bool accessed); |
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, |
struct drm_file *filp, |
u32 handle); |
int drm_gem_close_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_flink_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_open_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); |
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); |
extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); |
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, |
unsigned int token) |
{ |
struct drm_map_list *_entry; |
list_for_each_entry(_entry, &dev->maplist, head) |
if (_entry->user_token == token) |
return _entry->map; |
return NULL; |
} |
/* Intentional no-op: maps found via drm_core_findmap() are not
 * individually refcounted here, so there is nothing to release.
 * NOTE(review): apparently kept only for API symmetry — confirm. */
static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
#include <drm/drm_mem_util.h> |
struct drm_device *drm_dev_alloc(struct drm_driver *driver, |
struct device *parent); |
void drm_dev_ref(struct drm_device *dev); |
void drm_dev_unref(struct drm_device *dev); |
int drm_dev_register(struct drm_device *dev, unsigned long flags); |
void drm_dev_unregister(struct drm_device *dev); |
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); |
struct drm_minor *drm_minor_acquire(unsigned int minor_id); |
void drm_minor_release(struct drm_minor *minor); |
extern int drm_fill_in_dev(struct drm_device *dev, |
const struct pci_device_id *ent, |
struct drm_driver *driver); |
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); |
/*@}*/ |
/* PCI section */ |
908,7 → 1420,11 |
{ |
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); |
} |
#endif /* __KERNEL__ */ |
#define drm_sysfs_connector_add(connector) |
#define drm_sysfs_connector_remove(connector) |
#define LFB_SIZE 0x1000000 |
extern struct drm_device *main_device; |
extern struct drm_file *drm_file_handlers[256]; |
/drivers/include/drm/drm_crtc.h |
---|
31,8 → 31,8 |
#include <linux/idr.h> |
#include <linux/fb.h> |
#include <linux/hdmi.h> |
#include <uapi/drm/drm_mode.h> |
#include <uapi/drm/drm_fourcc.h> |
#include <drm/drm_mode.h> |
#include <drm/drm_fourcc.h> |
#include <drm/drm_modeset_lock.h> |
struct drm_device; |
42,7 → 42,6 |
struct drm_file; |
struct drm_clip_rect; |
struct device_node; |
struct fence; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
137,14 → 136,6 |
u8 cea_rev; |
}; |
/* data corresponds to displayid vend/prod/serial */ |
struct drm_tile_group { |
struct kref refcount; |
struct drm_device *dev; |
int id; |
u8 group_data[8]; |
}; |
struct drm_framebuffer_funcs { |
/* note: use drm_framebuffer_remove() */ |
void (*destroy)(struct drm_framebuffer *framebuffer); |
151,8 → 142,8 |
int (*create_handle)(struct drm_framebuffer *fb, |
struct drm_file *file_priv, |
unsigned int *handle); |
/* |
* Optional callback for the dirty fb ioctl. |
/** |
* Optinal callback for the dirty fb ioctl. |
* |
* Userspace can notify the driver via this callback |
* that a area of the framebuffer has changed and should |
205,7 → 196,7 |
struct drm_property_blob { |
struct drm_mode_object base; |
struct list_head head; |
size_t length; |
unsigned int length; |
unsigned char data[]; |
}; |
224,9 → 215,13 |
uint64_t *values; |
struct drm_device *dev; |
struct list_head enum_list; |
struct list_head enum_blob_list; |
}; |
void drm_modeset_lock_all(struct drm_device *dev); |
void drm_modeset_unlock_all(struct drm_device *dev); |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
struct drm_crtc; |
struct drm_connector; |
struct drm_encoder; |
233,56 → 228,13 |
struct drm_pending_vblank_event; |
struct drm_plane; |
struct drm_bridge; |
struct drm_atomic_state; |
/** |
* struct drm_crtc_state - mutable CRTC state |
* @enable: whether the CRTC should be enabled, gates all other state |
* @mode_changed: for use by helpers and drivers when computing state updates |
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes |
* @last_vblank_count: for helpers and drivers to capture the vblank of the |
* update to ensure framebuffer cleanup isn't done too early |
* @planes_changed: for use by helpers and drivers when computing state updates |
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings |
* @mode: current mode timings |
* @event: optional pointer to a DRM event to signal upon completion of the |
* state update |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_crtc_state { |
bool enable; |
/* computed state bits used by helpers and drivers */ |
bool planes_changed : 1; |
bool mode_changed : 1; |
/* attached planes bitmask: |
* WARNING: transitional helpers do not maintain plane_mask so |
* drivers not converted over to atomic helpers should not rely |
* on plane_mask being accurate! |
*/ |
u32 plane_mask; |
/* last_vblank_count: for vblank waits before cleanup */ |
u32 last_vblank_count; |
/* adjusted_mode: for use by helpers and drivers */ |
struct drm_display_mode adjusted_mode; |
struct drm_display_mode mode; |
struct drm_pending_vblank_event *event; |
struct drm_atomic_state *state; |
}; |
/** |
* struct drm_crtc_funcs - control CRTCs for a given device |
* drm_crtc_funcs - control CRTCs for a given device |
* @save: save CRTC state |
* @restore: restore CRTC state |
* @reset: reset CRTC after state has been invalidated (e.g. resume) |
* @cursor_set: setup the cursor |
* @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set |
* @cursor_move: move the cursor |
* @gamma_set: specify color ramp for CRTC |
* @destroy: deinit and free object |
289,9 → 241,6 |
* @set_property: called when a property is changed |
* @set_config: apply a new CRTC configuration |
* @page_flip: initiate a page flip |
* @atomic_duplicate_state: duplicate the atomic state for this CRTC |
* @atomic_destroy_state: destroy an atomic state for this CRTC |
* @atomic_set_property: set a property on an atomic state for this CRTC |
* |
* The drm_crtc_funcs structure is the central CRTC management structure |
* in the DRM. Each CRTC controls one or more connectors (note that the name |
342,28 → 291,16 |
int (*set_property)(struct drm_crtc *crtc, |
struct drm_property *property, uint64_t val); |
/* atomic update handling */ |
struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); |
void (*atomic_destroy_state)(struct drm_crtc *crtc, |
struct drm_crtc_state *state); |
int (*atomic_set_property)(struct drm_crtc *crtc, |
struct drm_crtc_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
/** |
* struct drm_crtc - central CRTC control structure |
* drm_crtc - central CRTC control structure |
* @dev: parent DRM device |
* @port: OF node used by drm_of_find_possible_crtcs() |
* @head: list management |
* @mutex: per-CRTC locking |
* @base: base KMS object for ID tracking etc. |
* @primary: primary plane for this CRTC |
* @cursor: cursor plane for this CRTC |
* @cursor_x: current x position of the cursor, used for universal cursor planes |
* @cursor_y: current y position of the cursor, used for universal cursor planes |
* @enabled: is this CRTC enabled? |
* @mode: current mode timings |
* @hwmode: mode timings as programmed to hw regs |
376,13 → 313,10 |
* @gamma_size: size of gamma ramp |
* @gamma_store: gamma ramp values |
* @framedur_ns: precise frame timing |
* @linedur_ns: precise line timing |
* @framedur_ns: precise line timing |
* @pixeldur_ns: precise pixel timing |
* @helper_private: mid-layer private data |
* @properties: property tracking for this CRTC |
* @state: current atomic state for this CRTC |
* @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for |
* legacy ioctls |
* |
* Each CRTC may have one or more connectors associated with it. This structure |
* allows the CRTC to be controlled. |
392,7 → 326,7 |
struct device_node *port; |
struct list_head head; |
/* |
/** |
* crtc mutex |
* |
* This provides a read lock for the overall crtc state (mode, dpms |
411,6 → 345,10 |
int cursor_x; |
int cursor_y; |
/* Temporary tracking of the old fb while a modeset is ongoing. Used |
* by drm_mode_set_config_internal to implement correct refcounting. */ |
struct drm_framebuffer *old_fb; |
bool enabled; |
/* Requested mode from modesetting. */ |
437,32 → 375,11 |
void *helper_private; |
struct drm_object_properties properties; |
struct drm_crtc_state *state; |
/* |
* For legacy crtc ioctls so that atomic drivers can get at the locking |
* acquire context. |
*/ |
struct drm_modeset_acquire_ctx *acquire_ctx; |
}; |
/** |
* struct drm_connector_state - mutable connector state |
* @crtc: CRTC to connect connector to, NULL if disabled |
* @best_encoder: can be used by helpers and drivers to select the encoder |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_connector_state { |
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */ |
struct drm_encoder *best_encoder; |
struct drm_atomic_state *state; |
}; |
/** |
* struct drm_connector_funcs - control connectors on a given device |
* drm_connector_funcs - control connectors on a given device |
* @dpms: set power state (see drm_crtc_funcs above) |
* @save: save connector state |
* @restore: restore connector state |
472,9 → 389,6 |
* @set_property: property for this connector may need an update |
* @destroy: make object go away |
* @force: notify the driver that the connector is forced on |
* @atomic_duplicate_state: duplicate the atomic state for this connector |
* @atomic_destroy_state: destroy an atomic state for this connector |
* @atomic_set_property: set a property on an atomic state for this connector |
* |
* Each CRTC may have one or more connectors attached to it. The functions |
* below allow the core DRM code to control connectors, enumerate available modes, |
499,19 → 413,10 |
uint64_t val); |
void (*destroy)(struct drm_connector *connector); |
void (*force)(struct drm_connector *connector); |
/* atomic update handling */ |
struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); |
void (*atomic_destroy_state)(struct drm_connector *connector, |
struct drm_connector_state *state); |
int (*atomic_set_property)(struct drm_connector *connector, |
struct drm_connector_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
/** |
* struct drm_encoder_funcs - encoder controls |
* drm_encoder_funcs - encoder controls |
* @reset: reset state (e.g. at init or resume time) |
* @destroy: cleanup and free associated data |
* |
525,7 → 430,7 |
#define DRM_CONNECTOR_MAX_ENCODER 3 |
/** |
* struct drm_encoder - central DRM encoder structure |
* drm_encoder - central DRM encoder structure |
* @dev: parent DRM device |
* @head: list management |
* @base: base KMS object |
569,7 → 474,7 |
#define MAX_ELD_BYTES 128 |
/** |
* struct drm_connector - central DRM connector control structure |
* drm_connector - central DRM connector control structure |
* @dev: parent DRM device |
* @kdev: kernel device for sysfs attributes |
* @attr: sysfs attributes |
580,7 → 485,6 |
* @connector_type_id: index into connector type enum |
* @interlace_allowed: can this connector handle interlaced modes? |
* @doublescan_allowed: can this connector handle doublescan? |
* @stereo_allowed: can this connector handle stereo modes? |
* @modes: modes available on this connector (from fill_modes() + user) |
* @status: one of the drm_connector_status enums (connected, not, or unknown) |
* @probed_modes: list of modes derived directly from the display |
588,13 → 492,10 |
* @funcs: connector control functions |
* @edid_blob_ptr: DRM property containing EDID if present |
* @properties: property tracking for this connector |
* @path_blob_ptr: DRM blob property data for the DP MST path property |
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling |
* @dpms: current dpms state |
* @helper_private: mid-layer private data |
* @cmdline_mode: mode line parsed from the kernel cmdline for this connector |
* @force: a %DRM_FORCE_<foo> state for forced mode sets |
* @override_edid: has the EDID been overwritten through debugfs for testing? |
* @encoder_ids: valid encoders for this connector |
* @encoder: encoder driving this connector, if any |
* @eld: EDID-like data, if present |
604,18 → 505,6 |
* @video_latency: video latency info from ELD, if found |
* @audio_latency: audio latency info from ELD, if found |
* @null_edid_counter: track sinks that give us all zeros for the EDID |
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum |
* @debugfs_entry: debugfs directory for this connector |
* @state: current atomic state for this connector |
* @has_tile: is this connector connected to a tiled monitor |
* @tile_group: tile group for the connected monitor |
* @tile_is_single_monitor: whether the tile is one monitor housing |
* @num_h_tile: number of horizontal tiles in the tile group |
* @num_v_tile: number of vertical tiles in the tile group |
* @tile_h_loc: horizontal location of this tile |
* @tile_v_loc: vertical location of this tile |
* @tile_h_size: horizontal size of this tile. |
* @tile_v_size: vertical size of this tile. |
* |
* Each connector may be connected to one or more CRTCs, or may be clonable by |
* another connector if they can share a CRTC. Each connector also has a specific |
651,8 → 540,6 |
struct drm_property_blob *path_blob_ptr; |
struct drm_property_blob *tile_blob_ptr; |
uint8_t polled; /* DRM_CONNECTOR_POLL_* */ |
/* requested DPMS state */ |
661,7 → 548,6 |
void *helper_private; |
/* forced on connector */ |
struct drm_cmdline_mode cmdline_mode; |
enum drm_connector_force force; |
bool override_edid; |
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; |
678,63 → 564,14 |
unsigned bad_edid_counter; |
struct dentry *debugfs_entry; |
struct drm_connector_state *state; |
/* DisplayID bits */ |
bool has_tile; |
struct drm_tile_group *tile_group; |
bool tile_is_single_monitor; |
uint8_t num_h_tile, num_v_tile; |
uint8_t tile_h_loc, tile_v_loc; |
uint16_t tile_h_size, tile_v_size; |
}; |
/** |
* struct drm_plane_state - mutable plane state |
* @crtc: currently bound CRTC, NULL if disabled |
* @fb: currently bound framebuffer |
* @fence: optional fence to wait for before scanning out @fb |
* @crtc_x: left position of visible portion of plane on crtc |
* @crtc_y: upper position of visible portion of plane on crtc |
* @crtc_w: width of visible portion of plane on crtc |
* @crtc_h: height of visible portion of plane on crtc |
* @src_x: left position of visible portion of plane within |
* plane (in 16.16) |
* @src_y: upper position of visible portion of plane within |
* plane (in 16.16) |
* @src_w: width of visible portion of plane (in 16.16) |
* @src_h: height of visible portion of plane (in 16.16) |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_plane_state { |
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ |
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ |
struct fence *fence; |
/* Signed dest location allows it to be partially off screen */ |
int32_t crtc_x, crtc_y; |
uint32_t crtc_w, crtc_h; |
/* Source values are 16.16 fixed point */ |
uint32_t src_x, src_y; |
uint32_t src_h, src_w; |
struct drm_atomic_state *state; |
}; |
/** |
* struct drm_plane_funcs - driver plane control functions |
* drm_plane_funcs - driver plane control functions |
* @update_plane: update the plane configuration |
* @disable_plane: shut down the plane |
* @destroy: clean up plane resources |
* @reset: reset plane after state has been invalidated (e.g. resume) |
* @set_property: called when a property is changed |
* @atomic_duplicate_state: duplicate the atomic state for this plane |
* @atomic_destroy_state: destroy an atomic state for this plane |
* @atomic_set_property: set a property on an atomic state for this plane |
*/ |
struct drm_plane_funcs { |
int (*update_plane)(struct drm_plane *plane, |
745,19 → 582,9 |
uint32_t src_w, uint32_t src_h); |
int (*disable_plane)(struct drm_plane *plane); |
void (*destroy)(struct drm_plane *plane); |
void (*reset)(struct drm_plane *plane); |
int (*set_property)(struct drm_plane *plane, |
struct drm_property *property, uint64_t val); |
/* atomic update handling */ |
struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); |
void (*atomic_destroy_state)(struct drm_plane *plane, |
struct drm_plane_state *state); |
int (*atomic_set_property)(struct drm_plane *plane, |
struct drm_plane_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
enum drm_plane_type { |
767,7 → 594,7 |
}; |
/** |
* struct drm_plane - central DRM plane control structure |
* drm_plane - central DRM plane control structure |
* @dev: DRM device this plane belongs to |
* @head: for list management |
* @base: base mode object |
776,19 → 603,14 |
* @format_count: number of formats supported |
* @crtc: currently bound CRTC |
* @fb: currently bound fb |
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by |
* drm_mode_set_config_internal() to implement correct refcounting. |
* @funcs: helper functions |
* @properties: property tracking for this plane |
* @type: type of plane (overlay, primary, cursor) |
* @state: current atomic state for this plane |
*/ |
struct drm_plane { |
struct drm_device *dev; |
struct list_head head; |
struct drm_modeset_lock mutex; |
struct drm_mode_object base; |
uint32_t possible_crtcs; |
798,21 → 620,15 |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
struct drm_framebuffer *old_fb; |
const struct drm_plane_funcs *funcs; |
struct drm_object_properties properties; |
enum drm_plane_type type; |
void *helper_private; |
struct drm_plane_state *state; |
}; |
/** |
* struct drm_bridge_funcs - drm_bridge control functions |
* drm_bridge_funcs - drm_bridge control functions |
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge |
* @disable: Called right before encoder prepare, disables the bridge |
* @post_disable: Called right after encoder prepare, for lockstepped disable |
836,7 → 652,7 |
}; |
/** |
* struct drm_bridge - central DRM bridge control structure |
* drm_bridge - central DRM bridge control structure |
* @dev: DRM device this bridge belongs to |
* @head: list management |
* @base: base mode object |
854,35 → 670,8 |
}; |
/** |
* struct struct drm_atomic_state - the global state object for atomic updates |
* @dev: parent DRM device |
* @flags: state flags like async update |
* @planes: pointer to array of plane pointers |
* @plane_states: pointer to array of plane states pointers |
* @crtcs: pointer to array of CRTC pointers |
* @crtc_states: pointer to array of CRTC states pointers |
* @num_connector: size of the @connectors and @connector_states arrays |
* @connectors: pointer to array of connector pointers |
* @connector_states: pointer to array of connector states pointers |
* @acquire_ctx: acquire context for this atomic modeset state update |
*/ |
struct drm_atomic_state { |
struct drm_device *dev; |
uint32_t flags; |
struct drm_plane **planes; |
struct drm_plane_state **plane_states; |
struct drm_crtc **crtcs; |
struct drm_crtc_state **crtc_states; |
int num_connector; |
struct drm_connector **connectors; |
struct drm_connector_state **connector_states; |
struct drm_modeset_acquire_ctx *acquire_ctx; |
}; |
/** |
* struct drm_mode_set - new values for a CRTC config change |
* drm_mode_set - new values for a CRTC config change |
* @head: list management |
* @fb: framebuffer to use for new config |
* @crtc: CRTC whose configuration we're about to change |
* @mode: mode timings to use |
912,9 → 701,6 |
* struct drm_mode_config_funcs - basic driver provided mode setting functions |
* @fb_create: create a new framebuffer object |
* @output_poll_changed: function to handle output configuration changes |
* @atomic_check: check whether a give atomic state update is possible |
* @atomic_commit: commit an atomic state update previously verified with |
* atomic_check() |
* |
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that |
* involve drivers. |
924,20 → 710,13 |
struct drm_file *file_priv, |
struct drm_mode_fb_cmd2 *mode_cmd); |
void (*output_poll_changed)(struct drm_device *dev); |
int (*atomic_check)(struct drm_device *dev, |
struct drm_atomic_state *a); |
int (*atomic_commit)(struct drm_device *dev, |
struct drm_atomic_state *a, |
bool async); |
}; |
/** |
* struct drm_mode_group - group of mode setting resources for potential sub-grouping |
* drm_mode_group - group of mode setting resources for potential sub-grouping |
* @num_crtcs: CRTC count |
* @num_encoders: encoder count |
* @num_connectors: connector count |
* @num_bridges: bridge count |
* @id_list: list of KMS object IDs in this group |
* |
* Currently this simply tracks the global mode setting state. But in the |
957,14 → 736,10 |
}; |
/** |
* struct drm_mode_config - Mode configuration control structure |
* drm_mode_config - Mode configuration control structure |
* @mutex: mutex protecting KMS related lists and structures |
* @connection_mutex: ww mutex protecting connector state and routing |
* @acquire_ctx: global implicit acquire context used by atomic drivers for |
* legacy ioctls |
* @idr_mutex: mutex for KMS ID allocation and management |
* @crtc_idr: main KMS ID tracking object |
* @fb_lock: mutex to protect fb state and lists |
* @num_fb: number of fbs available |
* @fb_list: list of framebuffers available |
* @num_connector: number of connectors on this device |
973,12 → 748,8 |
* @bridge_list: list of bridge objects |
* @num_encoder: number of encoders on this device |
* @encoder_list: list of encoder objects |
* @num_overlay_plane: number of overlay planes on this device |
* @num_total_plane: number of universal (i.e. with primary/curso) planes on this device |
* @plane_list: list of plane objects |
* @num_crtc: number of CRTCs on this device |
* @crtc_list: list of CRTC objects |
* @property_list: list of property objects |
* @min_width: minimum pixel width on this device |
* @min_height: minimum pixel height on this device |
* @max_width: maximum pixel width on this device |
985,16 → 756,9 |
* @max_height: maximum pixel height on this device |
* @funcs: core driver provided mode setting functions |
* @fb_base: base address of the framebuffer |
* @poll_enabled: track polling support for this device |
* @poll_running: track polling status for this device |
* @poll_enabled: track polling status for this device |
* @output_poll_work: delayed work for polling in process context |
* @property_blob_list: list of all the blob property objects |
* @*_property: core property tracking |
* @preferred_depth: preferred RBG pixel depth, used by fb helpers |
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering |
* @async_page_flip: does this device support async flips on the primary plane? |
* @cursor_width: hint to userspace for max cursor width |
* @cursor_height: hint to userspace for max cursor height |
* |
* Core mode resource tracking structure. All CRTC, encoders, and connectors |
* enumerated by the driver are added here, as are global properties. Some |
1006,10 → 770,16 |
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ |
struct mutex idr_mutex; /* for IDR management */ |
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ |
struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ |
/* this is limited to one for now */ |
struct mutex fb_lock; /* proctects global and per-file fb lists */ |
/** |
* fb_lock - mutex to protect fb state |
* |
* Besides the global fb list his also protects the fbs list in the |
* file_priv |
*/ |
struct mutex fb_lock; |
int num_fb; |
struct list_head fb_list; |
1050,9 → 820,7 |
struct drm_property *edid_property; |
struct drm_property *dpms_property; |
struct drm_property *path_property; |
struct drm_property *tile_property; |
struct drm_property *plane_type_property; |
struct drm_property *rotation_property; |
/* DVI-I properties */ |
struct drm_property *dvi_i_subconnector_property; |
1078,10 → 846,6 |
struct drm_property *aspect_ratio_property; |
struct drm_property *dirty_info_property; |
/* properties for virtual machine layout */ |
struct drm_property *suggested_x_property; |
struct drm_property *suggested_y_property; |
/* dumb ioctl parameters */ |
uint32_t preferred_depth, prefer_shadow; |
1092,19 → 856,6 |
uint32_t cursor_width, cursor_height; |
}; |
/** |
* drm_for_each_plane_mask - iterate over planes specified by bitmask |
* @plane: the loop cursor |
* @dev: the DRM device |
* @plane_mask: bitmask of plane indices |
* |
* Iterate over all planes specified by bitmask. |
*/ |
#define drm_for_each_plane_mask(plane, dev, plane_mask) \ |
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ |
if ((plane_mask) & (1 << drm_plane_index(plane))) |
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
#define obj_to_connector(x) container_of(x, struct drm_connector, base) |
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) |
1124,6 → 875,9 |
struct drm_plane *primary, |
struct drm_plane *cursor, |
const struct drm_crtc_funcs *funcs); |
extern int drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
extern unsigned int drm_crtc_index(struct drm_crtc *crtc); |
1149,7 → 903,6 |
void drm_connector_unregister(struct drm_connector *connector); |
extern void drm_connector_cleanup(struct drm_connector *connector); |
extern unsigned int drm_connector_index(struct drm_connector *connector); |
/* helper to unplug all connectors from sysfs for device */ |
extern void drm_connector_unplug_all(struct drm_device *dev); |
1189,7 → 942,6 |
const uint32_t *formats, uint32_t format_count, |
bool is_primary); |
extern void drm_plane_cleanup(struct drm_plane *plane); |
extern unsigned int drm_plane_index(struct drm_plane *plane); |
extern void drm_plane_force_disable(struct drm_plane *plane); |
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, |
int x, int y, |
1219,10 → 971,9 |
extern void drm_mode_config_cleanup(struct drm_device *dev); |
extern int drm_mode_connector_set_path_property(struct drm_connector *connector, |
const char *path); |
int drm_mode_connector_set_tile_property(struct drm_connector *connector); |
char *path); |
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
const struct edid *edid); |
struct edid *edid); |
static inline bool drm_property_type_is(struct drm_property *property, |
uint32_t type) |
1283,13 → 1034,11 |
extern int drm_property_add_enum(struct drm_property *property, int index, |
uint64_t value, const char *name); |
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, |
unsigned int num_modes, |
char *modes[]); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, |
char *formats[]); |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); |
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); |
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
1357,13 → 1106,6 |
extern int drm_edid_header_is_valid(const u8 *raw_edid); |
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); |
extern bool drm_edid_is_valid(struct edid *edid); |
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, |
char topology[8]); |
extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, |
char topology[8]); |
extern void drm_mode_put_tile_group(struct drm_device *dev, |
struct drm_tile_group *tg); |
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, |
int hsize, int vsize, int fresh, |
bool rb); |
1378,9 → 1120,6 |
struct drm_file *file_priv); |
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, |
struct drm_property *property, |
uint64_t value); |
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, |
int *bpp); |
/drivers/include/drm/drm_crtc_helper.h |
---|
68,7 → 68,6 |
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, int x, int y, |
struct drm_framebuffer *old_fb); |
void (*mode_set_nofb)(struct drm_crtc *crtc); |
/* Move the crtc on the current fb to the given position *optional* */ |
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, |
82,12 → 81,6 |
/* disable crtc when not in use - more explicit than dpms off */ |
void (*disable)(struct drm_crtc *crtc); |
/* atomic helpers */ |
int (*atomic_check)(struct drm_crtc *crtc, |
struct drm_crtc_state *state); |
void (*atomic_begin)(struct drm_crtc *crtc); |
void (*atomic_flush)(struct drm_crtc *crtc); |
}; |
/** |
168,12 → 161,6 |
extern void drm_helper_resume_force_mode(struct drm_device *dev); |
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, int x, int y, |
struct drm_framebuffer *old_fb); |
int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, |
struct drm_framebuffer *old_fb); |
/* drm_probe_helper.c */ |
extern int drm_helper_probe_single_connector_modes(struct drm_connector |
*connector, uint32_t maxX, |
/drivers/include/drm/drm_dp_helper.h |
---|
190,16 → 190,16 |
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 |
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 |
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) |
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 |
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) |
304,7 → 304,6 |
#define DP_TEST_SINK_MISC 0x246 |
# define DP_TEST_CRC_SUPPORTED (1 << 5) |
# define DP_TEST_COUNT_MASK 0x7 |
#define DP_TEST_RESPONSE 0x260 |
# define DP_TEST_ACK (1 << 0) |
405,6 → 404,26 |
#define MODE_I2C_READ 4 |
#define MODE_I2C_STOP 8 |
/** |
* struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp |
* aux algorithm |
* @running: set by the algo indicating whether an i2c is ongoing or whether |
* the i2c bus is quiescent |
* @address: i2c target address for the currently ongoing transfer |
* @aux_ch: driver callback to transfer a single byte of the i2c payload |
*/ |
struct i2c_algo_dp_aux_data { |
bool running; |
u16 address; |
int (*aux_ch) (struct i2c_adapter *adapter, |
int mode, uint8_t write_byte, |
uint8_t *read_byte); |
}; |
int |
i2c_dp_aux_add_bus(struct i2c_adapter *adapter); |
#define DP_LINK_STATUS_SIZE 6 |
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count); |
531,7 → 550,6 |
struct mutex hw_mutex; |
ssize_t (*transfer)(struct drm_dp_aux *aux, |
struct drm_dp_aux_msg *msg); |
unsigned i2c_nack_count, i2c_defer_count; |
}; |
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, |
/drivers/include/drm/drm_dp_mst_helper.h |
---|
28,7 → 28,7 |
struct drm_dp_mst_branch; |
/** |
* struct drm_dp_vcpi - Virtual Channel Payload Identifier |
* struct drm_dp_vcpi - Virtual Channel Payload Identifer |
* @vcpi: Virtual channel ID. |
* @pbn: Payload Bandwidth Number for this channel |
* @aligned_pbn: PBN aligned with slot size |
92,8 → 92,6 |
struct drm_dp_vcpi vcpi; |
struct drm_connector *connector; |
struct drm_dp_mst_topology_mgr *mgr; |
struct edid *cached_edid; /* for DP logical ports - make tiling work */ |
}; |
/** |
373,7 → 371,7 |
struct drm_dp_mst_topology_mgr; |
struct drm_dp_mst_topology_cbs { |
/* create a connector for a port */ |
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); |
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path); |
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_connector *connector); |
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); |
390,7 → 388,6 |
int payload_state; |
int start_slot; |
int num_slots; |
int vcpi; |
}; |
/** |
457,7 → 454,6 |
struct drm_dp_vcpi **proposed_vcpis; |
struct drm_dp_payload *payloads; |
unsigned long payload_mask; |
unsigned long vcpi_mask; |
wait_queue_head_t tx_waitq; |
struct work_struct work; |
476,7 → 472,7 |
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); |
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
/drivers/include/drm/drm_edid.h |
---|
27,7 → 27,6 |
#define EDID_LENGTH 128 |
#define DDC_ADDR 0x50 |
#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ |
#define CEA_EXT 0x02 |
#define VTB_EXT 0x10 |
34,7 → 33,6 |
#define DI_EXT 0x40 |
#define LS_EXT 0x50 |
#define MI_EXT 0x60 |
#define DISPLAYID_EXT 0x70 |
struct est_timings { |
u8 t1; |
209,61 → 207,6 |
#define DRM_EDID_HDMI_DC_30 (1 << 4) |
#define DRM_EDID_HDMI_DC_Y444 (1 << 3) |
/* ELD Header Block */ |
#define DRM_ELD_HEADER_BLOCK_SIZE 4 |
#define DRM_ELD_VER 0 |
# define DRM_ELD_VER_SHIFT 3 |
# define DRM_ELD_VER_MASK (0x1f << 3) |
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ |
/* ELD Baseline Block for ELD_Ver == 2 */ |
#define DRM_ELD_CEA_EDID_VER_MNL 4 |
# define DRM_ELD_CEA_EDID_VER_SHIFT 5 |
# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) |
# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) |
# define DRM_ELD_MNL_SHIFT 0 |
# define DRM_ELD_MNL_MASK (0x1f << 0) |
#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 |
# define DRM_ELD_SAD_COUNT_SHIFT 4 |
# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) |
# define DRM_ELD_CONN_TYPE_SHIFT 2 |
# define DRM_ELD_CONN_TYPE_MASK (3 << 2) |
# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) |
# define DRM_ELD_CONN_TYPE_DP (1 << 2) |
# define DRM_ELD_SUPPORTS_AI (1 << 1) |
# define DRM_ELD_SUPPORTS_HDCP (1 << 0) |
#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ |
# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ |
#define DRM_ELD_SPEAKER 7 |
# define DRM_ELD_SPEAKER_RLRC (1 << 6) |
# define DRM_ELD_SPEAKER_FLRC (1 << 5) |
# define DRM_ELD_SPEAKER_RC (1 << 4) |
# define DRM_ELD_SPEAKER_RLR (1 << 3) |
# define DRM_ELD_SPEAKER_FC (1 << 2) |
# define DRM_ELD_SPEAKER_LFE (1 << 1) |
# define DRM_ELD_SPEAKER_FLR (1 << 0) |
#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ |
# define DRM_ELD_PORT_ID_LEN 8 |
#define DRM_ELD_MANUFACTURER_NAME0 16 |
#define DRM_ELD_MANUFACTURER_NAME1 17 |
#define DRM_ELD_PRODUCT_CODE0 18 |
#define DRM_ELD_PRODUCT_CODE1 19 |
#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ |
#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) |
struct edid { |
u8 header[8]; |
/* Vendor & product info */ |
336,56 → 279,4 |
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, |
const struct drm_display_mode *mode); |
/** |
* drm_eld_mnl - Get ELD monitor name length in bytes. |
* @eld: pointer to an eld memory structure with mnl set |
*/ |
static inline int drm_eld_mnl(const uint8_t *eld) |
{ |
return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; |
} |
/** |
* drm_eld_sad_count - Get ELD SAD count. |
* @eld: pointer to an eld memory structure with sad_count set |
*/ |
static inline int drm_eld_sad_count(const uint8_t *eld) |
{ |
return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> |
DRM_ELD_SAD_COUNT_SHIFT; |
} |
/** |
* drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes |
* @eld: pointer to an eld memory structure with mnl and sad_count set |
* |
* This is a helper for determining the payload size of the baseline block, in |
* bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. |
*/ |
static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) |
{ |
return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + |
drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; |
} |
/** |
* drm_eld_size - Get ELD size in bytes |
* @eld: pointer to a complete eld memory structure |
* |
* The returned value does not include the vendor block. It's vendor specific, |
* and comprises of the remaining bytes in the ELD memory buffer after |
* drm_eld_size() bytes of header and baseline block. |
* |
* The returned value is guaranteed to be a multiple of 4. |
*/ |
static inline int drm_eld_size(const uint8_t *eld) |
{ |
return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; |
} |
struct edid *drm_do_get_edid(struct drm_connector *connector, |
int (*get_edid_block)(void *data, u8 *buf, unsigned int block, |
size_t len), |
void *data); |
#endif /* __DRM_EDID_H__ */ |
/drivers/include/drm/drm_fb_helper.h |
---|
34,14 → 34,9 |
#include <linux/kgdb.h> |
struct drm_fb_offset { |
int x, y; |
}; |
struct drm_fb_helper_crtc { |
struct drm_mode_set mode_set; |
struct drm_display_mode *desired_mode; |
int x, y; |
}; |
struct drm_fb_helper_surface_size { |
77,12 → 72,12 |
bool (*initial_config)(struct drm_fb_helper *fb_helper, |
struct drm_fb_helper_crtc **crtcs, |
struct drm_display_mode **modes, |
struct drm_fb_offset *offsets, |
bool *enabled, int width, int height); |
}; |
struct drm_fb_helper_connector { |
struct drm_connector *connector; |
struct drm_cmdline_mode cmdline_mode; |
}; |
struct drm_fb_helper { |
/drivers/include/drm/drm_modeset_lock.h |
---|
29,11 → 29,10 |
struct drm_modeset_lock; |
/** |
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) |
* drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) |
* @ww_ctx: base acquire ctx |
* @contended: used internally for -EDEADLK handling |
* @locked: list of held locks |
* @trylock_only: trylock mode used in atomic contexts/panic notifiers |
* |
* Each thread competing for a set of locks must use one acquire |
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and |
54,15 → 53,10 |
* list of held locks (drm_modeset_lock) |
*/ |
struct list_head locked; |
/** |
* Trylock mode, use only for panic handlers! |
*/ |
bool trylock_only; |
}; |
/** |
* struct drm_modeset_lock - used for locking modeset resources. |
* drm_modeset_lock - used for locking modeset resources. |
* @mutex: resource locking |
* @head: used to hold it's place on state->locked list when |
* part of an atomic update |
126,19 → 120,6 |
void drm_modeset_unlock(struct drm_modeset_lock *lock); |
struct drm_device; |
struct drm_crtc; |
struct drm_plane; |
void drm_modeset_lock_all(struct drm_device *dev); |
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); |
void drm_modeset_unlock_all(struct drm_device *dev); |
void drm_modeset_lock_crtc(struct drm_crtc *crtc, |
struct drm_plane *plane); |
void drm_modeset_unlock_crtc(struct drm_crtc *crtc); |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
struct drm_modeset_acquire_ctx * |
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); |
int drm_modeset_lock_all_crtcs(struct drm_device *dev, |
struct drm_modeset_acquire_ctx *ctx); |
/drivers/include/drm/drm_pciids.h |
---|
17,7 → 17,6 |
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
74,6 → 73,7 |
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
164,11 → 164,8 |
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
178,8 → 175,6 |
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
302,7 → 297,6 |
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
/drivers/include/drm/drm_plane_helper.h |
---|
25,7 → 25,6 |
#define DRM_PLANE_HELPER_H |
#include <drm/drm_rect.h> |
#include <drm/drm_crtc.h> |
/* |
* Drivers that don't allow primary plane scaling may pass this macro in place |
43,37 → 42,6 |
* planes. |
*/ |
extern int drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
/** |
* drm_plane_helper_funcs - helper operations for CRTCs |
* @prepare_fb: prepare a framebuffer for use by the plane |
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane |
* @atomic_check: check that a given atomic state is valid and can be applied |
* @atomic_update: apply an atomic state to the plane |
* |
* The helper operations are called by the mid-layer CRTC helper. |
*/ |
struct drm_plane_helper_funcs { |
int (*prepare_fb)(struct drm_plane *plane, |
struct drm_framebuffer *fb); |
void (*cleanup_fb)(struct drm_plane *plane, |
struct drm_framebuffer *fb); |
int (*atomic_check)(struct drm_plane *plane, |
struct drm_plane_state *state); |
void (*atomic_update)(struct drm_plane *plane, |
struct drm_plane_state *old_state); |
}; |
static inline void drm_plane_helper_add(struct drm_plane *plane, |
const struct drm_plane_helper_funcs *funcs) |
{ |
plane->helper_private = (void *)funcs; |
} |
extern int drm_plane_helper_check_update(struct drm_plane *plane, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
100,16 → 68,4 |
int num_formats); |
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
int crtc_x, int crtc_y, |
unsigned int crtc_w, unsigned int crtc_h, |
uint32_t src_x, uint32_t src_y, |
uint32_t src_w, uint32_t src_h); |
int drm_plane_helper_disable(struct drm_plane *plane); |
/* For use by drm_crtc_helper.c */ |
int drm_plane_helper_commit(struct drm_plane *plane, |
struct drm_plane_state *plane_state, |
struct drm_framebuffer *old_fb); |
#endif |
/drivers/include/drm/i915_pciids.h |
---|
259,21 → 259,4 |
INTEL_VGA_DEVICE(0x22b2, info), \ |
INTEL_VGA_DEVICE(0x22b3, info) |
#define INTEL_SKL_IDS(info) \ |
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ |
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ |
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ |
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ |
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ |
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ |
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ |
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ |
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ |
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ |
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ |
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ |
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ |
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ |
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ |
#endif /* _I915_PCIIDS_H */ |
/drivers/include/drm/ttm/ttm_bo_api.h |
---|
45,24 → 45,12 |
struct drm_mm_node; |
/** |
* struct ttm_place |
* struct ttm_placement |
* |
* @fpfn: first valid page frame number to put the object |
* @lpfn: last valid page frame number to put the object |
* @flags: memory domain and caching flags for the object |
* |
* Structure indicating a possible place to put an object. |
*/ |
struct ttm_place { |
unsigned fpfn; |
unsigned lpfn; |
uint32_t flags; |
}; |
/** |
* struct ttm_placement |
* |
* @num_placement: number of preferred placements |
* @placement: preferred placements |
* @num_busy_placement: number of preferred placements when need to evict buffer |
71,10 → 59,12 |
* Structure indicating the placement you request for an object. |
*/ |
struct ttm_placement { |
unsigned fpfn; |
unsigned lpfn; |
unsigned num_placement; |
const struct ttm_place *placement; |
const uint32_t *placement; |
unsigned num_busy_placement; |
const struct ttm_place *busy_placement; |
const uint32_t *busy_placement; |
}; |
/** |
173,6 → 163,7 |
* @lru: List head for the lru list. |
* @ddestroy: List head for the delayed destroy list. |
* @swap: List head for swap LRU list. |
* @sync_obj: Pointer to a synchronization object. |
* @priv_flags: Flags describing buffer object internal state. |
* @vma_node: Address space manager node. |
* @offset: The current GPU offset, which can have different meanings |
236,9 → 227,13 |
struct list_head io_reserve_lru; |
/** |
* Members protected by a bo reservation. |
* Members protected by struct buffer_object_device::fence_lock |
* In addition, setting sync_obj to anything else |
* than NULL requires bo::reserved to be held. This allows for |
* checking NULL while reserved but not holding the mentioned lock. |
*/ |
void *sync_obj; |
unsigned long priv_flags; |
struct drm_vma_offset_node vma_node; |
460,7 → 455,6 |
* point to the shmem object backing a GEM object if TTM is used to back a |
* GEM user interface. |
* @acc_size: Accounted size for this object. |
* @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. |
* @destroy: Destroy function. Use NULL for kfree(). |
* |
* This function initializes a pre-allocated struct ttm_buffer_object. |
488,7 → 482,6 |
struct file *persistent_swap_storage, |
size_t acc_size, |
struct sg_table *sg, |
struct reservation_object *resv, |
void (*destroy) (struct ttm_buffer_object *)); |
/** |
526,6 → 519,20 |
struct ttm_buffer_object **p_bo); |
/** |
* ttm_bo_check_placement |
* |
* @bo: the buffer object. |
* @placement: placements |
* |
* Performs minimal validity checking on an intended change of |
* placement flags. |
* Returns |
* -EINVAL: Intended change is invalid or not allowed. |
*/ |
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
struct ttm_placement *placement); |
/** |
* ttm_bo_init_mm |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
208,7 → 208,8 |
*/ |
int (*get_node)(struct ttm_mem_type_manager *man, |
struct ttm_buffer_object *bo, |
const struct ttm_place *place, |
struct ttm_placement *placement, |
uint32_t flags, |
struct ttm_mem_reg *mem); |
/** |
312,6 → 313,11 |
* @move: Callback for a driver to hook in accelerated functions to |
* move a buffer. |
* If set to NULL, a potentially slow memcpy() move is used. |
* @sync_obj_signaled: See ttm_fence_api.h |
* @sync_obj_wait: See ttm_fence_api.h |
* @sync_obj_flush: See ttm_fence_api.h |
* @sync_obj_unref: See ttm_fence_api.h |
* @sync_obj_ref: See ttm_fence_api.h |
*/ |
struct ttm_bo_driver { |
413,6 → 419,23 |
int (*verify_access) (struct ttm_buffer_object *bo, |
struct file *filp); |
/** |
* In case a driver writer dislikes the TTM fence objects, |
* the driver writer can replace those with sync objects of |
* his / her own. If it turns out that no driver writer is |
* using these. I suggest we remove these hooks and plug in |
* fences directly. The bo driver needs the following functionality: |
* See the corresponding functions in the fence object API |
* documentation. |
*/ |
bool (*sync_obj_signaled) (void *sync_obj); |
int (*sync_obj_wait) (void *sync_obj, |
bool lazy, bool interruptible); |
int (*sync_obj_flush) (void *sync_obj); |
void (*sync_obj_unref) (void **sync_obj); |
void *(*sync_obj_ref) (void *sync_obj); |
/* hook to notify driver about a driver move so it |
* can do tiling things */ |
void (*move_notify)(struct ttm_buffer_object *bo, |
499,6 → 522,8 |
* |
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
* @man: An array of mem_type_managers. |
* @fence_lock: Protects the synchronizing members on *all* bos belonging |
* to this device. |
* @vma_manager: Address space manager |
* lru_lock: Spinlock that protects the buffer+device lru lists and |
* ddestroy lists. |
518,6 → 543,7 |
struct ttm_bo_global *glob; |
struct ttm_bo_driver *driver; |
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
spinlock_t fence_lock; |
/* |
* Protected by internal locks. |
996,7 → 1022,7 |
* ttm_bo_move_accel_cleanup. |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @fence: A fence object that signals when moving is complete. |
* @sync_obj: A sync object that signals when moving is complete. |
* @evict: This is an evict move. Don't return until the buffer is idle. |
* @no_wait_gpu: Return immediately if the GPU is busy. |
* @new_mem: struct ttm_mem_reg indicating where to move. |
1010,7 → 1036,7 |
*/ |
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
struct fence *fence, |
void *sync_obj, |
bool evict, bool no_wait_gpu, |
struct ttm_mem_reg *new_mem); |
/** |
/drivers/include/drm/ttm/ttm_execbuf_util.h |
---|
39,13 → 39,19 |
* |
* @head: list head for thread-private list. |
* @bo: refcounted buffer object pointer. |
* @shared: should the fence be added shared? |
* @reserved: Indicates whether @bo has been reserved for validation. |
* @removed: Indicates whether @bo has been removed from lru lists. |
* @put_count: Number of outstanding references on bo::list_kref. |
* @old_sync_obj: Pointer to a sync object about to be unreferenced |
*/ |
struct ttm_validate_buffer { |
struct list_head head; |
struct ttm_buffer_object *bo; |
bool shared; |
bool reserved; |
bool removed; |
int put_count; |
void *old_sync_obj; |
}; |
/** |
67,8 → 73,6 |
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only |
* non-blocking reserves should be tried. |
* @list: thread private list of ttm_validate_buffer structs. |
* @intr: should the wait be interruptible |
* @dups: [out] optional list of duplicates. |
* |
* Tries to reserve bos pointed to by the list entries for validation. |
* If the function returns 0, all buffers are marked as "unfenced", |
80,15 → 84,10 |
* CPU write reservations to be cleared, and for other threads to |
* unreserve their buffers. |
* |
* If intr is set to true, this function may return -ERESTARTSYS if the |
* calling process receives a signal while waiting. In that case, no |
* buffers on the list will be reserved upon return. |
* This function may return -ERESTART or -EAGAIN if the calling process |
* receives a signal while waiting. In that case, no buffers on the list |
* will be reserved upon return. |
* |
* If dups is non NULL all buffers already reserved by the current thread |
* (e.g. duplicates) are added to this list, otherwise -EALREADY is returned |
* on the first already reserved buffer and all buffers from the list are |
* unreserved again. |
* |
* Buffers reserved by this function should be unreserved by |
* a call to either ttm_eu_backoff_reservation() or |
* ttm_eu_fence_buffer_objects() when command submission is complete or |
96,8 → 95,7 |
*/ |
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, |
struct list_head *list, bool intr, |
struct list_head *dups); |
struct list_head *list); |
/** |
* function ttm_eu_fence_buffer_objects. |
104,7 → 102,7 |
* |
* @ticket: ww_acquire_ctx from reserve call |
* @list: thread private list of ttm_validate_buffer structs. |
* @fence: The new exclusive fence for the buffers. |
* @sync_obj: The new sync object for the buffers. |
* |
* This function should be called when command submission is complete, and |
* it will add a new sync object to bos pointed to by entries on @list. |
113,7 → 111,6 |
*/ |
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
struct list_head *list, |
struct fence *fence); |
struct list_head *list, void *sync_obj); |
#endif |
/drivers/include/drm/drm.h |
---|
0,0 → 1,836 |
/** |
* \file drm.h |
* Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* |
* \par Acknowledgments: |
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_H_ |
#define _DRM_H_ |
#include <linux/types.h> |
#include <errno-base.h> |
/* Opaque handle used to name a kernel memory mapping (see struct drm_map). */
typedef unsigned int drm_handle_t;
//#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_MAJOR 226
#define DRM_MAX_MINOR 15
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
/* Decoding helpers for the hardware-lock word: the two top bits carry the
 * HELD/CONT flags, the remaining bits hold the id of the locking context. */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
/* Opaque ids for rendering contexts, drawables and authentication cookies. */
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/**
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1; /**< Left edge (inclusive) */
	unsigned short y1; /**< Top edge (inclusive) */
	unsigned short x2; /**< Right edge */
	unsigned short y2; /**< Bottom edge */
};
/**
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects; /**< Number of entries in \c rects */
	struct drm_clip_rect *rects; /**< Array of cliprects for the drawable */
};
/**
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next; /**< Index of next region in the LRU list */
	unsigned char prev; /**< Index of previous region in the LRU list */
	unsigned char in_use; /**< Non-zero while region is owned */
	unsigned char padding; /**< Pad to 32-bit boundary */
	unsigned int age; /**< Age counter used for LRU eviction */
};
/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer. To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock; /**< lock variable (flags decoded by _DRM_LOCK_* macros) */
	char padding[60]; /**< Pad to cache line */
};
/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * The three __user buffers are filled by the kernel up to the corresponding
 * *_len sizes supplied by the caller.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major; /**< Major version */
	int version_minor; /**< Minor version */
	int version_patchlevel; /**< Patch level */
	size_t name_len; /**< Length of name buffer */
	char __user *name; /**< Name of driver */
	size_t date_len; /**< Length of date buffer */
	char __user *date; /**< User-space buffer to hold date */
	size_t desc_len; /**< Length of desc buffer */
	char __user *desc; /**< User-space buffer to hold desc */
};
/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	size_t unique_len; /**< Length of unique */
	char __user *unique; /**< Unique name for driver instantiation */
};
/** DRM_IOCTL_LIST ioctl argument type (legacy). */
struct drm_list {
	int count; /**< Length of user-space structures */
	struct drm_version __user *version;
};
/** DRM_IOCTL_BLOCK / DRM_IOCTL_UNBLOCK ioctl argument type (unused). */
struct drm_block {
	int unused;
};
/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER, /**< Install the IRQ handler */
		DRM_UNINST_HANDLER /**< Uninstall the IRQ handler */
	} func;
	int irq; /**< IRQ number the operation applies to */
};
/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1, /**< no caching, no core dump */
	_DRM_SHM = 2, /**< shared, cached */
	_DRM_AGP = 3, /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
};
/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04, /**< shared, cached, locked */
	_DRM_KERNEL = 0x08, /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40, /**< Removable mapping */
	_DRM_DRIVER = 0x80 /**< Managed by driver */
};
/** DRM_IOCTL_SET_SAREA_CTX / DRM_IOCTL_GET_SAREA_CTX ioctl argument type. */
struct drm_ctx_priv_map {
	unsigned int ctx_id; /**< Context requesting private mapping */
	void *handle; /**< Handle of map */
};
/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset; /**< Requested physical address (0 for SAREA)*/
	unsigned long size; /**< Requested physical size (bytes) */
	enum drm_map_type type; /**< Type of memory to map */
	enum drm_map_flags flags; /**< Flags */
	void *handle; /**< User-space: "Handle" to pass to mmap() */
	/**< Kernel-space: kernel-virtual address */
	int mtrr; /**< MTRR slot used */
	/* Private data */
};
/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx; /**< Which client desired? */
	int auth; /**< Is client authenticated? */
	unsigned long pid; /**< Process ID */
	unsigned long uid; /**< User ID */
	unsigned long magic; /**< Magic */
	unsigned long iocs; /**< Ioctl count */
};
/** Kinds of per-device statistics reported by DRM_IOCTL_GET_STATS. */
enum drm_stat_type {
	_DRM_STAT_LOCK, /**< Lock acquisitions */
	_DRM_STAT_OPENS, /**< Device opens */
	_DRM_STAT_CLOSES, /**< Device closes */
	_DRM_STAT_IOCTLS, /**< Ioctl calls */
	_DRM_STAT_LOCKS, /**< Locks taken */
	_DRM_STAT_UNLOCKS, /**< Locks released */
	_DRM_STAT_VALUE, /**< Generic value */
	_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
	_DRM_STAT_IRQ, /**< IRQ */
	_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
	_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
	_DRM_STAT_DMA, /**< DMA */
	_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED /**< Missed DMA opportunity */
	/* Add to the *END* of the list */
};
/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count; /**< Number of valid entries in \c data */
	struct {
		unsigned long value; /**< Counter value */
		enum drm_stat_type type; /**< What the counter measures */
	} data[15];
};
/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context; /**< Context id of the lock requester */
	enum drm_lock_flags flags;
};
/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01, /**<
				* Block until buffer dispatched.
				*
				* \note The buffer may not yet have
				* been processed by the hardware --
				* getting a hardware lock with the
				* hardware quiescent will ensure
				* that the buffer has been
				* processed.
				*/
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count; /**< Number of buffers of this size */
	int size; /**< Size in bytes */
	int low_mark; /**< Low water mark */
	int high_mark; /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
			  * Start address of where the AGP buffers are
			  * in the AGP aperture
			  */
};
/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count; /**< Entries in list */
	struct drm_buf_desc __user *list; /**< User buffer to receive descriptors */
};
/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count; /**< Number of buffer indices in \c list */
	int __user *list; /**< Indices of the buffers to free */
};
/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx; /**< Index into the master buffer list */
	int total; /**< Buffer size */
	int used; /**< Amount of buffer in use (for DMA) */
	void __user *address; /**< Address of buffer */
};
/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count; /**< Length of the buffer list */
	void __user *virtual; /**< Mmap'd area in user-virtual */
	struct drm_buf_pub __user *list; /**< Buffer information */
};
/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context; /**< Context handle */
	int send_count; /**< Number of buffers to send */
	int __user *send_indices; /**< List of handles to buffers */
	int __user *send_sizes; /**< Lengths of data to send */
	enum drm_dma_flags flags; /**< Flags */
	int request_count; /**< Number of buffers requested */
	int request_size; /**< Desired size for buffers */
	int __user *request_indices; /**< Buffer information */
	int __user *request_sizes; /**< Sizes of the granted buffers */
	int granted_count; /**< Number of buffers granted */
};
/** Context creation flags. */
enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01, /**< Hardware state is preserved across switches */
	_DRM_CONTEXT_2DONLY = 0x02 /**< Context is restricted to 2D operations */
};
/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};
/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count; /**< Capacity of / entries in \c contexts */
	struct drm_ctx __user *contexts;
};
/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};
/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS, /**< \c data points at an array of cliprects */
} drm_drawable_info_type_t;
struct drm_update_draw {
	drm_drawable_t handle; /**< Drawable to update */
	unsigned int type; /**< One of drm_drawable_info_type_t */
	unsigned int num; /**< Number of elements behind \c data */
	unsigned long long data; /**< User pointer, passed as a 64-bit integer */
};
/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq; /**< IRQ number */
	int busnum; /**< bus number */
	int devnum; /**< device number */
	int funcnum; /**< function number */
};
enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
/** Input half of union drm_wait_vblank. */
struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type; /**< Wait type and flags */
	unsigned int sequence; /**< Target (or relative) vblank count */
	unsigned long signal; /**< Signal number / user data for _DRM_VBLANK_SIGNAL or _DRM_VBLANK_EVENT */
};
/** Output half of union drm_wait_vblank. */
struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence; /**< Vblank count when the wait completed */
	long tval_sec; /**< Completion timestamp, seconds part */
	long tval_usec; /**< Completion timestamp, microseconds part */
};
/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
/* Values for drm_modeset_ctl.cmd */
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc; /**< CRTC the modeset applies to */
	__u32 cmd; /**< _DRM_PRE_MODESET or _DRM_POST_MODESET */
};
/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode; /**< AGP mode */
};
/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size; /**< In bytes -- will round to page boundary */
	unsigned long handle; /**< Used for binding / unbinding */
	unsigned long type; /**< Type of memory to allocate */
	unsigned long physical; /**< Physical used by i810 */
};
/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle; /**< From drm_agp_buffer */
	unsigned long offset; /**< In bytes -- will round to page boundary */
};
/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base; /* physical address */
	unsigned long aperture_size; /* bytes */
	unsigned long memory_allowed; /* bytes */
	unsigned long memory_used;
	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};
/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size; /**< In bytes -- will round to page boundary */
	unsigned long handle; /**< Used for mapping / unmapping */
};
/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major; /**< DRM driver-interface major version (-1 = don't change) */
	int drm_di_minor; /**< DRM driver-interface minor version */
	int drm_dd_major; /**< Device-driver major version */
	int drm_dd_minor; /**< Device-driver minor version */
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	/** Unused; keeps the struct 8-byte sized. */
	__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;
	/** Returned global name */
	__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;
	/** Returned handle for the object */
	__u32 handle;
	/** Returned size of the object */
	__u64 size;
};
/* Capability ids queried through DRM_IOCTL_GET_CAP. */
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
/* Bits reported in the DRM_CAP_PRIME value. */
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability; /**< In: one of the DRM_CAP_* ids */
	__u64 value; /**< Out: the capability's value */
};
/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * if set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo.
 */
#define DRM_CLIENT_CAP_STEREO_3D 1
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability; /**< DRM_CLIENT_CAP_* id to change */
	__u64 value; /**< New value for the capability */
};
#define DRM_CLOEXEC O_CLOEXEC
/** DRM_IOCTL_PRIME_HANDLE_TO_FD / DRM_IOCTL_PRIME_FD_TO_HANDLE argument type */
struct drm_prime_handle {
	__u32 handle;
	/** Flags.. only applicable for handle->fd */
	__u32 flags;
	/** Returned dmabuf file descriptor */
	__s32 fd;
};
#include <drm/drm_mode.h> |
#if 0 |
#define DRM_IOCTL_BASE 'd' |
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) |
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) |
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) |
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) |
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) |
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) |
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) |
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) |
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) |
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) |
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) |
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) |
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) |
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) |
#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) |
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) |
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) |
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) |
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) |
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) |
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) |
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) |
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) |
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) |
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) |
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) |
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) |
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) |
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) |
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) |
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) |
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) |
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) |
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) |
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) |
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) |
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) |
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) |
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) |
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) |
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) |
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) |
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) |
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) |
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle) |
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle) |
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) |
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) |
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) |
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) |
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) |
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) |
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) |
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) |
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) |
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) |
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) |
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) |
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) |
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) |
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */ |
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */ |
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) |
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) |
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) |
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) |
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) |
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) |
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) |
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) |
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) |
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res) |
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane) |
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane) |
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) |
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) |
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) |
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) |
#endif |
/** |
* Device specific ioctls should only be in their respective headers |
* The device specific ioctl range is from 0x40 to 0x99. |
* Generic IOCTLS restart at 0xA0. |
* |
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and |
* drmCommandReadWrite(). |
*/ |
#define DRM_COMMAND_BASE 0x40 |
#define DRM_COMMAND_END 0xA0 |
/** |
* Header for events written back to userspace on the drm fd. The |
* type defines the type of event, the length specifies the total |
* length of the event (including the header), and user_data is |
* typically a 64 bit value passed with the ioctl that triggered the |
* event. A read on the drm fd will always only return complete |
* events, that is, if for example the read buffer is 100 bytes, and |
* there are two 64 byte events pending, only one will be returned. |
* |
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and |
* up are chipset specific. |
*/ |
/* Common header that precedes every event read from the drm fd. */
struct drm_event {
	__u32 type; /**< DRM_EVENT_* id (or chipset-specific >= 0x80000000) */
	__u32 length; /**< Total event length in bytes, including this header */
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
/* Payload for DRM_EVENT_VBLANK and DRM_EVENT_FLIP_COMPLETE events. */
struct drm_event_vblank {
	struct drm_event base; /**< Common event header */
	__u64 user_data; /**< Value passed in by the ioctl that armed the event */
	__u32 tv_sec; /**< Timestamp, seconds part */
	__u32 tv_usec; /**< Timestamp, microseconds part */
	__u32 sequence; /**< Vblank sequence number */
	__u32 reserved;
};
/* typedef area */
/* Legacy *_t aliases kept for old userspace; kernel code uses the
 * struct/enum tags directly. */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#endif |
/drivers/include/drm/drm_memory.h |
---|
0,0 → 1,58 |
/** |
* \file drm_memory.h |
* Memory management wrappers for DRM |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com |
* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#include <linux/vmalloc.h> |
#include <drm/drmP.h> |
/** |
* Cut down version of drm_memory_debug.h, which used to be called |
* drm_memory.h. |
*/ |
#if __OS_HAS_AGP
#ifdef HAVE_PAGE_AGP
/* Arch provides PAGE_AGP itself (x86 asm/agp.h). */
#include <asm/agp.h>
#else
/* No arch-provided definition: pick the page protection used when mapping
 * AGP memory into the kernel. */
# ifdef __powerpc__
/* AGP requires uncached access on PowerPC. */
# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
#endif
#else /* __OS_HAS_AGP */
#endif
/drivers/include/drm/intel-gtt.h |
---|
3,6 → 3,8 |
#ifndef _DRM_INTEL_GTT_H |
#define _DRM_INTEL_GTT_H |
struct agp_bridge_data; |
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, |
phys_addr_t *mappable_base, unsigned long *mappable_end); |
/drivers/include/linux/range.h |
---|
File deleted |
/drivers/include/linux/agp_backend.h |
---|
File deleted |
/drivers/include/linux/cache.h |
---|
File deleted |
/drivers/include/linux/threads.h |
---|
File deleted |
/drivers/include/linux/irqflags.h |
---|
File deleted |
/drivers/include/linux/seqlock.h |
---|
File deleted |
/drivers/include/linux/atomic.h |
---|
File deleted |
/drivers/include/linux/mmdebug.h |
---|
File deleted |
/drivers/include/linux/rcupdate.h |
---|
File deleted |
/drivers/include/linux/printk.h |
---|
File deleted |
/drivers/include/linux/vgaarb.h |
---|
File deleted |
/drivers/include/linux/uuid.h |
---|
File deleted |
/drivers/include/linux/completion.h |
---|
File deleted |
/drivers/include/linux/percpu-defs.h |
---|
File deleted |
/drivers/include/linux/rcutiny.h |
---|
File deleted |
/drivers/include/linux/linkage.h |
---|
File deleted |
/drivers/include/linux/gfp.h |
---|
File deleted |
/drivers/include/linux/personality.h |
---|
File deleted |
/drivers/include/linux/async.h |
---|
File deleted |
/drivers/include/linux/time64.h |
---|
File deleted |
/drivers/include/linux/fence.h |
---|
File deleted |
/drivers/include/linux/cpumask.h |
---|
File deleted |
/drivers/include/linux/kernel.h |
---|
1,19 → 1,22 |
#ifndef _LINUX_KERNEL_H |
#define _LINUX_KERNEL_H |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#ifdef __KERNEL__ |
#include <stdarg.h> |
#include <linux/linkage.h> |
#include <linux/stddef.h> |
#include <linux/types.h> |
#include <linux/compiler.h> |
#include <linux/bitops.h> |
#include <linux/log2.h> |
#include <linux/errno.h> |
#include <linux/typecheck.h> |
#include <linux/printk.h> |
#include <asm/byteorder.h> |
#include <uapi/linux/kernel.h> |
#define __init |
#define USHRT_MAX ((u16)(~0U)) |
#define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
41,12 → 44,8 |
#define S64_MAX ((s64)(U64_MAX>>1)) |
#define S64_MIN ((s64)(-S64_MAX - 1)) |
#define STACK_MAGIC 0xdeadbeef |
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
115,23 → 114,14 |
} \ |
) |
#define clamp_t(type, val, min, max) ({ \ |
type __val = (val); \ |
type __min = (min); \ |
type __max = (max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
#define _RET_IP_ (unsigned long)__builtin_return_address(0) |
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
#ifdef CONFIG_LBDAF |
# include <asm/div64.h> |
# define sector_div(a, b) do_div(a, b) |
#else |
# define sector_div(n, b)( \ |
{ \ |
int _res; \ |
_res = (n) % (b); \ |
(n) /= (b); \ |
_res; \ |
} \ |
) |
#endif |
/** |
* upper_32_bits - return bits 32-63 of a number |
150,23 → 140,6 |
#define lower_32_bits(n) ((u32)(n)) |
/* |
* abs() handles unsigned and signed longs, ints, shorts and chars. For all |
* input types abs() returns a signed long. |
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() |
* for those. |
*/ |
#define abs(x) ({ \ |
long ret; \ |
if (sizeof(x) == sizeof(long)) { \ |
long __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} else { \ |
int __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} \ |
ret; \ |
}) |
#define abs64(x) ({ \ |
s64 __x = (x); \ |
181,60 → 154,11 |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); |
extern __printf(3, 4) |
int snprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(3, 4) |
int scnprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
enum lockdep_ok { |
LOCKDEP_STILL_OK, |
LOCKDEP_NOW_UNRELIABLE |
}; |
extern void add_taint(unsigned flag, enum lockdep_ok); |
extern int test_taint(unsigned flag); |
extern unsigned long get_taint(void); |
extern int root_mountflags; |
extern bool early_boot_irqs_disabled; |
/* Values used for system_state */ |
extern enum system_states { |
SYSTEM_BOOTING, |
SYSTEM_RUNNING, |
SYSTEM_HALT, |
SYSTEM_POWER_OFF, |
SYSTEM_RESTART, |
} system_state; |
#define TAINT_PROPRIETARY_MODULE 0 |
#define TAINT_FORCED_MODULE 1 |
#define TAINT_CPU_OUT_OF_SPEC 2 |
#define TAINT_FORCED_RMMOD 3 |
#define TAINT_MACHINE_CHECK 4 |
#define TAINT_BAD_PAGE 5 |
#define TAINT_USER 6 |
#define TAINT_DIE 7 |
#define TAINT_OVERRIDDEN_ACPI_TABLE 8 |
#define TAINT_WARN 9 |
#define TAINT_CRAP 10 |
#define TAINT_FIRMWARE_WORKAROUND 11 |
#define TAINT_OOT_MODULE 12 |
#define TAINT_UNSIGNED_MODULE 13 |
#define TAINT_SOFTLOCKUP 14 |
extern const char hex_asc[]; |
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
static inline char *hex_byte_pack(char *buf, u8 byte) |
static inline char *pack_hex_byte(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_hi(byte); |
*buf++ = hex_asc_lo(byte); |
241,223 → 165,25 |
return buf; |
} |
extern const char hex_asc_upper[]; |
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] |
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] |
/*
 * hex_byte_pack_upper - write @byte as two upper-case hex digits into @buf.
 *
 * Emits the high nibble first, then the low nibble; no NUL terminator is
 * added. Returns @buf advanced past the two characters written.
 */
static inline char *hex_byte_pack_upper(char *buf, u8 byte)
{
buf[0] = hex_asc_upper_hi(byte);
buf[1] = hex_asc_upper_lo(byte);
return buf + 2;
}
extern int hex_to_bin(char ch); |
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); |
extern char *bin2hex(char *dst, const void *src, size_t count); |
bool mac_pton(const char *s, u8 *mac); |
/* |
* General tracing related utility functions - trace_printk(), |
* tracing_on/tracing_off and tracing_start()/tracing_stop |
* |
* Use tracing_on/tracing_off when you want to quickly turn on or off |
* tracing. It simply enables or disables the recording of the trace events. |
* This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on |
* file, which gives a means for the kernel and userspace to interact. |
* Place a tracing_off() in the kernel where you want tracing to end. |
* From user space, examine the trace, and then echo 1 > tracing_on |
* to continue tracing. |
* |
* tracing_stop/tracing_start has slightly more overhead. It is used |
* by things like suspend to ram where disabling the recording of the |
* trace is not enough, but tracing must actually stop because things |
* like calling smp_processor_id() may crash the system. |
* |
* Most likely, you want to use tracing_on/tracing_off. |
*/ |
#ifdef CONFIG_RING_BUFFER |
/* trace_off_permanent stops recording with no way to bring it back */ |
void tracing_off_permanent(void); |
#else |
static inline void tracing_off_permanent(void) { } |
#endif |
enum ftrace_dump_mode { |
DUMP_NONE, |
DUMP_ALL, |
DUMP_ORIG, |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
}; |
#ifdef CONFIG_TRACING |
void tracing_on(void); |
void tracing_off(void); |
int tracing_is_on(void); |
void tracing_snapshot(void); |
void tracing_snapshot_alloc(void); |
int hex_to_bin(char ch); |
int hex2bin(u8 *dst, const char *src, size_t count); |
extern void tracing_start(void); |
extern void tracing_stop(void); |
/* Never called at run time: exists only so the compiler applies __printf()
 * format checking to trace_printk() arguments (see the if (0) wrapper in
 * __trace_printk_check_format below). */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
#define __trace_printk_check_format(fmt, args...) \ |
do { \ |
if (0) \ |
____trace_printk_check_format(fmt, ##args); \ |
} while (0) |
//int printk(const char *fmt, ...); |
/** |
* trace_printk - printf formatting in the ftrace buffer |
* @fmt: the printf format for printing |
* |
* Note: __trace_printk is an internal function for trace_printk and |
* the @ip is passed in via the trace_printk macro. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_printks scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_printk() is used) |
* |
* A little optization trick is done here. If there's only one |
* argument, there's no need to scan the string for printf formats. |
* The trace_puts() will suffice. But how can we take advantage of |
* using trace_puts() when trace_printk() has only one argument? |
* By stringifying the args and checking the size we can tell |
* whether or not there are args. __stringify((__VA_ARGS__)) will |
* turn into "()\0" with a size of 3 when there are no args, anything |
* else will be bigger. All we need to do is define a string to this, |
* and then take its size and compare to 3. If it's bigger, use |
* do_trace_printk() otherwise, optimize it to trace_puts(). Then just |
* let gcc optimize the rest. |
*/ |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
#define trace_printk(fmt, ...) \ |
do { \ |
char _______STR[] = __stringify((__VA_ARGS__)); \ |
if (sizeof(_______STR) > 3) \ |
do_trace_printk(fmt, ##__VA_ARGS__); \ |
else \ |
trace_puts(fmt); \ |
} while (0) |
#define do_trace_printk(fmt, args...) \ |
do { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__trace_printk_check_format(fmt, ##args); \ |
\ |
if (__builtin_constant_p(fmt)) \ |
__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ |
else \ |
__trace_printk(_THIS_IP_, fmt, ##args); \ |
} while (0) |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
extern __printf(2, 3) |
int __trace_bprintk(unsigned long ip, const char *fmt, ...); |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
extern __printf(2, 3) |
int __trace_printk(unsigned long ip, const char *fmt, ...); |
/** |
* trace_puts - write a string into the ftrace buffer |
* @str: the string to record |
* |
* Note: __trace_bputs is an internal function for trace_puts and |
* the @ip is passed in via the trace_puts macro. |
* |
* This is similar to trace_printk() but is made for those really fast |
* paths that a developer wants the least amount of "Heisenbug" affects, |
* where the processing of the print format is still too much. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_puts scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_puts() is used) |
* |
* Returns: 0 if nothing was written, positive # if string was. |
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
*/ |
#define trace_puts(str) ({ \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(str) ? str : NULL; \ |
\ |
if (__builtin_constant_p(str)) \ |
__trace_bputs(_THIS_IP_, trace_printk_fmt); \ |
else \ |
__trace_puts(_THIS_IP_, str, strlen(str)); \ |
}) |
extern int __trace_bputs(unsigned long ip, const char *str); |
extern int __trace_puts(unsigned long ip, const char *str, int size); |
extern void trace_dump_stack(int skip); |
/* |
* The double __builtin_constant_p is because gcc will give us an error |
* if we try to allocate the static variable to fmt if it is not a |
* constant. Even with the outer if statement. |
*/ |
#define ftrace_vprintk(fmt, vargs) \ |
do { \ |
if (__builtin_constant_p(fmt)) { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ |
} else \ |
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \ |
} while (0) |
extern int |
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); |
extern int |
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); |
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); |
#else |
/* !CONFIG_TRACING: every tracing entry point compiles to a no-op stub so
 * callers need no #ifdefs. */
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
/* Arguments are still format-checked via __printf; nothing is recorded. */
return 0;
}
static inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */ |
/* |
* min()/max()/clamp() macros that also do |
* strict type-checking.. See the |
* "unnecessary" pointer comparison. |
474,9 → 200,24 |
(void) (&_max1 == &_max2); \ |
_max1 > _max2 ? _max1 : _max2; }) |
#define min3(x, y, z) min((typeof(x))min(x, y), z) |
#define max3(x, y, z) max((typeof(x))max(x, y), z) |
/* min3 - smallest of three values, with min()-style strict type checking
 * (the pointer comparisons warn if argument types differ). Arguments are
 * evaluated exactly once. */
#define min3(x, y, z) ({ \
typeof(x) _min1 = (x); \
typeof(y) _min2 = (y); \
typeof(z) _min3 = (z); \
(void) (&_min1 == &_min2); \
(void) (&_min1 == &_min3); \
_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
(_min2 < _min3 ? _min2 : _min3); })
/* max3 - largest of three values, with the same type checking as min3(). */
#define max3(x, y, z) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
typeof(z) _max3 = (z); \
(void) (&_max1 == &_max2); \
(void) (&_max1 == &_max3); \
_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
(_max2 > _max3 ? _max2 : _max3); })
/** |
* min_not_zero - return the minimum that is _not_ zero, unless both are zero |
* @x: value1 |
490,13 → 231,20 |
/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @min: minimum allowable value
 * @max: maximum allowable value
 *
 * This macro does strict typechecking of min/max to make sure they are of the
 * same type as val. See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
#define clamp(val, min, max) ({ \ |
typeof(val) __val = (val); \ |
typeof(min) __min = (min); \ |
typeof(max) __max = (max); \ |
(void) (&__val == &__min); \ |
(void) (&__val == &__max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
/* |
* ..and if you can't take the strict |
515,38 → 263,6 |
__max1 > __max2 ? __max1: __max2; }) |
/** |
* clamp_t - return a value clamped to a given range using a given type |
* @type: the type of variable to use |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of type |
* 'type' to make all the comparisons. |
*/ |
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
/** |
* clamp_val - return a value clamped to a given range using val's type |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of whatever |
* type the input argument 'val' is. This is useful when val is an unsigned |
* type and min and max are literals that will otherwise be assigned a signed |
* integer type. |
*/ |
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
/*
 * swap - swap value of @a and @b
 *
 * NOTE: @a and @b are each evaluated more than once, so they must be
 * side-effect free lvalues of compatible types.
 */
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/** |
* container_of - cast a member of a structure out to the containing structure |
* @ptr: the pointer to the member. |
* @type: the type of the container struct this is embedded in. |
557,28 → 273,22 |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ |
#ifdef CONFIG_FTRACE_MCOUNT_RECORD |
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD |
#endif |
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you?
 * Each BUILD_BUG_ON_ZERO() term is a compile-time check that contributes 0
 * when it passes, so the whole expression evaluates to (perms) itself. */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
BUILD_BUG_ON_ZERO((perms) > 0777) + \
/* User perms >= group perms >= other perms */ \
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
/* Other writable? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
/*
 * kcalloc - allocate zeroed memory for an array of @n elements of @size bytes.
 *
 * Returns NULL if n * size would overflow an unsigned long; otherwise
 * delegates to kzalloc(). NOTE(review): @flags is accepted for API
 * compatibility but not forwarded - kzalloc is always called with 0.
 */
static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
size_t per_elem_limit = n ? ULONG_MAX / n : (size_t)ULONG_MAX;

if (size > per_elem_limit)
return NULL;
return kzalloc(n * size, 0);
}
void free (void *ptr); |
#endif /* __KERNEL__ */ |
typedef unsigned long pgprotval_t; |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
struct file |
{ |
642,7 → 352,17 |
# define del_timer_sync(t) del_timer(t) |
/* Local replacement for struct timespec (tv_sec is a plain long here,
 * not time_t). */
struct timespec {
long tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
#define mb() asm volatile("mfence" : : : "memory") |
#define rmb() asm volatile("lfence" : : : "memory") |
#define wmb() asm volatile("sfence" : : : "memory") |
#define build_mmio_read(name, size, type, reg, barrier) \ |
static inline type name(const volatile void __iomem *addr) \ |
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ |
680,6 → 400,23 |
#define __raw_writew __writew |
#define __raw_writel __writel |
/*
 * readq - read a 64-bit MMIO value as two 32-bit reads, low word first.
 *
 * Not atomic: the two halves are separate bus accesses, so a concurrently
 * changing register may yield a torn value.
 */
static inline __u64 readq(const volatile void __iomem *addr)
{
const volatile u32 __iomem *reg = addr;
u32 lo, hi;

lo = readl(reg);
hi = readl(reg + 1);
return ((u64)hi << 32) | lo;
}
/*
 * writeq - write a 64-bit MMIO value as two 32-bit writes, low word first.
 *
 * Not atomic: a concurrent reader may observe the halves from different
 * values.
 */
static inline void writeq(__u64 val, volatile void __iomem *addr)
{
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
#define swap(a, b) \ |
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
695,6 → 432,9 |
#define dev_info(dev, format, arg...) \ |
printk("Info %s " format , __func__, ## arg) |
//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#define BUILD_BUG_ON(condition) |
struct page |
{ |
unsigned int addr; |
727,6 → 467,8 |
#define get_page(a) |
#define put_page(a) |
#define set_pages_uc(a,b) |
#define set_pages_wb(a,b) |
#define pci_map_page(dev, page, offset, size, direction) \ |
(dma_addr_t)( (offset)+page_to_phys(page)) |
733,31 → 475,36 |
#define pci_unmap_page(dev, dma_address, size, direction) |
#define GFP_TEMPORARY 0 |
#define __GFP_NOWARN 0 |
#define __GFP_NORETRY 0 |
#define GFP_NOWAIT 0 |
#define IS_ENABLED(a) 0 |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
//#define RCU_INIT_POINTER(p, v) \ |
// do { \ |
// p = (typeof(*v) __force __rcu *)(v); \ |
// } while (0) |
#define RCU_INIT_POINTER(p, v) \ |
do { \ |
p = (typeof(*v) __force __rcu *)(v); \ |
} while (0) |
//#define rcu_dereference_raw(p) ({ \ |
// typeof(p) _________p1 = ACCESS_ONCE(p); \ |
// (_________p1); \ |
// }) |
#define rcu_dereference_raw(p) ({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
(_________p1); \ |
}) |
#define rcu_assign_pointer(p, v) \ |
({ \ |
if (!__builtin_constant_p(v) || \ |
((v) != NULL)) \ |
(p) = (v); \ |
}) |
//#define rcu_assign_pointer(p, v) \ |
// ({ \ |
// if (!__builtin_constant_p(v) || \ |
// ((v) != NULL)) \ |
// (p) = (v); \ |
// }) |
unsigned int hweight16(unsigned int w); |
#define cpufreq_quick_get_max(x) GetCpuFreq() |
extern unsigned int tsc_khz; |
793,7 → 540,7 |
} |
} |
__builtin_memcpy((void __force *)to, from, n); |
memcpy((void __force *)to, from, n); |
return 0; |
} |
804,14 → 551,6 |
void kunmap(struct page *page); |
void kunmap_atomic(void *vaddr); |
typedef u64 async_cookie_t; |
#define iowrite32(v, addr) writel((v), (addr)) |
#endif |
#define __init |
#define CONFIG_PAGE_OFFSET 0 |
#endif |
/drivers/include/linux/mod_devicetable.h |
---|
9,7 → 9,7 |
#ifdef __KERNEL__ |
#include <linux/types.h> |
#include <linux/uuid.h> |
#include <mutex.h> |
typedef unsigned long kernel_ulong_t; |
#endif |
69,7 → 69,7 |
* @bDeviceClass: Class of device; numbers are assigned |
* by the USB forum. Products may choose to implement classes, |
* or be vendor-specific. Device classes specify behavior of all |
* the interfaces on a device. |
* the interfaces on a device.
* @bDeviceSubClass: Subclass of device; associated with bDeviceClass. |
* @bDeviceProtocol: Protocol of device; associated with bDeviceClass. |
* @bInterfaceClass: Class of interface; numbers are assigned |
/drivers/include/linux/uapi/drm/radeon_drm.h |
---|
0,0 → 1,1041 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__ |
#define __RADEON_DRM_H__ |
#include <drm/drm.h> |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the X server file (radeon_sarea.h) |
*/ |
#ifndef __RADEON_SAREA_DEFINES__ |
#define __RADEON_SAREA_DEFINES__ |
/* Old style state flags, required for sarea interface (1.1 and 1.2 |
* clears) and 1.2 drm_vertex2 ioctl. |
*/ |
#define RADEON_UPLOAD_CONTEXT 0x00000001 |
#define RADEON_UPLOAD_VERTFMT 0x00000002 |
#define RADEON_UPLOAD_LINE 0x00000004 |
#define RADEON_UPLOAD_BUMPMAP 0x00000008 |
#define RADEON_UPLOAD_MASKS 0x00000010 |
#define RADEON_UPLOAD_VIEWPORT 0x00000020 |
#define RADEON_UPLOAD_SETUP 0x00000040 |
#define RADEON_UPLOAD_TCL 0x00000080 |
#define RADEON_UPLOAD_MISC 0x00000100 |
#define RADEON_UPLOAD_TEX0 0x00000200 |
#define RADEON_UPLOAD_TEX1 0x00000400 |
#define RADEON_UPLOAD_TEX2 0x00000800 |
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 |
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 |
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 |
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ |
#define RADEON_REQUIRE_QUIESCENCE 0x00010000 |
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ |
#define RADEON_UPLOAD_ALL 0x003effff |
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff |
/* New style per-packet identifiers for use in cmd_buffer ioctl with |
* the RADEON_EMIT_PACKET command. Comments relate new packets to old |
* state bits and the packet size: |
*/ |
#define RADEON_EMIT_PP_MISC 0 /* context/7 */ |
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ |
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ |
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ |
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ |
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ |
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ |
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ |
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ |
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ |
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ |
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ |
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ |
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ |
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ |
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ |
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ |
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ |
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ |
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ |
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ |
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ |
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ |
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ |
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ |
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ |
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ |
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ |
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ |
#define R200_EMIT_VAP_CTL 32 /* vap/1 */ |
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ |
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ |
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ |
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ |
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ |
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ |
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ |
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ |
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ |
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ |
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ |
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ |
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ |
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ |
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ |
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ |
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ |
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ |
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ |
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ |
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ |
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ |
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ |
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ |
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ |
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ |
#define R200_EMIT_PP_CUBIC_FACES_0 61 |
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 |
#define R200_EMIT_PP_CUBIC_FACES_1 63 |
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 |
#define R200_EMIT_PP_CUBIC_FACES_2 65 |
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 |
#define R200_EMIT_PP_CUBIC_FACES_3 67 |
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 |
#define R200_EMIT_PP_CUBIC_FACES_4 69 |
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 |
#define R200_EMIT_PP_CUBIC_FACES_5 71 |
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 |
#define RADEON_EMIT_PP_TEX_SIZE_0 73 |
#define RADEON_EMIT_PP_TEX_SIZE_1 74 |
#define RADEON_EMIT_PP_TEX_SIZE_2 75 |
#define R200_EMIT_RB3D_BLENDCOLOR 76 |
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 |
#define RADEON_EMIT_PP_CUBIC_FACES_0 78 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 |
#define RADEON_EMIT_PP_CUBIC_FACES_1 80 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 |
#define RADEON_EMIT_PP_CUBIC_FACES_2 82 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 |
#define R200_EMIT_PP_TRI_PERF_CNTL 84 |
#define R200_EMIT_PP_AFS_0 85 |
#define R200_EMIT_PP_AFS_1 86 |
#define R200_EMIT_ATF_TFACTOR 87 |
#define R200_EMIT_PP_TXCTLALL_0 88 |
#define R200_EMIT_PP_TXCTLALL_1 89 |
#define R200_EMIT_PP_TXCTLALL_2 90 |
#define R200_EMIT_PP_TXCTLALL_3 91 |
#define R200_EMIT_PP_TXCTLALL_4 92 |
#define R200_EMIT_PP_TXCTLALL_5 93 |
#define R200_EMIT_VAP_PVS_CNTL 94 |
#define RADEON_MAX_STATE_PACKETS 95 |
/* Commands understood by cmd_buffer ioctl. More can be added but |
* obviously these can't be removed or changed: |
*/ |
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ |
#define RADEON_CMD_SCALARS 2 /* emit scalar data */ |
#define RADEON_CMD_VECTORS 3 /* emit vector data */ |
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ |
#define RADEON_CMD_PACKET3 5 /* emit hw packet */ |
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ |
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ |
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: |
* doesn't make the cpu wait, just |
* the graphics hardware */ |
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ |
/* Header word of a legacy cmd_buffer ioctl command. The cmd_type byte holds
 * one of the RADEON_CMD_* values above and selects which overlaid layout
 * applies. Userspace ABI - layout must not change. */
typedef union {
int i;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, packet_id, pad0, pad1;
} packet;
struct {
unsigned char cmd_type, offset, stride, count;
} scalars;
struct {
unsigned char cmd_type, offset, stride, count;
} vectors;
struct {
unsigned char cmd_type, addr_lo, addr_hi, count;
} veclinear;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_radeon_cmd_header_t;
#define RADEON_WAIT_2D 0x1 |
#define RADEON_WAIT_3D 0x2 |
/* Allowed parameters for R300_CMD_PACKET3 |
*/ |
#define R300_CMD_PACKET3_CLEAR 0 |
#define R300_CMD_PACKET3_RAW 1 |
/* Commands understood by cmd_buffer ioctl for R300. |
* The interface has not been stabilized, so some of these may be removed |
* and eventually reordered before stabilization. |
*/ |
#define R300_CMD_PACKET0 1 |
#define R300_CMD_VPU 2 /* emit vertex program upload */ |
#define R300_CMD_PACKET3 3 /* emit a packet3 */ |
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ |
#define R300_CMD_CP_DELAY 5 |
#define R300_CMD_DMA_DISCARD 6 |
#define R300_CMD_WAIT 7 |
# define R300_WAIT_2D 0x1 |
# define R300_WAIT_3D 0x2 |
/* these two defines are DOING IT WRONG - however |
* we have userspace which relies on using these. |
* The wait interface is backwards compat new |
* code should use the NEW_WAIT defines below |
* THESE ARE NOT BIT FIELDS |
*/ |
# define R300_WAIT_2D_CLEAN 0x3 |
# define R300_WAIT_3D_CLEAN 0x4 |
# define R300_NEW_WAIT_2D_3D 0x3 |
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 |
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 |
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 |
#define R300_CMD_SCRATCH 8 |
#define R300_CMD_R500FP 9 |
/* Header word of an R300-family cmd_buffer command. cmd_type holds one of the
 * R300_CMD_* values above and selects the overlaid layout. Userspace ABI -
 * layout must not change. */
typedef union {
unsigned int u;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, count, reglo, reghi;
} packet0;
struct {
unsigned char cmd_type, count, adrlo, adrhi;
} vpu;
struct {
unsigned char cmd_type, packet, pad0, pad1;
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
struct {
unsigned char cmd_type, reg, n_bufs, flags;
} scratch;
struct {
unsigned char cmd_type, count, adrlo, adrhi_flags;
} r500fp;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1 |
#define RADEON_BACK 0x2 |
#define RADEON_DEPTH 0x4 |
#define RADEON_STENCIL 0x8 |
#define RADEON_CLEAR_FASTZ 0x80000000 |
#define RADEON_USE_HIERZ 0x40000000 |
#define RADEON_USE_COMP_ZBUF 0x20000000 |
#define R500FP_CONSTANT_TYPE (1 << 1) |
#define R500FP_CONSTANT_CLAMP (1 << 2) |
/* Primitive types |
*/ |
#define RADEON_POINTS 0x1 |
#define RADEON_LINES 0x2 |
#define RADEON_LINE_STRIP 0x3 |
#define RADEON_TRIANGLES 0x4 |
#define RADEON_TRIANGLE_FAN 0x5 |
#define RADEON_TRIANGLE_STRIP 0x6 |
/* Vertex/indirect buffer size |
*/ |
#define RADEON_BUFFER_SIZE 65536 |
/* Byte offsets for indirect buffer data |
*/ |
#define RADEON_INDEX_PRIM_OFFSET 20 |
#define RADEON_SCRATCH_REG_OFFSET 32 |
#define R600_SCRATCH_REG_OFFSET 256 |
#define RADEON_NR_SAREA_CLIPRECTS 12 |
/* There are 2 heaps (local/GART). Each region within a heap is a |
* minimum of 64k, and there are at most 64 of them per heap. |
*/ |
#define RADEON_LOCAL_TEX_HEAP 0 |
#define RADEON_GART_TEX_HEAP 1 |
#define RADEON_NR_TEX_HEAPS 2 |
#define RADEON_NR_TEX_REGIONS 64 |
#define RADEON_LOG_TEX_GRANULARITY 16 |
#define RADEON_MAX_TEXTURE_LEVELS 12 |
#define RADEON_MAX_TEXTURE_UNITS 3 |
#define RADEON_MAX_SURFACES 8 |
/* Blits have strict offset rules. All blit offset must be aligned on |
* a 1K-byte boundary. |
*/ |
#define RADEON_OFFSET_SHIFT 10 |
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) |
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) |
#endif /* __RADEON_SAREA_DEFINES__ */ |
/* One color split into its four components, each as a raw 32-bit register
 * value. */
typedef struct {
unsigned int red;
unsigned int green;
unsigned int blue;
unsigned int alpha;
} radeon_color_regs_t;
/* Legacy per-context 3D register state shared with userspace; the inline
 * comments give the MMIO offsets of register groups. Part of the drm ABI -
 * field order and sizes must not change. */
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
unsigned int rb3d_depthoffset;
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
unsigned int rb3d_colorpitch;
unsigned int se_cntl;
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
unsigned int se_line_width; /* 0x1db8 */
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
unsigned int se_vport_zscale;
unsigned int se_vport_zoffset;
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Depth-bias registers; kept in a separate struct from
 * drm_radeon_context_regs_t for backward ABI compatibility (zbias was added
 * in interface version 1.2, see RADEON_UPLOAD_ZBIAS above). */
typedef struct {
/* Zbias state */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit |
*/ |
/* Raw register values for one texture unit (uploaded via the
 * RADEON_EMIT_PP_TXFILTER_* state packets above). ABI-frozen layout. */
typedef struct {
unsigned int pp_txfilter;
unsigned int pp_txformat;
unsigned int pp_txoffset;
unsigned int pp_txcblend;
unsigned int pp_txablend;
unsigned int pp_tfactor;
unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
/* One primitive range within a vertex buffer: [start, finish) plus the
 * primitive type (RADEON_POINTS etc.) and which saved state to apply. */
typedef struct {
unsigned int start;
unsigned int finish;
unsigned int prim:8;
unsigned int stateidx:8;
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
/* Full snapshot of emitted state: context registers, one register block per
 * texture unit, the 1.2 zbias extension, and a dirty bitmask of which groups
 * need re-emitting. */
typedef struct {
drm_radeon_context_regs_t context;
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
drm_radeon_context2_regs_t context2;
unsigned int dirty;
} drm_radeon_state_t;
/* SAREA layout shared between the DRM and all legacy (pre-KMS) clients.
 * ABI-frozen: field order and sizes must never change (see the warning
 * comment following this struct). */
typedef struct {
/* The channel for communication of state information to the
 * kernel on firing a vertex buffer with either of the
 * obsoleted vertex/index ioctls.
 */
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
 */
struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
 */
unsigned int last_frame;
unsigned int last_dispatch;
unsigned int last_clear;
struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (xf86drmRadeon.h)
 *
 * KW: actually it's illegal to change any of this (backwards compatibility).
 */

/* Radeon specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_RADEON_CP_INIT    0x00
#define DRM_RADEON_CP_START   0x01
#define DRM_RADEON_CP_STOP    0x02
#define DRM_RADEON_CP_RESET   0x03
#define DRM_RADEON_CP_IDLE    0x04
#define DRM_RADEON_RESET      0x05
#define DRM_RADEON_FULLSCREEN 0x06
#define DRM_RADEON_SWAP       0x07
#define DRM_RADEON_CLEAR      0x08
#define DRM_RADEON_VERTEX     0x09
#define DRM_RADEON_INDICES    0x0A
/* 0x0B is retired; the macro is intentionally left without a value so it
 * cannot be used, and the number must not be reused (ABI).
 */
#define DRM_RADEON_NOT_USED
#define DRM_RADEON_STIPPLE    0x0C
#define DRM_RADEON_INDIRECT   0x0D
#define DRM_RADEON_TEXTURE    0x0E
#define DRM_RADEON_VERTEX2    0x0F
#define DRM_RADEON_CMDBUF     0x10
#define DRM_RADEON_GETPARAM   0x11
#define DRM_RADEON_FLIP       0x12
#define DRM_RADEON_ALLOC      0x13
#define DRM_RADEON_FREE       0x14
#define DRM_RADEON_INIT_HEAP  0x15
#define DRM_RADEON_IRQ_EMIT   0x16
#define DRM_RADEON_IRQ_WAIT   0x17
#define DRM_RADEON_CP_RESUME  0x18
#define DRM_RADEON_SETPARAM   0x19
#define DRM_RADEON_SURF_ALLOC 0x1a
#define DRM_RADEON_SURF_FREE  0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO		0x1c
#define DRM_RADEON_GEM_CREATE		0x1d
#define DRM_RADEON_GEM_MMAP		0x1e
/* note: 0x1f and 0x20 are gaps in the numbering */
#define DRM_RADEON_GEM_PREAD		0x21
#define DRM_RADEON_GEM_PWRITE		0x22
#define DRM_RADEON_GEM_SET_DOMAIN	0x23
#define DRM_RADEON_GEM_WAIT_IDLE	0x24
/* note: 0x25 is a gap in the numbering */
#define DRM_RADEON_CS			0x26
#define DRM_RADEON_INFO			0x27
#define DRM_RADEON_GEM_SET_TILING	0x28
#define DRM_RADEON_GEM_GET_TILING	0x29
#define DRM_RADEON_GEM_BUSY		0x2a
#define DRM_RADEON_GEM_VA		0x2b
#define DRM_RADEON_GEM_OP		0x2c
/* Fully-encoded ioctl request numbers (_IO/_IOW/_IOWR direction + size
 * encoding on top of the command numbers above).  The direction and the
 * argument type of each entry are ABI and must stay as-is.
 */
#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE    DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE       DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME  DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
#define DRM_IOCTL_RADEON_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
#define DRM_IOCTL_RADEON_GEM_VA		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
#define DRM_IOCTL_RADEON_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
/* Argument for DRM_IOCTL_RADEON_CP_INIT: initializes or tears down the
 * command processor (CP) for the legacy UMS path.  The func enum selects
 * the chip generation / operation; the remaining fields describe the
 * framebuffer layout and the map offsets of the shared regions.
 */
typedef struct drm_radeon_init {
	enum {
		RADEON_INIT_CP = 0x01,
		RADEON_CLEANUP_CP = 0x02,
		RADEON_INIT_R200_CP = 0x03,
		RADEON_INIT_R300_CP = 0x04,
		RADEON_INIT_R600_CP = 0x05
	} func;
	unsigned long sarea_priv_offset;	/* offset of driver-private area within the SAREA */
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	/* Map offsets handed out via the generic DRM map ioctls. */
	unsigned long fb_offset;
	unsigned long mmio_offset;
	unsigned long ring_offset;
	unsigned long ring_rptr_offset;
	unsigned long buffers_offset;
	unsigned long gart_textures_offset;
} drm_radeon_init_t;
/* Argument for DRM_IOCTL_RADEON_CP_STOP: whether to flush pending
 * commands and/or wait for the engine to idle before stopping.
 */
typedef struct drm_radeon_cp_stop {
	int flush;
	int idle;
} drm_radeon_cp_stop_t;

/* Argument for DRM_IOCTL_RADEON_FULLSCREEN (legacy full-screen mode). */
typedef struct drm_radeon_fullscreen {
	enum {
		RADEON_INIT_FULLSCREEN = 0x01,
		RADEON_CLEANUP_FULLSCREEN = 0x02
	} func;
} drm_radeon_fullscreen_t;
/* Indices into drm_radeon_clear_rect_t's 5-element arrays. */
#define CLEAR_X1        0
#define CLEAR_Y1        1
#define CLEAR_X2        2
#define CLEAR_Y2        3
#define CLEAR_DEPTH     4

/* One clear rectangle; the same storage viewed as floats or dwords. */
typedef union drm_radeon_clear_rect {
	float f[5];
	unsigned int ui[5];
} drm_radeon_clear_rect_t;

/* Argument for DRM_IOCTL_RADEON_CLEAR. */
typedef struct drm_radeon_clear {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask;	/* misnamed field: should be stencil */
	drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
/* Argument for the obsolete DRM_IOCTL_RADEON_VERTEX (fire one vertex
 * buffer); superseded by VERTEX2 in interface v1.2.
 */
typedef struct drm_radeon_vertex {
	int prim;
	int idx;		/* Index of vertex buffer */
	int count;		/* Number of vertices in buffer */
	int discard;		/* Client finished with buffer? */
} drm_radeon_vertex_t;

/* Argument for the obsolete DRM_IOCTL_RADEON_INDICES (indexed draw). */
typedef struct drm_radeon_indices {
	int prim;
	int idx;
	int start;
	int end;
	int discard;		/* Client finished with buffer? */
} drm_radeon_indices_t;
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
 *      - allows multiple primitives and state changes in a single ioctl
 *      - supports driver change to emit native primitives
 */
typedef struct drm_radeon_vertex2 {
	int idx;			/* Index of vertex buffer */
	int discard;			/* Client finished with buffer? */
	int nr_states;
	drm_radeon_state_t __user *state;	/* array of nr_states state blocks */
	int nr_prims;
	drm_radeon_prim_t __user *prim;		/* array of nr_prims primitives */
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
 *      - allows arbitrarily large cliprect list
 *      - allows updating of tcl packet, vector and scalar state
 *      - allows memory-efficient description of state updates
 *      - allows state to be emitted without a primitive
 *        (for clears, ctx switches)
 *      - allows more than one dma buffer to be referenced per ioctl
 *      - supports tcl driver
 *      - may be extended in future versions with new cmd types, packets
 */
typedef struct drm_radeon_cmd_buffer {
	int bufsz;				/* size of buf in bytes */
	char __user *buf;			/* packed command stream */
	int nbox;
	struct drm_clip_rect __user *boxes;	/* cliprects to replay the stream against */
} drm_radeon_cmd_buffer_t;

/* One mip level / sub-image of a texture upload (see drm_radeon_texture). */
typedef struct drm_radeon_tex_image {
	unsigned int x, y;		/* Blit coordinates */
	unsigned int width, height;
	const void __user *data;
} drm_radeon_tex_image_t;

/* Argument for DRM_IOCTL_RADEON_TEXTURE: blit a texture image into
 * card memory via the CP.
 */
typedef struct drm_radeon_texture {
	unsigned int offset;
	int pitch;
	int format;
	int width;			/* Texture image coordinates */
	int height;
	drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;

/* Argument for DRM_IOCTL_RADEON_STIPPLE: 32x32 polygon stipple pattern. */
typedef struct drm_radeon_stipple {
	unsigned int __user *mask;
} drm_radeon_stipple_t;

/* Argument for DRM_IOCTL_RADEON_INDIRECT: fire a range of an indirect
 * command buffer.
 */
typedef struct drm_radeon_indirect {
	int idx;
	int start;
	int end;
	int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters (returned by RADEON_PARAM_CARD_TYPE) */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2

/* 1.3: An ioctl to get parameters that aren't available to the 3d
 * client any other way.
 */
#define RADEON_PARAM_GART_BUFFER_OFFSET    1	/* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME            2
#define RADEON_PARAM_LAST_DISPATCH         3
#define RADEON_PARAM_LAST_CLEAR            4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR                5
#define RADEON_PARAM_GART_BASE             6	/* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE       7	/* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE         8
#define RADEON_PARAM_SAREA_HANDLE          9
#define RADEON_PARAM_GART_TEX_HANDLE       10
#define RADEON_PARAM_SCRATCH_OFFSET        11
#define RADEON_PARAM_CARD_TYPE             12
#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID             16
#define RADEON_PARAM_NUM_Z_PIPES           17   /* num Z pipes */

/* Argument for DRM_IOCTL_RADEON_GETPARAM; the result is written through
 * the user pointer in value.
 */
typedef struct drm_radeon_getparam {
	int param;		/* one of RADEON_PARAM_* */
	void __user *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
 */
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB   2

/* Argument for DRM_IOCTL_RADEON_ALLOC. */
typedef struct drm_radeon_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;

/* Argument for DRM_IOCTL_RADEON_FREE. */
typedef struct drm_radeon_mem_free {
	int region;
	int region_offset;
} drm_radeon_mem_free_t;

/* Argument for DRM_IOCTL_RADEON_INIT_HEAP. */
typedef struct drm_radeon_mem_init_heap {
	int region;
	int size;
	int start;
} drm_radeon_mem_init_heap_t;

/* 1.6: Userspace can request & wait on irq's:
 */
typedef struct drm_radeon_irq_emit {
	int __user *irq_seq;	/* emitted sequence number written back here */
} drm_radeon_irq_emit_t;

typedef struct drm_radeon_irq_wait {
	int irq_seq;		/* sequence number to wait for */
} drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
 * the card's address space, via a new generic ioctl to set parameters
 */
typedef struct drm_radeon_setparam {
	unsigned int param;	/* one of RADEON_SETPARAM_* */
	__s64 value;
} drm_radeon_setparam_t;

#define RADEON_SETPARAM_FB_LOCATION    1	/* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING  2	/* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3	/* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4		/* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5	/* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6		/* VBLANK CRTC */

/* 1.14: Clients can allocate/free a surface
 */
typedef struct drm_radeon_surface_alloc {
	unsigned int address;
	unsigned int size;
	unsigned int flags;
} drm_radeon_surface_alloc_t;

typedef struct drm_radeon_surface_free {
	unsigned int address;	/* address previously passed to surface_alloc */
} drm_radeon_surface_free_t;

/* CRTC selection bits for the VBLANK_CRTC get/set parameters. */
#define	DRM_RADEON_VBLANK_CRTC1		1
#define	DRM_RADEON_VBLANK_CRTC2		2
/*
 * Kernel modesetting world below.
 */

/* GEM memory domains a buffer object can live in / migrate between. */
#define RADEON_GEM_DOMAIN_CPU		0x1
#define RADEON_GEM_DOMAIN_GTT		0x2
#define RADEON_GEM_DOMAIN_VRAM		0x4

/* Result of DRM_IOCTL_RADEON_GEM_INFO: memory pool sizes in bytes. */
struct drm_radeon_gem_info {
	uint64_t	gart_size;
	uint64_t	vram_size;
	uint64_t	vram_visible;	/* VRAM directly CPU-accessible */
};

/* Flags for drm_radeon_gem_create.flags. */
#define RADEON_GEM_NO_BACKING_STORE	(1 << 0)
#define RADEON_GEM_GTT_UC		(1 << 1)
#define RADEON_GEM_GTT_WC		(1 << 2)

/* Argument for DRM_IOCTL_RADEON_GEM_CREATE; handle is returned. */
struct drm_radeon_gem_create {
	uint64_t	size;
	uint64_t	alignment;
	uint32_t	handle;
	uint32_t	initial_domain;	/* RADEON_GEM_DOMAIN_* */
	uint32_t	flags;		/* RADEON_GEM_* flags above */
};
/* Tiling flags packed into drm_radeon_gem_{set,get}_tiling.tiling_flags. */
#define RADEON_TILING_MACRO				0x1
#define RADEON_TILING_MICRO				0x2
#define RADEON_TILING_SWAP_16BIT			0x4
#define RADEON_TILING_SWAP_32BIT			0x8
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE				0x10
#define RADEON_TILING_MICRO_SQUARE			0x20
/* Evergreen+ tiling parameters, stored as shifted 4-bit fields. */
#define RADEON_TILING_EG_BANKW_SHIFT			8
#define RADEON_TILING_EG_BANKW_MASK			0xf
#define RADEON_TILING_EG_BANKH_SHIFT			12
#define RADEON_TILING_EG_BANKH_MASK			0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT	16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK		0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT		24
#define RADEON_TILING_EG_TILE_SPLIT_MASK		0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT	28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK	0xf

/* Argument for DRM_IOCTL_RADEON_GEM_SET_TILING. */
struct drm_radeon_gem_set_tiling {
	uint32_t	handle;
	uint32_t	tiling_flags;	/* RADEON_TILING_* */
	uint32_t	pitch;
};

/* Argument for DRM_IOCTL_RADEON_GEM_GET_TILING (fields written back). */
struct drm_radeon_gem_get_tiling {
	uint32_t	handle;
	uint32_t	tiling_flags;
	uint32_t	pitch;
};
/* Argument for DRM_IOCTL_RADEON_GEM_MMAP: returns in addr_ptr the fake
 * offset to pass to mmap(2) for the given object range.
 */
struct drm_radeon_gem_mmap {
	uint32_t	handle;
	uint32_t	pad;		/* explicit padding for 64-bit alignment */
	uint64_t	offset;
	uint64_t	size;
	uint64_t	addr_ptr;	/* out: mmap offset (u64 to stay 32/64-bit clean) */
};

/* Argument for DRM_IOCTL_RADEON_GEM_SET_DOMAIN. */
struct drm_radeon_gem_set_domain {
	uint32_t	handle;
	uint32_t	read_domains;	/* mask of RADEON_GEM_DOMAIN_* */
	uint32_t	write_domain;	/* single RADEON_GEM_DOMAIN_* */
};

/* Argument for DRM_IOCTL_RADEON_GEM_WAIT_IDLE. */
struct drm_radeon_gem_wait_idle {
	uint32_t	handle;
	uint32_t	pad;
};

/* Argument for DRM_IOCTL_RADEON_GEM_BUSY; domain is written back. */
struct drm_radeon_gem_busy {
	uint32_t	handle;
	uint32_t        domain;
};
/* Argument for DRM_IOCTL_RADEON_GEM_PREAD. */
struct drm_radeon_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

/* Argument for DRM_IOCTL_RADEON_GEM_PWRITE. */
struct drm_radeon_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};
/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
	uint32_t	handle; /* buffer */
	uint32_t	op;     /* RADEON_GEM_OP_* */
	uint64_t	value;  /* input or return value */
};

#define RADEON_GEM_OP_GET_INITIAL_DOMAIN	0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN	1

/* drm_radeon_gem_va.operation */
#define RADEON_VA_MAP			1
#define RADEON_VA_UNMAP			2

/* drm_radeon_gem_va.operation result, returned in the same field */
#define RADEON_VA_RESULT_OK		0
#define RADEON_VA_RESULT_ERROR		1
#define RADEON_VA_RESULT_VA_EXIST	2

/* drm_radeon_gem_va.flags: page-table entry properties for the mapping */
#define RADEON_VM_PAGE_VALID		(1 << 0)
#define RADEON_VM_PAGE_READABLE		(1 << 1)
#define RADEON_VM_PAGE_WRITEABLE	(1 << 2)
#define RADEON_VM_PAGE_SYSTEM		(1 << 3)
#define RADEON_VM_PAGE_SNOOPED		(1 << 4)

/* Argument for DRM_IOCTL_RADEON_GEM_VA: map/unmap a buffer object into
 * a GPU virtual-memory context.
 */
struct drm_radeon_gem_va {
	uint32_t		handle;
	uint32_t		operation;	/* in: RADEON_VA_*; out: RADEON_VA_RESULT_* */
	uint32_t		vm_id;
	uint32_t		flags;		/* RADEON_VM_PAGE_* */
	uint64_t		offset;		/* VA to map at */
};
/* Chunk identifiers for the command-submission (CS) ioctl. */
#define RADEON_CHUNK_ID_RELOCS	0x01
#define RADEON_CHUNK_ID_IB	0x02
#define RADEON_CHUNK_ID_FLAGS	0x03
#define RADEON_CHUNK_ID_CONST_IB	0x04

/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#define RADEON_CS_USE_VM            0x02
#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace which CS is the last one */
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX          0
#define RADEON_CS_RING_COMPUTE      1
#define RADEON_CS_RING_DMA          2
#define RADEON_CS_RING_UVD          3
#define RADEON_CS_RING_VCE          4
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
/* 0 = normal, + = higher priority, - = lower priority */

/* Header of one CS chunk; chunk_data is a user pointer (u64 for 32/64 compat). */
struct drm_radeon_cs_chunk {
	uint32_t		chunk_id;	/* RADEON_CHUNK_ID_* */
	uint32_t		length_dw;	/* payload length in dwords */
	uint64_t		chunk_data;
};

/* drm_radeon_cs_reloc.flags */
#define RADEON_RELOC_PRIO_MASK		(0xf << 0)

/* One relocation entry in a RADEON_CHUNK_ID_RELOCS chunk. */
struct drm_radeon_cs_reloc {
	uint32_t		handle;
	uint32_t		read_domains;
	uint32_t		write_domain;
	uint32_t		flags;
};

/* Argument for DRM_IOCTL_RADEON_CS. */
struct drm_radeon_cs {
	uint32_t		num_chunks;
	uint32_t		cs_id;
	/* this points to uint64_t * which point to cs chunks */
	uint64_t		chunks;
	/* updates to the limits after this CS ioctl */
	uint64_t		gart_limit;
	uint64_t		vram_limit;
};
/* Request codes for DRM_IOCTL_RADEON_INFO. */
#define RADEON_INFO_DEVICE_ID		0x00
#define RADEON_INFO_NUM_GB_PIPES	0x01
#define RADEON_INFO_NUM_Z_PIPES 	0x02
#define RADEON_INFO_ACCEL_WORKING	0x03
#define RADEON_INFO_CRTC_FROM_ID	0x04
#define RADEON_INFO_ACCEL_WORKING2	0x05
#define RADEON_INFO_TILING_CONFIG	0x06
#define RADEON_INFO_WANT_HYPERZ		0x07
#define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ	0x09 /* clock crystal frequency */
#define RADEON_INFO_NUM_BACKENDS	0x0a /* DB/backends for r600+ - need for OQ */
#define RADEON_INFO_NUM_TILE_PIPES	0x0b /* tile pipes for r600+ */
#define RADEON_INFO_FUSION_GART_WORKING	0x0c /* fusion writes to GTT were broken before this */
#define RADEON_INFO_BACKEND_MAP		0x0d /* pipe to backend map, needed by mesa */
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START		0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE	0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES		0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP		0x11
/* max shader engines (SE) - needed for geometry shaders, etc. */
#define RADEON_INFO_MAX_SE		0x12
/* max SH per SE */
#define RADEON_INFO_MAX_SH_PER_SE	0x13
/* fast fb access is enabled */
#define RADEON_INFO_FASTFB_WORKING	0x14
/* query if a RADEON_CS_RING_* submission is supported */
#define RADEON_INFO_RING_WORKING	0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY	0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE	0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY	0x18
/* query the number of render backends */
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK	0x19
/* max engine clock - needed for OpenCL */
#define RADEON_INFO_MAX_SCLK		0x1a
/* version of VCE firmware */
#define RADEON_INFO_VCE_FW_VERSION	0x1b
/* version of VCE feedback */
#define RADEON_INFO_VCE_FB_VERSION	0x1c
#define RADEON_INFO_NUM_BYTES_MOVED	0x1d
#define RADEON_INFO_VRAM_USAGE		0x1e
#define RADEON_INFO_GTT_USAGE		0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT	0x20

/* Argument for DRM_IOCTL_RADEON_INFO.  value is a user pointer for some
 * requests and the result slot for others (u64 for 32/64-bit compat).
 */
struct drm_radeon_info {
	uint32_t		request;	/* RADEON_INFO_* */
	uint32_t		pad;
	uint64_t		value;
};
/* Those correspond to the tile index to use, this is to explicitly state
 * the API that is implicitly defined by the tile mode array.
 */
#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED	8
#define SI_TILE_MODE_COLOR_1D			13
#define SI_TILE_MODE_COLOR_1D_SCANOUT		9
#define SI_TILE_MODE_COLOR_2D_8BPP		14
#define SI_TILE_MODE_COLOR_2D_16BPP		15
#define SI_TILE_MODE_COLOR_2D_32BPP		16
#define SI_TILE_MODE_COLOR_2D_64BPP		17
#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP	11
#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP	12
#define SI_TILE_MODE_DEPTH_STENCIL_1D		4
#define SI_TILE_MODE_DEPTH_STENCIL_2D		0
#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA	3
/* note: 2AA and 4AA deliberately share index 3 in the SI tile-mode table */
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA	3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA	2

#define CIK_TILE_MODE_DEPTH_STENCIL_1D		5
#endif |
/drivers/include/linux/uapi/drm/drm.h |
---|
0,0 → 1,866 |
/** |
* \file drm.h |
* Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* |
* \par Acknowledgments: |
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_H_
#define _DRM_H_

#if defined(__KERNEL__) || defined(__linux__)

#include <linux/types.h>
/* NOTE(port): <asm/ioctl.h> is intentionally not included in this tree. */
//#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

/* Provide the Linux fixed-width type names on BSD. */
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t   __s8;
typedef uint8_t  __u8;
typedef int16_t  __s16;
typedef uint16_t __u16;
typedef int32_t  __s32;
typedef uint32_t __u32;
typedef int64_t  __s64;
typedef uint64_t __u64;
typedef unsigned long drm_handle_t;

#endif

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

/* Bit layout of the hardware-lock word (see struct drm_hw_lock). */
#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;	/**< Context handle */
typedef unsigned int drm_drawable_t;	/**< Drawable handle */
typedef unsigned int drm_magic_t;	/**< Authentication magic */
/**
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/**
 * Texture region.  Entry in the client-managed texture heap lists
 * (e.g. tex_list in driver SAREAs).
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};
/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * The *_len fields are in/out: callers pass buffer capacities and the
 * kernel writes back the actual lengths.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	size_t name_len;	  /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
	size_t date_len;	  /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
	size_t desc_len;	  /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
};
/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	size_t unique_len;	  /**< Length of unique */
	char __user *unique;	  /**< Unique name for driver instantiation */
};

/* DRM_IOCTL_LIST argument: user-space array of version structs. */
struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

/* Historical placeholder (DRM_IOCTL_BLOCK/UNBLOCK); carries no data. */
struct drm_block {
	int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};
/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

/* Associates a context with a driver-private mapping. */
struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};
/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/*   Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};
/* Kind of counter reported in a drm_stats data slot. */
enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */
	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
				/* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;	/* number of valid entries in data[] */
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};
/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
				     /* These *HALT* flags aren't supported yet
					-- they will be used to support the
					full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;			/* locking context handle */
	enum drm_lock_flags flags;
};
/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};
/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc __user *list;
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;	/* indices of buffers to free */
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
	void __user *virtual;		/**< Mmap'd area in user-virtual */
	struct drm_buf_pub __user *list;	/**< Buffer information */
};
/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	  /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};

/* Context creation flags (see struct drm_ctx). */
enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};
/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;	/* drm_drawable_info_type_t value */
	unsigned int num;	/* number of elements pointed to by data */
	unsigned long long data;	/* user pointer, kept 64-bit for 32/64 compat */
};
/** |
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. |
*/ |
struct drm_auth { |
drm_magic_t magic; |
}; |
/** |
* DRM_IOCTL_IRQ_BUSID ioctl argument type. |
* |
* \sa drmGetInterruptFromBusID(). |
*/ |
struct drm_irq_busid { |
int irq; /**< IRQ number */ |
int busnum; /**< bus number */ |
int devnum; /**< device number */ |
int funcnum; /**< function number */ |
}; |
enum drm_vblank_seq_type { |
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
/* bits 1-6 are reserved for high crtcs */ |
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, |
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
}; |
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 |
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
struct drm_wait_vblank_request { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
unsigned long signal; |
}; |
struct drm_wait_vblank_reply { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
long tval_sec; |
long tval_usec; |
}; |
/** |
* DRM_IOCTL_WAIT_VBLANK ioctl argument type. |
* |
* \sa drmWaitVBlank(). |
*/ |
union drm_wait_vblank { |
struct drm_wait_vblank_request request; |
struct drm_wait_vblank_reply reply; |
}; |
#define _DRM_PRE_MODESET 1 |
#define _DRM_POST_MODESET 2 |
/** |
* DRM_IOCTL_MODESET_CTL ioctl argument type |
* |
* \sa drmModesetCtl(). |
*/ |
struct drm_modeset_ctl { |
__u32 crtc; |
__u32 cmd; |
}; |
/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;	/* DRM driver-independent interface version */
	int drm_di_minor;
	int drm_dd_major;	/* DRM driver-dependent interface version */
	int drm_dd_minor;
};

/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;	/* structure padding, keep zero */
};

/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};

/* Capability bits queried via DRM_IOCTL_GET_CAP (struct drm_get_cap). */
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/*
 * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
 * combination for the hardware cursor. The intention is that a hardware
 * agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, eg. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9

/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;	/* in: one of the DRM_CAP_* values above */
	__u64 value;		/* out: capability value reported by the driver */
};

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * if set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo.
 */
#define DRM_CLIENT_CAP_STEREO_3D 1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2

/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;	/* one of the DRM_CLIENT_CAP_* values above */
	__u64 value;
};

#define DRM_CLOEXEC O_CLOEXEC

/* DRM_IOCTL_PRIME_HANDLE_TO_FD / DRM_IOCTL_PRIME_FD_TO_HANDLE argument type */
struct drm_prime_handle {
	__u32 handle;

	/** Flags.. only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};
#include <drm/drm_mode.h>

/* ioctl number construction helpers for the DRM character device. */
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)

/* Generic DRM ioctls: 0x00-0x3f. */
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)

/* KMS (mode setting) ioctls: 0xA0 and up. */
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0

/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;	/* DRM_EVENT_* value, or chipset-specific type */
	__u32 length;	/* total event size in bytes, header included */
};

#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02

/* Event payload for DRM_EVENT_VBLANK and DRM_EVENT_FLIP_COMPLETE. */
struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;	/* value passed in by the triggering ioctl */
	__u32 tv_sec;		/* event timestamp, seconds part */
	__u32 tv_usec;		/* event timestamp, microseconds part */
	__u32 sequence;		/* vblank sequence number */
	__u32 reserved;
};
/* typedef area */
/*
 * Legacy *_t typedef aliases kept for old userspace; the kernel itself
 * uses the struct/enum tags directly, so these are compiled out under
 * __KERNEL__.
 */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#endif
/drivers/include/linux/uapi/drm/drm_fourcc.h |
---|
0,0 → 1,135 |
/* |
* Copyright 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_FOURCC_H |
#define DRM_FOURCC_H |
#include <linux/types.h> |
/* Pack four printable characters into a little-endian 32-bit fourcc. */
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
				 ((__u32)(c) << 16) | ((__u32)(d) << 24))

/*
 * Format is big endian instead of little endian.
 *
 * Use an unsigned constant: (1<<31) shifts into the sign bit of a
 * signed int, which is undefined behavior in C (C11 6.5.7); 1U<<31
 * yields the same bit pattern 0x80000000 with well-defined unsigned
 * semantics, matching the later upstream kernel fix.
 */
#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */

/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */

/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */

/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */

/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */

/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */

/*
 * 2 plane YCbCr
 * index 0 = Y plane, [7:0] Y
 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
 * or
 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
 */
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */

/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */

/*
 * 3 plane YCbCr
 * index 0: Y plane, [7:0] Y
 * index 1: Cb plane, [7:0] Cb
 * index 2: Cr plane, [7:0] Cr
 * or
 * index 1: Cr plane, [7:0] Cr
 * index 2: Cb plane, [7:0] Cb
 */
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
#endif /* DRM_FOURCC_H */
/drivers/include/linux/uapi/drm/drm_mode.h |
---|
0,0 → 1,520 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
#include <linux/types.h> |
/* Fixed-size string lengths used by the KMS structures below. */
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32

/* Mode type bits for drm_mode_modeinfo.type. */
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)

/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
 * Stereo 3D layout flags, a 5-bit enum in bits 14-18 of the mode flags.
 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
 * (define not exposed to user space).
 */
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
#define DRM_MODE_FLAG_3D_NONE (0<<14)
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)

/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3

/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */

/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2

/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2

/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
/* A single display mode: pixel clock, timings, and identifying name. */
struct drm_mode_modeinfo {
	__u32 clock;	/* pixel clock in kHz */
	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
	__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;

	__u32 vrefresh;	/* vertical refresh rate */

	__u32 flags;	/* DRM_MODE_FLAG_* */
	__u32 type;	/* DRM_MODE_TYPE_* */
	char name[DRM_DISPLAY_MODE_LEN];
};

/* DRM_IOCTL_MODE_GETRESOURCES argument: id arrays and counts. */
struct drm_mode_card_res {
	__u64 fb_id_ptr;
	__u64 crtc_id_ptr;
	__u64 connector_id_ptr;
	__u64 encoder_id_ptr;
	__u32 count_fbs;
	__u32 count_crtcs;
	__u32 count_connectors;
	__u32 count_encoders;
	__u32 min_width, max_width;
	__u32 min_height, max_height;
};

/* DRM_IOCTL_MODE_GETCRTC / DRM_IOCTL_MODE_SETCRTC argument type. */
struct drm_mode_crtc {
	__u64 set_connectors_ptr;
	__u32 count_connectors;

	__u32 crtc_id; /**< Id */
	__u32 fb_id; /**< Id of framebuffer */

	__u32 x, y; /**< Position on the framebuffer */

	__u32 gamma_size;
	__u32 mode_valid;
	struct drm_mode_modeinfo mode;
};

#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)

/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
	__u32 plane_id;
	__u32 crtc_id;
	__u32 fb_id; /* fb object contains surface format type */
	__u32 flags; /* see above flags */

	/* Signed dest location allows it to be partially off screen */
	__s32 crtc_x, crtc_y;
	__u32 crtc_w, crtc_h;

	/* Source values are 16.16 fixed point */
	__u32 src_x, src_y;
	__u32 src_h, src_w;
};

/* DRM_IOCTL_MODE_GETPLANE argument type. */
struct drm_mode_get_plane {
	__u32 plane_id;

	__u32 crtc_id;
	__u32 fb_id;

	__u32 possible_crtcs;
	__u32 gamma_size;

	__u32 count_format_types;
	__u64 format_type_ptr;	/* user array receiving the supported fourcc codes */
};

/* DRM_IOCTL_MODE_GETPLANERESOURCES argument type. */
struct drm_mode_get_plane_res {
	__u64 plane_id_ptr;	/* user array receiving the plane ids */
	__u32 count_planes;
};
/* Encoder types for drm_mode_get_encoder.encoder_type. */
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7

/* DRM_IOCTL_MODE_GETENCODER argument type. */
struct drm_mode_get_encoder {
	__u32 encoder_id;
	__u32 encoder_type;	/* DRM_MODE_ENCODER_* */

	__u32 crtc_id; /**< Id of crtc */

	__u32 possible_crtcs;
	__u32 possible_clones;
};

/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9

/* Connector types for drm_mode_get_connector.connector_type. */
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16

/* DRM_IOCTL_MODE_GETCONNECTOR argument type. */
struct drm_mode_get_connector {

	__u64 encoders_ptr;
	__u64 modes_ptr;
	__u64 props_ptr;
	__u64 prop_values_ptr;

	__u32 count_modes;
	__u32 count_props;
	__u32 count_encoders;

	__u32 encoder_id; /**< Current Encoder */
	__u32 connector_id; /**< Id */
	__u32 connector_type;
	__u32 connector_type_id;

	__u32 connection;
	__u32 mm_width, mm_height; /**< width and height in millimeters */
	__u32 subpixel;

	__u32 pad;
};
#define DRM_MODE_PROP_PENDING (1<<0) |
#define DRM_MODE_PROP_RANGE (1<<1) |
#define DRM_MODE_PROP_IMMUTABLE (1<<2) |
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ |
#define DRM_MODE_PROP_BLOB (1<<4) |
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ |
/* non-extended types: legacy bitmask, one bit per type: */ |
#define DRM_MODE_PROP_LEGACY_TYPE ( \ |
DRM_MODE_PROP_RANGE | \ |
DRM_MODE_PROP_ENUM | \ |
DRM_MODE_PROP_BLOB | \ |
DRM_MODE_PROP_BITMASK) |
/* extended-types: rather than continue to consume a bit per type, |
* grab a chunk of the bits to use as integer type id. |
*/ |
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 |
#define DRM_MODE_PROP_TYPE(n) ((n) << 6) |
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) |
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) |
/* One (value, name) pair of an enumerated or bitmask property. */ |
struct drm_mode_property_enum { |
__u64 value; |
char name[DRM_PROP_NAME_LEN]; |
}; |
/* Reply for the GETPROPERTY ioctl; values_ptr and enum_blob_ptr point |
* to userspace arrays sized by count_values and count_enum_blobs. |
*/ |
struct drm_mode_get_property { |
__u64 values_ptr; /* values and blob lengths */ |
__u64 enum_blob_ptr; /* enum and blob id ptrs */ |
__u32 prop_id; |
__u32 flags; /* DRM_MODE_PROP_* */ |
char name[DRM_PROP_NAME_LEN]; |
__u32 count_values; |
__u32 count_enum_blobs; |
}; |
/* Set one property on a connector; cf. drm_mode_obj_set_property for |
* the object-generic form. |
*/ |
struct drm_mode_connector_set_property { |
__u64 value; |
__u32 prop_id; |
__u32 connector_id; |
}; |
/* Query the properties attached to an arbitrary mode object; |
* props_ptr and prop_values_ptr are parallel userspace arrays of |
* count_props entries each. |
*/ |
struct drm_mode_obj_get_properties { |
__u64 props_ptr; |
__u64 prop_values_ptr; |
__u32 count_props; |
__u32 obj_id; |
__u32 obj_type; |
}; |
/* Set one property on an arbitrary mode object. */ |
struct drm_mode_obj_set_property { |
__u64 value; |
__u32 prop_id; |
__u32 obj_id; |
__u32 obj_type; |
}; |
/* Fetch the contents of a property blob: userspace names blob_id and |
* supplies a buffer of `length` bytes at `data`. |
*/ |
struct drm_mode_get_blob { |
__u32 blob_id; |
__u32 length; |
__u64 data; |
}; |
/* Legacy ADDFB request: a single-plane framebuffer described by |
* depth/bpp rather than a fourcc format (see drm_mode_fb_cmd2). |
*/ |
struct drm_mode_fb_cmd { |
__u32 fb_id; |
__u32 width, height; |
__u32 pitch; |
__u32 bpp; |
__u32 depth; |
/* driver specific handle */ |
__u32 handle; |
}; |
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ |
/* ADDFB2 request: a framebuffer described by a fourcc pixel format, |
* with up to four planes. |
*/ |
struct drm_mode_fb_cmd2 { |
__u32 fb_id; |
__u32 width, height; |
__u32 pixel_format; /* fourcc code from drm_fourcc.h */ |
__u32 flags; /* see above flags */ |
/* |
* In case of planar formats, this ioctl allows up to 4 |
* buffer objects with offsets and pitches per plane. |
* The pitch and offset order is dictated by the fourcc, |
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as: |
* |
* YUV 4:2:0 image with a plane of 8 bit Y samples |
* followed by an interleaved U/V plane containing |
* 8 bit 2x2 subsampled colour difference samples. |
* |
* So it would consist of Y as offset[0] and UV as |
* offset[1]. Note that offset[0] will generally |
* be 0. |
*/ |
__u32 handles[4]; |
__u32 pitches[4]; /* pitch for each plane */ |
__u32 offsets[4]; /* offset of each plane */ |
}; |
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 |
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 |
#define DRM_MODE_FB_DIRTY_FLAGS 0x03 |
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 |
/* |
* Mark a region of a framebuffer as dirty. |
* |
* Some hardware does not automatically update display contents |
* as a hardware or software draw to a framebuffer. This ioctl |
* allows userspace to tell the kernel and the hardware what |
* regions of the framebuffer have changed. |
* |
* The kernel or hardware is free to update more than just the |
* region specified by the clip rects. The kernel or hardware |
* may also delay and/or coalesce several calls to dirty into a |
* single update. |
* |
* Userspace may annotate the updates, the annotates are a |
* promise made by the caller that the change is either a copy |
* of pixels or a fill of a single color in the region specified. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then |
* the number of updated regions are half of num_clips given, |
* where the clip rects are paired in src and dst. The width and |
* height of each one of the pairs must match. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller |
* promises that the region specified of the clip rects is filled |
* completely with a single color as given in the color argument. |
*/ |
/* Argument for the DIRTYFB ioctl; full semantics documented above. */ |
struct drm_mode_fb_dirty_cmd { |
__u32 fb_id; |
__u32 flags; /* DRM_MODE_FB_DIRTY_ANNOTATE_* */ |
__u32 color; /* fill color when ANNOTATE_FILL is given */ |
__u32 num_clips; /* bounded by DRM_MODE_FB_DIRTY_MAX_CLIPS */ |
__u64 clips_ptr; /* userspace array of clip rects */ |
}; |
/* A (connector, mode) pair passed to the mode attach/detach ioctls. */ |
struct drm_mode_mode_cmd { |
__u32 connector_id; |
struct drm_mode_modeinfo mode; |
}; |
#define DRM_MODE_CURSOR_BO 0x01 |
#define DRM_MODE_CURSOR_MOVE 0x02 |
#define DRM_MODE_CURSOR_FLAGS 0x03 |
/* |
* depending on the value in flags different members are used. |
* |
* CURSOR_BO uses |
* crtc_id |
* width |
* height |
* handle - if 0 turns the cursor off |
* |
* CURSOR_MOVE uses |
* crtc_id |
* x |
* y |
*/ |
/* Legacy cursor ioctl argument; the flag description above says which |
* fields each DRM_MODE_CURSOR_* operation uses. |
*/ |
struct drm_mode_cursor { |
__u32 flags; |
__u32 crtc_id; |
__s32 x; |
__s32 y; |
__u32 width; |
__u32 height; |
/* driver specific handle */ |
__u32 handle; |
}; |
/* As drm_mode_cursor, extended with the cursor hotspot coordinates. */ |
struct drm_mode_cursor2 { |
__u32 flags; |
__u32 crtc_id; |
__s32 x; |
__s32 y; |
__u32 width; |
__u32 height; |
/* driver specific handle */ |
__u32 handle; |
__s32 hot_x; |
__s32 hot_y; |
}; |
/* Get/set a CRTC's gamma lookup table; red/green/blue each point to a |
* userspace array of gamma_size entries. |
*/ |
struct drm_mode_crtc_lut { |
__u32 crtc_id; |
__u32 gamma_size; |
/* pointers to arrays */ |
__u64 red; |
__u64 green; |
__u64 blue; |
}; |
#define DRM_MODE_PAGE_FLIP_EVENT 0x01 |
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 |
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) |
/* |
* Request a page flip on the specified crtc. |
* |
* This ioctl will ask KMS to schedule a page flip for the specified |
* crtc. Once any pending rendering targeting the specified fb (as of |
* ioctl time) has completed, the crtc will be reprogrammed to display |
* that fb after the next vertical refresh. The ioctl returns |
* immediately, but subsequent rendering to the current fb will block |
* in the execbuffer ioctl until the page flip happens. If a page |
* flip is already pending as the ioctl is called, EBUSY will be |
* returned. |
* |
* Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank |
* event (see drm.h: struct drm_event_vblank) when the page flip is |
* done. The user_data field passed in with this ioctl will be |
* returned as the user_data field in the vblank event struct. |
* |
* Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen |
* 'as soon as possible', meaning that it does not delay waiting for vblank. |
* This may cause tearing on the screen. |
* |
* The reserved field must be zero until we figure out something |
* clever to use it for. |
*/ |
/* Argument for the PAGE_FLIP ioctl; full semantics documented above. */ |
struct drm_mode_crtc_page_flip { |
__u32 crtc_id; |
__u32 fb_id; |
__u32 flags; /* DRM_MODE_PAGE_FLIP_* */ |
__u32 reserved; /* must be zero (see comment above) */ |
__u64 user_data; /* returned in the vblank event */ |
}; |
/* create a dumb scanout buffer */ |
/* Uses the kernel uAPI __u32/__u64 types, consistent with every other |
* struct in this header (<stdint.h> names are not the uAPI convention). |
* Layout is unchanged. |
*/ |
struct drm_mode_create_dumb { |
__u32 height; |
__u32 width; |
__u32 bpp; /* bits per pixel */ |
__u32 flags; |
/* handle, pitch, size will be returned */ |
__u32 handle; |
__u32 pitch; |
__u64 size; |
}; |
/* set up for mmap of a dumb scanout buffer */ |
struct drm_mode_map_dumb { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
/** |
* Fake offset to use for subsequent mmap call |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 offset; |
}; |
/* Destroy a dumb buffer by its handle. __u32 matches the uAPI type |
* convention used by the rest of this header; layout is unchanged. |
*/ |
struct drm_mode_destroy_dumb { |
__u32 handle; |
}; |
#endif |
/drivers/include/linux/uapi/drm/i915_drm.h |
---|
0,0 → 1,1099 |
/* |
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#ifndef _UAPI_I915_DRM_H_ |
#define _UAPI_I915_DRM_H_ |
#include <drm/drm.h> |
/* Please note that modifications to all structs defined here are |
* subject to backwards-compatibility constraints. |
*/ |
/** |
* DOC: uevents generated by i915 on its device node |
* |
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch |
* event from the gpu l3 cache. Additional information supplied is ROW, |
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep |
* track of these events and if a specific cache-line seems to have a |
* persistent error remap it with the l3 remapping tool supplied in |
* intel-gpu-tools. The value supplied with the event is always 1. |
* |
* I915_ERROR_UEVENT - Generated upon error detection, currently only via |
* hangcheck. The error detection event is a good indicator of when things |
* began to go badly. The value supplied with the event is a 1 upon error |
* detection, and a 0 upon reset completion, signifying no more error |
* exists. NOTE: Disabling hangcheck or reset via module parameter will |
* cause the related events to not be seen. |
* |
* I915_RESET_UEVENT - Event is generated just before an attempt to reset |
* the GPU. The value supplied with the event is always 1. NOTE: Disabling |
* reset via module parameter will cause this event to not be seen. |
*/ |
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" |
#define I915_ERROR_UEVENT "ERROR" |
#define I915_RESET_UEVENT "RESET" |
/* Each region is a minimum of 16k, and there are at most 255 of them. |
*/ |
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use |
* of chars for next/prev indices */ |
#define I915_LOG_MIN_TEX_REGION_SIZE 14 |
/* Legacy (pre-GEM) DMA init ioctl argument: func selects the |
* init/cleanup/resume operation, the remaining fields describe the |
* ring and the front/back/depth buffer layout. |
*/ |
typedef struct _drm_i915_init { |
enum { |
I915_INIT_DMA = 0x01, |
I915_CLEANUP_DMA = 0x02, |
I915_RESUME_DMA = 0x03 |
} func; |
unsigned int mmio_offset; |
int sarea_priv_offset; |
unsigned int ring_start; |
unsigned int ring_end; |
unsigned int ring_size; |
unsigned int front_offset; |
unsigned int back_offset; |
unsigned int depth_offset; |
unsigned int w; |
unsigned int h; |
unsigned int pitch; |
unsigned int pitch_bits; |
unsigned int back_pitch; |
unsigned int depth_pitch; |
unsigned int cpp; |
unsigned int chipset; |
} drm_i915_init_t; |
/* Legacy SAREA layout shared between the kernel and old DDX/DRI |
* clients; field meanings per the inline comments below. |
*/ |
typedef struct _drm_i915_sarea { |
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; |
int last_upload; /* last time texture was uploaded */ |
int last_enqueue; /* last time a buffer was enqueued */ |
int last_dispatch; /* age of the most recently dispatched buffer */ |
int ctxOwner; /* last context to upload state */ |
int texAge; |
int pf_enabled; /* is pageflipping allowed? */ |
int pf_active; |
int pf_current_page; /* which buffer is being displayed? */ |
int perf_boxes; /* performance boxes to be displayed */ |
int width, height; /* screen size in pixels */ |
drm_handle_t front_handle; |
int front_offset; |
int front_size; |
drm_handle_t back_handle; |
int back_offset; |
int back_size; |
drm_handle_t depth_handle; |
int depth_offset; |
int depth_size; |
drm_handle_t tex_handle; |
int tex_offset; |
int tex_size; |
int log_tex_granularity; |
int pitch; |
int rotation; /* 0, 90, 180 or 270 */ |
int rotated_offset; |
int rotated_size; |
int rotated_pitch; |
int virtualX, virtualY; |
unsigned int front_tiled; |
unsigned int back_tiled; |
unsigned int depth_tiled; |
unsigned int rotated_tiled; |
unsigned int rotated2_tiled; |
int pipeA_x; |
int pipeA_y; |
int pipeA_w; |
int pipeA_h; |
int pipeB_x; |
int pipeB_y; |
int pipeB_w; |
int pipeB_h; |
/* fill out some space for old userspace triple buffer */ |
drm_handle_t unused_handle; |
__u32 unused1, unused2, unused3; |
/* buffer object handles for static buffers. May change |
* over the lifetime of the client. |
*/ |
__u32 front_bo_handle; |
__u32 back_bo_handle; |
__u32 unused_bo_handle; |
__u32 depth_bo_handle; |
} drm_i915_sarea_t; |
/* due to userspace building against these headers we need some compat here */ |
#define planeA_x pipeA_x |
#define planeA_y pipeA_y |
#define planeA_w pipeA_w |
#define planeA_h pipeA_h |
#define planeB_x pipeB_x |
#define planeB_y pipeB_y |
#define planeB_w pipeB_w |
#define planeB_h pipeB_h |
/* Flags for perf_boxes |
*/ |
#define I915_BOX_RING_EMPTY 0x1 |
#define I915_BOX_FLIP 0x2 |
#define I915_BOX_WAIT 0x4 |
#define I915_BOX_TEXTURE_LOAD 0x8 |
#define I915_BOX_LOST_CONTEXT 0x10 |
/* I915 specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_I915_INIT 0x00 |
#define DRM_I915_FLUSH 0x01 |
#define DRM_I915_FLIP 0x02 |
#define DRM_I915_BATCHBUFFER 0x03 |
#define DRM_I915_IRQ_EMIT 0x04 |
#define DRM_I915_IRQ_WAIT 0x05 |
#define DRM_I915_GETPARAM 0x06 |
#define DRM_I915_SETPARAM 0x07 |
#define DRM_I915_ALLOC 0x08 |
#define DRM_I915_FREE 0x09 |
#define DRM_I915_INIT_HEAP 0x0a |
#define DRM_I915_CMDBUFFER 0x0b |
#define DRM_I915_DESTROY_HEAP 0x0c |
#define DRM_I915_SET_VBLANK_PIPE 0x0d |
#define DRM_I915_GET_VBLANK_PIPE 0x0e |
#define DRM_I915_VBLANK_SWAP 0x0f |
#define DRM_I915_HWS_ADDR 0x11 |
#define DRM_I915_GEM_INIT 0x13 |
#define DRM_I915_GEM_EXECBUFFER 0x14 |
#define DRM_I915_GEM_PIN 0x15 |
#define DRM_I915_GEM_UNPIN 0x16 |
#define DRM_I915_GEM_BUSY 0x17 |
#define DRM_I915_GEM_THROTTLE 0x18 |
#define DRM_I915_GEM_ENTERVT 0x19 |
#define DRM_I915_GEM_LEAVEVT 0x1a |
#define DRM_I915_GEM_CREATE 0x1b |
#define DRM_I915_GEM_PREAD 0x1c |
#define DRM_I915_GEM_PWRITE 0x1d |
#define DRM_I915_GEM_MMAP 0x1e |
#define DRM_I915_GEM_SET_DOMAIN 0x1f |
#define DRM_I915_GEM_SW_FINISH 0x20 |
#define DRM_I915_GEM_SET_TILING 0x21 |
#define DRM_I915_GEM_GET_TILING 0x22 |
#define DRM_I915_GEM_GET_APERTURE 0x23 |
#define DRM_I915_GEM_MMAP_GTT 0x24 |
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 |
#define DRM_I915_GEM_MADVISE 0x26 |
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 |
#define DRM_I915_OVERLAY_ATTRS 0x28 |
#define DRM_I915_GEM_EXECBUFFER2 0x29 |
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a |
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b |
#define DRM_I915_GEM_WAIT 0x2c |
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d |
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e |
#define DRM_I915_GEM_SET_CACHING 0x2f |
#define DRM_I915_GEM_GET_CACHING 0x30 |
#define DRM_I915_REG_READ 0x31 |
#define DRM_I915_GET_RESET_STATS 0x32 |
#define DRM_I915_GEM_USERPTR 0x33 |
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) |
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) |
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) |
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) |
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) |
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) |
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) |
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) |
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) |
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) |
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) |
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) |
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) |
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) |
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) |
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) |
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) |
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) |
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) |
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) |
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) |
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) |
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) |
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) |
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) |
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) |
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) |
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) |
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) |
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) |
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) |
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) |
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) |
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) |
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) |
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) |
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) |
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) |
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) |
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) |
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) |
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) |
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) |
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) |
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) |
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) |
/* Allow drivers to submit batchbuffers directly to hardware, relying |
* on the security mechanisms provided by hardware. |
*/ |
typedef struct drm_i915_batchbuffer { |
int start; /* agp offset */ |
int used; /* nr bytes in use */ |
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ |
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ |
int num_cliprects; /* multipass with multiple cliprects? */ |
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ |
} drm_i915_batchbuffer_t; |
/* As above, but pass a pointer to userspace buffer which can be |
* validated by the kernel prior to sending to hardware. |
*/ |
typedef struct _drm_i915_cmdbuffer { |
char __user *buf; /* pointer to userspace command buffer */ |
int sz; /* nr bytes in buf */ |
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ |
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ |
int num_cliprects; /* multipass with multiple cliprects? */ |
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ |
} drm_i915_cmdbuffer_t; |
/* Userspace can request & wait on irq's: |
*/ |
typedef struct drm_i915_irq_emit { |
int __user *irq_seq; /* out param — presumably the emitted sequence number; verify in driver */ |
} drm_i915_irq_emit_t; |
/* Wait for the given irq sequence number. */ |
typedef struct drm_i915_irq_wait { |
int irq_seq; |
} drm_i915_irq_wait_t; |
/* Ioctl to query kernel params: |
*/ |
#define I915_PARAM_IRQ_ACTIVE 1 |
#define I915_PARAM_ALLOW_BATCHBUFFER 2 |
#define I915_PARAM_LAST_DISPATCH 3 |
#define I915_PARAM_CHIPSET_ID 4 |
#define I915_PARAM_HAS_GEM 5 |
#define I915_PARAM_NUM_FENCES_AVAIL 6 |
#define I915_PARAM_HAS_OVERLAY 7 |
#define I915_PARAM_HAS_PAGEFLIPPING 8 |
#define I915_PARAM_HAS_EXECBUF2 9 |
#define I915_PARAM_HAS_BSD 10 |
#define I915_PARAM_HAS_BLT 11 |
#define I915_PARAM_HAS_RELAXED_FENCING 12 |
#define I915_PARAM_HAS_COHERENT_RINGS 13 |
#define I915_PARAM_HAS_EXEC_CONSTANTS 14 |
#define I915_PARAM_HAS_RELAXED_DELTA 15 |
#define I915_PARAM_HAS_GEN7_SOL_RESET 16 |
#define I915_PARAM_HAS_LLC 17 |
#define I915_PARAM_HAS_ALIASING_PPGTT 18 |
#define I915_PARAM_HAS_WAIT_TIMEOUT 19 |
#define I915_PARAM_HAS_SEMAPHORES 20 |
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 |
#define I915_PARAM_HAS_VEBOX 22 |
#define I915_PARAM_HAS_SECURE_BATCHES 23 |
#define I915_PARAM_HAS_PINNED_BATCHES 24 |
#define I915_PARAM_HAS_EXEC_NO_RELOC 25 |
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 |
#define I915_PARAM_HAS_WT 27 |
#define I915_PARAM_CMD_PARSER_VERSION 28 |
typedef struct drm_i915_getparam { |
int param; /* one of the I915_PARAM_* values above */ |
int __user *value; /* out: the parameter's value */ |
} drm_i915_getparam_t; |
/* Ioctl to set kernel params: |
*/ |
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 |
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 |
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 |
#define I915_SETPARAM_NUM_USED_FENCES 4 |
typedef struct drm_i915_setparam { |
int param; /* one of the I915_SETPARAM_* values above */ |
int value; |
} drm_i915_setparam_t; |
/* A memory manager for regions of shared memory: |
*/ |
#define I915_MEM_REGION_AGP 1 |
/* Allocate from the legacy shared-memory region manager. */ |
typedef struct drm_i915_mem_alloc { |
int region; /* I915_MEM_REGION_* */ |
int alignment; |
int size; |
int __user *region_offset; /* offset from start of fb or agp */ |
} drm_i915_mem_alloc_t; |
/* Free a prior allocation back to the region. */ |
typedef struct drm_i915_mem_free { |
int region; |
int region_offset; |
} drm_i915_mem_free_t; |
/* Initialize a region's heap: [start, start + size). */ |
typedef struct drm_i915_mem_init_heap { |
int region; |
int size; |
int start; |
} drm_i915_mem_init_heap_t; |
/* Allow memory manager to be torn down and re-initialized (eg on |
* rotate): |
*/ |
typedef struct drm_i915_mem_destroy_heap { |
int region; /* I915_MEM_REGION_* */ |
} drm_i915_mem_destroy_heap_t; |
/* Allow X server to configure which pipes to monitor for vblank signals |
*/ |
#define DRM_I915_VBLANK_PIPE_A 1 |
#define DRM_I915_VBLANK_PIPE_B 2 |
typedef struct drm_i915_vblank_pipe { |
int pipe; /* mask of DRM_I915_VBLANK_PIPE_A/_B */ |
} drm_i915_vblank_pipe_t; |
/* Schedule buffer swap at given vertical blank: |
*/ |
typedef struct drm_i915_vblank_swap { |
drm_drawable_t drawable; |
enum drm_vblank_seq_type seqtype; |
unsigned int sequence; /* vblank sequence at which to swap */ |
} drm_i915_vblank_swap_t; |
/* Tell the kernel the address of the hardware status (HWS) page. */ |
typedef struct drm_i915_hws_addr { |
__u64 addr; |
} drm_i915_hws_addr_t; |
/* GEM_INIT ioctl: the GTT range handed to the DRM memory manager. */ |
struct drm_i915_gem_init { |
/** |
* Beginning offset in the GTT to be managed by the DRM memory |
* manager. |
*/ |
__u64 gtt_start; |
/** |
* Ending offset in the GTT to be managed by the DRM memory |
* manager. |
*/ |
__u64 gtt_end; |
}; |
/* GEM_CREATE ioctl: allocate a GEM object and return its handle. */ |
struct drm_i915_gem_create { |
/** |
* Requested size for the object. |
* |
* The (page-aligned) allocated size for the object will be returned. |
*/ |
__u64 size; |
/** |
* Returned handle for the object. |
* |
* Object handles are nonzero. |
*/ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
}; |
/* GEM_PREAD ioctl: copy object contents out to a user buffer. */ |
struct drm_i915_gem_pread { |
/** Handle for the object being read. */ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
/** Offset into the object to read from */ |
__u64 offset; |
/** Length of data to read */ |
__u64 size; |
/** |
* Pointer to write the data into. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 data_ptr; |
}; |
/* GEM_PWRITE ioctl: copy user data into an object. */ |
struct drm_i915_gem_pwrite { |
/** Handle for the object being written to. */ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
/** Offset into the object to write to */ |
__u64 offset; |
/** Length of data to write */ |
__u64 size; |
/** |
* Pointer to read the data from. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 data_ptr; |
}; |
/* GEM_MMAP ioctl: map an object into the CPU address space. */ |
struct drm_i915_gem_mmap { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
/** Offset in the object to map. */ |
__u64 offset; |
/** |
* Length of data to map. |
* |
* The value will be page-aligned. |
*/ |
__u64 size; |
/** |
* Returned pointer the data was mapped at. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 addr_ptr; |
}; |
/* GEM_MMAP_GTT ioctl: obtain a fake mmap offset for mapping the |
* object through the GTT aperture. |
*/ |
struct drm_i915_gem_mmap_gtt { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; /* explicit padding */ |
/** |
* Fake offset to use for subsequent mmap call |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 offset; |
}; |
/* GEM_SET_DOMAIN ioctl: move an object into the given read/write |
* memory domains (I915_GEM_DOMAIN_* bits). |
*/ |
struct drm_i915_gem_set_domain { |
/** Handle for the object */ |
__u32 handle; |
/** New read domains */ |
__u32 read_domains; |
/** New write domain */ |
__u32 write_domain; |
}; |
/* GEM_SW_FINISH ioctl: signal that software access to the object is |
* done (exact flush semantics are in the driver, not visible here). |
*/ |
struct drm_i915_gem_sw_finish { |
/** Handle for the object */ |
__u32 handle; |
}; |
/* One relocation to apply while validating a buffer for execbuffer. */ |
struct drm_i915_gem_relocation_entry { |
/** |
* Handle of the buffer being pointed to by this relocation entry. |
* |
* It's appealing to make this be an index into the mm_validate_entry |
* list to refer to the buffer, but this allows the driver to create |
* a relocation list for state buffers and not re-write it per |
* exec using the buffer. |
*/ |
__u32 target_handle; |
/** |
* Value to be added to the offset of the target buffer to make up |
* the relocation entry. |
*/ |
__u32 delta; |
/** Offset in the buffer the relocation entry will be written into */ |
__u64 offset; |
/** |
* Offset value of the target buffer that the relocation entry was last |
* written as. |
* |
* If the buffer has the same offset as last time, we can skip syncing |
* and writing the relocation. This value is written back out by |
* the execbuffer ioctl when the relocation is written. |
*/ |
__u64 presumed_offset; |
/** |
* Target memory domains read by this operation. |
*/ |
__u32 read_domains; |
/** |
* Target memory domains written by this operation. |
* |
* Note that only one domain may be written by the whole |
* execbuffer operation, so that where there are conflicts, |
* the application will get -EINVAL back. |
*/ |
__u32 write_domain; |
}; |
/** @{ |
* Intel memory domains |
* |
* Most of these just align with the various caches in |
* the system and are used to flush and invalidate as |
* objects end up cached in different domains. |
*/ |
/** CPU cache */ |
#define I915_GEM_DOMAIN_CPU 0x00000001 |
/** Render cache, used by 2D and 3D drawing */ |
#define I915_GEM_DOMAIN_RENDER 0x00000002 |
/** Sampler cache, used by texture engine */ |
#define I915_GEM_DOMAIN_SAMPLER 0x00000004 |
/** Command queue, used to load batch buffers */ |
#define I915_GEM_DOMAIN_COMMAND 0x00000008 |
/** Instruction cache, used by shader programs */ |
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 |
/** Vertex address cache */ |
#define I915_GEM_DOMAIN_VERTEX 0x00000020 |
/** GTT domain - aperture and scanout */ |
#define I915_GEM_DOMAIN_GTT 0x00000040 |
/** @} */ |
/* Legacy execbuffer validate-list entry; see drm_i915_gem_exec_object2 |
* for the current form. |
*/ |
struct drm_i915_gem_exec_object { |
/** |
* User's handle for a buffer to be bound into the GTT for this |
* operation. |
*/ |
__u32 handle; |
/** Number of relocations to be performed on this buffer */ |
__u32 relocation_count; |
/** |
* Pointer to array of struct drm_i915_gem_relocation_entry containing |
* the relocations to be performed in this buffer. |
*/ |
__u64 relocs_ptr; |
/** Required alignment in graphics aperture */ |
__u64 alignment; |
/** |
* Returned value of the updated offset of the object, for future |
* presumed_offset writes. |
*/ |
__u64 offset; |
}; |
/* Argument for the legacy GEM_EXECBUFFER ioctl. */ |
struct drm_i915_gem_execbuffer { |
/** |
* List of buffers to be validated with their relocations to be |
* performed on them. |
* |
* This is a pointer to an array of struct drm_i915_gem_validate_entry. |
* |
* These buffers must be listed in an order such that all relocations |
* a buffer is performing refer to buffers that have already appeared |
* in the validate list. |
*/ |
__u64 buffers_ptr; |
__u32 buffer_count; |
/** Offset in the batchbuffer to start execution from. */ |
__u32 batch_start_offset; |
/** Bytes used in batchbuffer from batch_start_offset */ |
__u32 batch_len; |
__u32 DR1; |
__u32 DR4; |
__u32 num_cliprects; |
/** This is a struct drm_clip_rect *cliprects */ |
__u64 cliprects_ptr; |
}; |
/* Validate-list entry for the GEM_EXECBUFFER2 ioctl. */ |
struct drm_i915_gem_exec_object2 { |
/** |
* User's handle for a buffer to be bound into the GTT for this |
* operation. |
*/ |
__u32 handle; |
/** Number of relocations to be performed on this buffer */ |
__u32 relocation_count; |
/** |
* Pointer to array of struct drm_i915_gem_relocation_entry containing |
* the relocations to be performed in this buffer. |
*/ |
__u64 relocs_ptr; |
/** Required alignment in graphics aperture */ |
__u64 alignment; |
/** |
* Returned value of the updated offset of the object, for future |
* presumed_offset writes. |
*/ |
__u64 offset; |
#define EXEC_OBJECT_NEEDS_FENCE (1<<0) |
#define EXEC_OBJECT_NEEDS_GTT (1<<1) |
#define EXEC_OBJECT_WRITE (1<<2) |
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) |
__u64 flags; |
__u64 rsvd1; |
__u64 rsvd2; |
}; |
/* Argument for the GEM_EXECBUFFER2 ioctl; flags select the ring and |
* constants addressing mode (I915_EXEC_* bits below and after this |
* struct). rsvd1 carries the context id. |
*/ |
struct drm_i915_gem_execbuffer2 { |
/** |
* List of gem_exec_object2 structs |
*/ |
__u64 buffers_ptr; |
__u32 buffer_count; |
/** Offset in the batchbuffer to start execution from. */ |
__u32 batch_start_offset; |
/** Bytes used in batchbuffer from batch_start_offset */ |
__u32 batch_len; |
__u32 DR1; |
__u32 DR4; |
__u32 num_cliprects; |
/** This is a struct drm_clip_rect *cliprects */ |
__u64 cliprects_ptr; |
#define I915_EXEC_RING_MASK (7<<0) |
#define I915_EXEC_DEFAULT (0<<0) |
#define I915_EXEC_RENDER (1<<0) |
#define I915_EXEC_BSD (2<<0) |
#define I915_EXEC_BLT (3<<0) |
#define I915_EXEC_VEBOX (4<<0) |
/* Used for switching the constants addressing mode on gen4+ RENDER ring. |
* Gen6+ only supports relative addressing to dynamic state (default) and |
* absolute addressing. |
* |
* These flags are ignored for the BSD and BLT rings. |
*/ |
#define I915_EXEC_CONSTANTS_MASK (3<<6) |
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ |
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) |
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ |
__u64 flags; |
__u64 rsvd1; /* now used for context info */ |
__u64 rsvd2; |
}; |
/** Resets the SO write offset registers for transform feedback on gen7. */ |
#define I915_EXEC_GEN7_SOL_RESET (1<<8) |
/** Request a privileged ("secure") batch buffer. Note only available for |
* DRM_ROOT_ONLY | DRM_MASTER processes. |
*/ |
#define I915_EXEC_SECURE (1<<9) |
/** Inform the kernel that the batch is and will always be pinned. This |
* negates the requirement for a workaround to be performed to avoid |
* an incoherent CS (such as can be found on 830/845). If this flag is |
* not passed, the kernel will endeavour to make sure the batch is |
* coherent with the CS before execution. If this flag is passed, |
* userspace assumes the responsibility for ensuring the same. |
*/ |
#define I915_EXEC_IS_PINNED (1<<10) |
/** Provide a hint to the kernel that the command stream and auxiliary |
* state buffers already holds the correct presumed addresses and so the |
* relocation process may be skipped if no buffers need to be moved in |
* preparation for the execbuffer. |
*/ |
#define I915_EXEC_NO_RELOC (1<<11) |
/** Use the reloc.handle as an index into the exec object array rather |
* than as the per-file handle. |
*/ |
#define I915_EXEC_HANDLE_LUT (1<<12) |
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) |
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
#define i915_execbuffer2_set_context_id(eb2, context) \ |
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK |
#define i915_execbuffer2_get_context_id(eb2) \ |
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) |
/**
 * struct drm_i915_gem_pin - pin a buffer object into the GTT.
 */
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	/** Unused; keeps @alignment 64-bit aligned. */
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};
/**
 * struct drm_i915_gem_unpin - release a pin previously taken with
 * drm_i915_gem_pin.
 */
struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	/** Unused padding. */
	__u32 pad;
};
/**
 * struct drm_i915_gem_busy - query whether a buffer is busy on the GPU.
 */
struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status (1 if busy, 0 if idle).
	 * The high word is used to indicate on which rings the object
	 * currently resides:
	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
	 */
	__u32 busy;
};
/** |
* I915_CACHING_NONE |
* |
* GPU access is not coherent with cpu caches. Default for machines without an |
* LLC. |
*/ |
#define I915_CACHING_NONE 0 |
/** |
* I915_CACHING_CACHED |
* |
* GPU access is coherent with cpu caches and furthermore the data is cached in |
* last-level caches shared between cpu cores and the gpu GT. Default on |
* machines with HAS_LLC. |
*/ |
#define I915_CACHING_CACHED 1 |
/** |
* I915_CACHING_DISPLAY |
* |
* Special GPU caching mode which is coherent with the scanout engines. |
* Transparently falls back to I915_CACHING_NONE on platforms where no special |
* cache mode (like write-through or gfdt flushing) is available. The kernel |
* automatically sets this mode when using a buffer as a scanout target. |
* Userspace can manually set this mode to avoid a costly stall and clflush in |
* the hotpath of drawing the first frame. |
*/ |
#define I915_CACHING_DISPLAY 2 |
/**
 * struct drm_i915_gem_caching - set or query the caching level of a
 * buffer object (one of the I915_CACHING_* values above).
 */
struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};
#define I915_TILING_NONE 0 |
#define I915_TILING_X 1 |
#define I915_TILING_Y 2 |
#define I915_BIT_6_SWIZZLE_NONE 0 |
#define I915_BIT_6_SWIZZLE_9 1 |
#define I915_BIT_6_SWIZZLE_9_10 2 |
#define I915_BIT_6_SWIZZLE_9_11 3 |
#define I915_BIT_6_SWIZZLE_9_10_11 4 |
/* Not seen by userland */ |
#define I915_BIT_6_SWIZZLE_UNKNOWN 5 |
/* Seen by userland. */ |
#define I915_BIT_6_SWIZZLE_9_17 6 |
#define I915_BIT_6_SWIZZLE_9_10_17 7 |
/**
 * struct drm_i915_gem_set_tiling - change the tiling layout of a buffer.
 */
struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
/**
 * struct drm_i915_gem_get_tiling - query the current tiling state of a
 * buffer (read-only counterpart of drm_i915_gem_set_tiling).
 */
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
/**
 * struct drm_i915_gem_get_aperture - report total and available GTT
 * aperture space; both fields are outputs.
 */
struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};
/**
 * struct drm_i915_get_pipe_from_crtc_id - map a KMS CRTC id to its
 * hardware pipe number.
 */
struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested (input) **/
	__u32 crtc_id;

	/** pipe of requested CRTC (output) **/
	__u32 pipe;
};
#define I915_MADV_WILLNEED 0 |
#define I915_MADV_DONTNEED 1 |
#define __I915_MADV_PURGED 2 /* internal state */ |
/**
 * struct drm_i915_gem_madvise - advise the kernel about the expected
 * future use of a buffer's backing store (I915_MADV_* values above).
 */
struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};
/* flags */ |
#define I915_OVERLAY_TYPE_MASK 0xff |
#define I915_OVERLAY_YUV_PLANAR 0x01 |
#define I915_OVERLAY_YUV_PACKED 0x02 |
#define I915_OVERLAY_RGB 0x03 |
#define I915_OVERLAY_DEPTH_MASK 0xff00 |
#define I915_OVERLAY_RGB24 0x1000 |
#define I915_OVERLAY_RGB16 0x2000 |
#define I915_OVERLAY_RGB15 0x3000 |
#define I915_OVERLAY_YUV422 0x0100 |
#define I915_OVERLAY_YUV411 0x0200 |
#define I915_OVERLAY_YUV420 0x0300 |
#define I915_OVERLAY_YUV410 0x0400 |
#define I915_OVERLAY_SWAP_MASK 0xff0000 |
#define I915_OVERLAY_NO_SWAP 0x000000 |
#define I915_OVERLAY_UV_SWAP 0x010000 |
#define I915_OVERLAY_Y_SWAP 0x020000 |
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 |
#define I915_OVERLAY_FLAGS_MASK 0xff000000 |
#define I915_OVERLAY_ENABLE 0x01000000 |
/**
 * struct drm_intel_overlay_put_image - display an image on the overlay
 * engine. Source format/flags use the I915_OVERLAY_* values above.
 */
struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};
/* flags */ |
#define I915_OVERLAY_UPDATE_ATTRS (1<<0) |
#define I915_OVERLAY_UPDATE_GAMMA (1<<1) |
/**
 * struct drm_intel_overlay_attrs - get/set overlay color attributes.
 *
 * @flags selects which groups to update (I915_OVERLAY_UPDATE_ATTRS /
 * I915_OVERLAY_UPDATE_GAMMA above); the remaining fields carry the
 * color-key, picture-adjustment and gamma-ramp values.
 */
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	/* gamma ramp control points */
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};
/* |
* Intel sprite handling |
* |
* Color keying works with a min/mask/max tuple. Both source and destination |
* color keying is allowed. |
* |
* Source keying: |
* Sprite pixels within the min & max values, masked against the color channels |
* specified in the mask field, will be transparent. All other pixels will |
* be displayed on top of the primary plane. For RGB surfaces, only the min |
* and mask fields will be used; ranged compares are not allowed. |
* |
* Destination keying: |
* Primary plane pixels that match the min value, masked against the color |
* channels specified in the mask field, will be replaced by corresponding |
* pixels from the sprite plane. |
* |
* Note that source & destination keying are exclusive; only one can be |
* active on a given plane. |
*/ |
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ |
#define I915_SET_COLORKEY_DESTINATION (1<<1) |
#define I915_SET_COLORKEY_SOURCE (1<<2) |
/**
 * struct drm_intel_sprite_colorkey - configure sprite-plane color keying.
 *
 * See the source/destination keying description above; @flags takes the
 * I915_SET_COLORKEY_* values.
 */
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};
/**
 * struct drm_i915_gem_wait - wait for a buffer object to become idle.
 */
struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait, Returns time remaining. */
	__s64 timeout_ns;
};
/**
 * struct drm_i915_gem_context_create - create a new hardware context.
 */
struct drm_i915_gem_context_create {
	/* output: id of new context*/
	__u32 ctx_id;
	/* unused padding */
	__u32 pad;
};
/**
 * struct drm_i915_gem_context_destroy - destroy a context previously
 * created with drm_i915_gem_context_create.
 */
struct drm_i915_gem_context_destroy {
	/* input: id of the context to destroy */
	__u32 ctx_id;
	/* unused padding */
	__u32 pad;
};
/**
 * struct drm_i915_reg_read - read a hardware register.
 */
struct drm_i915_reg_read {
	/* input: register offset */
	__u64 offset;
	__u64 val; /* Return value */
};
/**
 * struct drm_i915_reset_stats - query GPU reset statistics, globally and
 * for the given context.
 */
struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};
/**
 * struct drm_i915_gem_userptr - create a GEM object backed by a
 * user-space memory range.
 */
struct drm_i915_gem_userptr {
	/* start address of the user memory range */
	__u64 user_ptr;
	/* length of the range in bytes */
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};
/*
 * struct drm_i915_mask - not part of the upstream i915 UAPI; appears to
 * describe a mask/cursor buffer for this driver port.
 * NOTE(review): field semantics are not documented anywhere in this
 * header — confirm against the ioctl implementation before relying on
 * the comments below.
 */
struct drm_i915_mask {
	__u32 handle;     /* buffer object handle */
	__u32 width;      /* mask width in pixels — presumably; verify */
	__u32 height;     /* mask height in pixels — presumably; verify */
	__u32 bo_size;
	__u32 bo_pitch;
	__u32 bo_map;
};
/*
 * struct drm_i915_fb_info - not part of the upstream i915 UAPI; returns
 * framebuffer geometry/placement information for this driver port.
 * NOTE(review): semantics inferred from field names — confirm against
 * the ioctl implementation.
 */
struct drm_i915_fb_info {
	__u32 name;
	__u32 width;
	__u32 height;
	__u32 pitch;
	__u32 tiling;
	__u32 crtc;
	__u32 pipe;
};
/*
 * struct drm_i915_mask_update - not part of the upstream i915 UAPI;
 * companion to struct drm_i915_mask for updating a sub-region (dx/dy,
 * width/height) of the mask buffer.
 * NOTE(review): semantics inferred from field names — confirm against
 * the ioctl implementation.
 */
struct drm_i915_mask_update {
	__u32 handle;
	__u32 dx;
	__u32 dy;
	__u32 width;
	__u32 height;
	__u32 bo_pitch;
	__u32 bo_map;
};
#endif /* _UAPI_I915_DRM_H_ */ |
/drivers/include/linux/uapi/drm/vmwgfx_drm.h |
---|
0,0 → 1,1062 |
/************************************************************************** |
* |
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#ifndef __VMWGFX_DRM_H__ |
#define __VMWGFX_DRM_H__ |
#ifndef __KERNEL__ |
#include <drm.h> |
#endif |
#define DRM_VMW_MAX_SURFACE_FACES 6 |
#define DRM_VMW_MAX_MIP_LEVELS 24 |
#define DRM_VMW_GET_PARAM 0 |
#define DRM_VMW_ALLOC_DMABUF 1 |
#define DRM_VMW_UNREF_DMABUF 2 |
#define DRM_VMW_CURSOR_BYPASS 3 |
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ |
#define DRM_VMW_CONTROL_STREAM 4 |
#define DRM_VMW_CLAIM_STREAM 5 |
#define DRM_VMW_UNREF_STREAM 6 |
/* guarded by DRM_VMW_PARAM_3D == 1 */ |
#define DRM_VMW_CREATE_CONTEXT 7 |
#define DRM_VMW_UNREF_CONTEXT 8 |
#define DRM_VMW_CREATE_SURFACE 9 |
#define DRM_VMW_UNREF_SURFACE 10 |
#define DRM_VMW_REF_SURFACE 11 |
#define DRM_VMW_EXECBUF 12 |
#define DRM_VMW_GET_3D_CAP 13 |
#define DRM_VMW_FENCE_WAIT 14 |
#define DRM_VMW_FENCE_SIGNALED 15 |
#define DRM_VMW_FENCE_UNREF 16 |
#define DRM_VMW_FENCE_EVENT 17 |
#define DRM_VMW_PRESENT 18 |
#define DRM_VMW_PRESENT_READBACK 19 |
#define DRM_VMW_UPDATE_LAYOUT 20 |
#define DRM_VMW_CREATE_SHADER 21 |
#define DRM_VMW_UNREF_SHADER 22 |
#define DRM_VMW_GB_SURFACE_CREATE 23 |
#define DRM_VMW_GB_SURFACE_REF 24 |
#define DRM_VMW_SYNCCPU 25 |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_PARAM - get device information. |
* |
* DRM_VMW_PARAM_FIFO_OFFSET: |
* Offset to use to map the first page of the FIFO read-only. |
* The fifo is mapped using the mmap() system call on the drm device. |
* |
* DRM_VMW_PARAM_OVERLAY_IOCTL: |
* Does the driver support the overlay ioctl. |
*/ |
#define DRM_VMW_PARAM_NUM_STREAMS 0 |
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 |
#define DRM_VMW_PARAM_3D 2 |
#define DRM_VMW_PARAM_HW_CAPS 3 |
#define DRM_VMW_PARAM_FIFO_CAPS 4 |
#define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 |
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 |
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 |
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 |
/** |
* enum drm_vmw_handle_type - handle type for ref ioctls |
* |
*/ |
enum drm_vmw_handle_type {
	/* classic device-local handle */
	DRM_VMW_HANDLE_LEGACY = 0,
	/* prime (shareable) handle */
	DRM_VMW_HANDLE_PRIME = 1
};
/** |
* struct drm_vmw_getparam_arg |
* |
* @value: Returned value. //Out |
* @param: Parameter to query. //In. |
* |
* Argument to the DRM_VMW_GET_PARAM Ioctl. |
*/ |
struct drm_vmw_getparam_arg {
	uint64_t value;   /* Out: returned parameter value */
	uint32_t param;   /* In: one of the DRM_VMW_PARAM_* values */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_CONTEXT - Create a host context. |
* |
* Allocates a device unique context id, and queues a create context command |
* for the host. Does not wait for host completion. |
*/ |
/** |
* struct drm_vmw_context_arg |
* |
* @cid: Device unique context ID. |
* |
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. |
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. |
*/ |
struct drm_vmw_context_arg {
	int32_t cid;      /* device-unique context ID (out on create, in on unref) */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_CONTEXT - Create a host context. |
* |
* Frees a global context id, and queues a destroy host command for the host. |
* Does not wait for host completion. The context ID can be used directly |
* in the command stream and shows up as the same context ID on the host. |
*/ |
/*************************************************************************/ |
/** |
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
* |
* Allocates a device unique surface id, and queues a create surface command |
* for the host. Does not wait for host completion. The surface ID can be |
* used directly in the command stream and shows up as the same surface |
* ID on the host. |
*/ |
/** |
 * struct drm_vmw_surface_create_req
* |
* @flags: Surface flags as understood by the host. |
* @format: Surface format as understood by the host. |
* @mip_levels: Number of mip levels for each face. |
* An unused face should have 0 encoded. |
 * @size_addr: Address of a user-space array of struct drm_vmw_size
* cast to an uint64_t for 32-64 bit compatibility. |
* The size of the array should equal the total number of mipmap levels. |
* @shareable: Boolean whether other clients (as identified by file descriptors) |
* may reference this surface. |
* @scanout: Boolean whether the surface is intended to be used as a |
* scanout. |
* |
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl. |
* Output data from the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_create_req {
	uint32_t flags;    /* surface flags as understood by the host */
	uint32_t format;   /* surface format as understood by the host */
	/* mip level count per face; unused faces encode 0 */
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	/* user-space array of struct drm_vmw_size, cast to uint64_t */
	uint64_t size_addr;
	int32_t shareable; /* boolean: other clients may reference this surface */
	int32_t scanout;   /* boolean: surface intended for scanout */
};
/** |
 * struct drm_vmw_surface_arg
* |
* @sid: Surface id of created surface or surface to destroy or reference. |
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl. |
* |
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. |
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. |
* Input argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_arg {
	int32_t sid;                          /* surface id created/destroyed/referenced */
	enum drm_vmw_handle_type handle_type; /* handle type for DRM_VMW_REF_SURFACE */
};
/** |
* struct drm_vmw_size ioctl. |
* |
* @width - mip level width |
* @height - mip level height |
* @depth - mip level depth |
* |
* Description of a mip level. |
* Input data to the DRM_WMW_CREATE_SURFACE Ioctl. |
*/ |
struct drm_vmw_size {
	uint32_t width;   /* mip level width */
	uint32_t height;  /* mip level height */
	uint32_t depth;   /* mip level depth */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/** |
* union drm_vmw_surface_create_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_CREATE_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;         /* output */
	struct drm_vmw_surface_create_req req;  /* input */
};
/*************************************************************************/ |
/** |
* DRM_VMW_REF_SURFACE - Reference a host surface. |
* |
 * Puts a reference on a host surface with a given sid, as previously
* returned by the DRM_VMW_CREATE_SURFACE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface ID in the command |
* stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* in the DRM_VMW_CREATE_SURFACE ioctl. |
*/ |
/** |
* union drm_vmw_surface_reference_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;  /* output: surface description */
	struct drm_vmw_surface_arg req;         /* input: surface to reference */
};
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SURFACE - Unreference a host surface. |
* |
* Clear a reference previously put on a host surface. |
* When all references are gone, including the one implicitly placed |
* on creation, |
* a destroy surface command will be queued for the host. |
* Does not wait for completion. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_EXECBUF |
* |
* Submit a command buffer for execution on the host, and return a |
* fence seqno that when signaled, indicates that the command buffer has |
* executed. |
*/ |
/** |
* struct drm_vmw_execbuf_arg |
* |
* @commands: User-space address of a command buffer cast to an uint64_t. |
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
* microseconds ahead of hardware. The driver may round this value |
* to the nearest kernel tick. |
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an |
* uint64_t. |
* @version: Allows expanding the execbuf ioctl parameters without breaking |
* backwards compatibility, since user-space will always tell the kernel |
* which version it uses. |
* @flags: Execbuf flags. None currently. |
* |
* Argument to the DRM_VMW_EXECBUF Ioctl. |
*/ |
#define DRM_VMW_EXECBUF_VERSION 1 |
struct drm_vmw_execbuf_arg {
	uint64_t commands;      /* user-space command buffer address, cast to uint64_t */
	uint32_t command_size;  /* command buffer size in bytes */
	uint32_t throttle_us;   /* software/hardware throttle, in microseconds */
	uint64_t fence_rep;     /* struct drm_vmw_fence_rep address, cast to uint64_t */
	uint32_t version;       /* set to DRM_VMW_EXECBUF_VERSION */
	uint32_t flags;         /* execbuf flags; none currently defined */
};
/** |
* struct drm_vmw_fence_rep |
* |
* @handle: Fence object handle for fence associated with a command submission. |
* @mask: Fence flags relevant for this fence object. |
* @seqno: Fence sequence number in fifo. A fence object with a lower |
* seqno will signal the EXEC flag before a fence object with a higher |
* seqno. This can be used by user-space to avoid kernel calls to determine |
* whether a fence has signaled the EXEC flag. Note that @seqno will |
* wrap at 32-bit. |
* @passed_seqno: The highest seqno number processed by the hardware |
* so far. This can be used to mark user-space fence objects as signaled, and |
* to determine whether a fence seqno might be stale. |
* @error: This member should've been set to -EFAULT on submission. |
* The following actions should be take on completion: |
* error == -EFAULT: Fence communication failed. The host is synchronized. |
* Use the last fence id read from the FIFO fence register. |
* error != 0 && error != -EFAULT: |
* Fence submission failed. The host is synchronized. Use the fence_seq member. |
* error == 0: All is OK, The host may not be synchronized. |
* Use the fence_seq member. |
* |
* Input / Output data to the DRM_VMW_EXECBUF Ioctl. |
*/ |
struct drm_vmw_fence_rep {
	uint32_t handle;        /* fence object handle for this submission */
	uint32_t mask;          /* fence flags relevant for this fence object */
	uint32_t seqno;         /* fence sequence number in fifo; wraps at 32-bit */
	uint32_t passed_seqno;  /* highest seqno processed by the hardware so far */
	uint32_t pad64;         /* explicit padding for 64-bit alignment */
	int32_t error;          /* see the error protocol described above */
};
/*************************************************************************/ |
/** |
* DRM_VMW_ALLOC_DMABUF |
* |
* Allocate a DMA buffer that is visible also to the host. |
* NOTE: The buffer is |
* identified by a handle and an offset, which are private to the guest, but |
* useable in the command stream. The guest kernel may translate these |
* and patch up the command stream accordingly. In the future, the offset may |
* be zero at all times, or it may disappear from the interface before it is |
* fixed. |
* |
* The DMA buffer may stay user-space mapped in the guest at all times, |
* and is thus suitable for sub-allocation. |
* |
* DMA buffers are mapped using the mmap() syscall on the drm device. |
*/ |
/** |
* struct drm_vmw_alloc_dmabuf_req |
* |
* @size: Required minimum size of the buffer. |
* |
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;    /* required minimum size of the buffer */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/** |
* struct drm_vmw_dmabuf_rep |
* |
* @map_handle: Offset to use in the mmap() call used to map the buffer. |
* @handle: Handle unique to this buffer. Used for unreferencing. |
* @cur_gmr_id: GMR id to use in the command stream when this buffer is |
 * referenced. See note above.
* @cur_gmr_offset: Offset to use in the command stream when this buffer is |
* referenced. See note above. |
* |
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;      /* offset to use in mmap() to map the buffer */
	uint32_t handle;          /* unique buffer handle; used for unreferencing */
	uint32_t cur_gmr_id;      /* GMR id to use in the command stream */
	uint32_t cur_gmr_offset;  /* offset to use in the command stream */
	uint32_t pad64;           /* explicit padding for 64-bit alignment */
};
/** |
* union drm_vmw_dmabuf_arg |
* |
* @req: Input data as described above. |
* @rep: Output data as described above. |
* |
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;  /* input */
	struct drm_vmw_dmabuf_rep rep;        /* output */
};
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer. |
* |
*/ |
/** |
* struct drm_vmw_unref_dmabuf_arg |
* |
* @handle: Handle indicating what buffer to free. Obtained from the |
* DRM_VMW_ALLOC_DMABUF Ioctl. |
* |
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl. |
*/ |
struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;  /* buffer to free, from DRM_VMW_ALLOC_DMABUF */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_CONTROL_STREAM - Control overlays, aka streams. |
* |
* This IOCTL controls the overlay units of the svga device. |
* The SVGA overlay units does not work like regular hardware units in |
 * that they do not automatically read back the contents of the given dma
* buffer. But instead only read back for each call to this ioctl, and |
* at any point between this call being made and a following call that |
* either changes the buffer or disables the stream. |
*/ |
/** |
* struct drm_vmw_rect |
* |
* Defines a rectangle. Used in the overlay ioctl to define |
* source and destination rectangle. |
*/ |
struct drm_vmw_rect {
	int32_t x;   /* left edge; may be negative for destination rects */
	int32_t y;   /* top edge; may be negative for destination rects */
	uint32_t w;  /* width */
	uint32_t h;  /* height */
};
/** |
* struct drm_vmw_control_stream_arg |
* |
 * @stream_id: Stream to control
* @enabled: If false all following arguments are ignored. |
* @handle: Handle to buffer for getting data from. |
* @format: Format of the overlay as understood by the host. |
* @width: Width of the overlay. |
* @height: Height of the overlay. |
* @size: Size of the overlay in bytes. |
* @pitch: Array of pitches, the two last are only used for YUV12 formats. |
* @offset: Offset from start of dma buffer to overlay. |
* @src: Source rect, must be within the defined area above. |
* @dst: Destination rect, x and y may be negative. |
* |
* Argument to the DRM_VMW_CONTROL_STREAM Ioctl. |
*/ |
struct drm_vmw_control_stream_arg {
	uint32_t stream_id;  /* stream to control */
	uint32_t enabled;    /* if false all following arguments are ignored */
	uint32_t flags;
	uint32_t color_key;
	uint32_t handle;     /* handle of buffer to read overlay data from */
	uint32_t offset;     /* offset from start of dma buffer to overlay */
	int32_t format;      /* overlay format as understood by the host */
	uint32_t size;       /* size of the overlay in bytes */
	uint32_t width;      /* overlay width */
	uint32_t height;     /* overlay height */
	/* pitches; the two last entries are only used for YUV12 formats */
	uint32_t pitch[3];
	uint32_t pad64;      /* explicit padding for 64-bit alignment */
	struct drm_vmw_rect src;  /* source rect, within the area above */
	struct drm_vmw_rect dst;  /* destination rect; x and y may be negative */
};
/*************************************************************************/ |
/** |
* DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass. |
* |
*/ |
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0) |
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1) |
/** |
* struct drm_vmw_cursor_bypass_arg |
* |
* @flags: Flags. |
* @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed. |
* @xpos: X position of cursor. |
* @ypos: Y position of cursor. |
* @xhot: X hotspot. |
* @yhot: Y hotspot. |
* |
* Argument to the DRM_VMW_CURSOR_BYPASS Ioctl. |
*/ |
struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;    /* DRM_VMW_CURSOR_BYPASS_* flags */
	uint32_t crtc_id;  /* only used if DRM_VMW_CURSOR_BYPASS_ALL isn't set */
	int32_t xpos;      /* cursor X position */
	int32_t ypos;      /* cursor Y position */
	int32_t xhot;      /* X hotspot */
	int32_t yhot;      /* Y hotspot */
};
/*************************************************************************/ |
/** |
* DRM_VMW_CLAIM_STREAM - Claim a single stream. |
*/ |
/** |
* struct drm_vmw_context_arg |
* |
* @stream_id: Device unique context ID. |
* |
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. |
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. |
*/ |
struct drm_vmw_stream_arg {
	uint32_t stream_id;  /* device-unique stream ID */
	uint32_t pad64;      /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_STREAM - Unclaim a stream. |
* |
* Return a single stream that was claimed by this process. Also makes |
* sure that the stream has been stopped. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_3D_CAP |
* |
* Read 3D capabilities from the FIFO |
* |
*/ |
/** |
* struct drm_vmw_get_3d_cap_arg |
* |
* @buffer: Pointer to a buffer for capability data, cast to an uint64_t |
* @size: Max size to copy |
* |
* Input argument to the DRM_VMW_GET_3D_CAP_IOCTL |
* ioctls. |
*/ |
struct drm_vmw_get_3d_cap_arg {
	uint64_t buffer;    /* capability data destination, cast to uint64_t */
	uint32_t max_size;  /* max size to copy */
	uint32_t pad64;     /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_WAIT |
* |
* Waits for a fence object to signal. The wait is interruptible, so that |
* signals may be delivered during the interrupt. The wait may timeout, |
* in which case the calls returns -EBUSY. If the wait is restarted, |
* that is restarting without resetting @cookie_valid to zero, |
* the timeout is computed from the first call. |
* |
* The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait |
* on: |
* DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command |
* stream |
* have executed. |
* DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish |
* commands |
* in the buffer given to the EXECBUF ioctl returning the fence object handle |
* are available to user-space. |
* |
* DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the |
 * fence wait ioctl returns 0, the fence object has been unreferenced after
* the wait. |
*/ |
#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0) |
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1) |
#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0) |
/** |
* struct drm_vmw_fence_wait_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @cookie_valid: Must be reset to 0 on first call. Left alone on restart. |
* @kernel_cookie: Set to 0 on first call. Left alone on restart. |
* @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout. |
* @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick |
* before returning. |
* @flags: Fence flags to wait on. |
* @wait_options: Options that control the behaviour of the wait ioctl. |
* |
* Input argument to the DRM_VMW_FENCE_WAIT ioctl. |
*/ |
struct drm_vmw_fence_wait_arg {
	uint32_t handle;         /* fence handle from DRM_VMW_EXECBUF */
	int32_t cookie_valid;    /* reset to 0 on first call; left alone on restart */
	uint64_t kernel_cookie;  /* set to 0 on first call; left alone on restart */
	uint64_t timeout_us;     /* timeout in microseconds; 0 = indefinite */
	int32_t lazy;            /* 1 if timing is not critical */
	int32_t flags;           /* DRM_VMW_FENCE_FLAG_* to wait on */
	int32_t wait_options;    /* DRM_VMW_WAIT_OPTION_* behaviour flags */
	int32_t pad64;           /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_SIGNALED |
* |
 * Checks if a fence object is signaled.
*/ |
/** |
* struct drm_vmw_fence_signaled_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl |
* @signaled: Out: Flags signaled. |
* @sequence: Out: Highest sequence passed so far. Can be used to signal the |
* EXEC flag of user-space fence objects. |
* |
* Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF |
* ioctls. |
*/ |
struct drm_vmw_fence_signaled_arg {
	uint32_t handle;          /* fence handle from DRM_VMW_EXECBUF */
	uint32_t flags;           /* in: fence object flags to test */
	int32_t signaled;         /* out: whether the fence has signaled */
	uint32_t passed_seqno;    /* out: highest sequence passed so far */
	uint32_t signaled_flags;  /* out: flags signaled */
	uint32_t pad64;           /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_UNREF |
* |
* Unreferences a fence object, and causes it to be destroyed if there are no |
* other references to it. |
* |
*/ |
/** |
* struct drm_vmw_fence_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* |
* Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.. |
*/ |
struct drm_vmw_fence_arg {
	uint32_t handle;  /* fence handle from DRM_VMW_EXECBUF */
	uint32_t pad64;   /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_EVENT |
* |
* Queues an event on a fence to be delivered on the drm character device |
* when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag. |
* Optionally the approximate time when the fence signaled is |
* given by the event. |
*/ |
/* |
* The event type |
*/ |
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000 |
/* Event delivered on the drm character device when a fence signals. */
struct drm_vmw_event_fence {
	struct drm_event base;  /* generic DRM event header */
	uint64_t user_data;     /* value supplied by user-space */
	/* approximate signal time; only filled when requested via
	 * DRM_VMW_FE_FLAG_REQ_TIME */
	uint32_t tv_sec;
	uint32_t tv_usec;
};
/* |
* Flags that may be given to the command. |
*/ |
/* Request fence signaled time on the event. */ |
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0) |
/** |
* struct drm_vmw_fence_event_arg |
* |
* @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if |
* the fence is not supposed to be referenced by user-space. |
* @user_info: Info to be delivered with the event. |
* @handle: Attach the event to this fence only. |
* @flags: A set of flags as defined above. |
*/ |
struct drm_vmw_fence_event_arg {
	/* pointer to struct drm_vmw_fence_rep cast to uint64_t, or 0 if the
	 * fence is not supposed to be referenced by user-space */
	uint64_t fence_rep;
	uint64_t user_data;  /* info to be delivered with the event */
	uint32_t handle;     /* attach the event to this fence only */
	uint32_t flags;      /* DRM_VMW_FE_FLAG_* flags */
};
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT |
* |
* Executes an SVGA present on a given fb for a given surface. The surface |
* is placed on the framebuffer. Cliprects are given relative to the given |
 * point (the point designated by dest_{x|y}).
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: framebuffer id to present / read back from. |
* @sid: Surface id to present from. |
* @dest_x: X placement coordinate for surface. |
* @dest_y: Y placement coordinate for surface. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @num_clips: Number of cliprects given relative to the framebuffer origin, |
* in the same coordinate space as the frame buffer. |
* @pad64: Unused 64-bit padding. |
* |
* Input argument to the DRM_VMW_PRESENT ioctl. |
*/ |
struct drm_vmw_present_arg {
	uint32_t fb_id;      /* framebuffer id to present to */
	uint32_t sid;        /* surface id to present from */
	int32_t dest_x;      /* X placement coordinate for surface */
	int32_t dest_y;      /* Y placement coordinate for surface */
	uint64_t clips_ptr;  /* array of clip rects, cast to uint64_t */
	/* number of cliprects, relative to the framebuffer origin */
	uint32_t num_clips;
	uint32_t pad64;      /* explicit padding for 64-bit alignment */
};
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT_READBACK |
* |
* Executes an SVGA present readback from a given fb to the dma buffer |
* currently bound as the fb. If there is no dma buffer bound to the fb, |
* an error will be returned. |
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: fb_id to present / read back from. |
* @num_clips: Number of cliprects. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. |
* If this member is NULL, then the ioctl should not return a fence. |
*/ |
struct drm_vmw_present_readback_arg { |
uint32_t fb_id; /* fb_id to present / read back from */ |
uint32_t num_clips; /* number of clip rects */ |
uint64_t clips_ptr; /* user-space pointer to clip rects, cast to uint64_t */ |
uint64_t fence_rep; /* pointer to struct drm_vmw_fence_rep cast to uint64_t; NULL for no fence */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UPDATE_LAYOUT - Update layout |
* |
* Updates the preferred modes and connection status for connectors. The |
* command consists of one drm_vmw_update_layout_arg pointing to an array |
* of num_outputs drm_vmw_rect's. |
*/ |
/** |
* struct drm_vmw_update_layout_arg |
* |
* @num_outputs: number of active connectors |
* @rects: pointer to array of drm_vmw_rect cast to an uint64_t |
* |
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. |
*/ |
struct drm_vmw_update_layout_arg { |
uint32_t num_outputs; /* number of active connectors */ |
uint32_t pad64; /* unused; keeps the following 64-bit member aligned */ |
uint64_t rects; /* pointer to array of drm_vmw_rect, cast to uint64_t */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_SHADER - Create shader |
* |
* Creates a shader and optionally binds it to a dma buffer containing |
* the shader byte-code. |
*/ |
/** |
* enum drm_vmw_shader_type - Shader types |
*/ |
enum drm_vmw_shader_type { |
drm_vmw_shader_type_vs = 0, /* vertex shader */ |
drm_vmw_shader_type_ps, /* pixel shader */ |
drm_vmw_shader_type_gs /* geometry shader */ |
}; |
/** |
* struct drm_vmw_shader_create_arg |
* |
* @shader_type: Shader type of the shader to create. |
* @size: Size of the byte-code in bytes. |
* @buffer_handle: Buffer handle identifying the buffer containing the |
* shader byte-code |
* @shader_handle: On successful completion contains a handle that |
* can be used to subsequently identify the shader. |
* @offset: Offset in bytes into the buffer given by @buffer_handle, |
* where the shader byte-code starts. |
* |
* Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl. |
*/ |
struct drm_vmw_shader_create_arg { |
enum drm_vmw_shader_type shader_type; /* type of shader to create */ |
uint32_t size; /* size of the byte-code in bytes */ |
uint32_t buffer_handle; /* buffer containing the shader byte-code */ |
uint32_t shader_handle; /* out: handle used to subsequently identify the shader */ |
uint64_t offset; /* byte offset into @buffer_handle where the byte-code starts */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SHADER - Unreferences a shader |
* |
* Destroys a user-space reference to a shader, optionally destroying |
* it. |
*/ |
/** |
* struct drm_vmw_shader_arg |
* |
* @handle: Handle identifying the shader to destroy. |
* |
* Input argument to the DRM_VMW_UNREF_SHADER ioctl. |
*/ |
struct drm_vmw_shader_arg { |
uint32_t handle; /* handle identifying the shader to destroy */ |
uint32_t pad64; /* unused padding */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface. |
* |
* Allocates a surface handle and queues a create surface command |
* for the host on the first use of the surface. The surface ID can |
* be used as the surface ID in commands referencing the surface. |
*/ |
/** |
* enum drm_vmw_surface_flags |
* |
* @drm_vmw_surface_flag_shareable: Whether the surface is shareable |
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout |
* surface. |
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is |
* given. |
*/ |
enum drm_vmw_surface_flags { |
drm_vmw_surface_flag_shareable = (1 << 0), /* surface is shareable */ |
drm_vmw_surface_flag_scanout = (1 << 1), /* surface is a scanout surface */ |
drm_vmw_surface_flag_create_buffer = (1 << 2) /* create a backup buffer if none is given */ |
}; |
/** |
* struct drm_vmw_gb_surface_create_req |
* |
* @svga3d_flags: SVGA3d surface flags for the device. |
* @format: SVGA3d format. |
* @mip_level: Number of mip levels for all faces. |
* @drm_surface_flags Flags as described above. |
* @multisample_count Future use. Set to 0. |
* @autogen_filter Future use. Set to 0. |
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID |
* if none. |
* @base_size Size of the base mip level for all faces. |
* |
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
struct drm_vmw_gb_surface_create_req { |
uint32_t svga3d_flags; /* SVGA3d surface flags for the device */ |
uint32_t format; /* SVGA3d format */ |
uint32_t mip_levels; /* number of mip levels for all faces */ |
enum drm_vmw_surface_flags drm_surface_flags; /* see enum drm_vmw_surface_flags above */ |
uint32_t multisample_count; /* future use; set to 0 */ |
uint32_t autogen_filter; /* future use; set to 0 */ |
uint32_t buffer_handle; /* backup buffer handle; SVGA3D_INVALID_ID if none */ |
uint32_t pad64; /* unused padding for alignment */ |
struct drm_vmw_size base_size; /* size of the base mip level for all faces */ |
}; |
/** |
* struct drm_vmw_gb_surface_create_rep |
* |
* @handle: Surface handle. |
* @backup_size: Size of backup buffers for this surface. |
* @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none. |
* @buffer_size: Actual size of the buffer identified by |
* @buffer_handle |
* @buffer_map_handle: Offset into device address space for the buffer |
* identified by @buffer_handle. |
* |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl. |
* Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
struct drm_vmw_gb_surface_create_rep { |
uint32_t handle; /* surface handle */ |
uint32_t backup_size; /* size of backup buffers for this surface */ |
uint32_t buffer_handle; /* backup buffer handle; SVGA3D_INVALID_ID if none */ |
uint32_t buffer_size; /* actual size of the buffer identified by @buffer_handle */ |
uint64_t buffer_map_handle; /* offset into device address space for that buffer */ |
}; |
/** |
* union drm_vmw_gb_surface_create_arg |
* |
* @req: Input argument as described above. |
* @rep: Output argument as described above. |
* |
* Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
union drm_vmw_gb_surface_create_arg { |
struct drm_vmw_gb_surface_create_rep rep; /* out: created surface info */ |
struct drm_vmw_gb_surface_create_req req; /* in: creation parameters */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_REF - Reference a host surface. |
* |
* Puts a reference on a host surface with a given handle, as previously |
* returned by the DRM_VMW_GB_SURFACE_CREATE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface handle in |
* the command stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
/** |
* struct drm_vmw_gb_surface_reference_arg |
* |
* @creq: The data used as input when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_req" |
* @crep: Additional data output when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_rep" |
* |
* Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl. |
*/ |
struct drm_vmw_gb_surface_ref_rep { |
struct drm_vmw_gb_surface_create_req creq; /* data used as input when the surface was created */ |
struct drm_vmw_gb_surface_create_rep crep; /* additional data output when the surface was created */ |
}; |
/** |
* union drm_vmw_gb_surface_reference_arg |
* |
* @req: Input data as described above at "struct drm_vmw_surface_arg" |
* @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep" |
* |
* Argument to the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
union drm_vmw_gb_surface_reference_arg { |
struct drm_vmw_gb_surface_ref_rep rep; /* out: surface creation info, see drm_vmw_gb_surface_ref_rep */ |
struct drm_vmw_surface_arg req; /* in: identifies the surface to reference */ |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access. |
* |
* Idles any previously submitted GPU operations on the buffer and |
* by default blocks command submissions that reference the buffer. |
* If the file descriptor used to grab a blocking CPU sync is closed, the |
* cpu sync is released. |
* The flags argument indicates how the grab / release operation should be |
* performed: |
*/ |
/** |
* enum drm_vmw_synccpu_flags - Synccpu flags: |
* |
* @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a |
* hint to the kernel to allow command submissions that references the buffer |
* for read-only. |
* @drm_vmw_synccpu_write: Sync for write. Block all command submissions |
* referencing this buffer. |
* @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return |
* -EBUSY should the buffer be busy. |
* @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer |
* while the buffer is synced for CPU. This is similar to the GEM bo idle |
* behavior. |
*/ |
enum drm_vmw_synccpu_flags { |
drm_vmw_synccpu_read = (1 << 0), /* sync for read; hints that read-only submissions may proceed */ |
drm_vmw_synccpu_write = (1 << 1), /* sync for write; block all submissions referencing the buffer */ |
drm_vmw_synccpu_dontblock = (1 << 2), /* return -EBUSY rather than waiting for GPU idle */ |
drm_vmw_synccpu_allow_cs = (1 << 3) /* allow command submission while the buffer is synced for CPU */ |
}; |
/** |
* enum drm_vmw_synccpu_op - Synccpu operations: |
* |
* @drm_vmw_synccpu_grab: Grab the buffer for CPU operations |
* @drm_vmw_synccpu_release: Release a previous grab. |
*/ |
enum drm_vmw_synccpu_op { |
drm_vmw_synccpu_grab, /* grab the buffer for CPU operations */ |
drm_vmw_synccpu_release /* release a previous grab */ |
}; |
/** |
* struct drm_vmw_synccpu_arg |
* |
* @op: The synccpu operation as described above. |
* @handle: Handle identifying the buffer object. |
* @flags: Flags as described above. |
*/ |
struct drm_vmw_synccpu_arg { |
enum drm_vmw_synccpu_op op; /* grab or release, see enum drm_vmw_synccpu_op */ |
enum drm_vmw_synccpu_flags flags; /* see enum drm_vmw_synccpu_flags */ |
uint32_t handle; /* handle identifying the buffer object */ |
uint32_t pad64; /* unused padding */ |
}; |
#endif |
/drivers/include/linux/bitmap.h |
---|
45,7 → 45,6 |
* bitmap_set(dst, pos, nbits) Set specified bit area |
* bitmap_clear(dst, pos, nbits) Clear specified bit area |
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area |
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above |
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
61,7 → 60,6 |
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
* bitmap_release_region(bitmap, pos, order) Free specified bit region |
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex |
*/ |
/* |
116,37 → 114,12 |
extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
extern unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask, |
unsigned long align_offset); |
unsigned long align_mask); |
/** |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
* the bit offset of all zero areas this function finds is multiples of that |
* power of 2. A @align_mask of 0 means no alignment is required. |
*/ |
static inline unsigned long |
bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask) |
{ |
return bitmap_find_next_zero_area_off(map, size, start, nr, |
align_mask, 0); |
} |
extern int bitmap_scnprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
172,8 → 145,6 |
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
extern int bitmap_print_to_pagebuf(bool list, char *buf, |
const unsigned long *maskp, int nmaskbits); |
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) |
#define BITMAP_LAST_WORD_MASK(nbits) \ |
/drivers/include/linux/bitops.h |
---|
18,12 → 18,9 |
* position @h. For example |
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
*/ |
#define GENMASK(h, l) \ |
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) |
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK_ULL(h, l) \ |
(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) |
extern unsigned int __sw_hweight8(unsigned int w); |
extern unsigned int __sw_hweight16(unsigned int w); |
extern unsigned int __sw_hweight32(unsigned int w); |
/drivers/include/linux/bug.h |
---|
1,8 → 1,15 |
#ifndef _ASM_GENERIC_BUG_H |
#define _ASM_GENERIC_BUG_H |
#include <linux/compiler.h> |
//extern __printf(3, 4) |
//void warn_slowpath_fmt(const char *file, const int line, |
// const char *fmt, ...); |
//extern __printf(4, 5) |
//void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, |
// const char *fmt, ...); |
//extern void warn_slowpath_null(const char *file, const int line); |
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0) |
54,66 → 61,18 |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
/* Force a compilation error if condition is true, but also produce a |
result (of value 0 and type size_t), so the expression can be used |
e.g. in a structure initializer (or where-ever else comma expressions |
aren't permitted). */ |
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) |
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) |
/* |
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the |
* expression but avoids the generation of any code, even if that expression |
* has side-effects. |
*/ |
#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) |
#define printk_once(fmt, ...) \ |
({ \ |
static bool __print_once; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
/** |
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied |
* error message. |
* @condition: the condition which the compiler should know is false. |
* |
* See BUILD_BUG_ON for description. |
*/ |
#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) |
/** |
* BUILD_BUG_ON - break compile if a condition is true. |
* @condition: the condition which the compiler should know is false. |
* |
* If you have some code which relies on certain constants being equal, or |
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to |
* detect if someone changes it. |
* |
* The implementation uses gcc's reluctance to create a negative array, but gcc |
* (as of 4.4) only emits that error for obvious cases (e.g. not arguments to |
* inline functions). Luckily, in 4.3 they added the "error" function |
* attribute just for this type of case. Thus, we use a negative sized array |
* (should always create an error on gcc versions older than 4.4) and then call |
* an undefined function with the error attribute (should always create an |
* error on gcc 4.3 and later). If for some reason, neither creates a |
* compile-time error, we'll still have a link-time error, which is harder to |
* track down. |
*/ |
#ifndef __OPTIMIZE__ |
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#else |
#define BUILD_BUG_ON(condition) \ |
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) |
#endif |
/** |
* BUILD_BUG - break compile if used. |
* |
* If you have some code that you expect the compiler to eliminate at |
* build time, you should use BUILD_BUG to detect if it is |
* unexpectedly used. |
*/ |
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
/drivers/include/linux/compiler-gcc4.h |
---|
71,6 → 71,7 |
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 |
* |
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek. |
* Fixed in GCC 4.8.2 and later versions. |
* |
* (asm goto is automatically volatile - the naming reflects this.) |
*/ |
/drivers/include/linux/compiler.h |
---|
186,80 → 186,6 |
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
#endif |
#include <uapi/linux/types.h> |
static __always_inline void data_access_exceeds_word_size(void) |
#ifdef __compiletime_warning |
__compiletime_warning("data access exceeds word size and won't be atomic") |
#endif |
; |
static __always_inline void data_access_exceeds_word_size(void) |
{ |
} |
static __always_inline void __read_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; |
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; |
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; |
#ifdef CONFIG_64BIT |
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)res, (const void *)p, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
static __always_inline void __assign_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
case 2: *(volatile __u16 *)p = *(__u16 *)res; break; |
case 4: *(volatile __u32 *)p = *(__u32 *)res; break; |
#ifdef CONFIG_64BIT |
case 8: *(volatile __u64 *)p = *(__u64 *)res; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)p, (const void *)res, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
/* |
* Prevent the compiler from merging or refetching reads or writes. The |
* compiler is also forbidden from reordering successive instances of |
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the |
* compiler is aware of some particular ordering. One way to make the |
* compiler aware of ordering is to put the two invocations of READ_ONCE, |
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements. |
* |
* In contrast to ACCESS_ONCE these two macros will also work on aggregate |
* data types like structs or unions. If the size of the accessed data |
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits) |
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a |
* compile-time warning. |
* |
* Their two major use cases are: (1) Mediating communication between |
* process-level code and irq/NMI handlers, all running on the same CPU, |
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
* mutilate accesses that either do not require ordering or that interact |
* with an explicit memory barrier or atomic instruction that provides the |
* required ordering. |
*/ |
#define READ_ONCE(x) \ |
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) |
#define ASSIGN_ONCE(val, x) \ |
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
/drivers/include/linux/dma-buf.h |
---|
30,8 → 30,6 |
#include <linux/list.h> |
#include <linux/dma-mapping.h> |
#include <linux/fs.h> |
#include <linux/fence.h> |
#include <linux/wait.h> |
struct device; |
struct dma_buf; |
/drivers/include/linux/err.h |
---|
4,7 → 4,7 |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <asm/errno.h> |
#include <errno.h> |
/* |
* Kernel pointers have redundant information, so we can use a |
/drivers/include/linux/hdmi.h |
---|
1,24 → 1,9 |
/* |
* Copyright (C) 2012 Avionic Design GmbH |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sub license, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
*/ |
#ifndef __LINUX_HDMI_H_ |
/drivers/include/linux/i2c.h |
---|
31,9 → 31,6 |
#include <linux/module.h> |
#include <linux/i2c-id.h> |
#include <linux/mod_devicetable.h> |
#include <linux/sched.h> /* for completion */ |
#include <linux/mutex.h> |
#include <linux/jiffies.h> |
extern struct bus_type i2c_bus_type; |
extern struct device_type i2c_adapter_type; |
142,8 → 139,6 |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter |
* calls it to pass on slave events to the slave driver. |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
165,13 → 160,6 |
extern struct i2c_client *i2c_verify_client(struct device *dev); |
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); |
enum i2c_slave_event { |
I2C_SLAVE_REQ_READ_START, |
I2C_SLAVE_REQ_READ_END, |
I2C_SLAVE_REQ_WRITE_START, |
I2C_SLAVE_REQ_WRITE_END, |
I2C_SLAVE_STOP, |
}; |
/** |
* struct i2c_board_info - template for device creation |
* @type: chip type, to initialize i2c_client.name |
222,7 → 210,7 |
* to name two of the most common. |
* |
* The return codes from the @master_xfer field should indicate the type of |
* error code that occurred during the transfer, as documented in the kernel |
* error code that occured during the transfer, as documented in the kernel |
* Documentation file Documentation/i2c/fault-codes. |
*/ |
struct i2c_algorithm { |
242,12 → 230,6 |
u32 (*functionality) (struct i2c_adapter *); |
}; |
int i2c_recover_bus(struct i2c_adapter *adap); |
/* Generic recovery routines */ |
int i2c_generic_gpio_recovery(struct i2c_adapter *adap); |
int i2c_generic_scl_recovery(struct i2c_adapter *adap); |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
/drivers/include/linux/idr.h |
---|
14,10 → 14,15 |
#include <syscall.h> |
#include <linux/types.h> |
#include <errno-base.h> |
#include <linux/bitops.h> |
//#include <linux/init.h> |
#include <linux/rcupdate.h> |
//#include <linux/rcupdate.h> |
#include <linux/spinlock.h> |
#include <linux/bitmap.h> |
#include <linux/bug.h> |
/* |
* We want shallower trees and thus more bits covered at each layer. 8 |
* bits gives us large enough first layer for most use cases and maximum |
/drivers/include/linux/jiffies.h |
---|
77,8 → 77,8 |
* without sampling the sequence number in jiffies_lock. |
* get_jiffies_64() will do this for you as appropriate. |
*/ |
extern u64 __jiffy_data jiffies_64; |
extern unsigned long volatile __jiffy_data jiffies; |
extern u64 jiffies_64; |
extern unsigned long volatile jiffies; |
#if (BITS_PER_LONG < 64) |
u64 get_jiffies_64(void); |
262,12 → 262,24 |
#define SEC_JIFFIE_SC (32 - SHIFT_HZ) |
#endif |
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) |
#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) |
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define USEC_CONVERSION \ |
((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
/* |
* USEC_ROUND is used in the timeval to jiffie conversion. See there |
* for more details. It is the scaled resolution rounding value. Note |
* that it is a 64-bit value. Since, when it is applied, we are already |
* in jiffies (albit scaled), it is nothing but the bits we will shift |
* off. |
*/ |
#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) |
/* |
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that |
* into seconds. The 64-bit case will overflow if we are not careful, |
* so use the messy SH_DIV macro to do it. Still all constants. |
313,6 → 325,35 |
extern u64 nsecs_to_jiffies64(u64 n); |
extern unsigned long nsecs_to_jiffies(u64 n); |
/* |
* Round a jiffies timeout @j to a whole-second boundary. Rounds down |
* when @j is within HZ/4 past a second boundary (unless @force_up is |
* set), otherwise up; returns the original value if rounding would |
* move the expiry into the past. |
*/ |
static unsigned long round_jiffies_common(unsigned long j, bool force_up) |
{ |
int rem; /* offset of @j within the current second */ |
unsigned long original = j; |
rem = j % HZ; |
/* |
* If the target jiffie is just after a whole second (which can happen |
* due to delays of the timer irq, long irq off times etc etc) then |
* we should round down to the whole second, not up. Use 1/4th second |
* as cutoff for this rounding as an extreme upper bound for this. |
* But never round down if @force_up is set. |
*/ |
if (rem < HZ/4 && !force_up) /* round down */ |
j = j - rem; |
else /* round up */ |
j = j - rem + HZ; |
if (j <= GetTimerTicks()) /* rounding ate our timeout entirely; */ |
return original; |
return j; |
} |
unsigned long round_jiffies_up_relative(unsigned long j); |
#define TIMESTAMP_SIZE 30 |
#endif |
/drivers/include/linux/kobject.h |
---|
25,8 → 25,7 |
//#include <linux/kobject_ns.h> |
#include <linux/kernel.h> |
#include <linux/wait.h> |
#include <linux/atomic.h> |
#include <linux/workqueue.h> |
//#include <linux/atomic.h> |
#define UEVENT_HELPER_PATH_LEN 256 |
#define UEVENT_NUM_ENVP 32 /* number of env pointers */ |
/drivers/include/linux/list.h |
---|
4,8 → 4,6 |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/poison.h> |
#include <linux/const.h> |
#include <linux/kernel.h> |
/* |
* Simple doubly linked list implementation. |
346,7 → 344,7 |
* list_entry - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_entry(ptr, type, member) \ |
container_of(ptr, type, member) |
355,7 → 353,7 |
* list_first_entry - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
366,7 → 364,7 |
* list_last_entry - get the last element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
377,7 → 375,7 |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
*/ |
387,7 → 385,7 |
/** |
* list_next_entry - get the next element in list |
* @pos: the type * to cursor |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_next_entry(pos, member) \ |
list_entry((pos)->member.next, typeof(*(pos)), member) |
395,7 → 393,7 |
/** |
* list_prev_entry - get the prev element in list |
* @pos: the type * to cursor |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_prev_entry(pos, member) \ |
list_entry((pos)->member.prev, typeof(*(pos)), member) |
441,7 → 439,7 |
* list_for_each_entry - iterate over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry(pos, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member); \ |
452,7 → 450,7 |
* list_for_each_entry_reverse - iterate backwards over list of given type. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_reverse(pos, head, member) \ |
for (pos = list_last_entry(head, typeof(*pos), member); \ |
463,7 → 461,7 |
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
* @pos: the type * to use as a start point |
* @head: the head of the list |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
*/ |
474,7 → 472,7 |
* list_for_each_entry_continue - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
488,7 → 486,7 |
* list_for_each_entry_continue_reverse - iterate backwards from the given point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Start to iterate over list of given type backwards, continuing after |
* the current position. |
502,7 → 500,7 |
* list_for_each_entry_from - iterate over list of given type from the current point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type, continuing from current position. |
*/ |
515,7 → 513,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_safe(pos, n, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member), \ |
528,7 → 526,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type, continuing after current point, |
* safe against removal of list entry. |
544,7 → 542,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type from current point, safe against |
* removal of list entry. |
559,7 → 557,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate backwards over list of given type, safe against removal |
* of list entry. |
574,7 → 572,7 |
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
* @pos: the loop cursor used in the list_for_each_entry_safe loop |
* @n: temporary storage used in list_for_each_entry_safe |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* list_safe_reset_next is not safe to use in general if the list may be |
* modified concurrently (eg. the lock is dropped in the loop body). An |
/drivers/include/linux/lockdep.h |
---|
4,7 → 4,7 |
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
* |
* see Documentation/locking/lockdep-design.txt for more details. |
* see Documentation/lockdep-design.txt for more details. |
*/ |
#ifndef __LINUX_LOCKDEP_H |
#define __LINUX_LOCKDEP_H |
12,10 → 12,6 |
struct task_struct; |
struct lockdep_map; |
/* for sysctl */ |
extern int prove_locking; |
extern int lock_stat; |
#ifdef CONFIG_LOCKDEP |
#include <linux/linkage.h> |
55,8 → 51,6 |
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
}; |
extern struct lock_class_key __lockdep_no_validate__; |
#define LOCKSTAT_POINTS 4 |
/* |
157,25 → 151,7 |
#endif |
}; |
static inline void lockdep_copy_map(struct lockdep_map *to, |
struct lockdep_map *from) |
{ |
int i; |
*to = *from; |
/* |
* Since the class cache can be modified concurrently we could observe |
* half pointers (64bit arch using 32bit copy insns). Therefore clear |
* the caches and take the performance hit. |
* |
* XXX it doesn't work well with lockdep_set_class_and_subclass(), since |
* that relies on cache abuse. |
*/ |
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) |
to->class_cache[i] = NULL; |
} |
/* |
* Every lock has a list of other locks that were taken after it. |
* We only grow the list, never remove from it: |
*/ |
362,10 → 338,6 |
WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_assert_held_once(l) do { \ |
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) |
#else /* !CONFIG_LOCKDEP */ |
416,7 → 388,6 |
#define lockdep_depth(tsk) (0) |
#define lockdep_assert_held(l) do { (void)(l); } while (0) |
#define lockdep_assert_held_once(l) do { (void)(l); } while (0) |
#define lockdep_recursing(tsk) (0) |
483,35 → 454,82 |
* on the per lock-class debug mode: |
*/ |
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) |
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) |
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
#define spin_release(l, n, i) lock_release(l, n, i) |
#else |
# define spin_acquire(l, s, t, i) do { } while (0) |
# define spin_release(l, n, i) do { } while (0) |
#endif |
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
# else |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
# endif |
#define rwlock_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwlock_acquire(l, s, t, i) do { } while (0) |
# define rwlock_acquire_read(l, s, t, i) do { } while (0) |
# define rwlock_release(l, n, i) do { } while (0) |
#endif |
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
#define seqcount_release(l, n, i) lock_release(l, n, i) |
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# endif |
#define mutex_release(l, n, i) lock_release(l, n, i) |
#else |
# define mutex_acquire(l, s, t, i) do { } while (0) |
# define mutex_acquire_nest(l, s, t, n, i) do { } while (0) |
# define mutex_release(l, n, i) do { } while (0) |
#endif |
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
#define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_) |
# else |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_) |
# endif |
#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
#else |
# define lock_map_acquire(l) do { } while (0) |
# define lock_map_acquire_read(l) do { } while (0) |
# define lock_map_release(l) do { } while (0) |
#endif |
#ifdef CONFIG_PROVE_LOCKING |
# define might_lock(lock) \ |
/drivers/include/linux/mutex.h |
---|
10,12 → 10,8 |
#ifndef __LINUX_MUTEX_H |
#define __LINUX_MUTEX_H |
#include <asm/current.h> |
#include <linux/list.h> |
#include <asm/atomic.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <asm/processor.h> |
/* |
* Simple, straightforward mutexes with strict semantics: |
/drivers/include/linux/pci.h |
---|
17,13 → 17,9 |
#define LINUX_PCI_H |
#include <linux/types.h> |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/errno.h> |
#include <linux/atomic.h> |
#include <list.h> |
#include <linux/pci_regs.h> /* The pci register defines */ |
#include <linux/ioport.h> |
#include <ioport.h> |
#define PCI_CFG_SPACE_SIZE 256 |
315,19 → 311,6 |
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, |
}; |
/* These values come from the PCI Express Spec */ |
enum pcie_link_width { |
PCIE_LNK_WIDTH_RESRV = 0x00, |
PCIE_LNK_X1 = 0x01, |
PCIE_LNK_X2 = 0x02, |
PCIE_LNK_X4 = 0x04, |
PCIE_LNK_X8 = 0x08, |
PCIE_LNK_X12 = 0x0C, |
PCIE_LNK_X16 = 0x10, |
PCIE_LNK_X32 = 0x20, |
PCIE_LNK_WIDTH_UNKNOWN = 0xFF, |
}; |
/* Based on the PCI Hotplug Spec, but some values are made up by us */ |
enum pci_bus_speed { |
PCI_SPEED_33MHz = 0x00, |
355,23 → 338,6 |
PCI_SPEED_UNKNOWN = 0xff, |
}; |
struct pci_cap_saved_data { |
u16 cap_nr; |
bool cap_extended; |
unsigned int size; |
u32 data[0]; |
}; |
struct pci_cap_saved_state { |
struct hlist_node next; |
struct pci_cap_saved_data cap; |
}; |
struct pcie_link_state; |
struct pci_vpd; |
struct pci_sriov; |
struct pci_ats; |
/* |
* The pci_dev structure is used to describe PCI devices. |
*/ |
383,7 → 349,7 |
void *sysdata; /* hook for sys-specific extension */ |
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ |
struct pci_slot *slot; /* Physical slot this device is in */ |
u32 busnr; |
u32_t busnr; |
unsigned int devfn; /* encoded device & function index */ |
unsigned short vendor; |
unsigned short device; |
399,7 → 365,7 |
u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ |
// struct pci_driver *driver; /* which driver has allocated this device */ |
u64 dma_mask; /* Mask of the bits of bus address this |
uint64_t dma_mask; /* Mask of the bits of bus address this |
device implements. Normally this is |
0xffffffff. You only need to change |
this if your device has broken DMA |
582,7 → 548,7 |
case PCIBIOS_FUNC_NOT_SUPPORTED: |
return -ENOENT; |
case PCIBIOS_BAD_VENDOR_ID: |
return -ENOTTY; |
return -EINVAL; |
case PCIBIOS_DEVICE_NOT_FOUND: |
return -ENODEV; |
case PCIBIOS_BAD_REGISTER_NUMBER: |
593,7 → 559,7 |
return -ENOSPC; |
} |
return -ERANGE; |
return -ENOTTY; |
} |
/* Low-level architecture-dependent routines */ |
603,20 → 569,7 |
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); |
}; |
/* |
* ACPI needs to be able to access PCI config space before we've done a |
* PCI bus scan and created pci_bus structures. |
*/ |
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 *val); |
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 val); |
struct pci_bus_region { |
dma_addr_t start; |
dma_addr_t end; |
}; |
enum pci_bar_type { |
pci_bar_unknown, /* Standard PCI BAR probe */ |
pci_bar_io, /* An io port BAR */ |
/drivers/include/linux/rculist.h |
---|
7,7 → 7,7 |
* RCU-protected list version |
*/ |
#include <linux/list.h> |
#include <linux/rcupdate.h> |
//#include <linux/rcupdate.h> |
/* |
* Why is there no list_empty_rcu()? Because list_empty() serves this |
19,21 → 19,6 |
*/ |
/* |
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers |
* @list: list to be initialized |
* |
* You should instead use INIT_LIST_HEAD() for normal initialization and |
* cleanup tasks, when readers have no access to the list being initialized. |
* However, if the list being initialized is visible to readers, you |
* need to keep the compiler from being too mischievous. |
*/ |
static inline void INIT_LIST_HEAD_RCU(struct list_head *list) |
{ |
ACCESS_ONCE(list->next) = list; |
ACCESS_ONCE(list->prev) = list; |
} |
/* |
* return the ->next pointer of a list_head in an rcu safe |
* way, we must not access it directly |
*/ |
212,7 → 197,7 |
* instead of INIT_LIST_HEAD(). |
*/ |
INIT_LIST_HEAD_RCU(list); |
INIT_LIST_HEAD(list); |
/* |
* At this point, the list body still points to the source list. |
241,7 → 226,7 |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
278,7 → 263,7 |
* list_first_or_null_rcu - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
* |
296,7 → 281,7 |
* list_for_each_entry_rcu - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* This list-traversal primitive may safely run concurrently with |
* the _rcu list-mutation primitives such as list_add_rcu() |
311,7 → 296,7 |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_head within the struct. |
* @member: the name of the list_struct within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
542,15 → 527,6 |
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ |
typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from_rcu(pos, member) \ |
for (; pos; \ |
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ |
typeof(*(pos)), member)) |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/reservation.h |
---|
40,103 → 40,23 |
#define _LINUX_RESERVATION_H |
#include <linux/ww_mutex.h> |
#include <linux/fence.h> |
#include <linux/slab.h> |
#include <linux/seqlock.h> |
#include <linux/rcupdate.h> |
extern struct ww_class reservation_ww_class; |
extern struct lock_class_key reservation_seqcount_class; |
extern const char reservation_seqcount_string[]; |
struct reservation_object_list { |
struct rcu_head rcu; |
u32 shared_count, shared_max; |
struct fence __rcu *shared[]; |
}; |
struct reservation_object { |
struct ww_mutex lock; |
seqcount_t seq; |
struct fence __rcu *fence_excl; |
struct reservation_object_list __rcu *fence; |
struct reservation_object_list *staged; |
}; |
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) |
#define reservation_object_assert_held(obj) \ |
lockdep_assert_held(&(obj)->lock.base) |
static inline void |
reservation_object_init(struct reservation_object *obj) |
{ |
ww_mutex_init(&obj->lock, &reservation_ww_class); |
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); |
RCU_INIT_POINTER(obj->fence, NULL); |
RCU_INIT_POINTER(obj->fence_excl, NULL); |
obj->staged = NULL; |
} |
static inline void |
reservation_object_fini(struct reservation_object *obj) |
{ |
int i; |
struct reservation_object_list *fobj; |
struct fence *excl; |
/* |
* This object should be dead and all references must have |
* been released to it, so no need to be protected with rcu. |
*/ |
excl = rcu_dereference_protected(obj->fence_excl, 1); |
if (excl) |
fence_put(excl); |
fobj = rcu_dereference_protected(obj->fence, 1); |
if (fobj) { |
for (i = 0; i < fobj->shared_count; ++i) |
fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
kfree(fobj); |
} |
kfree(obj->staged); |
ww_mutex_destroy(&obj->lock); |
} |
static inline struct reservation_object_list * |
reservation_object_get_list(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence, |
reservation_object_held(obj)); |
} |
static inline struct fence * |
reservation_object_get_excl(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence_excl, |
reservation_object_held(obj)); |
} |
int reservation_object_reserve_shared(struct reservation_object *obj); |
void reservation_object_add_shared_fence(struct reservation_object *obj, |
struct fence *fence); |
void reservation_object_add_excl_fence(struct reservation_object *obj, |
struct fence *fence); |
int reservation_object_get_fences_rcu(struct reservation_object *obj, |
struct fence **pfence_excl, |
unsigned *pshared_count, |
struct fence ***pshared); |
long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
bool wait_all, bool intr, |
unsigned long timeout); |
bool reservation_object_test_signaled_rcu(struct reservation_object *obj, |
bool test_all); |
#endif /* _LINUX_RESERVATION_H */ |
/drivers/include/linux/scatterlist.h |
---|
101,22 → 101,6 |
return (struct page *)((sg)->page_link & ~0x3); |
} |
/** |
* sg_set_buf - Set sg entry to point at given data |
* @sg: SG entry |
* @buf: Data |
* @buflen: Data length |
* |
**/ |
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
// unsigned int buflen) |
//{ |
//#ifdef CONFIG_DEBUG_SG |
// BUG_ON(!virt_addr_valid(buf)); |
//#endif |
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
//} |
/* |
* Loop over each sg element, following the pointer to a new list if necessary |
*/ |
136,7 → 120,7 |
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, |
struct scatterlist *sgl) |
{ |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
#ifndef ARCH_HAS_SG_CHAIN |
BUG(); |
#endif |
/drivers/include/linux/sched.h |
---|
7,6 → 7,5 |
#define TASK_COMM_LEN 16 |
#define schedule_timeout(x) delay(x) |
#define MAX_SCHEDULE_TIMEOUT LONG_MAX |
#endif |
/drivers/include/linux/string.h |
---|
6,7 → 6,6 |
#include <linux/types.h> /* for size_t */ |
#include <linux/stddef.h> /* for NULL */ |
#include <stdarg.h> |
#include <uapi/linux/string.h> |
extern char *strndup_user(const char __user *, long); |
extern void *memdup_user(const void __user *, size_t); |
41,7 → 40,7 |
extern int strncmp(const char *,const char *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRNICMP |
#define strnicmp strncasecmp |
extern int strnicmp(const char *, const char *, __kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRCASECMP |
extern int strcasecmp(const char *s1, const char *s2); |
144,8 → 143,7 |
return strncmp(str, prefix, strlen(prefix)) == 0; |
} |
size_t memweight(const void *ptr, size_t bytes); |
void memzero_explicit(void *s, size_t count); |
extern size_t memweight(const void *ptr, size_t bytes); |
/** |
* kbasename - return the last part of a pathname. |
/drivers/include/linux/types.h |
---|
1,14 → 1,23 |
#ifndef _LINUX_TYPES_H |
#define _LINUX_TYPES_H |
#define __EXPORTED_HEADERS__ |
#include <uapi/linux/types.h> |
#include <asm/types.h> |
#ifndef __ASSEMBLY__ |
#ifdef __KERNEL__ |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#else |
#ifndef __EXPORTED_HEADERS__ |
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" |
#endif /* __EXPORTED_HEADERS__ */ |
#endif |
#include <linux/posix_types.h> |
#ifdef __KERNEL__ |
typedef __u32 __kernel_dev_t; |
typedef __kernel_fd_set fd_set; |
149,12 → 158,48 |
typedef u32 dma_addr_t; |
#endif /* dma_addr_t */ |
#endif /* __KERNEL__ */ |
/* |
* Below are truly Linux-specific types that should never collide with |
* any application/library that wants linux/types.h. |
*/ |
#ifdef __CHECKER__ |
#define __bitwise__ __attribute__((bitwise)) |
#else |
#define __bitwise__ |
#endif |
#ifdef __CHECK_ENDIAN__ |
#define __bitwise __bitwise__ |
#else |
#define __bitwise |
#endif |
typedef __u16 __bitwise __le16; |
typedef __u16 __bitwise __be16; |
typedef __u32 __bitwise __le32; |
typedef __u32 __bitwise __be32; |
typedef __u64 __bitwise __le64; |
typedef __u64 __bitwise __be64; |
typedef __u16 __bitwise __sum16; |
typedef __u32 __bitwise __wsum; |
/* |
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid |
* common 32/64-bit compat problems. |
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other |
* architectures) and to 8-byte boundaries on 64-bit architetures. The new |
* aligned_64 type enforces 8-byte alignment so that structs containing |
* aligned_64 values have the same alignment on 32-bit and 64-bit architectures. |
* No conversions are necessary between 32-bit user-space and a 64-bit kernel. |
*/ |
#define __aligned_u64 __u64 __attribute__((aligned(8))) |
#define __aligned_be64 __be64 __attribute__((aligned(8))) |
#define __aligned_le64 __le64 __attribute__((aligned(8))) |
#ifdef __KERNEL__ |
typedef unsigned __bitwise__ gfp_t; |
typedef unsigned __bitwise__ fmode_t; |
typedef unsigned __bitwise__ oom_flags_t; |
202,6 → 247,111 |
char f_fpack[6]; |
}; |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
typedef unsigned char u8_t; |
typedef unsigned short u16_t; |
typedef unsigned long u32_t; |
typedef unsigned long long u64_t; |
typedef unsigned int addr_t; |
typedef unsigned int count_t; |
#define false 0 |
#define true 1 |
#define likely(x) __builtin_expect(!!(x), 1) |
#define unlikely(x) __builtin_expect(!!(x), 0) |
#define BITS_PER_LONG 32 |
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) |
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
#define MTRR_TYPE_UNCACHABLE 0 |
#define MTRR_TYPE_WRCOMB 1 |
#define MTRR_TYPE_WRTHROUGH 4 |
#define MTRR_TYPE_WRPROT 5 |
#define MTRR_TYPE_WRBACK 6 |
#define MTRR_NUM_TYPES 7 |
int dbgprintf(const char* format, ...); |
#define GFP_KERNEL 0 |
#define GFP_ATOMIC 0 |
//#include <stdio.h> |
int snprintf(char *str, size_t size, const char *format, ...); |
//#include <string.h> |
void* memcpy(void *s1, const void *s2, size_t n); |
void* memset(void *s, int c, size_t n); |
size_t strlen(const char *s); |
char *strcpy(char *s1, const char *s2); |
char *strncpy (char *dst, const char *src, size_t len); |
void *malloc(size_t size); |
void* realloc(void* oldmem, size_t bytes); |
#define kfree free |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return realloc(p, new_size); |
} |
static inline void *kzalloc(size_t size, uint32_t flags) |
{ |
void *ret = malloc(size); |
memset(ret, 0, size); |
return ret; |
} |
#define kmalloc(s,f) kzalloc((s), (f)) |
struct drm_file; |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (1UL << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
struct timeval |
{ |
__kernel_time_t tv_sec; /* seconds */ |
__kernel_suseconds_t tv_usec; /* microseconds */ |
}; |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#ifndef __read_mostly |
#define __read_mostly |
#endif |
/** |
* struct callback_head - callback structure for use with RCU and task_work |
* @next: next update requests in a list |
213,5 → 363,4 |
}; |
#define rcu_head callback_head |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_TYPES_H */ |
/drivers/include/linux/wait.h |
---|
1,15 → 1,8 |
#ifndef _LINUX_WAIT_H |
#define _LINUX_WAIT_H |
/* |
* Linux wait queue related types and methods |
*/ |
#include <linux/list.h> |
#include <linux/stddef.h> |
#include <linux/spinlock.h> |
#include <asm/current.h> |
#include <linux/list.h> |
#include <syscall.h> |
typedef struct __wait_queue wait_queue_t; |
35,10 → 28,6 |
return !list_empty(&q->task_list); |
} |
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
{ |
list_add(&new->task_list, &head->task_list); |
156,10 → 145,10 |
}; |
//struct completion { |
// unsigned int done; |
// wait_queue_head_t wait; |
//}; |
struct completion { |
unsigned int done; |
wait_queue_head_t wait; |
}; |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
/drivers/include/linux/ww_mutex.h |
---|
17,6 → 17,8 |
#include <linux/mutex.h> |
#include <syscall.h> |
#define current (void*)GetPid() |
struct ww_class { |
atomic_long_t stamp; |
struct lock_class_key acquire_key; |
/drivers/include/linux/delay.h |
---|
7,49 → 7,6 |
* Delay routines, using a pre-computed "loops_per_jiffy" value. |
*/ |
#include <linux/kernel.h> |
#define usleep_range(min, max) udelay(max) |
extern unsigned long loops_per_jiffy; |
#include <asm/delay.h> |
/* |
* Using udelay() for intervals greater than a few milliseconds can |
* risk overflow for high loops_per_jiffy (high bogomips) machines. The |
* mdelay() provides a wrapper to prevent this. For delays greater |
* than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture |
* specific values can be defined in asm-???/delay.h as an override. |
* The 2nd mdelay() definition ensures GCC will optimize away the |
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. |
*/ |
#ifndef MAX_UDELAY_MS |
#define MAX_UDELAY_MS 5 |
#endif |
#ifndef mdelay |
#define mdelay(n) (\ |
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ |
({unsigned long __ms=(n); while (__ms--) udelay(1000);})) |
#endif |
#ifndef ndelay |
static inline void ndelay(unsigned long x) |
{ |
udelay(DIV_ROUND_UP(x, 1000)); |
} |
#define ndelay(x) ndelay(x) |
#endif |
extern unsigned long lpj_fine; |
void calibrate_delay(void); |
void msleep(unsigned int msecs); |
unsigned long msleep_interruptible(unsigned int msecs); |
void usleep_range(unsigned long min, unsigned long max); |
static inline void ssleep(unsigned int seconds) |
{ |
msleep(seconds * 1000); |
} |
#endif /* defined(_LINUX_DELAY_H) */ |
/drivers/include/linux/mm.h |
---|
1,13 → 1,13 |
#ifndef _LINUX_MM_H |
#define _LINUX_MM_H |
#include <linux/errno.h> |
#include <kernel.h> |
#define VM_NORESERVE 0x00200000 |
#define nth_page(page,n) ((void*)(((page_to_phys(page)>>12)+(n))<<12)) |
#define __page_to_pfn(page) (page_to_phys(page)>>12) |
#define page_to_pfn(page) (page_to_phys(page)>>12) |
/* to align the pointer to the (next) page boundary */ |
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) |
/drivers/include/linux/workqueue.h |
---|
1,21 → 1,11 |
/* |
* workqueue.h --- work queue handling for Linux. |
*/ |
#ifndef _LINUX_WORKQUEUE_H |
#define _LINUX_WORKQUEUE_H |
#include <linux/list.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <linux/threads.h> |
#include <syscall.h> |
struct workqueue_struct; |
struct work_struct; |
typedef void (*work_func_t)(struct work_struct *work); |
void __stdcall delayed_work_timer_fn(unsigned long __data); |
/* |
* Workqueue flags and constants. For details, please refer to |
48,9 → 38,6 |
struct list_head entry; |
struct workqueue_struct *data; |
work_func_t func; |
#ifdef CONFIG_LOCKDEP |
struct lockdep_map lockdep_map; |
#endif |
}; |
struct delayed_work { |
/drivers/include/linux/module.h |
---|
8,13 → 8,9 |
*/ |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/cache.h> |
#include <linux/compiler.h> |
#include <linux/kernel.h> |
#include <linux/moduleparam.h> |
#include <linux/export.h> |
#include <linux/printk.h> |
#define MODULE_FIRMWARE(x) |
/drivers/include/linux/rbtree_augmented.h |
---|
43,16 → 43,6 |
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); |
/* |
* Fixup the rbtree and update the augmented information when rebalancing. |
* |
* On insertion, the user must update the augmented information on the path |
* leading to the inserted node, then call rb_link_node() as usual and |
* rb_augment_inserted() instead of the usual rb_insert_color() call. |
* If rb_augment_inserted() rebalances the rbtree, it will callback into |
* a user provided function to update the augmented information on the |
* affected subtrees. |
*/ |
static inline void |
rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
const struct rb_augment_callbacks *augment) |
/drivers/include/linux/slab.h |
---|
11,140 → 11,6 |
#ifndef _LINUX_SLAB_H |
#define _LINUX_SLAB_H |
#include <linux/gfp.h> |
#include <linux/types.h> |
#include <linux/workqueue.h> |
/* |
* Flags to pass to kmem_cache_create(). |
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. |
*/ |
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ |
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ |
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ |
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ |
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ |
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ |
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ |
/* |
* SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! |
* |
* This delays freeing the SLAB page by a grace period, it does _NOT_ |
* delay object freeing. This means that if you do kmem_cache_free() |
* that memory location is free to be reused at any time. Thus it may |
* be possible to see another object there in the same RCU grace period. |
* |
* This feature only ensures the memory location backing the object |
* stays valid, the trick to using this is relying on an independent |
* object validation pass. Something like: |
* |
* rcu_read_lock() |
* again: |
* obj = lockless_lookup(key); |
* if (obj) { |
* if (!try_get_ref(obj)) // might fail for free objects |
* goto again; |
* |
* if (obj->key != key) { // not the object we expected |
* put_ref(obj); |
* goto again; |
* } |
* } |
* rcu_read_unlock(); |
* |
* This is useful if we need to approach a kernel structure obliquely, |
* from its address obtained without the usual locking. We can lock |
* the structure to stabilize it and check it's still at the given address, |
* only if we can be sure that the memory has not been meanwhile reused |
* for some other kind of object (which our subsystem's lock might corrupt). |
* |
* rcu_read_lock before reading the address, then rcu_read_unlock after |
* taking the spinlock within the structure expected at that address. |
*/ |
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ |
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ |
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ |
/* Flag to prevent checks on free */ |
#ifdef CONFIG_DEBUG_OBJECTS |
# define SLAB_DEBUG_OBJECTS 0x00400000UL |
#else |
# define SLAB_DEBUG_OBJECTS 0x00000000UL |
#endif |
#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ |
/* Don't track use of uninitialized memory */ |
#ifdef CONFIG_KMEMCHECK |
# define SLAB_NOTRACK 0x01000000UL |
#else |
# define SLAB_NOTRACK 0x00000000UL |
#endif |
#ifdef CONFIG_FAILSLAB |
# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ |
#else |
# define SLAB_FAILSLAB 0x00000000UL |
#endif |
/* The following flags affect the page allocator grouping pages by mobility */ |
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
/* |
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. |
* |
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. |
* |
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. |
* Both make kfree a no-op. |
*/ |
#define ZERO_SIZE_PTR ((void *)16) |
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ |
(unsigned long)ZERO_SIZE_PTR) |
void __init kmem_cache_init(void); |
int slab_is_available(void); |
void kmem_cache_destroy(struct kmem_cache *); |
int kmem_cache_shrink(struct kmem_cache *); |
void kmem_cache_free(struct kmem_cache *, void *); |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return __builtin_realloc(p, new_size); |
} |
static inline void kfree(void *p) |
{ |
__builtin_free(p); |
} |
static __always_inline void *kmalloc(size_t size, gfp_t flags) |
{ |
return __builtin_malloc(size); |
} |
/** |
* kzalloc - allocate memory. The memory is set to zero. |
* @size: how many bytes of memory are required. |
* @flags: the type of memory to allocate (see kmalloc). |
*/ |
static inline void *kzalloc(size_t size, gfp_t flags) |
{ |
void *ret = __builtin_malloc(size); |
memset(ret, 0, size); |
return ret; |
} |
static inline void *kcalloc(size_t n, size_t size, uint32_t flags) |
{ |
return (void*)kzalloc(n * size, 0); |
} |
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) |
{ |
// if (size != 0 && n > SIZE_MAX / size) |
// return NULL; |
return (void*)kmalloc(n * size, flags); |
} |
#include <errno.h> |
// stub |
#endif /* _LINUX_SLAB_H */ |
/drivers/include/linux/hash.h |
---|
36,9 → 36,6 |
{ |
u64 hash = val; |
#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
hash = hash * GOLDEN_RATIO_PRIME_64; |
#else |
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
u64 n = hash; |
n <<= 18; |
53,7 → 50,6 |
hash += n; |
n <<= 2; |
hash += n; |
#endif |
/* High bits are more random, so use them. */ |
return hash >> (64 - bits); |
82,5 → 78,4 |
#endif |
return (u32)val; |
} |
#endif /* _LINUX_HASH_H */ |
/drivers/include/linux/time.h |
---|
1,13 → 1,22 |
#ifndef _LINUX_TIME_H |
#define _LINUX_TIME_H |
# include <linux/cache.h> |
# include <linux/seqlock.h> |
//# include <linux/cache.h> |
//# include <linux/seqlock.h> |
# include <linux/math64.h> |
# include <linux/time64.h> |
//#include <uapi/linux/time.h> |
extern struct timezone sys_tz; |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
static inline int timespec_equal(const struct timespec *a, |
39,21 → 48,10 |
return lhs->tv_usec - rhs->tv_usec; |
} |
extern time64_t mktime64(const unsigned int year, const unsigned int mon, |
extern unsigned long mktime(const unsigned int year, const unsigned int mon, |
const unsigned int day, const unsigned int hour, |
const unsigned int min, const unsigned int sec); |
/** |
* Deprecated. Use mktime64(). |
*/ |
static inline unsigned long mktime(const unsigned int year, |
const unsigned int mon, const unsigned int day, |
const unsigned int hour, const unsigned int min, |
const unsigned int sec) |
{ |
return mktime64(year, mon, day, hour, min, sec); |
} |
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
/* |
86,6 → 84,13 |
return ts_delta; |
} |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#if (BITS_PER_LONG == 64) |
# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#else |
# define KTIME_SEC_MAX LONG_MAX |
#endif |
/* |
* Returns true if the timespec is norm, false if denorm: |
*/ |
110,8 → 115,28 |
return true; |
} |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern bool persistent_clock_exist; |
static inline bool has_persistent_clock(void) |
{ |
return persistent_clock_exist; |
} |
extern void read_persistent_clock(struct timespec *ts); |
extern void read_boot_clock(struct timespec *ts); |
extern int persistent_clock_is_local; |
extern int update_persistent_clock(struct timespec now); |
void timekeeping_init(void); |
extern int timekeeping_suspended; |
unsigned long get_seconds(void); |
struct timespec current_kernel_time(void); |
struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
struct timespec get_monotonic_coarse(void); |
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
struct timespec *wtom, struct timespec *sleep); |
void timekeeping_inject_sleeptime(struct timespec *delta); |
#define CURRENT_TIME (current_kernel_time()) |
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
128,15 → 153,34 |
extern u32 (*arch_gettimeoffset)(void); |
#endif |
extern void do_gettimeofday(struct timeval *tv); |
extern int do_settimeofday(const struct timespec *tv); |
extern int do_sys_settimeofday(const struct timespec *tv, |
const struct timezone *tz); |
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct itimerval; |
extern int do_setitimer(int which, struct itimerval *value, |
struct itimerval *ovalue); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern int do_getitimer(int which, struct itimerval *value); |
extern int __getnstimeofday(struct timespec *tv); |
extern void getnstimeofday(struct timespec *tv); |
extern void getrawmonotonic(struct timespec *ts); |
extern void getnstime_raw_and_real(struct timespec *ts_raw, |
struct timespec *ts_real); |
extern void getboottime(struct timespec *ts); |
extern void monotonic_to_bootbased(struct timespec *ts); |
extern void get_monotonic_boottime(struct timespec *ts); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern int timekeeping_valid_for_hres(void); |
extern u64 timekeeping_max_deferment(void); |
extern int timekeeping_inject_offset(struct timespec *ts); |
extern s32 timekeeping_get_tai_offset(void); |
extern void timekeeping_set_tai_offset(s32 tai_offset); |
extern void timekeeping_clocktai(struct timespec *ts); |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct tms; |
extern void do_sys_times(struct tms *); |
/drivers/include/linux/asm/scatterlist.h |
---|
0,0 → 1,41 |
#ifndef __ASM_GENERIC_SCATTERLIST_H |
#define __ASM_GENERIC_SCATTERLIST_H |
#include <linux/types.h> |
/* One entry of a scatter/gather list: a (page, offset, length) triple
 * plus the bus address filled in by dma_map_sg(). */
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
	unsigned long sg_magic;		/* debug cookie to catch corrupt lists */
#endif
	unsigned long page_link;	/* page pointer; low bits presumably carry
					 * chain/end markers (ARCH_HAS_SG_CHAIN
					 * is defined below) -- confirm */
	unsigned int offset;		/* byte offset of the buffer in the page */
	unsigned int length;		/* byte length of the buffer */
	dma_addr_t dma_address;		/* bus address set by dma_map_sg() */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	unsigned int dma_length;	/* mapped length; may differ from length */
#endif
};
/* |
* These macros should be used after a dma_map_sg call has been done |
* to get bus addresses of each of the SG entries and their lengths. |
* You should only work with the number of sg entries pci_map_sg |
* returns, or alternatively stop on the first sg_dma_len(sg) which |
* is 0. |
*/ |
#define sg_dma_address(sg) ((sg)->dma_address) |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
#define sg_dma_len(sg) ((sg)->dma_length) |
#else |
#define sg_dma_len(sg) ((sg)->length) |
#endif |
#define ARCH_HAS_SG_CHAIN |
int dma_map_sg(struct device *dev, struct scatterlist *sglist, |
int nelems, int dir); |
#define dma_unmap_sg(d, s, n, r) |
#endif /* __ASM_GENERIC_SCATTERLIST_H */ |
/drivers/include/linux/asm/alternative.h |
---|
0,0 → 1,164 |
#ifndef _ASM_X86_ALTERNATIVE_H |
#define _ASM_X86_ALTERNATIVE_H |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
/* |
* Alternative inline assembly for SMP. |
* |
* The LOCK_PREFIX macro defined here replaces the LOCK and |
* LOCK_PREFIX macros used everywhere in the source tree. |
* |
* SMP alternatives use the same data structures as the other |
* alternatives and the X86_FEATURE_UP flag to indicate the case of a |
* UP system running a SMP kernel. The existing apply_alternatives() |
* works fine for patching a SMP kernel for UP. |
* |
* The SMP alternative tables can be kept after boot and contain both |
* UP and SMP versions of the instructions to allow switching back to |
* SMP at runtime, when hotplugging in a new CPU, which is especially |
* useful in virtualized environments. |
* |
* The very common lock prefix is handled as special case in a |
* separate table which is a pure address list without replacement ptr |
* and size information. That keeps the table sizes small. |
*/ |
#ifdef CONFIG_SMP |
#define LOCK_PREFIX \ |
".section .smp_locks,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661f\n" /* address */ \ |
".previous\n" \ |
"661:\n\tlock; " |
#else /* ! CONFIG_SMP */ |
#define LOCK_PREFIX "" |
#endif |
/* This must be included *after* the definition of LOCK_PREFIX */ |
#include <asm/cpufeature.h> |
/* One record in the .altinstructions section: describes an original
 * instruction sequence and the CPU-feature-gated replacement that
 * apply_alternatives() may patch over it at boot. */
struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;	/* replacement instruction bytes */
	u8  cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
	u8  pad1;		/* padding for alignment */
#ifdef CONFIG_X86_64
	u32 pad2;
#endif
};
extern void alternative_instructions(void); |
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); |
struct module; |
#ifdef CONFIG_SMP |
extern void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end); |
extern void alternatives_smp_module_del(struct module *mod); |
extern void alternatives_smp_switch(int smp); |
#else |
/* !CONFIG_SMP: SMP-alternatives bookkeeping compiles away to no-ops. */
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */ |
/* alternative assembly primitive: */ |
#define ALTERNATIVE(oldinstr, newinstr, feature) \ |
\ |
"661:\n\t" oldinstr "\n662:\n" \ |
".section .altinstructions,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661b\n" /* label */ \ |
_ASM_PTR "663f\n" /* new instruction */ \ |
" .byte " __stringify(feature) "\n" /* feature bit */ \ |
" .byte 662b-661b\n" /* sourcelen */ \ |
" .byte 664f-663f\n" /* replacementlen */ \ |
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ |
".previous\n" \ |
".section .altinstr_replacement, \"ax\"\n" \ |
"663:\n\t" newinstr "\n664:\n" /* replacement */ \ |
".previous" |
/* |
* Alternative instructions for different CPU types or capabilities. |
* |
* This allows to use optimized instructions even on generic binary |
* kernels. |
* |
* length of oldinstr must be longer or equal the length of newinstr |
* It can be padded with nops as needed. |
* |
* For non barrier like inlines please define new variants |
* without volatile and memory clobber. |
*/ |
#define alternative(oldinstr, newinstr, feature) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") |
/* |
* Alternative inline assembly with input. |
* |
* Pecularities: |
* No memory clobber here. |
* Argument numbers start with 1. |
* Best is to use constraints that are fixed size (like (%1) ... "r") |
* If you use variable sized constraints like "m" or "g" in the |
* replacement make sure to pad to the worst case length. |
* Leaving an unused argument 0 to keep API compatibility. |
*/ |
#define alternative_input(oldinstr, newinstr, feature, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: : "i" (0), ## input) |
/* Like alternative_input, but with a single output argument */ |
#define alternative_io(oldinstr, newinstr, feature, output, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: output : "i" (0), ## input) |
/* |
* use this macro(s) if you need more than one output parameter |
* in alternative_io |
*/ |
#define ASM_OUTPUT2(a) a |
struct paravirt_patch_site; |
#ifdef CONFIG_PARAVIRT |
void apply_paravirt(struct paravirt_patch_site *start, |
struct paravirt_patch_site *end); |
#else |
/* !CONFIG_PARAVIRT: there are no paravirt patch sites to process. */
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions NULL |
#define __parainstructions_end NULL |
#endif |
/* |
* Clear and restore the kernel write-protection flag on the local CPU. |
* Allows the kernel to edit read-only pages. |
* Side-effect: any interrupt handler running between save and restore will have |
* the ability to write to read-only pages. |
* |
* Warning: |
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and |
* no thread can be preempted in the instructions being modified (no iret to an |
* invalid instruction possible) or if the instructions are changed from a |
* consistent state to another consistent state atomically. |
* More care must be taken when modifying code in the SMP case because of |
* Intel's errata. |
* On the local CPU you need to be protected again NMI or MCE handlers seeing an |
* inconsistent instruction while you patch. |
*/ |
extern void *text_poke(void *addr, const void *opcode, size_t len); |
#endif /* _ASM_X86_ALTERNATIVE_H */ |
/drivers/include/linux/asm/atomic_32.h |
---|
0,0 → 1,441 |
#ifndef _ASM_X86_ATOMIC_32_H |
#define _ASM_X86_ATOMIC_32_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
//#include <asm/processor.h> |
#include <asm/cmpxchg.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i) { (i) } |
/** |
* atomic_read - read atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
{ |
return v->counter; |
} |
/** |
* atomic_set - set atomic variable |
* @v: pointer of type atomic_t |
* @i: required value |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
} |
/** |
* atomic_add - add integer to atomic variable |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v)
{
	/* LOCK_PREFIX makes the read-modify-write atomic on SMP builds;
	 * "ir" lets gcc pass @i as immediate or register. */
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
/** |
* atomic_sub - subtract integer from atomic variable |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v)
{
	/* Lock-prefixed subl: atomic RMW on SMP builds. */
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
/** |
* atomic_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;
	/* "sete" captures ZF: c == 1 iff the subtraction left zero. */
	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
/** |
* atomic_inc - increment atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v)
{
	/* Lock-prefixed incl: atomic increment on SMP builds. */
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}
/** |
* atomic_dec - decrement atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v)
{
	/* Lock-prefixed decl: atomic decrement on SMP builds. */
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}
/** |
* atomic_dec_and_test - decrement and test |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;
	/* "sete" captures ZF from the decrement: c == 1 iff result is 0. */
	asm volatile(LOCK_PREFIX "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
/** |
* atomic_inc_and_test - increment and test |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;
	/* "sete" captures ZF from the increment: c == 1 iff result is 0. */
	asm volatile(LOCK_PREFIX "incl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
/** |
* atomic_add_negative - add and test if negative |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;
	/* "sets" captures SF: c == 1 iff the sum is negative. */
	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
/** |
* atomic_add_return - add integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	/* xadd stores the OLD counter value back into %0 (i), so the new
	 * value is old + delta == i + __i. */
	__i = i;
	asm volatile(LOCK_PREFIX "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	/* No xadd on the 386: emulate it with read+write under disabled
	 * interrupts, which is only safe on uniprocessor systems. */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}
/** |
* atomic_sub_return - subtract integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to subtract |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v)
{
	/* Subtracting is just adding the negated delta. */
	return atomic_add_return(-i, v);
}
/* Compare-and-swap on the counter: returns the value actually found,
 * which equals @old exactly when the swap succeeded. */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}
/* Unconditionally store @new and return the previous counter value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		/* cmpxchg returns what it found; a mismatch means another
		 * CPU changed the counter, so retry with the fresh value. */
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	/* True iff the add was performed (v was never seen equal to u). */
	return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" (mask), "m" (*(addr)) : "memory") |
/* Atomic operations are already serializing on x86 */ |
#define smp_mb__before_atomic_dec() barrier() |
#define smp_mb__after_atomic_dec() barrier() |
#define smp_mb__before_atomic_inc() barrier() |
#define smp_mb__after_atomic_inc() barrier() |
/* An 64bit atomic type */ |
typedef struct { |
u64 __aligned(8) counter; |
} atomic64_t; |
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); |
/** |
* atomic64_xchg - xchg atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically xchgs the value of @ptr to @new_val and returns |
* the old value. |
*/ |
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
	long long o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	/* cmpxchg8b compares EDX:EAX with the memory operand; on failure it
	 * loads the current value into EDX:EAX, so the loop converges and
	 * @o ends up holding the pre-exchange value.
	 * NOTE(review): no LOCK prefix -- atomic only w.r.t. the local CPU;
	 * confirm SMP is not a target for this port. */
	asm volatile(
		"1:		 \n\t"
		"cmpxchg8b (%%esi) \n\t"
		"jnz 1b		 \n\t"
		:"=&A" (o)
		:"S" (v), "b" (low), "c" (high)
		: "memory", "cc");
	return o;
}
/** |
* atomic64_set - set atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically sets the value of @ptr to @new_val. |
*/ |
static inline void atomic64_set(atomic64_t *v, long long i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	/* Same cmpxchg8b retry loop as atomic64_xchg, with the old value
	 * discarded (EAX/EDX are clobbers rather than outputs).
	 * NOTE(review): no LOCK prefix -- see atomic64_xchg. */
	asm volatile (
		"1:		 \n\t"
		"cmpxchg8b (%%esi) \n\t"
		"jnz 1b		 \n\t"
		:
		:"S" (v), "b" (low), "c" (high)
		: "eax", "edx", "memory", "cc");
}
/** |
* atomic64_read - read atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically reads the value of @ptr and returns it. |
*/ |
static inline u64 atomic64_read(atomic64_t *ptr)
{
	u64 res;
	/*
	 * Note, we inline this atomic64_t primitive because
	 * it only clobbers EAX/EDX and leaves the others
	 * untouched. We also (somewhat subtly) rely on the
	 * fact that cmpxchg8b returns the current 64-bit value
	 * of the memory location we are touching:
	 */
	/* Seeding EDX:EAX from ECX:EBX means a successful compare writes
	 * back the very same value (no modification), while a failed one
	 * leaves the current 64-bit value in EDX:EAX -- either way @res
	 * holds the value read. */
	asm volatile(
		"mov %%ebx, %%eax\n\t"
		"mov %%ecx, %%edx\n\t"
		LOCK_PREFIX "cmpxchg8b %1\n"
		: "=&A" (res)
		: "m" (*ptr)
		);
	return res;
}
/** |
* atomic64_add_return - add and return |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns @delta + *@ptr |
*/ |
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); |
/* |
* Other variants with different arithmetic operators: |
*/ |
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); |
extern u64 atomic64_inc_return(atomic64_t *ptr); |
extern u64 atomic64_dec_return(atomic64_t *ptr); |
/** |
* atomic64_add - add integer to atomic64 variable |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr. |
*/ |
extern void atomic64_add(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub - subtract the atomic64 variable |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr. |
*/ |
extern void atomic64_sub(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub_and_test - subtract value from variable and test result |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_inc - increment atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1. |
*/ |
extern void atomic64_inc(atomic64_t *ptr); |
/** |
* atomic64_dec - decrement atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1. |
*/ |
extern void atomic64_dec(atomic64_t *ptr); |
/** |
* atomic64_dec_and_test - decrement and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
extern int atomic64_dec_and_test(atomic64_t *ptr); |
/** |
* atomic64_inc_and_test - increment and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_inc_and_test(atomic64_t *ptr); |
/** |
* atomic64_add_negative - add and test if negative |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); |
#include <asm-generic/atomic-long.h> |
#endif /* _ASM_X86_ATOMIC_32_H */ |
/drivers/include/linux/asm/bitops.h |
---|
0,0 → 1,476 |
#ifndef _ASM_X86_BITOPS_H |
#define _ASM_X86_BITOPS_H |
/* |
* Copyright 1992, Linus Torvalds. |
* |
* Note: inlines with more than a single statement should be marked |
* __always_inline to avoid problems with older gcc's inlining heuristics. |
*/ |
#ifndef _LINUX_BITOPS_H |
#error only <linux/bitops.h> can be included directly |
#endif |
#include <linux/compiler.h> |
#include <asm/alternative.h> |
#define BIT_64(n) (U64_C(1) << (n)) |
/* |
* These have to be done with inline assembly: that way the bit-setting |
* is guaranteed to be atomic. All bit operations return 0 if the bit |
* was cleared before the operation and != 0 if it was not. |
* |
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
*/ |
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
/* Technically wrong, but this avoids compilation errors on some gcc |
versions. */ |
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) |
#else |
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) |
#endif |
#define ADDR BITOP_ADDR(addr) |
/* |
* We do the locked ops that don't return the old value as |
* a mask operation on a byte. |
*/ |
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) |
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) |
#define CONST_MASK(nr) (1 << ((nr) & 7)) |
/** |
* set_bit - Atomically set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* This function is atomic and may not be reordered. See __set_bit() |
* if you do not require the atomic guarantees. |
* |
* Note: there are no guarantees that this function will not be reordered |
* on non x86 architectures, so if you are writing portable code, |
* make sure not to rely on its reordering guarantees. |
* |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit number: a byte-wide "orb" with an immediate
		 * mask is shorter and cheaper than "bts". */
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		/* Variable bit number: "bts" indexes arbitrarily far past
		 * @addr, so @nr is not limited to one word. */
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
/** |
* __set_bit - Set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* Unlike set_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	/* Non-atomic "bts": caller must provide its own exclusion. */
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/** |
* clear_bit - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and may not be reordered. However, it does |
* not contain a memory barrier, so if it is used for locking purposes, |
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
* in order to ensure changes are visible on other processors. |
*/ |
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit number: byte-wide "andb" with the inverted
		 * immediate mask beats "btr". */
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
/* |
* clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and implies release semantics before the memory |
* operation. It can be used for an unlock. |
*/ |
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	/* Compiler barrier orders prior accesses before the clearing store,
	 * giving the release semantics promised for unlock use. */
	barrier();
	clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	/* Non-atomic "btr": caller must provide its own exclusion. */
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/* |
* __clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* __clear_bit() is non-atomic and implies release semantics before the memory |
* operation. It can be used for an unlock if no other CPUs can concurrently |
* modify other bits in the word. |
* |
* No memory barrier is required here, because x86 cannot reorder stores past |
* older loads. Same principle as spin_unlock. |
*/ |
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	/* Compiler barrier for release ordering; the clear itself is
	 * non-atomic, per the header comment above. */
	barrier();
	__clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit() barrier() |
#define smp_mb__after_clear_bit() barrier() |
/** |
* __change_bit - Toggle a bit in memory |
* @nr: the bit to change |
* @addr: the address to start counting from |
* |
* Unlike change_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	/* Non-atomic "btc": caller must provide its own exclusion. */
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/** |
* change_bit - Toggle a bit in memory |
* @nr: Bit to change |
* @addr: Address to start counting from |
* |
* change_bit() is atomic and may not be reordered. |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit number: byte-wide "xorb" with an immediate
		 * mask beats "btc". */
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
/** |
* test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* "bts" leaves the old bit in CF; "sbb %0,%0" expands CF into
	 * 0 or -1, so a set bit comes back as nonzero (-1), not as 1. */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
	return oldbit;
}
/** |
* test_and_set_bit_lock - Set a bit and return its old value for lock |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This is the same as test_and_set_bit on x86. |
*/ |
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	/* On x86 the locked test_and_set already has the required
	 * ordering, so the lock variant is simply an alias. */
	return test_and_set_bit(nr, addr);
}
/** |
* __test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
*/ |
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* Unlocked "bts"; CF (old bit) is expanded to 0 / -1 via sbb. */
	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
/** |
* test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* Locked "btr"; the old bit (CF) is expanded to 0 / -1 via sbb. */
	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
	return oldbit;
}
/** |
* __test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
* |
* Note: the operation is performed atomically with respect to |
* the local CPU, but not other CPUs. Portable code should not |
* rely on this behaviour. |
* KVM relies on this behaviour on x86 for modifying memory that is also |
* accessed from a hypervisor on the same CPU if running in a VM: don't change |
* this without also updating arch/x86/kernel/kvm.c |
*/ |
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* Unlocked "btr" (see the KVM note above); sbb converts CF to
	 * 0 / -1 for the return value. */
	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}
/* WARNING: non atomic and it can be reordered! */ |
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* Unlocked "btc"; sbb converts CF (the old bit) to 0 / -1. */
	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");
	return oldbit;
}
/** |
* test_and_change_bit - Change a bit and return its old value |
* @nr: Bit to change |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;
	/* Locked "btc"; sbb converts CF (the old bit) to 0 / -1. */
	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
	return oldbit;
}
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
{ |
return ((1UL << (nr % BITS_PER_LONG)) & |
(addr[nr / BITS_PER_LONG])) != 0; |
} |
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
	int oldbit;
	/* "bt" copies the addressed bit into CF; sbb expands CF to 0 / -1,
	 * so a set bit is reported as nonzero (-1), not 1. */
	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/* Compile-time dispatch: a constant @nr folds to pure C, otherwise bt. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/*
	 * NOTE(review): the "rep" prefix looks redundant but is presumably
	 * deliberate -- "rep; bsf" is the TZCNT encoding on CPUs that have
	 * it and decodes as plain bsf elsewhere; for non-zero input the
	 * results agree.  Confirm against the toolchain this port targets.
	 */
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	/* Search the complement: the first set bit of ~word is the
	 * first zero bit of word. */
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "r" (~word));
	return word;
}
/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	/* bsr scans from the most-significant end; result is the bit index. */
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
#undef ADDR |
#ifdef __KERNEL__ |
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_CMOV
	/* bsfl leaves %0 undefined when x == 0; cmovzl substitutes -1 so
	 * the final "r + 1" yields 0.  "=&r" keeps r out of the inputs. */
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	/* No cmov on the target CPU: branch around an explicit -1 load
	 * when bsfl set ZF (i.e. x was 0). */
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_CMOV
	/* bsrl leaves %0 undefined when x == 0; cmovzl substitutes -1 so
	 * the final "r + 1" yields 0. */
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	/* No cmov: branch around an explicit -1 load when x was 0. */
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
#endif /* __KERNEL__ */ |
#undef ADDR	/* NOTE(review): already #undef'd above -- harmless duplicate */

#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>

/* ext2 atomic bitops: the @lock argument is ignored on x86 because the
 * lock-prefixed test_and_*_bit() primitives are already atomic. */
#define ext2_set_bit_atomic(lock, nr, addr)			\
	test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)			\
	test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */ |
/drivers/include/linux/asm/cpufeature.h |
---|
0,0 → 1,342 |
/* |
* Defines x86 CPU feature bits |
*/ |
#ifndef _ASM_X86_CPUFEATURE_H |
#define _ASM_X86_CPUFEATURE_H |
#include <asm/required-features.h> |
#define NCAPINTS 10 /* N 32-bit words worth of info */ |
/* |
* Note: If the comment begins with a quoted string, that string is used |
* in /proc/cpuinfo instead of the macro name. If the string is "", |
* this feature bit is not displayed in /proc/cpuinfo at all. |
*/ |
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ |
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ |
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ |
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ |
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
/* (plus FCMOVcc, FCOMI with FPU) */ |
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
#define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ |
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
/* Don't duplicate feature flags which are redundant with Intel! */ |
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ |
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ |
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ |
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ |
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ |
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ |
/* Other features, Linux-defined mapping, word 3 */ |
/* This range is used for feature bits which conflict or are synthesized */ |
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ |
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
/* cpu types for specific tunings: */ |
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
/* 21 available, was AMD_C1E */ |
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ |
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ |
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ |
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ |
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ |
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ |
#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ |
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ |
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ |
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ |
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID (4*32+10) /* Context ID */ |
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ |
#define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */ |
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ |
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ |
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ |
#define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */ |
#define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ |
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ |
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */ |
#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */ |
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ |
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ |
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ |
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ |
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ |
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ |
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ |
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ |
#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ |
#define X86_FEATURE_TCE (6*32+17) /* translation cache extension */ |
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ |
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ |
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ |
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
* CPUID levels like 0x6, 0xA etc, word 7 |
*/ |
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ |
#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */ |
#define X86_FEATURE_EPB (7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ |
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ |
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ |
#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ |
/* Virtualization flags: Linux defined, word 8 */ |
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ |
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
#define X86_FEATURE_NPT (8*32+ 5) /* AMD Nested Page Table support */ |
#define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ |
#define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ |
#define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ |
#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ |
#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ |
#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */ |
#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ |
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ |
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ |
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */ |
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */ |
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ |
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ |
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ |
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ |
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ |
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ |
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ |
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
#include <linux/bitops.h> |
extern const char * const x86_cap_flags[NCAPINTS*32]; |
extern const char * const x86_power_flags[32]; |
/* Test feature bit @bit in the capability words of cpuinfo object @c. */
#define test_cpu_cap(c, bit)					\
	test_bit(bit, (unsigned long *)((c)->x86_capability))

/*
 * True when constant @bit is part of the minimum feature set the kernel
 * was built to require (REQUIRED_MASK0..9, see required-features.h):
 * such bits are guaranteed set, so run-time testing is unnecessary.
 * (bit>>5 selects the 32-bit capability word, bit&31 the bit within it.)
 */
#define REQUIRED_MASK_BIT_SET(bit)					\
	( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
	  (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
	  (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
	  (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) ||	\
	  (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
	  (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
	  (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
	  (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
	  (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
	  (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )

/* Required constant bits fold to 1 at compile time; others are tested. */
#define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
	 test_cpu_cap(c, bit))

/* As cpu_has(), but against the current CPU's per-cpu capability words. */
#define this_cpu_has(bit)						\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))

#define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)

#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))

/* Also record the override in cpu_caps_cleared/cpu_caps_set so it is
 * re-applied to CPUs brought up later. */
#define setup_clear_cpu_cap(bit) do {			\
	clear_cpu_cap(&boot_cpu_data, bit);		\
	set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do {			\
	set_cpu_cap(&boot_cpu_data, bit);		\
	set_bit(bit, (unsigned long *)cpu_caps_set);	\
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) |
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) |
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) |
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) |
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) |
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) |
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) |
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) |
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) |
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) |
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) |
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) |
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) |
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) |
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) |
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) |
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) |
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) |
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) |
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) |
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) |
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) |
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) |
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) |
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) |
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) |
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) |
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) |
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) |
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) |
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) |
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) |
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) |
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) |
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
# define cpu_has_invlpg 1 |
#else |
# define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
#endif |
#ifdef CONFIG_X86_64 |
#undef cpu_has_vme |
#define cpu_has_vme 0 |
#undef cpu_has_pae |
#define cpu_has_pae ___BUG___ |
#undef cpu_has_mp |
#define cpu_has_mp 1 |
#undef cpu_has_k6_mtrr |
#define cpu_has_k6_mtrr 0 |
#undef cpu_has_cyrix_arr |
#define cpu_has_cyrix_arr 0 |
#undef cpu_has_centaur_mcr |
#define cpu_has_centaur_mcr 0 |
#endif /* CONFIG_X86_64 */ |
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
#endif /* _ASM_X86_CPUFEATURE_H */ |
/drivers/include/linux/asm/div64.h |
---|
0,0 → 1,66 |
#ifndef _ASM_X86_DIV64_H |
#define _ASM_X86_DIV64_H |
#ifdef CONFIG_X86_32 |
#include <linux/types.h> |
#include <linux/log2.h> |
/* |
* do_div() is NOT a C function. It wants to return |
* two values (the quotient and the remainder), but |
* since that doesn't work very well in C, what it |
* does is: |
* |
* - modifies the 64-bit dividend _in_place_ |
* - returns the 32-bit remainder |
* |
* This ends up being the most efficient "calling |
* convention" on x86. |
*/ |
#define do_div(n, base) \ |
({ \ |
unsigned long __upper, __low, __high, __mod, __base; \ |
__base = (base); \ |
if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \ |
__mod = n & (__base - 1); \ |
n >>= ilog2(__base); \ |
} else { \ |
asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ |
__upper = __high; \ |
if (__high) { \ |
__upper = __high % (__base); \ |
__high = __high / (__base); \ |
} \ |
asm("divl %2" : "=a" (__low), "=d" (__mod) \ |
: "rm" (__base), "0" (__low), "1" (__upper)); \ |
asm("" : "=A" (n) : "a" (__low), "d" (__high)); \ |
} \ |
__mod; \ |
}) |
/*
 * 64-by-32 division: return the 64-bit quotient of @dividend / @divisor
 * and store the 32-bit remainder through @remainder.
 *
 * divl faults (#DE) if the quotient overflows 32 bits, so the high
 * dividend word is reduced first: its quotient digit is written straight
 * into d.v32[1], and its remainder becomes the high ("upper") input of
 * the low-word division.  v32[1] is the high word -- x86 is little-endian.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	union {
		u64 v64;
		u32 v32[2];
	} d = { dividend };
	u32 upper;

	upper = d.v32[1];
	d.v32[1] = 0;
	if (upper >= divisor) {
		d.v32[1] = upper / divisor;
		upper %= divisor;
	}
	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
	return d.v64;
}
#define div_u64_rem div_u64_rem |
#else |
# include <asm-generic/div64.h> |
#endif /* CONFIG_X86_32 */ |
#endif /* _ASM_X86_DIV64_H */ |
/drivers/include/linux/asm/required-features.h |
---|
0,0 → 1,90 |
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#define _ASM_X86_REQUIRED_FEATURES_H |
/* Define minimum CPUID feature set for kernel These bits are checked |
really early to actually display a visible error message before the |
kernel dies. Make sure to assign features to the proper mask! |
Some requirements that are not in CPUID yet are also in the |
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. |
The real information is in arch/x86/Kconfig.cpu, this just converts |
the CONFIGs into a bitmask */ |
#ifndef CONFIG_MATH_EMULATION |
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) |
#else |
# define NEED_FPU 0 |
#endif |
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
#else |
# define NEED_PAE 0 |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) |
#else |
# define NEED_CX8 0 |
#endif |
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) |
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) |
#else |
# define NEED_CMOV 0 |
#endif |
#ifdef CONFIG_X86_USE_3DNOW |
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) |
#else |
# define NEED_3DNOW 0 |
#endif |
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) |
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) |
#else |
# define NEED_NOPL 0 |
#endif |
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE	0
#define NEED_PGE	0
#else
/*
 * Fixed operator precedence: the previous form
 *     (1<<(X86_FEATURE_PGE) & 31)
 * parsed as (1 << 13) & 31 == 0 because '&' binds looser than '<<',
 * so PGE was silently dropped from REQUIRED_MASK0 (PSE only happened
 * to come out right: (1<<3) & 31 == 8).  The &31 masking belongs
 * inside the shift count, matching every other NEED_* below.
 */
#define NEED_PSE	(1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
#endif
#define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
#define NEED_FXSR	(1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM	(1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2	(1<<(X86_FEATURE_XMM2 & 31))
#define NEED_LM		(1<<(X86_FEATURE_LM & 31))
#else
/* 32-bit build: none of these features are mandatory. */
#define NEED_PSE	0
#define NEED_MSR	0
#define NEED_PGE	0
#define NEED_FXSR	0
#define NEED_XMM	0
#define NEED_XMM2	0
#define NEED_LM		0
#endif
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ |
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ |
NEED_XMM|NEED_XMM2) |
#define SSE_MASK (NEED_XMM|NEED_XMM2) |
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
#define REQUIRED_MASK2 0 |
#define REQUIRED_MASK3 (NEED_NOPL) |
#define REQUIRED_MASK4 0 |
#define REQUIRED_MASK5 0 |
#define REQUIRED_MASK6 0 |
#define REQUIRED_MASK7 0 |
#define REQUIRED_MASK8 0 |
#define REQUIRED_MASK9 0 |
#endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
/drivers/include/linux/asm/types.h |
---|
0,0 → 1,16 |
#ifndef _ASM_X86_TYPES_H |
#define _ASM_X86_TYPES_H |
#define dma_addr_t dma_addr_t |
#include <asm-generic/types.h> |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef u64 dma64_addr_t; |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_TYPES_H */ |
/drivers/include/linux/asm/atomic.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "atomic_32.h" |
#else |
# include "atomic_64.h" |
#endif |
/drivers/include/linux/asm/cmpxchg_32.h |
---|
0,0 → 1,278 |
#ifndef _ASM_X86_CMPXCHG_32_H |
#define _ASM_X86_CMPXCHG_32_H |
#include <linux/bitops.h> /* for LOCK_PREFIX */ |
/* |
* Note: if you use set64_bit(), __cmpxchg64(), or their variants, you |
* you need to test for the feature in boot_cpu_data. |
*/ |
extern void __xchg_wrong_size(void); |
/* |
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
* Note 2: xchg has side effect, so that attribute volatile is necessary, |
* but generally the primitive is invalid, *ptr is output argument. --ANK |
*/ |
struct __xchg_dummy { |
unsigned long a[100]; |
}; |
#define __xg(x) ((struct __xchg_dummy *)(x)) |
#define __xchg(x, ptr, size) \ |
({ \ |
__typeof(*(ptr)) __x = (x); \ |
switch (size) { \ |
case 1: \ |
{ \ |
volatile u8 *__ptr = (volatile u8 *)(ptr); \ |
asm volatile("xchgb %0,%1" \ |
: "=q" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
case 2: \ |
{ \ |
volatile u16 *__ptr = (volatile u16 *)(ptr); \ |
asm volatile("xchgw %0,%1" \ |
: "=r" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
case 4: \ |
{ \ |
volatile u32 *__ptr = (volatile u32 *)(ptr); \ |
asm volatile("xchgl %0,%1" \ |
: "=r" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
default: \ |
__xchg_wrong_size(); \ |
} \ |
__x; \ |
}) |
#define xchg(ptr, v) \ |
__xchg((v), (ptr), sizeof(*ptr)) |
/* |
* CMPXCHG8B only writes to the target if we had the previous |
* value in registers, otherwise it acts as a read and gives us the |
* "new previous" value. That is why there is a loop. Preloading |
* EDX:EAX is a performance optimization: in the common case it means |
* we need only one locked operation. |
* |
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very |
* least an FPU save and/or %cr0.ts manipulation. |
* |
* cmpxchg8b must be used with the lock prefix here to allow the |
* instruction to be executed atomically. We need to have the reader |
* side to see the coherent 64bit value. |
*/ |
/* Atomically store the 64-bit @value at @ptr (see the comment above). */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low = value;
	u32 high = value >> 32;
	u64 prev = *ptr;	/* preload edx:eax -- usually one locked op */

	/*
	 * Retry cmpxchg8b until the store wins; each failed attempt
	 * refreshes prev (edx:eax) with the value currently in memory.
	 */
	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
extern void __cmpxchg_wrong_size(void); |
/* |
* Atomic compare and exchange. Compare OLD with MEM, if identical, |
* store NEW in MEM. Return the initial value in MEM. Success is |
* indicated by comparing RETURN with OLD. |
*/ |
#define __raw_cmpxchg(ptr, old, new, size, lock) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (old); \ |
__typeof__(*(ptr)) __new = (new); \ |
switch (size) { \ |
case 1: \ |
{ \ |
volatile u8 *__ptr = (volatile u8 *)(ptr); \ |
asm volatile(lock "cmpxchgb %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "q" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case 2: \ |
{ \ |
volatile u16 *__ptr = (volatile u16 *)(ptr); \ |
asm volatile(lock "cmpxchgw %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case 4: \ |
{ \ |
volatile u32 *__ptr = (volatile u32 *)(ptr); \ |
asm volatile(lock "cmpxchgl %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
default: \ |
__cmpxchg_wrong_size(); \ |
} \ |
__ret; \ |
}) |
#define __cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) |
#define __sync_cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ") |
#define __cmpxchg_local(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "") |
#ifdef CONFIG_X86_CMPXCHG |
#define __HAVE_ARCH_CMPXCHG 1 |
#define cmpxchg(ptr, old, new) \ |
__cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
#define sync_cmpxchg(ptr, old, new) \ |
__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) |
#define cmpxchg_local(ptr, old, new) \ |
__cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
#define cmpxchg64(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#define cmpxchg64_local(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#endif |
/*
 * Atomic 64-bit compare-and-exchange: if *@ptr == @old store @new;
 * either way return what was in memory.  Success == (return == @old).
 * edx:eax ("A") carries @old in and the previous value out; ecx:ebx
 * carries @new, as cmpxchg8b requires.
 */
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
/*
 * As __cmpxchg64() but without the lock prefix: atomic only with respect
 * to the local CPU (cheaper when no cross-CPU contention is possible).
 */
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
#ifndef CONFIG_X86_CMPXCHG |
/* |
* Building a kernel capable running on 80386. It may be necessary to |
* simulate the cmpxchg on the 80386 CPU. For that purpose we define |
* a function for each of the sizes we support. |
*/ |
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); |
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); |
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); |
/*
 * Software cmpxchg for kernels that must run on an 80386, which lacks the
 * cmpxchg instruction.  Dispatches on operand size to the out-of-line
 * emulation helpers declared above; an unsupported size returns @old
 * unchanged (same as a failed compare).
 */
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	if (size == 1)
		return cmpxchg_386_u8(ptr, old, new);
	if (size == 2)
		return cmpxchg_386_u16(ptr, old, new);
	if (size == 4)
		return cmpxchg_386_u32(ptr, old, new);
	return old;
}
#define cmpxchg(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \ |
(unsigned long)(o), (unsigned long)(n), \ |
sizeof(*(ptr))); \ |
__ret; \ |
}) |
#define cmpxchg_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \ |
(unsigned long)(o), (unsigned long)(n), \ |
sizeof(*(ptr))); \ |
__ret; \ |
}) |
#endif |
#ifndef CONFIG_X86_CMPXCHG64 |
/* |
* Building a kernel capable running on 80386 and 80486. It may be necessary |
* to simulate the cmpxchg8b on the 80386 and 80486 CPU. |
*/ |
#define cmpxchg64(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io(LOCK_PREFIX_HERE \ |
"call cmpxchg8b_emu", \ |
"lock; cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#define cmpxchg64_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io("call cmpxchg8b_emu", \ |
"cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#endif |
#endif /* _ASM_X86_CMPXCHG_32_H */ |
/drivers/include/linux/asm/unaligned.h |
---|
0,0 → 1,14 |
#ifndef _ASM_X86_UNALIGNED_H |
#define _ASM_X86_UNALIGNED_H |
/* |
* The x86 can do unaligned accesses itself. |
*/ |
#include <linux/unaligned/access_ok.h> |
#include <linux/unaligned/generic.h> |
#define get_unaligned __get_unaligned_le |
#define put_unaligned __put_unaligned_le |
#endif /* _ASM_X86_UNALIGNED_H */ |
/drivers/include/linux/asm/asm.h |
---|
0,0 → 1,55 |
#ifndef _ASM_X86_ASM_H |
#define _ASM_X86_ASM_H |
#ifdef __ASSEMBLY__ |
# define __ASM_FORM(x) x |
# define __ASM_EX_SEC .section __ex_table, "a" |
#else |
# define __ASM_FORM(x) " " #x " " |
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n" |
#endif |
#ifdef CONFIG_X86_32 |
# define __ASM_SEL(a,b) __ASM_FORM(a) |
#else |
# define __ASM_SEL(a,b) __ASM_FORM(b) |
#endif |
#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q) |
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) |
#define _ASM_PTR __ASM_SEL(.long, .quad) |
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) |
#define _ASM_MOV __ASM_SIZE(mov) |
#define _ASM_INC __ASM_SIZE(inc) |
#define _ASM_DEC __ASM_SIZE(dec) |
#define _ASM_ADD __ASM_SIZE(add) |
#define _ASM_SUB __ASM_SIZE(sub) |
#define _ASM_XADD __ASM_SIZE(xadd) |
#define _ASM_AX __ASM_REG(ax) |
#define _ASM_BX __ASM_REG(bx) |
#define _ASM_CX __ASM_REG(cx) |
#define _ASM_DX __ASM_REG(dx) |
#define _ASM_SP __ASM_REG(sp) |
#define _ASM_BP __ASM_REG(bp) |
#define _ASM_SI __ASM_REG(si) |
#define _ASM_DI __ASM_REG(di) |
/* Exception table entry */ |
#ifdef __ASSEMBLY__ |
# define _ASM_EXTABLE(from,to) \ |
__ASM_EX_SEC ; \ |
_ASM_ALIGN ; \ |
_ASM_PTR from , to ; \ |
.previous |
#else |
# define _ASM_EXTABLE(from,to) \ |
__ASM_EX_SEC \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR #from "," #to "\n" \ |
" .previous\n" |
#endif |
#endif /* _ASM_X86_ASM_H */ |
/drivers/include/linux/asm/bitsperlong.h |
---|
0,0 → 1,13 |
#ifndef __ASM_X86_BITSPERLONG_H |
#define __ASM_X86_BITSPERLONG_H |
#ifdef __x86_64__ |
# define __BITS_PER_LONG 64 |
#else |
# define __BITS_PER_LONG 32 |
#endif |
#include <asm-generic/bitsperlong.h> |
#endif /* __ASM_X86_BITSPERLONG_H */ |
/drivers/include/linux/asm/byteorder.h |
---|
0,0 → 1,6 |
#ifndef _ASM_X86_BYTEORDER_H |
#define _ASM_X86_BYTEORDER_H |
#include <linux/byteorder/little_endian.h> |
#endif /* _ASM_X86_BYTEORDER_H */ |
/drivers/include/linux/asm/cmpxchg.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "cmpxchg_32.h" |
#else |
# include "cmpxchg_64.h" |
#endif |
/drivers/include/linux/asm/posix_types.h |
---|
0,0 → 1,13 |
#ifdef __KERNEL__ |
# ifdef CONFIG_X86_32 |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#else |
# ifdef __i386__ |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#endif |
/drivers/include/linux/asm/posix_types_32.h |
---|
0,0 → 1,85 |
#ifndef _ASM_X86_POSIX_TYPES_32_H |
#define _ASM_X86_POSIX_TYPES_32_H |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
typedef unsigned long __kernel_ino_t; |
typedef unsigned short __kernel_mode_t; |
typedef unsigned short __kernel_nlink_t; |
typedef long __kernel_off_t; |
typedef int __kernel_pid_t; |
typedef unsigned short __kernel_ipc_pid_t; |
typedef unsigned short __kernel_uid_t; |
typedef unsigned short __kernel_gid_t; |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
typedef long __kernel_time_t; |
typedef long __kernel_suseconds_t; |
typedef long __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef int __kernel_daddr_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
typedef unsigned short __kernel_old_uid_t; |
typedef unsigned short __kernel_old_gid_t; |
typedef unsigned short __kernel_old_dev_t; |
#ifdef __GNUC__ |
typedef long long __kernel_loff_t; |
#endif |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#if defined(__KERNEL__) |
#undef __FD_SET |
#define __FD_SET(fd,fdsetp) \ |
asm volatile("btsl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int)(fd))) |
#undef __FD_CLR |
#define __FD_CLR(fd,fdsetp) \ |
asm volatile("btrl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int) (fd))) |
#undef __FD_ISSET |
#define __FD_ISSET(fd,fdsetp) \ |
(__extension__ \ |
({ \ |
unsigned char __result; \ |
asm volatile("btl %1,%2 ; setb %0" \ |
: "=q" (__result) \ |
: "r" ((int)(fd)), \ |
"m" (*(__kernel_fd_set *)(fdsetp))); \ |
__result; \ |
})) |
#undef __FD_ZERO |
#define __FD_ZERO(fdsetp) \ |
do { \ |
int __d0, __d1; \ |
asm volatile("cld ; rep ; stosl" \ |
: "=m" (*(__kernel_fd_set *)(fdsetp)), \ |
"=&c" (__d0), "=&D" (__d1) \ |
: "a" (0), "1" (__FDSET_LONGS), \ |
"2" ((__kernel_fd_set *)(fdsetp)) \ |
: "memory"); \ |
} while (0) |
#endif /* defined(__KERNEL__) */ |
#endif /* _ASM_X86_POSIX_TYPES_32_H */ |
/drivers/include/linux/asm/spinlock_types.h |
---|
0,0 → 1,20 |
#ifndef _ASM_X86_SPINLOCK_TYPES_H |
#define _ASM_X86_SPINLOCK_TYPES_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
typedef struct raw_spinlock { |
unsigned int slock; |
} raw_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
typedef struct { |
unsigned int lock; |
} raw_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
#endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
/drivers/include/linux/asm/string.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "string_32.h" |
#else |
# include "string_64.h" |
#endif |
/drivers/include/linux/asm/string_32.h |
---|
0,0 → 1,342 |
#ifndef _ASM_X86_STRING_32_H |
#define _ASM_X86_STRING_32_H |
#ifdef __KERNEL__ |
/* Let gcc decide whether to inline or use the out of line functions */ |
#define __HAVE_ARCH_STRCPY |
extern char *strcpy(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCPY |
extern char *strncpy(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCAT |
extern char *strcat(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCAT |
extern char *strncat(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCMP |
extern int strcmp(const char *cs, const char *ct); |
#define __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *cs, const char *ct, size_t count); |
#define __HAVE_ARCH_STRCHR |
extern char *strchr(const char *s, int c); |
#define __HAVE_ARCH_STRLEN |
extern size_t strlen(const char *s); |
/*
 * Generic memcpy: copy n/4 dwords with rep movsl, then finish the
 * remaining 0-3 bytes with rep movsb.  Returns 'to', like memcpy().
 */
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
	int d0, d1, d2;		/* dummy outputs for clobbered ecx/edi/esi */
	asm volatile("rep ; movsl\n\t"	/* copy n/4 dwords */
		     "movl %4,%%ecx\n\t"
		     "andl $3,%%ecx\n\t"	/* ecx = n % 4 */
		     "jz 1f\n\t"		/* no byte tail? done */
		     "rep ; movsb\n\t"	/* copy trailing bytes */
		     "1:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
/* |
* This looks ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
/*
 * memcpy for a compile-time-constant length n.  Every branch below is
 * resolved at compile time, so only the moves needed for that exact
 * size are emitted.
 */
static __always_inline void *__constant_memcpy(void *to, const void *from,
					       size_t n)
{
	long esi, edi;
	if (!n)
		return to;
	/* Tiny sizes: direct scalar stores, no string instructions. */
	switch (n) {
	case 1:
		*(char *)to = *(char *)from;
		return to;
	case 2:
		*(short *)to = *(short *)from;
		return to;
	case 4:
		*(int *)to = *(int *)from;
		return to;
	case 3:
		*(short *)to = *(short *)from;
		*((char *)to + 2) = *((char *)from + 2);
		return to;
	case 5:
		*(int *)to = *(int *)from;
		*((char *)to + 4) = *((char *)from + 4);
		return to;
	case 6:
		*(int *)to = *(int *)from;
		*((short *)to + 2) = *((short *)from + 2);
		return to;
	case 8:
		*(int *)to = *(int *)from;
		*((int *)to + 1) = *((int *)from + 1);
		return to;
	}
	esi = (long)from;
	edi = (long)to;
	if (n >= 5 * 4) {
		/* large block: use rep prefix */
		int ecx;
		asm volatile("rep ; movsl"
			     : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
			     : "0" (n / 4), "1" (edi), "2" (esi)
			     : "memory"
		);
	} else {
		/* small block: don't clobber ecx + smaller code */
		/* 1..4 dword copies emitted as individual movsl */
		if (n >= 4 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 3 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 2 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (n >= 1 * 4)
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
	}
	/* Tail: the remaining n % 4 bytes. */
	switch (n % 4) {
	/* tail */
	case 0:
		return to;
	case 1:
		asm volatile("movsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	case 2:
		asm volatile("movsw"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	default:
		asm volatile("movsw\n\tmovsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	}
}
#define __HAVE_ARCH_MEMCPY |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
/* |
* This CPU favours 3DNow strongly (eg AMD Athlon) |
*/ |
/*
 * Constant-length copy on 3DNow! CPUs: small blocks go through the
 * ordinary constant copy, 512 bytes or more use the MMX streaming copy.
 */
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
	return (len < 512) ? __constant_memcpy(to, from, len)
			   : _mmx_memcpy(to, from, len);
}
/*
 * Runtime-length copy on 3DNow! CPUs: small blocks use the ordinary
 * copy, 512 bytes or more use the MMX streaming copy.
 */
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
	return (len < 512) ? __memcpy(to, from, len)
			   : _mmx_memcpy(to, from, len);
}
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy3d((t), (f), (n)) \ |
: __memcpy3d((t), (f), (n))) |
#else |
/* |
* No 3D Now! |
*/ |
#ifndef CONFIG_KMEMCHECK |
#if (__GNUC__ >= 4) |
#define memcpy(t, f, n) __builtin_memcpy(t, f, n) |
#else |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy((t), (f), (n)) \ |
: __memcpy((t), (f), (n))) |
#endif |
#else |
/* |
* kmemcheck becomes very happy if we use the REP instructions unconditionally, |
* because it means that we know both memory operands in advance. |
*/ |
#define memcpy(t, f, n) __memcpy((t), (f), (n)) |
#endif |
#endif |
#define __HAVE_ARCH_MEMMOVE |
void *memmove(void *dest, const void *src, size_t n); |
#define memcmp __builtin_memcmp |
#define __HAVE_ARCH_MEMCHR |
extern void *memchr(const void *cs, int c, size_t count); |
/*
 * Fill 'count' bytes at 's' with byte value 'c' using rep stosb.
 * Returns s, like memset().
 */
static inline void *__memset_generic(void *s, char c, size_t count)
{
	int d0, d1;		/* dummy outputs for clobbered ecx/edi */
	asm volatile("rep\n\t"
		     "stosb"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "1" (s), "0" (count)
		     : "memory");
	return s;
}
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
/* |
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
* things 32 bits at a time even when we don't know the size of the |
* area at compile-time.. |
*/ |
/*
 * Fill 'count' bytes at 's' with the 32-bit pattern 'c' (the caller has
 * already replicated the byte into all four byte lanes).  Stores
 * count/4 dwords with rep stosl, then inspects the low bits of count to
 * finish with a word and/or byte store.
 */
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
	int d0, d1;		/* dummy outputs for clobbered ecx/edi */
	asm volatile("rep ; stosl\n\t"		/* count/4 dword stores */
		     "testb $2,%b3\n\t"		/* count & 2: word left? */
		     "je 1f\n\t"
		     "stosw\n"
		     "1:\ttestb $1,%b3\n\t"	/* count & 1: byte left? */
		     "je 2f\n\t"
		     "stosb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
		     : "memory");
	return s;
}
/* Added by Gertjan van Wingerde to make minix and sysv module work */ |
#define __HAVE_ARCH_STRNLEN |
extern size_t strnlen(const char *s, size_t count); |
/* end of additional stuff */ |
#define __HAVE_ARCH_STRSTR |
extern char *strstr(const char *cs, const char *ct); |
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as we by now know that both pattern and count is constant.. |
*/ |
/*
 * memset with BOTH pattern and count known at compile time.  The byte
 * value has already been replicated into all four bytes of 'pattern'
 * by the memset() macro.  Counts up to 4 compile to direct stores;
 * larger counts use rep stosl plus a tail chosen statically from
 * count % 4.
 */
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
				    size_t count)
{
	/* Up to 4 bytes: plain scalar stores, no string instructions. */
	switch (count) {
	case 0:
		return s;
	case 1:
		*(unsigned char *)s = pattern & 0xff;
		return s;
	case 2:
		*(unsigned short *)s = pattern & 0xffff;
		return s;
	case 3:
		*(unsigned short *)s = pattern & 0xffff;
		*((unsigned char *)s + 2) = pattern & 0xff;
		return s;
	case 4:
		*(unsigned long *)s = pattern;
		return s;
	}
/* rep stosl for count/4 dwords, followed by the tail op(s) in 'x'. */
#define COMMON(x) \
	asm volatile("rep ; stosl" \
		     x \
		     : "=&c" (d0), "=&D" (d1) \
		     : "a" (eax), "0" (count/4), "1" ((long)s) \
		     : "memory")
	{
		int d0, d1;	/* dummy outputs for clobbered ecx/edi */
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
		/* Workaround for broken gcc 4.0 */
		register unsigned long eax asm("%eax") = pattern;
#else
		unsigned long eax = pattern;
#endif
		/* Tail: the 0-3 bytes left after the dword stores. */
		switch (count % 4) {
		case 0:
			COMMON("");
			return s;
		case 1:
			COMMON("\n\tstosb");
			return s;
		case 2:
			COMMON("\n\tstosw");
			return s;
		default:
			COMMON("\n\tstosw\n\tstosb");
			return s;
		}
	}
#undef COMMON
}
#define __constant_c_x_memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_c_and_count_memset((s), (c), (count)) \ |
: __constant_c_memset((s), (c), (count))) |
#define __memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_count_memset((s), (c), (count)) \ |
: __memset_generic((s), (c), (count))) |
#define __HAVE_ARCH_MEMSET |
#if (__GNUC__ >= 4) |
#define memset(s, c, count) __builtin_memset(s, c, count) |
#else |
#define memset(s, c, count) \ |
(__builtin_constant_p(c) \ |
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
(count)) \ |
: __memset((s), (c), (count))) |
#endif |
/* |
* find the first occurrence of byte 'c', or 1 past the area if none |
*/ |
#define __HAVE_ARCH_MEMSCAN |
extern void *memscan(void *addr, int c, size_t size); |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_STRING_32_H */ |
/drivers/include/linux/asm/swab.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_SWAB_H |
#define _ASM_X86_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
/*
 * Byte-swap a 32-bit value (endianness conversion).  Uses the bswap
 * instruction where available; otherwise falls back to an xchg/rotate
 * sequence for pre-486 CPUs.
 */
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
	asm("bswap %0" : "=r" (val) : "0" (val));
# else
	/* No bswap instruction (i386): swap piecewise. */
	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes */
	    "rorl $16,%0\n\t"	/* swap words */
	    "xchgb %b0,%h0"	/* swap higher bytes */
	    : "=q" (val)
	    : "0" (val));
# endif
#else /* __i386__ */
	asm("bswapl %0"
	    : "=r" (val)
	    : "0" (val));
#endif
	return val;
}
#define __arch_swab32 __arch_swab32 |
/*
 * Byte-swap a 64-bit value.  On 64-bit builds a single bswapq does it.
 * On i386 the value is split into two 32-bit halves via a union: each
 * half is byte-swapped, then the halves are exchanged.
 */
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
	union {
		struct {
			__u32 a;	/* low half */
			__u32 b;	/* high half */
		} s;
		__u64 u;
	} v;
	v.u = val;
# ifdef CONFIG_X86_BSWAP
	/* Swap both halves and exchange them in one asm block. */
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# else
	v.s.a = __arch_swab32(v.s.a);
	v.s.b = __arch_swab32(v.s.b);
	asm("xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# endif
	return v.u;
#else /* __i386__ */
	asm("bswapq %0"
	    : "=r" (val)
	    : "0" (val));
	return val;
#endif
}
#define __arch_swab64 __arch_swab64 |
#endif /* _ASM_X86_SWAB_H */ |
/drivers/include/linux/asm |
---|
Property changes: |
Added: svn:ignore |
+*.o |
+*.obj |
/drivers/include/linux/shmem_fs.h |
---|
1,9 → 1,8 |
#ifndef __SHMEM_FS_H |
#define __SHMEM_FS_H |
#include <linux/file.h> |
#include <kernel.h> |
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); |
struct page *shmem_read_mapping_page_gfp(struct file *filep, |
pgoff_t index, gfp_t gfp); |
/drivers/include/linux/stddef.h |
---|
1,9 → 1,8 |
#ifndef _LINUX_STDDEF_H |
#define _LINUX_STDDEF_H |
#include <uapi/linux/stddef.h> |
#include <linux/compiler.h> |
#undef NULL |
#define NULL ((void *)0) |
/drivers/include/linux/moduleparam.h |
---|
1,10 → 1,3 |
#ifndef _LINUX_MODULE_PARAMS_H |
#define _LINUX_MODULE_PARAMS_H |
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ |
#include <linux/kernel.h> |
#define MODULE_PARM_DESC(_parm, desc) |
#define module_param_named(name, value, type, perm) |
#define module_param_named_unsafe(name, value, type, perm) |
#endif |
/drivers/include/linux/spinlock.h |
---|
48,14 → 48,14 |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
#include <linux/linkage.h> |
//#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
#include <asm/barrier.h> |
//#include <asm/system.h> |
/* |
* Must define these before including other files, inline functions need them |
/drivers/include/linux/errno.h |
---|
1,32 → 1,116 |
#ifndef _LINUX_ERRNO_H |
#define _LINUX_ERRNO_H |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <uapi/linux/errno.h> |
#include <errno-base.h> |
/* |
* These should never be seen by user programs. To return one of ERESTART* |
* codes, signal_pending() MUST be set. Note that ptrace can observe these |
* at syscall exit tracing, but they will never be left for the debugged user |
* process to see. |
*/ |
#define ERESTARTSYS 512 |
#define ERESTARTNOINTR 513 |
#define ERESTARTNOHAND 514 /* restart if no handler.. */ |
#define ENOIOCTLCMD 515 /* No ioctl command */ |
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ |
#define EPROBE_DEFER 517 /* Driver requests probe retry */ |
#define EOPENSTALE 518 /* open found a stale dentry */ |
/* Defined for the NFSv3 protocol */ |
#define EBADHANDLE 521 /* Illegal NFS file handle */ |
#define ENOTSYNC 522 /* Update synchronization mismatch */ |
#define EBADCOOKIE 523 /* Cookie is stale */ |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#define ENOTSUPP 524 /* Operation is not supported */ |
#define ETOOSMALL 525 /* Buffer or request is too small */ |
#define ESERVERFAULT 526 /* An untranslatable error occurred */ |
#define EBADTYPE 527 /* Type not supported by server */ |
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ |
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ |
#endif |
/drivers/include/linux/kref.h |
---|
15,11 → 15,7 |
#ifndef _KREF_H_ |
#define _KREF_H_ |
#include <linux/bug.h> |
#include <linux/atomic.h> |
#include <linux/kernel.h> |
#include <linux/mutex.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
struct kref { |
atomic_t refcount; |
/drivers/include/linux/seq_file.h |
---|
4,6 → 4,5 |
#include <errno.h> |
#endif |
/drivers/include/linux/asm-generic/bitops/ext2-non-atomic.h |
---|
0,0 → 1,20 |
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#include <asm-generic/bitops/le.h> |
#define ext2_set_bit(nr,addr) \ |
generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_clear_bit(nr,addr) \ |
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_test_bit(nr,addr) \ |
generic_test_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_find_first_zero_bit(addr, size) \ |
generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) |
#define ext2_find_next_zero_bit(addr, size, off) \ |
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) |
#define ext2_find_next_bit(addr, size, off) \ |
generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) |
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ |
/drivers/include/linux/asm-generic/bitops/fls64.h |
---|
0,0 → 1,36 |
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ |
#define _ASM_GENERIC_BITOPS_FLS64_H_ |
#include <asm/types.h> |
/** |
* fls64 - find last set bit in a 64-bit word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffsll, but returns the position of the most significant set bit. |
* |
* fls64(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 64. |
*/ |
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
	/* Check the high 32 bits first; fls() handles one 32-bit word. */
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
	/* __fls() is 0-based and undefined for 0; fls64() is 1-based
	 * and returns 0 for x == 0, hence the explicit check. */
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ |
/drivers/include/linux/asm-generic/bitops/hweight.h |
---|
0,0 → 1,11 |
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#include <asm/types.h> |
extern unsigned int hweight32(unsigned int w); |
extern unsigned int hweight16(unsigned int w); |
extern unsigned int hweight8(unsigned int w); |
extern unsigned long hweight64(__u64 w); |
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
/drivers/include/linux/asm-generic/bitops/le.h |
---|
0,0 → 1,57 |
#ifndef _ASM_GENERIC_BITOPS_LE_H_ |
#define _ASM_GENERIC_BITOPS_LE_H_ |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
#if defined(__LITTLE_ENDIAN) |
#define generic_test_le_bit(nr, addr) test_bit(nr, addr) |
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) |
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) |
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) |
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) |
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) |
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) |
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) |
#define generic_find_next_le_bit(addr, size, offset) \ |
find_next_bit(addr, size, offset) |
#elif defined(__BIG_ENDIAN) |
#define generic_test_le_bit(nr, addr) \ |
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___set_le_bit(nr, addr) \ |
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___clear_le_bit(nr, addr) \ |
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_set_le_bit(nr, addr) \ |
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_clear_le_bit(nr, addr) \ |
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_set_le_bit(nr, addr) \ |
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_clear_le_bit(nr, addr) \ |
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
extern unsigned long generic_find_next_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
#else |
#error "Please fix <asm/byteorder.h>" |
#endif |
#define generic_find_first_zero_le_bit(addr, size) \ |
generic_find_next_zero_le_bit((addr), (size), 0) |
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ |
/drivers/include/linux/asm-generic/bitops/minix.h |
---|
0,0 → 1,15 |
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_ |
#define _ASM_GENERIC_BITOPS_MINIX_H_ |
#define minix_test_and_set_bit(nr,addr) \ |
__test_and_set_bit((nr),(unsigned long *)(addr)) |
#define minix_set_bit(nr,addr) \ |
__set_bit((nr),(unsigned long *)(addr)) |
#define minix_test_and_clear_bit(nr,addr) \ |
__test_and_clear_bit((nr),(unsigned long *)(addr)) |
#define minix_test_bit(nr,addr) \ |
test_bit((nr),(unsigned long *)(addr)) |
#define minix_find_first_zero_bit(addr,size) \ |
find_first_zero_bit((unsigned long *)(addr),(size)) |
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ |
/drivers/include/linux/asm-generic/bitops/sched.h |
---|
0,0 → 1,31 |
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ |
#define _ASM_GENERIC_BITOPS_SCHED_H_ |
#include <linux/compiler.h> /* unlikely() */ |
#include <asm/types.h> |
/* |
* Every architecture must define this function. It's the fastest |
* way of searching a 100-bit bitmap. It's guaranteed that at least |
* one of the 100 bits is cleared. |
*/ |
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
	/* The 100-bit bitmap fits in two 64-bit words. */
	if (b[0])
		return __ffs(b[0]);
	return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
	/* The 100-bit bitmap fits in four 32-bit words; at least one
	 * bit is guaranteed set, so the last word needs no check. */
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 32;
	if (b[2])
		return __ffs(b[2]) + 64;
	return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ |
/drivers/include/linux/asm-generic/bitops |
---|
Property changes: |
Added: svn:ignore |
+*.o |
+*.obj |
/drivers/include/linux/asm-generic/atomic-long.h |
---|
0,0 → 1,258 |
#ifndef _ASM_GENERIC_ATOMIC_LONG_H |
#define _ASM_GENERIC_ATOMIC_LONG_H |
/* |
* Copyright (C) 2005 Silicon Graphics, Inc. |
* Christoph Lameter |
* |
* Allows to provide arch independent atomic definitions without the need to |
* edit all arch specific atomic.h files. |
*/ |
#include <asm/types.h> |
/* |
* Suppport for atomic_long_t |
* |
* Casts for parameters are avoided for existing atomic functions in order to |
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the |
* macros of a platform may have. |
*/ |
#if BITS_PER_LONG == 64 |
typedef atomic64_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) |
/* Read an atomic_long_t (backed by atomic64_t on 64-bit builds). */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic64_read((atomic64_t *)l);
}
/* Set an atomic_long_t to i. */
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic64_set((atomic64_t *)l, i);
}
/* Atomically increment an atomic_long_t. */
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic64_inc((atomic64_t *)l);
}
/* Atomically decrement an atomic_long_t. */
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic64_dec((atomic64_t *)l);
}
/* Atomically add i to an atomic_long_t. */
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic64_add(i, (atomic64_t *)l);
}
/* Atomically subtract i from an atomic_long_t. */
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic64_sub(i, (atomic64_t *)l);
}
/* Subtract i; true iff the result is zero. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic64_sub_and_test(i, (atomic64_t *)l);
}
/* Decrement; true iff the result is zero. */
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic64_dec_and_test((atomic64_t *)l);
}
/* Increment; true iff the result is zero. */
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic64_inc_and_test((atomic64_t *)l);
}
/* Add @i; return true iff the result is negative. */
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic64_add_negative(i, (atomic64_t *)l);
}
/* Add @i and return the new counter value. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic64_add_return(i, (atomic64_t *)l);
}
/* Subtract @i and return the new counter value. */
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic64_sub_return(i, (atomic64_t *)l);
}
/* Increment and return the new counter value. */
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic64_inc_return((atomic64_t *)l);
}
/* Decrement and return the new counter value. */
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic64_dec_return((atomic64_t *)l);
}
/* Add @a to the counter unless it currently equals @u; report success. */
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic64_add_unless((atomic64_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic64_xchg((atomic64_t *)(v), (new))) |
#else /* BITS_PER_LONG == 64 */ |
typedef atomic_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) |
/* Read the counter and return its value as a long. */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic_read((atomic_t *)l);
}
/* Store @i into the counter. */
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic_set((atomic_t *)l, i);
}
/* Atomically increment the counter by one. */
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic_inc((atomic_t *)l);
}
/* Atomically decrement the counter by one. */
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic_dec((atomic_t *)l);
}
/* Atomically add @i to the counter. */
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic_add(i, (atomic_t *)l);
}
/* Atomically subtract @i from the counter. */
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic_sub(i, (atomic_t *)l);
}
/* Subtract @i; return true iff the result is zero. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic_sub_and_test(i, (atomic_t *)l);
}
/* Decrement; return true iff the result is zero. */
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic_dec_and_test((atomic_t *)l);
}
/* Increment; return true iff the result is zero. */
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic_inc_and_test((atomic_t *)l);
}
/* Add @i; return true iff the result is negative. */
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic_add_negative(i, (atomic_t *)l);
}
/* Add @i and return the new counter value. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic_add_return(i, (atomic_t *)l);
}
/* Subtract @i and return the new counter value. */
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic_sub_return(i, (atomic_t *)l);
}
/* Increment and return the new counter value. */
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic_inc_return((atomic_t *)l);
}
/* Decrement and return the new counter value. */
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic_dec_return((atomic_t *)l);
}
/* Add @a to the counter unless it currently equals @u; report success. */
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic_add_unless((atomic_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic_cmpxchg((atomic_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic_xchg((atomic_t *)(v), (new))) |
#endif /* BITS_PER_LONG == 64 */ |
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ |
/drivers/include/linux/asm-generic/bitsperlong.h |
---|
0,0 → 1,32 |
#ifndef __ASM_GENERIC_BITS_PER_LONG |
#define __ASM_GENERIC_BITS_PER_LONG |
/* |
* There seems to be no way of detecting this automatically from user |
* space, so 64 bit architectures should override this in their |
* bitsperlong.h. In particular, an architecture that supports |
* both 32 and 64 bit user space must not rely on CONFIG_64BIT |
* to decide it, but rather check a compiler provided macro. |
*/ |
#ifndef __BITS_PER_LONG |
#define __BITS_PER_LONG 32 |
#endif |
#ifdef __KERNEL__ |
#ifdef CONFIG_64BIT |
#define BITS_PER_LONG 64 |
#else |
#define BITS_PER_LONG 32 |
#endif /* CONFIG_64BIT */ |
/* |
* FIXME: The check currently breaks x86-64 build, so it's |
* temporarily disabled. Please fix x86-64 and reenable |
*/ |
#if 0 && BITS_PER_LONG != __BITS_PER_LONG |
#error Inconsistent word size. Check asm/bitsperlong.h |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/linux/asm-generic/int-ll64.h |
---|
0,0 → 1,78 |
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _ASM_GENERIC_INT_LL64_H |
#define _ASM_GENERIC_INT_LL64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
#ifdef __GNUC__ |
__extension__ typedef __signed__ long long __s64; |
__extension__ typedef unsigned long long __u64; |
#else |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
#endif /* __ASSEMBLY__ */ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef signed char s8; |
typedef unsigned char u8; |
typedef signed short s16; |
typedef unsigned short u16; |
typedef signed int s32; |
typedef unsigned int u32; |
typedef signed long long s64; |
typedef unsigned long long u64; |
#define S8_C(x) x |
#define U8_C(x) x ## U |
#define S16_C(x) x |
#define U16_C(x) x ## U |
#define S32_C(x) x |
#define U32_C(x) x ## U |
#define S64_C(x) x ## LL |
#define U64_C(x) x ## ULL |
#else /* __ASSEMBLY__ */ |
#define S8_C(x) x |
#define U8_C(x) x |
#define S16_C(x) x |
#define U16_C(x) x |
#define S32_C(x) x |
#define U32_C(x) x |
#define S64_C(x) x |
#define U64_C(x) x |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_INT_LL64_H */ |
/drivers/include/linux/asm-generic/types.h |
---|
0,0 → 1,42 |
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used practically everywhere now, |
* so use it as a reasonable default. |
*/ |
#include <asm-generic/int-ll64.h> |
#ifndef __ASSEMBLY__ |
typedef unsigned short umode_t; |
#endif /* __ASSEMBLY__ */ |
/* |
* These aren't exported outside the kernel to avoid name space clashes |
*/ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
/* |
* DMA addresses may be very different from physical addresses |
* and pointers. i386 and powerpc may have 64 bit DMA on 32 bit |
* systems, while sparc64 uses 32 bit DMA addresses for 64 bit |
* physical addresses. |
* This default defines dma_addr_t to have the same size as |
* phys_addr_t, which is the most common way. |
* Do not define the dma64_addr_t type, which never really |
* worked. |
*/ |
#ifndef dma_addr_t |
#ifdef CONFIG_PHYS_ADDR_T_64BIT |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif /* CONFIG_PHYS_ADDR_T_64BIT */ |
#endif /* dma_addr_t */ |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_TYPES_H */ |
/drivers/include/linux/asm-generic |
---|
Property changes: |
Added: svn:ignore |
+*.o |
+*.obj |
/drivers/include/ddk.h |
---|
3,10 → 3,10 |
#ifndef __DDK_H__ |
#define __DDK_H__ |
#include <linux/kernel.h> |
#include <kernel.h> |
#include <linux/errno.h> |
#include <linux/spinlock.h> |
#include <linux/mutex.h> |
#include <mutex.h> |
#include <linux/pci.h> |
17,6 → 17,12 |
#define PG_NOCACHE 0x018 |
#define PG_SHARED 0x200 |
#define _PAGE_PRESENT (1<<0) |
#define _PAGE_RW (1<<1) |
#define _PAGE_PWT (1<<3) |
#define _PAGE_PCD (1<<4) |
#define _PAGE_PAT (1<<7) |
#define MANUAL_DESTROY 0x80000000 |
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__) |
25,8 → 31,8 |
typedef struct |
{ |
u32 code; |
u32 data[5]; |
u32_t code; |
u32_t data[5]; |
}kevent_t; |
typedef union |
33,16 → 39,16 |
{ |
struct |
{ |
u32 handle; |
u32 euid; |
u32_t handle; |
u32_t euid; |
}; |
u64 raw; |
u64_t raw; |
}evhandle_t; |
typedef struct |
{ |
u32 handle; |
u32 io_code; |
u32_t handle; |
u32_t io_code; |
void *input; |
int inp_size; |
void *output; |
59,10 → 65,16 |
int ddk_init(struct ddk_params *params); |
u32 drvEntry(int, char *)__asm__("_drvEntry"); |
u32_t drvEntry(int, char *)__asm__("_drvEntry"); |
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) |
{ |
// if (size != 0 && n > SIZE_MAX / size) |
// return NULL; |
return kmalloc(n * size, flags); |
} |
#endif /* DDK_H */ |
/drivers/include/syscall.h |
---|
4,9 → 4,6 |
#ifndef __SYSCALL_H__ |
#define __SYSCALL_H__ |
typedef u32 addr_t; |
typedef u32 count_t; |
/////////////////////////////////////////////////////////////////////////////// |
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport)) |
28,7 → 25,7 |
void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace"); |
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32 flags)__asm__("MapIoMem"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32_t flags)__asm__("MapIoMem"); |
void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc"); |
void* STDCALL KernelFree(void *mem)__asm__("KernelFree"); |
void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc"); |
36,20 → 33,20 |
void* STDCALL GetDisplay(void)__asm__("GetDisplay"); |
u32 IMPORT GetTimerTicks(void)__asm__("GetTimerTicks"); |
u32_t IMPORT GetTimerTicks(void)__asm__("GetTimerTicks"); |
addr_t STDCALL AllocPage(void)__asm__("AllocPage"); |
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages"); |
void IMPORT __attribute__((regparm(1))) |
FreePage(addr_t page)__asm__("FreePage"); |
void STDCALL MapPage(void *vaddr, addr_t paddr, u32 flags)__asm__("MapPage"); |
void STDCALL MapPage(void *vaddr, addr_t paddr, u32_t flags)__asm__("MapPage"); |
void* STDCALL CreateRingBuffer(size_t size, u32 map)__asm__("CreateRingBuffer"); |
void* STDCALL CreateRingBuffer(size_t size, u32_t map)__asm__("CreateRingBuffer"); |
u32 STDCALL RegService(char *name, srv_proc_t proc)__asm__("RegService"); |
u32_t STDCALL RegService(char *name, srv_proc_t proc)__asm__("RegService"); |
int STDCALL AttachIntHandler(int irq, void *handler, u32 access) __asm__("AttachIntHandler"); |
int STDCALL AttachIntHandler(int irq, void *handler, u32_t access) __asm__("AttachIntHandler"); |
void FASTCALL MutexInit(struct mutex*)__asm__("MutexInit"); |
void FASTCALL MutexLock(struct mutex*)__asm__("MutexLock"); |
56,7 → 53,7 |
void FASTCALL MutexUnlock(struct mutex*)__asm__("MutexUnlock"); |
addr_t IMPORT GetStackBase(void)__asm__("GetStackBase"); |
u32 IMPORT GetPid(void)__asm__("GetPid"); |
u32_t IMPORT GetPid(void)__asm__("GetPid"); |
u32 STDCALL TimerHS(u32 delay, u32 interval, |
void *fn, void *data)asm("TimerHS"); |
70,16 → 67,16 |
void STDCALL SetMouseData(int btn, int x, int y, |
int z, int h)__asm__("SetMouseData"); |
void FASTCALL SetKeyboardData(u32 data)__asm__("SetKeyboardData"); |
void FASTCALL SetKeyboardData(u32_t data)__asm__("SetKeyboardData"); |
u8 STDCALL PciRead8 (u32 bus, u32 devfn, u32 reg)__asm__("PciRead8"); |
u16 STDCALL PciRead16(u32 bus, u32 devfn, u32 reg)__asm__("PciRead16"); |
u32 STDCALL PciRead32(u32 bus, u32 devfn, u32 reg)__asm__("PciRead32"); |
u8_t STDCALL PciRead8 (u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead8"); |
u16_t STDCALL PciRead16(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead16"); |
u32_t STDCALL PciRead32(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead32"); |
u32 STDCALL PciWrite8 (u32 bus, u32 devfn, u32 reg,u8 val) __asm__("PciWrite8"); |
u32 STDCALL PciWrite16(u32 bus, u32 devfn, u32 reg,u16 val)__asm__("PciWrite16"); |
u32 STDCALL PciWrite32(u32 bus, u32 devfn, u32 reg,u32 val)__asm__("PciWrite32"); |
u32_t STDCALL PciWrite8 (u32_t bus, u32_t devfn, u32_t reg,u8_t val) __asm__("PciWrite8"); |
u32_t STDCALL PciWrite16(u32_t bus, u32_t devfn, u32_t reg,u16_t val)__asm__("PciWrite16"); |
u32_t STDCALL PciWrite32(u32_t bus, u32_t devfn, u32_t reg,u32_t val)__asm__("PciWrite32"); |
#define pciReadByte(tag, reg) \ |
PciRead8(PCI_BUS_FROM_TAG(tag),PCI_DFN_FROM_TAG(tag),(reg)) |
161,7 → 158,7 |
}; |
static inline evhandle_t CreateEvent(kevent_t *ev, u32 flags) |
static inline evhandle_t CreateEvent(kevent_t *ev, u32_t flags) |
{ |
evhandle_t evh; |
175,7 → 172,7 |
return evh; |
}; |
static inline void RaiseEvent(evhandle_t evh, u32 flags, kevent_t *ev) |
static inline void RaiseEvent(evhandle_t evh, u32_t flags, kevent_t *ev) |
{ |
__asm__ __volatile__ ( |
"call *__imp__RaiseEvent" |
212,9 → 209,9 |
__asm__ __volatile__ ("":::"ebx","ecx","edx","esi","edi"); |
}; |
static inline u32 GetEvent(kevent_t *ev) |
static inline u32_t GetEvent(kevent_t *ev) |
{ |
u32 handle; |
u32_t handle; |
__asm__ __volatile__ ( |
"call *__imp__GetEvent" |
256,9 → 253,9 |
return retval; |
} |
static inline u32 GetPgAddr(void *mem) |
static inline u32_t GetPgAddr(void *mem) |
{ |
u32 retval; |
u32_t retval; |
__asm__ __volatile__ ( |
"call *__imp__GetPgAddr \n\t" |
267,7 → 264,7 |
return retval; |
}; |
static inline void CommitPages(void *mem, u32 page, u32 size) |
static inline void CommitPages(void *mem, u32_t page, u32_t size) |
{ |
size = (size+4095) & ~4095; |
__asm__ __volatile__ ( |
287,7 → 284,7 |
__asm__ __volatile__ ("":::"eax","ecx"); |
}; |
static inline void usleep(u32 delay) |
static inline void usleep(u32_t delay) |
{ |
if( !delay ) |
delay++; |
300,7 → 297,7 |
:::"eax","ebx","ecx","edx"); |
}; |
static inline void udelay1(u32 delay) |
static inline void udelay(u32_t delay) |
{ |
if(!delay) delay++; |
delay*= 100; |
314,7 → 311,7 |
} |
} |
static inline void msleep1(unsigned int msecs) |
static inline void msleep(unsigned int msecs) |
{ |
msecs /= 10; |
if(!msecs) msecs = 1; |
327,7 → 324,7 |
}; |
static inline void mdelay1(u32 time) |
static inline void mdelay(u32_t time) |
{ |
time /= 10; |
if(!time) time = 1; |
340,9 → 337,9 |
}; |
static inline u32 __PciApi(int cmd) |
static inline u32_t __PciApi(int cmd) |
{ |
u32 retval; |
u32_t retval; |
__asm__ __volatile__ ( |
"call *__imp__PciApi \n\t" |
354,7 → 351,7 |
return retval; |
}; |
static inline void* __CreateObject(u32 pid, size_t size) |
static inline void* __CreateObject(u32_t pid, size_t size) |
{ |
void *retval; |
377,9 → 374,9 |
:::"eax","ebx","ecx","edx","esi","edi","cc","memory"); |
} |
static inline u32 GetService(const char *name) |
static inline u32_t GetService(const char *name) |
{ |
u32 handle; |
u32_t handle; |
__asm__ __volatile__ |
( |
392,9 → 389,9 |
return handle; |
}; |
static inline u32 safe_cli(void) |
static inline u32_t safe_cli(void) |
{ |
u32 ifl; |
u32_t ifl; |
__asm__ __volatile__ ( |
"pushf\n\t" |
"popl %0\n\t" |
403,15 → 400,15 |
return ifl; |
} |
static inline void safe_sti(u32 efl) |
static inline void safe_sti(u32_t efl) |
{ |
if (efl & (1<<9)) |
__asm__ __volatile__ ("sti"); |
} |
static inline u32 get_eflags(void) |
static inline u32_t get_eflags(void) |
{ |
u32 val; |
u32_t val; |
asm volatile ( |
"pushfl\n\t" |
"popl %0\n" |
421,7 → 418,7 |
static inline void __clear (void * dst, unsigned len) |
{ |
u32 tmp; |
u32_t tmp; |
__asm__ __volatile__ ( |
"cld \n\t" |
"rep stosb \n" |
430,43 → 427,43 |
__asm__ __volatile__ ("":::"ecx","edi"); |
}; |
static inline void out8(const u16 port, const u8 val) |
static inline void out8(const u16_t port, const u8_t val) |
{ |
__asm__ __volatile__ |
("outb %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline void out16(const u16 port, const u16 val) |
static inline void out16(const u16_t port, const u16_t val) |
{ |
__asm__ __volatile__ |
("outw %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline void out32(const u16 port, const u32 val) |
static inline void out32(const u16_t port, const u32_t val) |
{ |
__asm__ __volatile__ |
("outl %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline u8 in8(const u16 port) |
static inline u8_t in8(const u16_t port) |
{ |
u8 tmp; |
u8_t tmp; |
__asm__ __volatile__ |
("inb %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
}; |
static inline u16 in16(const u16 port) |
static inline u16_t in16(const u16_t port) |
{ |
u16 tmp; |
u16_t tmp; |
__asm__ __volatile__ |
("inw %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
}; |
static inline u32 in32(const u16 port) |
static inline u32_t in32(const u16_t port) |
{ |
u32 tmp; |
u32_t tmp; |
__asm__ __volatile__ |
("inl %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
502,12 → 499,12 |
int drm_order(unsigned long size); |
static inline void __iomem *ioremap(u32 offset, size_t size) |
static inline void __iomem *ioremap(uint32_t offset, size_t size) |
{ |
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100); |
} |
static inline void __iomem *ioremap_wc(u32 offset, size_t size) |
static inline void __iomem *ioremap_wc(uint32_t offset, size_t size) |
{ |
return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100); |
} |
/drivers/ddk/Makefile |
---|
1,4 → 1,5 |
CC = gcc |
AS = as |
5,14 → 6,8 |
DRV_TOPDIR = $(CURDIR)/.. |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) \ |
-I$(DRV_INCLUDES)/asm \ |
-I$(DRV_INCLUDES)/uapi |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU |
DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI |
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \ |
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 |
30,7 → 25,6 |
io/write.c \ |
linux/bitmap.c \ |
linux/dmi.c \ |
linux/find_next_bit.c \ |
linux/idr.c \ |
linux/interval_tree.c \ |
linux/firmware.c \ |
/drivers/ddk/linux/find_next_bit.c |
---|
File deleted |
/drivers/ddk/linux/dmi.c |
---|
7,9 → 7,12 |
#include <linux/dmi.h> |
#include <syscall.h> |
#define pr_debug dbgprintf |
#define pr_info printf |
static void *dmi_alloc(unsigned len) |
{ |
return __builtin_malloc(len); |
return malloc(len); |
}; |
/* |
/drivers/ddk/linux/bitmap.c |
---|
132,9 → 132,7 |
lower = src[off + k]; |
if (left && off + k == lim - 1) |
lower &= mask; |
dst[k] = lower >> rem; |
if (rem) |
dst[k] |= upper << (BITS_PER_LONG - rem); |
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; |
if (left && k == lim - 1) |
dst[k] &= mask; |
} |
175,9 → 173,7 |
upper = src[k]; |
if (left && k == lim - 1) |
upper &= (1UL << left) - 1; |
dst[k + off] = upper << rem; |
if (rem) |
dst[k + off] |= lower >> (BITS_PER_LONG - rem); |
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; |
if (left && k + off == lim - 1) |
dst[k + off] &= (1UL << left) - 1; |
} |
327,25 → 323,23 |
} |
EXPORT_SYMBOL(bitmap_clear); |
/** |
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area |
/* |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* @align_offset: Alignment offset for zero area. |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
* the bit offset of all zero areas this function finds plus @align_offset |
* is multiple of that power of 2. |
* the bit offset of all zero areas this function finds is multiples of that |
* power of 2. A @align_mask of 0 means no alignment is required. |
*/ |
unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask, |
unsigned long align_offset) |
unsigned long align_mask) |
{ |
unsigned long index, end, i; |
again: |
352,7 → 346,7 |
index = find_next_zero_bit(map, size, start); |
/* Align allocation */ |
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; |
index = __ALIGN_MASK(index, align_mask); |
end = index + nr; |
if (end > size) |
364,7 → 358,7 |
} |
return index; |
} |
EXPORT_SYMBOL(bitmap_find_next_zero_area_off); |
EXPORT_SYMBOL(bitmap_find_next_zero_area); |
/* |
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers, |
605,7 → 599,7 |
* |
* Further lets say we use the following code, invoking |
* bitmap_fold() then bitmap_onto, as suggested above to |
* avoid the possibility of an empty @dst result: |
 * avoid the possibility of an empty @dst result: |
* |
* unsigned long *tmp; // a temporary bitmap's bits |
* |
/drivers/ddk/linux/idr.c |
---|
20,16 → 20,20 |
* that id to this code and it returns your pointer. |
*/ |
#ifndef TEST // to test in user space... |
#include <linux/slab.h> |
#include <linux/kernel.h> |
#include <linux/export.h> |
#endif |
#include <linux/err.h> |
#include <linux/string.h> |
#include <linux/bitops.h> |
#include <linux/idr.h> |
#include <linux/spinlock.h> |
//#include <stdlib.h> |
static inline void * __must_check ERR_PTR(long error) |
{ |
return (void *) error; |
} |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset); |
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) |
128,7 → 132,7 |
{ |
if (idr->hint == p) |
RCU_INIT_POINTER(idr->hint, NULL); |
call_rcu(&p->rcu_head, idr_layer_rcu_free); |
idr_layer_rcu_free(&p->rcu_head); |
} |
/* only called when idp->lock is held */ |
496,7 → 500,7 |
n = id & IDR_MASK; |
if (likely(p != NULL && test_bit(n, p->bitmap))) { |
__clear_bit(n, p->bitmap); |
RCU_INIT_POINTER(p->ary[n], NULL); |
rcu_assign_pointer(p->ary[n], NULL); |
to_free = NULL; |
while(*paa && ! --((**paa)->count)){ |
if (to_free) |
560,7 → 564,7 |
n = idp->layers * IDR_BITS; |
*paa = idp->top; |
RCU_INIT_POINTER(idp->top, NULL); |
rcu_assign_pointer(idp->top, NULL); |
max = idr_max(idp->layers); |
id = 0; |
595,7 → 599,7 |
* idr_destroy(). |
* |
* A typical clean-up sequence for objects stored in an idr tree will use |
* idr_for_each() to free all objects, if necessary, then idr_destroy() to |
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to |
* free up the id mappings and cached idr_layers. |
*/ |
void idr_destroy(struct idr *idp) |
1115,3 → 1119,129 |
} |
EXPORT_SYMBOL(ida_init); |
/*
 * find_first_bit - find the first set bit in a memory region.
 * @addr: base address of the bitmap
 * @size: size of the bitmap in bits
 *
 * Returns the bit number of the first set bit, or @size if no bit
 * is set in the region.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	const unsigned long *p = addr;
	unsigned long result = 0;
	unsigned long tmp;

	/* Scan whole words while at least BITS_PER_LONG bits remain. */
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;

	/* Partial trailing word: mask off the bits beyond @size. */
	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found:
	return result + __ffs(tmp);
}
/*
 * find_next_bit - find the next set bit in a memory region.
 * @addr: base address of the bitmap
 * @size: size of the bitmap in bits
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the next set bit at or after @offset,
 * or @size if there are no further set bits.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
	const unsigned long *p = addr + BITOP_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	/* From here on, @size and @offset are relative to word @result. */
	size -= result;
	offset %= BITS_PER_LONG;
	/* Partial first word: clear the bits below @offset so they are skipped. */
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* Scan whole words. */
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	/* Partial trailing word: mask off the bits beyond @size. */
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
/*
 * find_next_zero_bit - find the next cleared bit in a memory region.
 * @addr: base address of the bitmap
 * @size: size of the bitmap in bits
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the next zero bit at or after @offset,
 * or @size if every remaining bit is set.
 */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
	const unsigned long *p = addr + BITOP_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	/* From here on, @size and @offset are relative to word @result. */
	size -= result;
	offset %= BITS_PER_LONG;
	/* Partial first word: force the bits below @offset to 1 so they are skipped. */
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* Scan whole words; ~tmp != 0 means the word contains a zero bit. */
	while (size & ~(BITS_PER_LONG-1)) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	/* Partial trailing word: set the bits beyond @size so they are ignored. */
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
/*
 * hweight32 - count the set bits (population count) of a 32-bit word.
 * @w: the word to weigh
 *
 * Branch-free SWAR reduction: pairwise bit sums are folded into
 * nibble, byte, and finally whole-word totals.
 */
unsigned int hweight32(unsigned int w)
{
	unsigned int sum;

	sum = w - ((w >> 1) & 0x55555555);			/* 2-bit sums  */
	sum = (sum & 0x33333333) + ((sum >> 2) & 0x33333333);	/* 4-bit sums  */
	sum = (sum + (sum >> 4)) & 0x0F0F0F0F;			/* 8-bit sums  */
	sum += sum >> 8;					/* 16-bit sums */
	sum += sum >> 16;					/* 32-bit sum  */
	return sum & 0x000000FF;
}
/*
 * hweight64 - count the set bits (population count) of a 64-bit value.
 * @w: the value to weigh
 *
 * On 32-bit builds the value is split into halves and each half is
 * counted with hweight32(); on 64-bit builds a single SWAR reduction
 * is used.
 * NOTE(review): there is no #else branch — a BITS_PER_LONG other than
 * 32 or 64 would fall off the end without returning a value; confirm
 * only those two configurations are possible here.
 */
unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);
	return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
}
/drivers/ddk/linux/scatterlist.c |
---|
7,7 → 7,6 |
* Version 2. See the file COPYING for more details. |
*/ |
#include <linux/export.h> |
#include <linux/slab.h> |
#include <linux/scatterlist.h> |
/** |
71,7 → 70,7 |
**/ |
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) |
{ |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
#ifndef ARCH_HAS_SG_CHAIN |
struct scatterlist *ret = &sgl[nents - 1]; |
#else |
struct scatterlist *sg, *ret = NULL; |
183,10 → 182,10 |
} |
table->orig_nents -= sg_size; |
if (skip_first_chunk) |
if (!skip_first_chunk) { |
free_fn(sgl, alloc_size); |
skip_first_chunk = false; |
else |
free_fn(sgl, alloc_size); |
} |
sgl = next; |
} |
235,7 → 234,7 |
if (nents == 0) |
return -EINVAL; |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
#ifndef ARCH_HAS_SG_CHAIN |
if (WARN_ON_ONCE(nents > max_ents)) |
return -EINVAL; |
#endif |
/drivers/ddk/linux/workqueue.c |
---|
1,39 → 1,5 |
/* |
* kernel/workqueue.c - generic async execution with shared worker pool |
* |
* Copyright (C) 2002 Ingo Molnar |
* |
* Derived from the taskqueue/keventd code by: |
* David Woodhouse <dwmw2@infradead.org> |
* Andrew Morton |
* Kai Petzke <wpp@marie.physik.tu-berlin.de> |
* Theodore Ts'o <tytso@mit.edu> |
* |
* Made to use alloc_percpu by Christoph Lameter. |
* |
* Copyright (C) 2010 SUSE Linux Products GmbH |
* Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
* |
* This is the generic async execution mechanism. Work items as are |
* executed in process context. The worker pool is shared and |
* automatically managed. There are two worker pools for each CPU (one for |
* normal work items and the other for high priority ones) and some extra |
* pools for workqueues which are not bound to any specific CPU - the |
* number of these backing pools is dynamic. |
* |
* Please read Documentation/workqueue.txt for details. |
*/ |
#include <linux/export.h> |
#include <linux/kernel.h> |
#include <linux/sched.h> |
#include <linux/completion.h> |
#include <linux/workqueue.h> |
#include <linux/slab.h> |
#include <linux/lockdep.h> |
#include <linux/idr.h> |
#include <ddk.h> |
extern int driver_wq_state; |
/drivers/ddk/linux/time.c |
---|
1,4 → 1,4 |
#include <linux/jiffies.h> |
#include <jiffies.h> |
131,7 → 131,6 |
>> MSEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(msecs_to_jiffies); |
unsigned long usecs_to_jiffies(const unsigned int u) |
{ |
146,27 → 145,12 |
>> USEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(usecs_to_jiffies); |
/* |
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note |
* that a remainder subtract here would not do the right thing as the |
* resolution values don't fall on second boundries. I.e. the line: |
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. |
* Note that due to the small error in the multiplier here, this |
* rounding is incorrect for sufficiently large values of tv_nsec, but |
* well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're |
* OK. |
* |
* Rather, we just shift the bits off the right. |
* |
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec |
* value to a scaled second value. |
*/ |
static unsigned long |
__timespec_to_jiffies(unsigned long sec, long nsec) |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
{ |
nsec = nsec + TICK_NSEC - 1; |
unsigned long sec = value->tv_sec; |
long nsec = value->tv_nsec + TICK_NSEC - 1; |
if (sec >= MAX_SEC_IN_JIFFIES){ |
sec = MAX_SEC_IN_JIFFIES; |
178,28 → 162,6 |
} |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
{ |
return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); |
} |
EXPORT_SYMBOL(timespec_to_jiffies); |
void |
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) |
{ |
/* |
* Convert jiffies to nanoseconds and separate with |
* one divide. |
*/ |
u32 rem; |
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, |
NSEC_PER_SEC, &rem); |
value->tv_nsec = rem; |
} |
EXPORT_SYMBOL(jiffies_to_timespec); |
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) |
{ |
u64 quotient; |
/drivers/ddk/linux/rbtree.c |
---|
101,7 → 101,7 |
* / \ / \ |
* p u --> P U |
* / / |
* n n |
* n N |
* |
* However, since g's parent might be red, and |
* 4) does not allow this, we need to recurse |
/drivers/ddk/linux/string.c |
---|
27,7 → 27,7 |
#ifndef __HAVE_ARCH_STRLCPY |
/** |
* strlcpy - Copy a C-string into a sized buffer |
* strlcpy - Copy a %NUL terminated string into a sized buffer |
* @dest: Where to copy the string to |
* @src: Where to copy the string from |
* @size: size of destination buffer |
/drivers/ddk/linux/dmapool.c |
---|
24,11 → 24,9 |
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/errno.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <pci.h> |
#include <syscall.h> |
144,7 → 142,7 |
{ |
struct dma_page *page; |
page = __builtin_malloc(sizeof(*page)); |
page = malloc(sizeof(*page)); |
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
230,7 → 228,7 |
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
u32 efl; |
u32_t efl; |
struct dma_page *page; |
size_t offset; |
void *retval; |
264,7 → 262,7 |
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
u32 efl; |
u32_t efl; |
efl = safe_cli(); |
296,7 → 294,7 |
unsigned long flags; |
unsigned int offset; |
u32 efl; |
u32_t efl; |
page = pool_find_page(pool, dma); |
if (!page) { |
/drivers/ddk/linux/firmware.c |
---|
1,8 → 1,6 |
#include <linux/kernel.h> |
#include <linux/slab.h> |
#include <linux/byteorder/little_endian.h> |
#include <linux/gfp.h> |
#include <linux/errno.h> |
#include <linux/firmware.h> |
/drivers/ddk/linux/list_sort.c |
---|
1,145 → 1,101 |
#define pr_fmt(fmt) "list_sort_test: " fmt |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/list_sort.h> |
#include <linux/slab.h> |
#include <linux/list.h> |
#define MAX_LIST_LENGTH_BITS 20 |
/* |
* Returns a list organized in an intermediate format suited |
* to chaining of merge() calls: null-terminated, no reserved or |
* sentinel head node, "prev" links not maintained. |
*/ |
static struct list_head *merge(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *a, struct list_head *b) |
{ |
struct list_head head, *tail = &head; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a = a->next; |
} else { |
tail->next = b; |
b = b->next; |
} |
tail = tail->next; |
} |
tail->next = a?:b; |
return head.next; |
} |
/* |
* Combine final list merge with restoration of standard doubly-linked |
* list structure. This approach duplicates code from merge(), but |
* runs faster than the tidier alternatives of either a separate final |
* prev-link restoration pass, or maintaining the prev links |
* throughout. |
*/ |
static void merge_and_restore_back_links(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *head, |
struct list_head *a, struct list_head *b) |
{ |
struct list_head *tail = head; |
u8 count = 0; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a->prev = tail; |
a = a->next; |
} else { |
tail->next = b; |
b->prev = tail; |
b = b->next; |
} |
tail = tail->next; |
} |
tail->next = a ? : b; |
do { |
/* |
* In worst cases this loop may run many iterations. |
* Continue callbacks to the client even though no |
* element comparison is needed, so the client's cmp() |
* routine can invoke cond_resched() periodically. |
*/ |
if (unlikely(!(++count))) |
(*cmp)(priv, tail->next, tail->next); |
tail->next->prev = tail; |
tail = tail->next; |
} while (tail->next); |
tail->next = head; |
head->prev = tail; |
} |
/** |
* list_sort - sort a list |
* @priv: private data, opaque to list_sort(), passed to @cmp |
* list_sort - sort a list. |
* @priv: private data, passed to @cmp |
* @head: the list to sort |
* @cmp: the elements comparison function |
* |
* This function implements "merge sort", which has O(nlog(n)) |
* complexity. |
* This function has been implemented by Mark J Roberts <mjr@znex.org>. It |
* implements "merge sort" which has O(nlog(n)) complexity. The list is sorted |
* in ascending order. |
* |
* The comparison function @cmp must return a negative value if @a |
* should sort before @b, and a positive value if @a should sort after |
* @b. If @a and @b are equivalent, and their original relative |
* ordering is to be preserved, @cmp must return 0. |
* The comparison function @cmp is supposed to return a negative value if @a is |
* less than @b, and a positive value if @a is greater than @b. If @a and @b |
* are equivalent, then it does not matter what this function returns. |
*/ |
void list_sort(void *priv, struct list_head *head, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b)) |
{ |
struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists |
-- last slot is a sentinel */ |
int lev; /* index into part[] */ |
int max_lev = 0; |
struct list_head *list; |
struct list_head *p, *q, *e, *list, *tail, *oldhead; |
int insize, nmerges, psize, qsize, i; |
if (list_empty(head)) |
return; |
memset(part, 0, sizeof(part)); |
head->prev->next = NULL; |
list = head->next; |
list_del(head); |
insize = 1; |
for (;;) { |
p = oldhead = list; |
list = tail = NULL; |
nmerges = 0; |
while (list) { |
struct list_head *cur = list; |
list = list->next; |
cur->next = NULL; |
while (p) { |
nmerges++; |
q = p; |
psize = 0; |
for (i = 0; i < insize; i++) { |
psize++; |
q = q->next == oldhead ? NULL : q->next; |
if (!q) |
break; |
} |
for (lev = 0; part[lev]; lev++) { |
cur = merge(priv, cmp, part[lev], cur); |
part[lev] = NULL; |
qsize = insize; |
while (psize > 0 || (qsize > 0 && q)) { |
if (!psize) { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
} else if (!qsize || !q) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else if (cmp(priv, p, q) <= 0) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
} |
if (lev > max_lev) { |
if (unlikely(lev >= ARRAY_SIZE(part)-1)) { |
printk_once(KERN_DEBUG "list too long for efficiency\n"); |
lev--; |
if (tail) |
tail->next = e; |
else |
list = e; |
e->prev = tail; |
tail = e; |
} |
max_lev = lev; |
p = q; |
} |
part[lev] = cur; |
tail->next = list; |
list->prev = tail; |
if (nmerges <= 1) |
break; |
insize *= 2; |
} |
for (lev = 0; lev < max_lev; lev++) |
if (part[lev]) |
list = merge(priv, cmp, part[lev], list); |
head->next = list; |
head->prev = list->prev; |
list->prev->next = head; |
list->prev = head; |
} |
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); |
} |
EXPORT_SYMBOL(list_sort); |
/drivers/ddk/stdio/vsprintf.c |
---|
22,12 → 22,11 |
#include <linux/string.h> |
#include <linux/ctype.h> |
#include <linux/kernel.h> |
#include <errno-base.h> |
#include <linux/ioport.h> |
#include <linux/export.h> |
#include <asm/div64.h> |
#include <asm/page.h> /* for PAGE_SIZE */ |
static inline u64 div_u64(u64 dividend, u32 divisor) |
42,6 → 41,10 |
return div_s64_rem(dividend, divisor, &remainder); |
} |
/*
 * A format string paired with a pointer to the va_list holding its
 * arguments (kernel-style struct va_format).
 * NOTE(review): the consumer is not visible in this chunk --
 * presumably the %p-extension handling in vsprintf(); confirm
 * against the full file before relying on that.
 */
struct va_format {
const char *fmt;
va_list *va;
};
#define ZERO_SIZE_PTR ((void *)16) |
59,7 → 62,14 |
/* Works only for digits and letters, but small and fast */ |
#define TOLOWER(x) ((x) | 0x20) |
/*
 * Write @byte into @buf as two hex digits -- high nibble first, via
 * hex_asc_hi()/hex_asc_lo() -- and return the position just past them.
 */
static inline char *hex_byte_pack(char *buf, u8 byte)
{
	buf[0] = hex_asc_hi(byte);
	buf[1] = hex_asc_lo(byte);
	return buf + 2;
}
char *skip_spaces(const char *str) |
{ |
while (isspace(*str)) |
1287,7 → 1297,6 |
* %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address |
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper |
* case. |
* %*pE[achnops] print an escaped buffer |
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64 |
* bytes of the input) |
* %n is ignored |
/drivers/ddk/debug/dbglog.c |
---|
1,6 → 1,6 |
#include <ddk.h> |
#include <linux/mutex.h> |
#include <mutex.h> |
#include <syscall.h> |
#pragma pack(push, 1) |
/drivers/ddk/malloc/malloc.c |
---|
522,7 → 522,7 |
*/ |
#include <ddk.h> |
#include <linux/mutex.h> |
#include <mutex.h> |
#include <syscall.h> |
/* Version identifier to allow people to support multiple versions */ |