/drivers/ddk/linux/idr.c |
---|
175,20 → 175,7 |
} |
} |
/** |
* idr_pre_get - reserve resources for idr allocation |
* @idp: idr handle |
* @gfp_mask: memory allocation flags |
* |
* This function should be called prior to calling the idr_get_new* functions. |
* It preallocates enough memory to satisfy the worst possible allocation. The |
* caller should pass in GFP_KERNEL if possible. This of course requires that |
* no spinning locks be held. |
* |
* If the system is REALLY out of memory this function returns %0, |
* otherwise %1. |
*/ |
int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
while (idp->id_free_cnt < MAX_IDR_FREE) { |
struct idr_layer *new; |
199,13 → 186,12 |
} |
return 1; |
} |
EXPORT_SYMBOL(idr_pre_get); |
EXPORT_SYMBOL(__idr_pre_get); |
/** |
* sub_alloc - try to allocate an id without growing the tree depth |
* @idp: idr handle |
* @starting_id: id to start search at |
* @id: pointer to the allocated handle |
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer |
* @gfp_mask: allocation mask for idr_layer_alloc() |
* @layer_idr: optional idr passed to idr_layer_alloc() |
367,25 → 353,7 |
idr_mark_full(pa, id); |
} |
/** |
* idr_get_new_above - allocate new idr entry above or equal to a start id |
* @idp: idr handle |
* @ptr: pointer you want associated with the id |
* @starting_id: id to start search at |
* @id: pointer to the allocated handle |
* |
* This is the allocate id function. It should be called with any |
* required locks. |
* |
* If allocation from IDR's private freelist fails, idr_get_new_above() will |
* return %-EAGAIN. The caller should retry the idr_pre_get() call to refill |
* IDR's preallocation and then retry the idr_get_new_above() call. |
* |
* If the idr is full idr_get_new_above() will return %-ENOSPC. |
* |
* @id returns a value in the range @starting_id ... %0x7fffffff |
*/ |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
{ |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
int rv; |
398,7 → 366,7 |
*id = rv; |
return 0; |
} |
EXPORT_SYMBOL(idr_get_new_above); |
EXPORT_SYMBOL(__idr_get_new_above); |
/** |
* idr_preload - preload for idr_alloc() |
496,9 → 464,7 |
static void idr_remove_warning(int id) |
{ |
printk(KERN_WARNING |
"idr_remove called for id=%d which is not allocated.\n", id); |
// dump_stack(); |
WARN(1, "idr_remove called for id=%d which is not allocated.\n", id); |
} |
static void sub_remove(struct idr *idp, int shift, int id) |
548,8 → 514,7 |
struct idr_layer *p; |
struct idr_layer *to_free; |
/* see comment in idr_find_slowpath() */ |
if (WARN_ON_ONCE(id < 0)) |
if (id < 0) |
return; |
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
646,15 → 611,7 |
int n; |
struct idr_layer *p; |
/* |
* If @id is negative, idr_find() used to ignore the sign bit and |
* performed lookup with the rest of bits, which is weird and can |
* lead to very obscure bugs. We're now returning NULL for all |
* negative IDs but just in case somebody was depending on the sign |
* bit being ignored, let's trigger WARN_ON_ONCE() so that they can |
* be detected and fixed. WARN_ON_ONCE() can later be removed. |
*/ |
if (WARN_ON_ONCE(id < 0)) |
if (id < 0) |
return NULL; |
p = rcu_dereference_raw(idp->top); |
804,8 → 761,7 |
int n; |
struct idr_layer *p, *old_p; |
/* see comment in idr_find_slowpath() */ |
if (WARN_ON_ONCE(id < 0)) |
if (id < 0) |
return ERR_PTR(-EINVAL); |
p = idp->top; |
838,7 → 794,7 |
#endif |
void idr_init_cache(void) |
void __init idr_init_cache(void) |
{ |
//idr_layer_cache = kmem_cache_create("idr_layer_cache", |
// sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); |
858,7 → 814,6 |
} |
EXPORT_SYMBOL(idr_init); |
#if 0 |
/** |
* DOC: IDA description |
1002,7 → 957,7 |
if (ida->idr.id_free_cnt || ida->free_bitmap) { |
struct idr_layer *p = get_from_free_list(&ida->idr); |
if (p) |
kmem_cache_free(idr_layer_cache, p); |
kfree(p); |
} |
return 0; |
1052,8 → 1007,7 |
return; |
err: |
printk(KERN_WARNING |
"ida_remove called for id=%d which is not allocated.\n", id); |
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); |
} |
EXPORT_SYMBOL(ida_remove); |
1084,9 → 1038,7 |
EXPORT_SYMBOL(ida_init); |
#endif |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
/drivers/include/drm/drmP.h |
---|
388,6 → 388,7 |
spinlock_t read_lock; |
spinlock_t write_lock; |
}; |
#endif |
struct drm_freelist { |
int initialized; /**< Freelist in use */ |
400,7 → 401,6 |
atomic_t wfh; /**< If waiting for high mark */ |
spinlock_t lock; |
}; |
#endif |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
408,7 → 408,6 |
size_t size; |
} drm_dma_handle_t; |
#if 0 |
/** |
* Buffer entry. There is one of this for each buffer size order. |
*/ |
438,7 → 437,6 |
struct list_head head; |
struct mutex lock; |
}; |
#endif |
/** File private data */ |
struct drm_file { |
1027,7 → 1025,7 |
struct drm_info_node { |
struct list_head list; |
struct drm_minor *minor; |
struct drm_info_list *info_ent; |
const struct drm_info_list *info_ent; |
struct dentry *dent; |
}; |
1226,12 → 1224,6 |
#if 0 |
#ifdef __alpha__ |
#define drm_get_pci_domain(dev) dev->hose->index |
#else |
#define drm_get_pci_domain(dev) 0 |
#endif |
#if __OS_HAS_AGP |
static inline int drm_core_has_AGP(struct drm_device *dev) |
{ |
1246,39 → 1238,28 |
{ |
return drm_core_check_feature(dev, DRIVER_USE_MTRR); |
} |
#else |
#define drm_core_has_MTRR(dev) (0) |
#endif |
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB |
static inline int drm_mtrr_add(unsigned long offset, unsigned long size, |
unsigned int flags) |
static inline void drm_device_set_unplugged(struct drm_device *dev) |
{ |
return mtrr_add(offset, size, flags, 1); |
smp_wmb(); |
atomic_set(&dev->unplugged, 1); |
} |
static inline int drm_mtrr_del(int handle, unsigned long offset, |
unsigned long size, unsigned int flags) |
static inline int drm_device_is_unplugged(struct drm_device *dev) |
{ |
return mtrr_del(handle, offset, size); |
int ret = atomic_read(&dev->unplugged); |
smp_rmb(); |
return ret; |
} |
#else |
#define drm_core_has_MTRR(dev) (0) |
#define DRM_MTRR_WC 0 |
static inline int drm_mtrr_add(unsigned long offset, unsigned long size, |
unsigned int flags) |
static inline bool drm_modeset_is_locked(struct drm_device *dev) |
{ |
return 0; |
return mutex_is_locked(&dev->mode_config.mutex); |
} |
static inline int drm_mtrr_del(int handle, unsigned long offset, |
unsigned long size, unsigned int flags) |
{ |
return 0; |
} |
#endif |
/******************************************************************/ |
/** \name Internal function definitions */ |
/*@{*/ |
1528,8 → 1509,7 |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
/* Proc support (drm_proc.h) */ |
extern int drm_proc_init(struct drm_minor *minor, int minor_id, |
struct proc_dir_entry *root); |
extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root); |
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); |
/* Debugfs support */ |
/drivers/include/drm/drm_crtc.h |
---|
339,6 → 339,9 |
/* cursor controls */ |
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, |
uint32_t handle, uint32_t width, uint32_t height); |
int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, |
uint32_t handle, uint32_t width, uint32_t height, |
int32_t hot_x, int32_t hot_y); |
int (*cursor_move)(struct drm_crtc *crtc, int x, int y); |
/* Set gamma on the CRTC */ |
409,6 → 412,10 |
/* framebuffer the connector is currently bound to */ |
struct drm_framebuffer *fb; |
/* Temporary tracking of the old fb while a modeset is ongoing. Used |
* by drm_mode_set_config_internal to implement correct refcounting. */ |
struct drm_framebuffer *old_fb; |
bool enabled; |
/* Requested mode from modesetting. */ |
654,11 → 661,7 |
* @format_count: number of formats supported |
* @crtc: currently bound CRTC |
* @fb: currently bound fb |
* @gamma_size: size of gamma table |
* @gamma_store: gamma correction table |
* @enabled: enabled flag |
* @funcs: helper functions |
* @helper_private: storage for drver layer |
* @properties: property tracking for this plane |
*/ |
struct drm_plane { |
674,14 → 677,7 |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
/* CRTC gamma size for reporting to userspace */ |
uint32_t gamma_size; |
uint16_t *gamma_store; |
bool enabled; |
const struct drm_plane_funcs *funcs; |
void *helper_private; |
struct drm_object_properties properties; |
}; |
894,15 → 890,17 |
const uint32_t *formats, uint32_t format_count, |
bool priv); |
extern void drm_plane_cleanup(struct drm_plane *plane); |
extern void drm_plane_force_disable(struct drm_plane *plane); |
extern void drm_encoder_cleanup(struct drm_encoder *encoder); |
extern char *drm_get_connector_name(struct drm_connector *connector); |
extern char *drm_get_dpms_name(int val); |
extern char *drm_get_dvi_i_subconnector_name(int val); |
extern char *drm_get_dvi_i_select_name(int val); |
extern char *drm_get_tv_subconnector_name(int val); |
extern char *drm_get_tv_select_name(int val); |
extern const char *drm_get_connector_name(const struct drm_connector *connector); |
extern const char *drm_get_connector_status_name(enum drm_connector_status status); |
extern const char *drm_get_dpms_name(int val); |
extern const char *drm_get_dvi_i_subconnector_name(int val); |
extern const char *drm_get_dvi_i_select_name(int val); |
extern const char *drm_get_tv_subconnector_name(int val); |
extern const char *drm_get_tv_select_name(int val); |
extern void drm_fb_release(struct drm_file *file_priv); |
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); |
extern bool drm_probe_ddc(struct i2c_adapter *adapter); |
994,7 → 992,7 |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_dithering_property(struct drm_device *dev); |
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); |
extern char *drm_get_encoder_name(struct drm_encoder *encoder); |
extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
1022,6 → 1020,8 |
void *data, struct drm_file *file_priv); |
extern int drm_mode_cursor_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_cursor2_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addfb2(struct drm_device *dev, |
1094,5 → 1094,6 |
extern int drm_format_plane_cpp(uint32_t format, int plane); |
extern int drm_format_horz_chroma_subsampling(uint32_t format); |
extern int drm_format_vert_chroma_subsampling(uint32_t format); |
extern const char *drm_get_format_name(uint32_t format); |
#endif /* __DRM_CRTC_H__ */ |
/drivers/include/drm/drm_fixed.h |
---|
20,6 → 20,7 |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: Dave Airlie |
* Christian König |
*/ |
#ifndef DRM_FIXED_H |
#define DRM_FIXED_H |
65,4 → 66,95 |
tmp /= 2; |
return lower_32_bits(tmp); |
} |
#define DRM_FIXED_POINT 32 |
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) |
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) |
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) |
static inline s64 drm_int2fixp(int a) |
{ |
return ((s64)a) << DRM_FIXED_POINT; |
} |
static inline int drm_fixp2int(int64_t a) |
{ |
return ((s64)a) >> DRM_FIXED_POINT; |
} |
/*
 * Return the position of the highest bit that differs from the sign
 * bit (i.e. the magnitude's most significant set bit), or 0 when no
 * bit below 63 differs from the sign.
 */
static inline unsigned drm_fixp_msbset(int64_t a)
{
	const unsigned sign_bit = (unsigned)((a >> 63) & 1);
	unsigned pos = 62;

	while (pos > 0) {
		unsigned bit = (unsigned)((a >> pos) & 1);

		if (bit != sign_bit)
			return pos;
		--pos;
	}
	return 0;
}
/*
 * Multiply two 32.32 fixed-point values.
 *
 * When the operands' combined magnitude would overflow the 64-bit
 * product, both are pre-shifted right (the odd bit going to @a) and
 * the final result is rescaled to compensate.
 */
static inline s64 drm_fixp_mul(s64 a, s64 b)
{
	unsigned bits = drm_fixp_msbset(a) + drm_fixp_msbset(b);
	unsigned dropped = 0;
	s64 prod;

	if (bits > 61) {
		/* Pre-shift so a * b cannot overflow 64 bits. */
		dropped = bits - 61;
		a >>= (dropped >> 1) + (dropped & 1);
		b >>= dropped >> 1;
	}

	prod = a * b;

	if (dropped > DRM_FIXED_POINT)
		return prod << (dropped - DRM_FIXED_POINT);
	else if (dropped < DRM_FIXED_POINT)
		return prod >> (DRM_FIXED_POINT - dropped);
	else
		return prod;
}
/*
 * Divide two 32.32 fixed-point values.
 *
 * The numerator is scaled up as far as possible without overflow;
 * any remaining scale mismatch is applied to the denominator before
 * dividing, or to the quotient afterwards.
 */
static inline s64 drm_fixp_div(s64 a, s64 b)
{
	unsigned headroom = 62 - drm_fixp_msbset(a);
	s64 quot;

	a <<= headroom;

	if (headroom < DRM_FIXED_POINT)
		b >>= DRM_FIXED_POINT - headroom;

	quot = div64_s64(a, b);

	return (headroom > DRM_FIXED_POINT) ?
		quot >> (headroom - DRM_FIXED_POINT) : quot;
}
/*
 * Compute e^x for a 32.32 fixed-point @x via the Taylor series
 * e^|x| = 1 + |x| + |x|^2/2! + ..., summing terms until they drop
 * below ~1e-6.  Negative arguments use e^-|x| = 1 / e^|x|.
 */
static inline s64 drm_fixp_exp(s64 x)
{
	const s64 epsilon = div64_s64(DRM_FIXED_ONE, 1000000);
	s64 mag = (x < 0) ? -x : x;
	s64 acc = DRM_FIXED_ONE;
	s64 term = mag;
	u64 n = 1;

	while (term >= epsilon) {
		acc += term;
		n++;
		term = drm_fixp_mul(term, div64_s64(mag, n));
	}

	return (x < 0) ? drm_fixp_div(DRM_FIXED_ONE, acc) : acc;
}
#endif |
/drivers/include/drm/drm_memory.h |
---|
0,0 → 1,57 |
/** |
* \file drm_memory.h |
* Memory management wrappers for DRM |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com |
* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#include <drm/drmP.h> |
/** |
* Cut down version of drm_memory_debug.h, which used to be called |
* drm_memory.h. |
*/ |
#if __OS_HAS_AGP |
#ifdef HAVE_PAGE_AGP |
#include <asm/agp.h> |
#else |
# ifdef __powerpc__ |
# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) |
# else |
# define PAGE_AGP PAGE_KERNEL |
# endif |
#endif |
#else /* __OS_HAS_AGP */ |
#endif |
/drivers/include/drm/drm_mm.h |
---|
177,17 → 177,6 |
return drm_mm_get_block_range_generic(parent, size, alignment, 0, |
start, end, 0); |
} |
static inline struct drm_mm_node *drm_mm_get_color_block_range( |
struct drm_mm_node *parent, |
unsigned long size, |
unsigned alignment, |
unsigned long color, |
unsigned long start, |
unsigned long end) |
{ |
return drm_mm_get_block_range_generic(parent, size, alignment, color, |
start, end, 0); |
} |
static inline struct drm_mm_node *drm_mm_get_block_atomic_range( |
struct drm_mm_node *parent, |
unsigned long size, |
255,28 → 244,9 |
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, |
start, end, best_match); |
} |
static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm, |
unsigned long size, |
unsigned alignment, |
unsigned long color, |
bool best_match) |
{ |
return drm_mm_search_free_generic(mm,size, alignment, color, best_match); |
} |
static inline struct drm_mm_node *drm_mm_search_free_in_range_color( |
const struct drm_mm *mm, |
unsigned long size, |
unsigned alignment, |
unsigned long color, |
extern void drm_mm_init(struct drm_mm *mm, |
unsigned long start, |
unsigned long end, |
bool best_match) |
{ |
return drm_mm_search_free_in_range_generic(mm, size, alignment, color, |
start, end, best_match); |
} |
extern int drm_mm_init(struct drm_mm *mm, |
unsigned long start, |
unsigned long size); |
extern void drm_mm_takedown(struct drm_mm *mm); |
extern int drm_mm_clean(struct drm_mm *mm); |
/drivers/include/drm/drm_pciids.h |
---|
152,6 → 152,14 |
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
580,6 → 588,22 |
{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
/drivers/include/drm/drm_rect.h |
---|
0,0 → 1,167 |
/* |
* Copyright (C) 2011-2013 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
*/ |
#ifndef DRM_RECT_H |
#define DRM_RECT_H |
/** |
* DOC: rect utils |
* |
* Utility functions to help manage rectangular areas for |
* clipping, scaling, etc. calculations. |
*/ |
/**
 * struct drm_rect - two dimensional rectangle
 * @x1: horizontal starting coordinate (inclusive)
 * @x2: horizontal ending coordinate (exclusive)
 * @y1: vertical starting coordinate (inclusive)
 * @y2: vertical ending coordinate (exclusive)
 *
 * Half-open on both axes: a rectangle with x2 <= x1 or y2 <= y1 has
 * non-positive width/height and is treated as not visible (see
 * drm_rect_visible()).
 */
struct drm_rect {
	int x1, y1, x2, y2;
};
/** |
* drm_rect_adjust_size - adjust the size of the rectangle |
* @r: rectangle to be adjusted |
* @dw: horizontal adjustment |
* @dh: vertical adjustment |
* |
* Change the size of rectangle @r by @dw in the horizontal direction, |
* and by @dh in the vertical direction, while keeping the center |
* of @r stationary. |
* |
* Positive @dw and @dh increase the size, negative values decrease it. |
*/ |
static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) |
{ |
r->x1 -= dw >> 1; |
r->y1 -= dh >> 1; |
r->x2 += (dw + 1) >> 1; |
r->y2 += (dh + 1) >> 1; |
} |
/** |
* drm_rect_translate - translate the rectangle |
* @r: rectangle to be tranlated |
* @dx: horizontal translation |
* @dy: vertical translation |
* |
* Move rectangle @r by @dx in the horizontal direction, |
* and by @dy in the vertical direction. |
*/ |
static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) |
{ |
r->x1 += dx; |
r->y1 += dy; |
r->x2 += dx; |
r->y2 += dy; |
} |
/** |
* drm_rect_downscale - downscale a rectangle |
* @r: rectangle to be downscaled |
* @horz: horizontal downscale factor |
* @vert: vertical downscale factor |
* |
* Divide the coordinates of rectangle @r by @horz and @vert. |
*/ |
static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert) |
{ |
r->x1 /= horz; |
r->y1 /= vert; |
r->x2 /= horz; |
r->y2 /= vert; |
} |
/** |
* drm_rect_width - determine the rectangle width |
* @r: rectangle whose width is returned |
* |
* RETURNS: |
* The width of the rectangle. |
*/ |
static inline int drm_rect_width(const struct drm_rect *r) |
{ |
return r->x2 - r->x1; |
} |
/** |
* drm_rect_height - determine the rectangle height |
* @r: rectangle whose height is returned |
* |
* RETURNS: |
* The height of the rectangle. |
*/ |
static inline int drm_rect_height(const struct drm_rect *r) |
{ |
return r->y2 - r->y1; |
} |
/**
 * drm_rect_visible - determine if the rectangle is visible
 * @r: rectangle whose visibility is returned
 *
 * RETURNS:
 * %true if both width and height are positive, %false otherwise.
 */
static inline bool drm_rect_visible(const struct drm_rect *r)
{
	if (drm_rect_width(r) <= 0)
		return false;

	return drm_rect_height(r) > 0;
}
/** |
* drm_rect_equals - determine if two rectangles are equal |
* @r1: first rectangle |
* @r2: second rectangle |
* |
* RETURNS: |
* %true if the rectangles are equal, %false otherwise. |
*/ |
static inline bool drm_rect_equals(const struct drm_rect *r1, |
const struct drm_rect *r2) |
{ |
return r1->x1 == r2->x1 && r1->x2 == r2->x2 && |
r1->y1 == r2->y1 && r1->y2 == r2->y2; |
} |
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); |
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, |
const struct drm_rect *clip, |
int hscale, int vscale); |
int drm_rect_calc_hscale(const struct drm_rect *src, |
const struct drm_rect *dst, |
int min_hscale, int max_hscale); |
int drm_rect_calc_vscale(const struct drm_rect *src, |
const struct drm_rect *dst, |
int min_vscale, int max_vscale); |
int drm_rect_calc_hscale_relaxed(struct drm_rect *src, |
struct drm_rect *dst, |
int min_hscale, int max_hscale); |
int drm_rect_calc_vscale_relaxed(struct drm_rect *src, |
struct drm_rect *dst, |
int min_vscale, int max_vscale); |
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); |
#endif |
/drivers/include/drm/ttm/ttm_bo_api.h |
---|
44,7 → 44,11 |
struct drm_mm_node; |
struct reservation_object { |
struct mutex lock; |
}; |
/** |
* struct ttm_placement |
* |
153,7 → 157,6 |
* Lru lists may keep one refcount, the delayed delete list, and kref != 0 |
* keeps one refcount. When this refcount reaches zero, |
* the object is destroyed. |
* @event_queue: Queue for processes waiting on buffer object status change. |
* @mem: structure describing current placement. |
* @persistent_swap_storage: Usually the swap storage is deleted for buffers |
* pinned in physical memory. If this behaviour is not desired, this member |
164,12 → 167,6 |
* @lru: List head for the lru list. |
* @ddestroy: List head for the delayed destroy list. |
* @swap: List head for swap LRU list. |
* @val_seq: Sequence of the validation holding the @reserved lock. |
* Used to avoid starvation when many processes compete to validate the |
* buffer. This member is protected by the bo_device::lru_lock. |
* @seq_valid: The value of @val_seq is valid. This value is protected by |
* the bo_device::lru_lock. |
* @reserved: Deadlock-free lock used for synchronization state transitions. |
* @sync_obj: Pointer to a synchronization object. |
* @priv_flags: Flags describing buffer object internal state. |
* @vm_rb: Rb node for the vm rb tree. |
209,10 → 206,9 |
struct kref kref; |
struct kref list_kref; |
wait_queue_head_t event_queue; |
/** |
* Members protected by the bo::reserved lock. |
* Members protected by the bo::resv::reserved lock. |
*/ |
struct ttm_mem_reg mem; |
234,17 → 230,8 |
struct list_head ddestroy; |
struct list_head swap; |
struct list_head io_reserve_lru; |
uint32_t val_seq; |
bool seq_valid; |
/** |
* Members protected by the bdev::lru_lock |
* only when written to. |
*/ |
atomic_t reserved; |
/** |
* Members protected by struct buffer_object_device::fence_lock |
* In addition, setting sync_obj to anything else |
* than NULL requires bo::reserved to be held. This allows for |
272,6 → 259,9 |
uint32_t cur_placement; |
struct sg_table *sg; |
struct reservation_object *resv; |
struct reservation_object ttm_resv; |
}; |
/** |
725,18 → 715,4 |
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); |
/** |
* ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved |
* |
* @bo: The buffer object to check. |
* |
* This function returns an indication if a bo is reserved or not, and should |
* only be used to print an error when it is not from incorrect api usage, since |
* there's no guarantee that it is the caller that is holding the reservation. |
*/ |
static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo) |
{ |
return atomic_read(&bo->reserved); |
} |
#endif |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
33,6 → 33,7 |
#include <ttm/ttm_bo_api.h> |
#include <ttm/ttm_memory.h> |
#include <ttm/ttm_module.h> |
#include <ttm/ttm_placement.h> |
#include <drm/drm_mm.h> |
#include <drm/drm_global.h> |
//#include <linux/workqueue.h> |
39,6 → 40,8 |
//#include <linux/fs.h> |
#include <linux/spinlock.h> |
struct ww_acquire_ctx; |
struct ttm_backend_func { |
/** |
* struct ttm_backend_func member bind |
778,8 → 781,8 |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. |
* @use_sequence: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @sequence < (@bo)->sequence. |
* @use_ticket: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @ticket->stamp is older. |
* |
* Locks a buffer object for validation. (Or prevents other processes from |
* locking it for validation) and removes it from lru lists, while taking |
790,19 → 793,10 |
* to make room for a buffer already reserved. (Buffers are reserved before |
* they are evicted). The following algorithm prevents such deadlocks from |
* occurring: |
* 1) Buffers are reserved with the lru spinlock held. Upon successful |
* reservation they are removed from the lru list. This stops a reserved buffer |
* from being evicted. However the lru spinlock is released between the time |
* a buffer is selected for eviction and the time it is reserved. |
* Therefore a check is made when a buffer is reserved for eviction, that it |
* is still the first buffer in the lru list, before it is removed from the |
* list. @check_lru == 1 forces this check. If it fails, the function returns |
* -EINVAL, and the caller should then choose a new buffer to evict and repeat |
* the procedure. |
* 2) Processes attempting to reserve multiple buffers other than for eviction, |
* Processes attempting to reserve multiple buffers other than for eviction, |
* (typically execbuf), should first obtain a unique 32-bit |
* validation sequence number, |
* and call this function with @use_sequence == 1 and @sequence == the unique |
* and call this function with @use_ticket == 1 and @ticket->stamp == the unique |
* sequence number. If upon call of this function, the buffer object is already |
* reserved, the validation sequence is checked against the validation |
* sequence of the process currently reserving the buffer, |
817,84 → 811,102 |
* will eventually succeed, preventing both deadlocks and starvation. |
* |
* Returns: |
* -EAGAIN: The reservation may cause a deadlock. |
* -EDEADLK: The reservation may cause a deadlock. |
* Release all buffer reservations, wait for @bo to become unreserved and |
* try again. (only if use_sequence == 1). |
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
* a signal. Release all buffer reservations and return to user-space. |
* -EBUSY: The function needed to sleep, but @no_wait was true |
* -EDEADLK: Bo already reserved using @sequence. This error code will only |
* be returned if @use_sequence is set to true. |
* -EALREADY: Bo already reserved using @ticket. This error code will only |
* be returned if @use_ticket is set to true. |
*/ |
extern int ttm_bo_reserve(struct ttm_buffer_object *bo, |
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, |
bool interruptible, |
bool no_wait, bool use_sequence, uint32_t sequence); |
bool no_wait, bool use_ticket, |
struct ww_acquire_ctx *ticket) |
{ |
int ret; |
WARN_ON(!atomic_read(&bo->kref.refcount)); |
ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, |
ticket); |
if (likely(ret == 0)) |
ttm_bo_del_sub_from_lru(bo); |
return ret; |
} |
/** |
* ttm_bo_reserve_slowpath: |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
* @ticket: Ticket used to acquire the ww_mutex. |
* |
* This is called after ttm_bo_reserve returns -EAGAIN and we backed off |
* from all our other reservations. Because there are no other reservations |
* held by us, this function cannot deadlock any more. |
*/ |
extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, |
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, |
bool interruptible, |
bool no_wait, bool use_sequence, |
uint32_t sequence); |
struct ww_acquire_ctx *ticket) |
{ |
int ret = 0; |
WARN_ON(!atomic_read(&bo->kref.refcount)); |
if (interruptible) |
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, |
ticket); |
else |
ww_mutex_lock_slow(&bo->resv->lock, ticket); |
if (likely(ret == 0)) |
ttm_bo_del_sub_from_lru(bo); |
else if (ret == -EINTR) |
ret = -ERESTARTSYS; |
return ret; |
} |
/** |
* ttm_bo_unreserve_ticket |
* @bo: A pointer to a struct ttm_buffer_object. |
* @ticket: ww_acquire_ctx used for reserving |
* |
* Unreserve a previous reservation of @bo made with @ticket. |
*/ |
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); |
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, |
struct ww_acquire_ctx *t) |
{ |
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
spin_lock(&bo->glob->lru_lock); |
ttm_bo_add_to_lru(bo); |
spin_unlock(&bo->glob->lru_lock); |
} |
ww_mutex_unlock(&bo->resv->lock); |
} |
/** |
* ttm_bo_unreserve |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Unreserve a previous reservation of @bo. |
*/ |
/*
 * Release a ticket-less reservation of @bo (one not taken as part of
 * an execbuf ww_acquire sequence).  Delegates to
 * ttm_bo_unreserve_ticket() with a NULL ticket.
 *
 * NOTE(review): the stale pre-diff `extern void
 * ttm_bo_unreserve_locked(...)` declaration that was fused onto this
 * definition has been dropped.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_unreserve_ticket(bo, NULL);
}
/** |
* ttm_bo_wait_unreserved |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Wait for a struct ttm_buffer_object to become unreserved. |
* This is typically used in the execbuf code to relax cpu-usage when |
* a potential deadlock condition backoff. |
*/ |
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, |
bool interruptible); |
/* |
* ttm_bo_util.c |
*/ |
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem); |
void ttm_mem_io_free(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem); |
/** |
* ttm_bo_move_ttm |
* |
/drivers/include/drm/ttm/ttm_execbuf_util.h |
---|
57,6 → 57,7 |
/** |
* function ttm_eu_backoff_reservation |
* |
* @ticket: ww_acquire_ctx from reserve call |
* @list: thread private list of ttm_validate_buffer structs. |
* |
* Undoes all buffer validation reservations for bos pointed to by |
63,11 → 64,13 |
* the list entries. |
*/ |
extern void ttm_eu_backoff_reservation(struct list_head *list); |
extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, |
struct list_head *list); |
/** |
* function ttm_eu_reserve_buffers |
* |
* @ticket: [out] ww_acquire_ctx returned by call. |
* @list: thread private list of ttm_validate_buffer structs. |
* |
* Tries to reserve bos pointed to by the list entries for validation. |
90,11 → 93,13 |
* has failed. |
*/ |
extern int ttm_eu_reserve_buffers(struct list_head *list); |
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, |
struct list_head *list); |
/** |
* function ttm_eu_fence_buffer_objects. |
* |
* @ticket: ww_acquire_ctx from reserve call |
* @list: thread private list of ttm_validate_buffer structs. |
* @sync_obj: The new sync object for the buffers. |
* |
104,6 → 109,7 |
* |
*/ |
extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); |
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
struct list_head *list, void *sync_obj); |
#endif |
/drivers/include/drm/ttm/ttm_object.h |
---|
40,7 → 40,7 |
#include <linux/list.h> |
#include <drm/drm_hashtab.h> |
#include <linux/kref.h> |
#include <linux/rcupdate.h> |
//#include <linux/rcupdate.h> |
#include <ttm/ttm_memory.h> |
/** |
/drivers/include/drm/vmwgfx_drm.h |
---|
0,0 → 1,790 |
/************************************************************************** |
* |
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#ifndef __VMWGFX_DRM_H__ |
#define __VMWGFX_DRM_H__ |
#define DRM_VMW_MAX_SURFACE_FACES 6 |
#define DRM_VMW_MAX_MIP_LEVELS 24 |
#define DRM_VMW_GET_PARAM 0 |
#define DRM_VMW_ALLOC_DMABUF 1 |
#define DRM_VMW_UNREF_DMABUF 2 |
#define DRM_VMW_CURSOR_BYPASS 3 |
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ |
#define DRM_VMW_CONTROL_STREAM 4 |
#define DRM_VMW_CLAIM_STREAM 5 |
#define DRM_VMW_UNREF_STREAM 6 |
/* guarded by DRM_VMW_PARAM_3D == 1 */ |
#define DRM_VMW_CREATE_CONTEXT 7 |
#define DRM_VMW_UNREF_CONTEXT 8 |
#define DRM_VMW_CREATE_SURFACE 9 |
#define DRM_VMW_UNREF_SURFACE 10 |
#define DRM_VMW_REF_SURFACE 11 |
#define DRM_VMW_EXECBUF 12 |
#define DRM_VMW_GET_3D_CAP 13 |
#define DRM_VMW_FENCE_WAIT 14 |
#define DRM_VMW_FENCE_SIGNALED 15 |
#define DRM_VMW_FENCE_UNREF 16 |
#define DRM_VMW_FENCE_EVENT 17 |
#define DRM_VMW_PRESENT 18 |
#define DRM_VMW_PRESENT_READBACK 19 |
#define DRM_VMW_UPDATE_LAYOUT 20 |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_PARAM - get device information. |
* |
* DRM_VMW_PARAM_FIFO_OFFSET: |
* Offset to use to map the first page of the FIFO read-only. |
* The fifo is mapped using the mmap() system call on the drm device. |
* |
* DRM_VMW_PARAM_OVERLAY_IOCTL: |
* Does the driver support the overlay ioctl. |
*/ |
#define DRM_VMW_PARAM_NUM_STREAMS 0 |
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 |
#define DRM_VMW_PARAM_3D 2 |
#define DRM_VMW_PARAM_HW_CAPS 3 |
#define DRM_VMW_PARAM_FIFO_CAPS 4 |
#define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
/** |
* struct drm_vmw_getparam_arg |
* |
* @value: Returned value. //Out |
* @param: Parameter to query. //In. |
* |
* Argument to the DRM_VMW_GET_PARAM Ioctl. |
*/ |
struct drm_vmw_getparam_arg { |
uint64_t value; |
uint32_t param; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_CONTEXT - Create a host context. |
* |
* Allocates a device unique context id, and queues a create context command |
* for the host. Does not wait for host completion. |
*/ |
/** |
* struct drm_vmw_context_arg |
* |
* @cid: Device unique context ID. |
* |
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. |
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. |
*/ |
struct drm_vmw_context_arg { |
int32_t cid; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_CONTEXT - Free a host context. |
* |
* Frees a global context id, and queues a destroy host command for the host. |
* Does not wait for host completion. The context ID can be used directly |
* in the command stream and shows up as the same context ID on the host. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_SURFACE - Create a host surface. |
* |
* Allocates a device unique surface id, and queues a create surface command |
* for the host. Does not wait for host completion. The surface ID can be |
* used directly in the command stream and shows up as the same surface |
* ID on the host. |
*/ |
/** |
* struct drm_wmv_surface_create_req |
* |
* @flags: Surface flags as understood by the host. |
* @format: Surface format as understood by the host. |
* @mip_levels: Number of mip levels for each face. |
* An unused face should have 0 encoded. |
* @size_addr: Address of a user-space array of struct drm_vmw_size |
* cast to an uint64_t for 32-64 bit compatibility. |
* The size of the array should equal the total number of mipmap levels. |
* @shareable: Boolean whether other clients (as identified by file descriptors) |
* may reference this surface. |
* @scanout: Boolean whether the surface is intended to be used as a |
* scanout. |
* |
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl. |
* Output data from the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_create_req { |
uint32_t flags; |
uint32_t format; |
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; |
uint64_t size_addr; |
int32_t shareable; |
int32_t scanout; |
}; |
/** |
* struct drm_wmv_surface_arg |
* |
* @sid: Surface id of created surface or surface to destroy or reference. |
* |
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. |
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. |
* Input argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_arg { |
int32_t sid; |
uint32_t pad64; |
}; |
/** |
* struct drm_vmw_size ioctl. |
* |
* @width - mip level width |
* @height - mip level height |
* @depth - mip level depth |
* |
* Description of a mip level. |
* Input data to the DRM_WMW_CREATE_SURFACE Ioctl. |
*/ |
struct drm_vmw_size { |
uint32_t width; |
uint32_t height; |
uint32_t depth; |
uint32_t pad64; |
}; |
/** |
* union drm_vmw_surface_create_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_CREATE_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_create_arg { |
struct drm_vmw_surface_arg rep; |
struct drm_vmw_surface_create_req req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_REF_SURFACE - Reference a host surface. |
* |
* Puts a reference on a host surface with a give sid, as previously |
* returned by the DRM_VMW_CREATE_SURFACE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface ID in the command |
* stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* in the DRM_VMW_CREATE_SURFACE ioctl. |
*/ |
/** |
* union drm_vmw_surface_reference_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_reference_arg { |
struct drm_vmw_surface_create_req rep; |
struct drm_vmw_surface_arg req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SURFACE - Unreference a host surface. |
* |
* Clear a reference previously put on a host surface. |
* When all references are gone, including the one implicitly placed |
* on creation, |
* a destroy surface command will be queued for the host. |
* Does not wait for completion. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_EXECBUF |
* |
* Submit a command buffer for execution on the host, and return a |
* fence seqno that when signaled, indicates that the command buffer has |
* executed. |
*/ |
/** |
* struct drm_vmw_execbuf_arg |
* |
* @commands: User-space address of a command buffer cast to an uint64_t. |
* @command-size: Size in bytes of the command buffer. |
* @throttle-us: Sleep until software is less than @throttle_us |
* microseconds ahead of hardware. The driver may round this value |
* to the nearest kernel tick. |
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an |
* uint64_t. |
* @version: Allows expanding the execbuf ioctl parameters without breaking |
* backwards compatibility, since user-space will always tell the kernel |
* which version it uses. |
* @flags: Execbuf flags. None currently. |
* |
* Argument to the DRM_VMW_EXECBUF Ioctl. |
*/ |
#define DRM_VMW_EXECBUF_VERSION 1 |
struct drm_vmw_execbuf_arg {
	uint64_t commands;	/* User-space address of the command buffer, cast to u64 */
	uint32_t command_size;	/* Size in bytes of the command buffer */
	uint32_t throttle_us;	/* Sleep until sw is < this many us ahead of hw */
	uint64_t fence_rep;	/* User-space struct drm_vmw_fence_rep pointer, cast to u64 */
	uint32_t version;	/* DRM_VMW_EXECBUF_VERSION spoken by user-space */
	uint32_t flags;		/* Execbuf flags; none currently defined */
};
/** |
* struct drm_vmw_fence_rep |
* |
* @handle: Fence object handle for fence associated with a command submission. |
* @mask: Fence flags relevant for this fence object. |
* @seqno: Fence sequence number in fifo. A fence object with a lower |
* seqno will signal the EXEC flag before a fence object with a higher |
* seqno. This can be used by user-space to avoid kernel calls to determine |
* whether a fence has signaled the EXEC flag. Note that @seqno will |
* wrap at 32-bit. |
* @passed_seqno: The highest seqno number processed by the hardware |
* so far. This can be used to mark user-space fence objects as signaled, and |
* to determine whether a fence seqno might be stale. |
* @error: This member should've been set to -EFAULT on submission. |
* The following actions should be take on completion: |
* error == -EFAULT: Fence communication failed. The host is synchronized. |
* Use the last fence id read from the FIFO fence register. |
* error != 0 && error != -EFAULT: |
* Fence submission failed. The host is synchronized. Use the fence_seq member. |
* error == 0: All is OK, The host may not be synchronized. |
* Use the fence_seq member. |
* |
* Input / Output data to the DRM_VMW_EXECBUF Ioctl. |
*/ |
struct drm_vmw_fence_rep {
	uint32_t handle;	/* Fence object handle for this submission */
	uint32_t mask;		/* Fence flags relevant for this fence object */
	uint32_t seqno;		/* Fence seqno in the fifo; wraps at 32 bits */
	uint32_t passed_seqno;	/* Highest seqno processed by hardware so far */
	uint32_t pad64;		/* Explicit padding to a 64-bit multiple */
	int32_t error;		/* Submission status; see protocol in kernel-doc above */
};
/*************************************************************************/ |
/** |
* DRM_VMW_ALLOC_DMABUF |
* |
* Allocate a DMA buffer that is visible also to the host. |
* NOTE: The buffer is |
* identified by a handle and an offset, which are private to the guest, but |
* useable in the command stream. The guest kernel may translate these |
* and patch up the command stream accordingly. In the future, the offset may |
* be zero at all times, or it may disappear from the interface before it is |
* fixed. |
* |
* The DMA buffer may stay user-space mapped in the guest at all times, |
* and is thus suitable for sub-allocation. |
* |
* DMA buffers are mapped using the mmap() syscall on the drm device. |
*/ |
/** |
* struct drm_vmw_alloc_dmabuf_req |
* |
* @size: Required minimum size of the buffer. |
* |
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_alloc_dmabuf_req { |
uint32_t size; |
uint32_t pad64; |
}; |
/** |
* struct drm_vmw_dmabuf_rep |
* |
* @map_handle: Offset to use in the mmap() call used to map the buffer. |
* @handle: Handle unique to this buffer. Used for unreferencing. |
* @cur_gmr_id: GMR id to use in the command stream when this buffer is |
* referenced. See note above. |
* @cur_gmr_offset: Offset to use in the command stream when this buffer is |
* referenced. See note above. |
* |
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_dmabuf_rep { |
uint64_t map_handle; |
uint32_t handle; |
uint32_t cur_gmr_id; |
uint32_t cur_gmr_offset; |
uint32_t pad64; |
}; |
/** |
* union drm_vmw_dmabuf_arg |
* |
* @req: Input data as described above. |
* @rep: Output data as described above. |
* |
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
union drm_vmw_alloc_dmabuf_arg { |
struct drm_vmw_alloc_dmabuf_req req; |
struct drm_vmw_dmabuf_rep rep; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer. |
* |
*/ |
/** |
* struct drm_vmw_unref_dmabuf_arg |
* |
* @handle: Handle indicating what buffer to free. Obtained from the |
* DRM_VMW_ALLOC_DMABUF Ioctl. |
* |
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl. |
*/ |
struct drm_vmw_unref_dmabuf_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CONTROL_STREAM - Control overlays, aka streams. |
* |
* This IOCTL controls the overlay units of the svga device. |
* The SVGA overlay units does not work like regular hardware units in |
* that they do not automatically read back the contents of the given dma |
* buffer. But instead only read back for each call to this ioctl, and |
* at any point between this call being made and a following call that |
* either changes the buffer or disables the stream. |
*/ |
/** |
* struct drm_vmw_rect |
* |
* Defines a rectangle. Used in the overlay ioctl to define |
* source and destination rectangle. |
*/ |
struct drm_vmw_rect { |
int32_t x; |
int32_t y; |
uint32_t w; |
uint32_t h; |
}; |
/** |
* struct drm_vmw_control_stream_arg |
* |
* @stream_id: Stream to control |
* @enabled: If false all following arguments are ignored. |
* @handle: Handle to buffer for getting data from. |
* @format: Format of the overlay as understood by the host. |
* @width: Width of the overlay. |
* @height: Height of the overlay. |
* @size: Size of the overlay in bytes. |
* @pitch: Array of pitches, the two last are only used for YUV12 formats. |
* @offset: Offset from start of dma buffer to overlay. |
* @src: Source rect, must be within the defined area above. |
* @dst: Destination rect, x and y may be negative. |
* |
* Argument to the DRM_VMW_CONTROL_STREAM Ioctl. |
*/ |
struct drm_vmw_control_stream_arg {
	uint32_t stream_id;	/* Stream (overlay unit) to control */
	uint32_t enabled;	/* If false, all following arguments are ignored */
	uint32_t flags;		/* NOTE(review): not in kernel-doc above — confirm semantics */
	uint32_t color_key;	/* NOTE(review): not in kernel-doc above — confirm semantics */
	uint32_t handle;	/* Handle of the dma buffer to read data from */
	uint32_t offset;	/* Offset from start of dma buffer to overlay */
	int32_t format;		/* Overlay format as understood by the host */
	uint32_t size;		/* Size of the overlay in bytes */
	uint32_t width;		/* Width of the overlay */
	uint32_t height;	/* Height of the overlay */
	uint32_t pitch[3];	/* Pitches; the two last are only used for YUV12 formats */
	uint32_t pad64;		/* Explicit padding to a 64-bit multiple */
	struct drm_vmw_rect src;	/* Source rect, within the defined area above */
	struct drm_vmw_rect dst;	/* Destination rect; x and y may be negative */
};
/*************************************************************************/ |
/** |
* DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass. |
* |
*/ |
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0) |
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1) |
/** |
* struct drm_vmw_cursor_bypass_arg |
* |
* @flags: Flags. |
* @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed. |
* @xpos: X position of cursor. |
* @ypos: Y position of cursor. |
* @xhot: X hotspot. |
* @yhot: Y hotspot. |
* |
* Argument to the DRM_VMW_CURSOR_BYPASS Ioctl. |
*/ |
struct drm_vmw_cursor_bypass_arg { |
uint32_t flags; |
uint32_t crtc_id; |
int32_t xpos; |
int32_t ypos; |
int32_t xhot; |
int32_t yhot; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CLAIM_STREAM - Claim a single stream. |
*/ |
/** |
* struct drm_vmw_context_arg |
* |
* @stream_id: Device unique context ID. |
* |
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. |
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. |
*/ |
struct drm_vmw_stream_arg { |
uint32_t stream_id; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_STREAM - Unclaim a stream. |
* |
* Return a single stream that was claimed by this process. Also makes |
* sure that the stream has been stopped. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_3D_CAP |
* |
* Read 3D capabilities from the FIFO |
* |
*/ |
/** |
* struct drm_vmw_get_3d_cap_arg |
* |
* @buffer: Pointer to a buffer for capability data, cast to an uint64_t |
* @size: Max size to copy |
* |
* Input argument to the DRM_VMW_GET_3D_CAP_IOCTL |
* ioctls. |
*/ |
struct drm_vmw_get_3d_cap_arg { |
uint64_t buffer; |
uint32_t max_size; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_WAIT |
* |
* Waits for a fence object to signal. The wait is interruptible, so that |
* signals may be delivered during the interrupt. The wait may timeout, |
* in which case the calls returns -EBUSY. If the wait is restarted, |
* that is restarting without resetting @cookie_valid to zero, |
* the timeout is computed from the first call. |
* |
* The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait |
* on: |
* DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command |
* stream |
* have executed. |
* DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish |
* commands |
* in the buffer given to the EXECBUF ioctl returning the fence object handle |
* are available to user-space. |
* |
* DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the |
* fence wait ioctl returns 0, the fence object has been unreferenced after |
* the wait. |
*/ |
#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0) |
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1) |
#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0) |
/** |
* struct drm_vmw_fence_wait_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @cookie_valid: Must be reset to 0 on first call. Left alone on restart. |
* @kernel_cookie: Set to 0 on first call. Left alone on restart. |
* @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout. |
* @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick |
* before returning. |
* @flags: Fence flags to wait on. |
* @wait_options: Options that control the behaviour of the wait ioctl. |
* |
* Input argument to the DRM_VMW_FENCE_WAIT ioctl. |
*/ |
struct drm_vmw_fence_wait_arg {
	uint32_t handle;	/* Fence object handle from DRM_VMW_EXECBUF */
	int32_t cookie_valid;	/* Reset to 0 on first call; left alone on restart */
	uint64_t kernel_cookie;	/* Set to 0 on first call; left alone on restart */
	uint64_t timeout_us;	/* Wait timeout in microseconds; 0 = indefinite */
	int32_t lazy;		/* 1 if timing is not critical (may overshoot a tick) */
	int32_t flags;		/* DRM_VMW_FENCE_FLAG_* to wait on */
	int32_t wait_options;	/* DRM_VMW_WAIT_OPTION_* behaviour flags */
	int32_t pad64;		/* Explicit padding to a 64-bit multiple */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_SIGNALED |
* |
* Checks if a fence object is signaled. |
*/ |
/** |
* struct drm_vmw_fence_signaled_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl |
* @signaled: Out: Flags signaled. |
* @sequence: Out: Highest sequence passed so far. Can be used to signal the |
* EXEC flag of user-space fence objects. |
* |
* Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF |
* ioctls. |
*/ |
struct drm_vmw_fence_signaled_arg {
	uint32_t handle;	/* Fence object handle from DRM_VMW_EXECBUF */
	uint32_t flags;		/* In: fence flags to check */
	int32_t signaled;	/* Out: nonzero if the fence has signaled */
	uint32_t passed_seqno;	/* Out: highest seqno passed so far */
	uint32_t signaled_flags;	/* Out: NOTE(review) not in kernel-doc — presumably the flags that signaled; verify */
	uint32_t pad64;		/* Explicit padding to a 64-bit multiple */
};
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_UNREF |
* |
* Unreferences a fence object, and causes it to be destroyed if there are no |
* other references to it. |
* |
*/ |
/** |
* struct drm_vmw_fence_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* |
* Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.. |
*/ |
struct drm_vmw_fence_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_EVENT |
* |
* Queues an event on a fence to be delivered on the drm character device |
* when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag. |
* Optionally the approximate time when the fence signaled is |
* given by the event. |
*/ |
/* |
* The event type |
*/ |
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000 |
struct drm_vmw_event_fence { |
struct drm_event base; |
uint64_t user_data; |
uint32_t tv_sec; |
uint32_t tv_usec; |
}; |
/* |
* Flags that may be given to the command. |
*/ |
/* Request fence signaled time on the event. */ |
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0) |
/** |
* struct drm_vmw_fence_event_arg |
* |
* @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if |
* the fence is not supposed to be referenced by user-space. |
* @user_info: Info to be delivered with the event. |
* @handle: Attach the event to this fence only. |
* @flags: A set of flags as defined above. |
*/ |
struct drm_vmw_fence_event_arg { |
uint64_t fence_rep; |
uint64_t user_data; |
uint32_t handle; |
uint32_t flags; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT |
* |
* Executes an SVGA present on a given fb for a given surface. The surface |
* is placed on the framebuffer. Cliprects are given relative to the given |
* point (the point designated by dest_{x|y}). |
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: framebuffer id to present / read back from. |
* @sid: Surface id to present from. |
* @dest_x: X placement coordinate for surface. |
* @dest_y: Y placement coordinate for surface. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @num_clips: Number of cliprects given relative to the framebuffer origin, |
* in the same coordinate space as the frame buffer. |
* @pad64: Unused 64-bit padding. |
* |
* Input argument to the DRM_VMW_PRESENT ioctl. |
*/ |
struct drm_vmw_present_arg { |
uint32_t fb_id; |
uint32_t sid; |
int32_t dest_x; |
int32_t dest_y; |
uint64_t clips_ptr; |
uint32_t num_clips; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT_READBACK |
* |
* Executes an SVGA present readback from a given fb to the dma buffer |
* currently bound as the fb. If there is no dma buffer bound to the fb, |
* an error will be returned. |
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: fb_id to present / read back from. |
* @num_clips: Number of cliprects. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. |
* If this member is NULL, then the ioctl should not return a fence. |
*/ |
struct drm_vmw_present_readback_arg { |
uint32_t fb_id; |
uint32_t num_clips; |
uint64_t clips_ptr; |
uint64_t fence_rep; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UPDATE_LAYOUT - Update layout |
* |
* Updates the preferred modes and connection status for connectors. The |
* command consists of one drm_vmw_update_layout_arg pointing to an array |
* of num_outputs drm_vmw_rect's. |
*/ |
/** |
* struct drm_vmw_update_layout_arg |
* |
* @num_outputs: number of active connectors |
* @rects: pointer to array of drm_vmw_rect cast to an uint64_t |
* |
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. |
*/ |
struct drm_vmw_update_layout_arg { |
uint32_t num_outputs; |
uint32_t pad64; |
uint64_t rects; |
}; |
#endif |
/drivers/include/linux/compiler-gcc4.h |
---|
13,7 → 13,7 |
#define __must_check __attribute__((warn_unused_result)) |
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) |
#if GCC_VERSION >= 40100 |
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 |
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
/drivers/include/linux/ctype.h |
---|
61,4 → 61,10 |
return c | 0x20; |
} |
/*
 * isodigit - fast test for an octal digit.
 * Returns nonzero iff @c is one of '0'..'7'.
 *
 * (c - '0') is computed in int and viewed as unsigned: any character
 * below '0' wraps to a huge value, so a single compare covers both ends
 * of the range.
 */
static inline int isodigit(const char c)
{
	return (unsigned int)(c - '0') < 8u;
}
#endif |
/drivers/include/linux/err.h |
---|
24,17 → 24,17 |
return (void *) error; |
} |
/* Extract the error code that ERR_PTR() packed into @ptr.
 * NOTE(review): the stale pre-diff declaration without __force that was
 * left interleaved above this one has been dropped. */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
	return (long) ptr;
}
/* Nonzero when @ptr is an ERR_PTR()-encoded error value.
 * NOTE(review): the stale pre-diff declaration without __force that was
 * left interleaved above this one has been dropped. */
static inline long __must_check IS_ERR(__force const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
/* Nonzero when @ptr is NULL or an ERR_PTR()-encoded error value.
 * NOTE(review): the stale pre-diff declaration without __force that was
 * left interleaved above this one has been dropped. */
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
46,13 → 46,13 |
* Explicitly cast an error-valued pointer to another pointer type in such a |
* way as to make it clear that's what's going on. |
*/ |
/* Explicitly cast an error-valued pointer to another pointer type,
 * making it clear that's what's going on.
 * NOTE(review): the stale pre-diff declaration without __force that was
 * left interleaved above this one has been dropped. */
static inline void * __must_check ERR_CAST(__force const void *ptr)
{
	/* cast away the const */
	return (void *) ptr;
}
static inline int __must_check PTR_RET(const void *ptr) |
static inline int __must_check PTR_RET(__force const void *ptr) |
{ |
if (IS_ERR(ptr)) |
return PTR_ERR(ptr); |
/drivers/include/linux/hash.h |
---|
0,0 → 1,81 |
#ifndef _LINUX_HASH_H |
#define _LINUX_HASH_H |
/* Fast hashing routine for ints, longs and pointers. |
(C) 2002 Nadia Yvette Chambers, IBM */ |
/* |
* Knuth recommends primes in approximately golden ratio to the maximum |
* integer representable by a machine word for multiplicative hashing. |
* Chuck Lever verified the effectiveness of this technique: |
* http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf |
* |
* These primes are chosen to be bit-sparse, that is operations on |
* them can use shifts and additions instead of multiplications for |
* machines where multiplications are slow. |
*/ |
#include <asm/types.h> |
#include <linux/compiler.h> |
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL |
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ |
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL |
#if BITS_PER_LONG == 32 |
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 |
#define hash_long(val, bits) hash_32(val, bits) |
#elif BITS_PER_LONG == 64 |
#define hash_long(val, bits) hash_64(val, bits) |
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 |
#else |
#error Wordsize not 32 or 64 |
#endif |
/*
 * hash_64 - Knuth multiplicative hash of a 64-bit value.
 * @val:  value to hash
 * @bits: width of the wanted result (result < 2^bits)
 *
 * The original open-coded shift/add chain computes exactly
 * val * GOLDEN_RATIO_PRIME_64 modulo 2^64
 * (2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1, term for term);
 * written here as the plain multiplication, which is behaviourally
 * identical under unsigned 64-bit wraparound.
 */
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val * GOLDEN_RATIO_PRIME_64;

	/* The high bits are the best mixed, so keep those. */
	return hash >> (64 - bits);
}
/*
 * hash_32 - Knuth multiplicative hash of a 32-bit value.
 * @val:  value to hash
 * @bits: width of the wanted result (result < 2^bits)
 *
 * Multiply by the bit-sparse golden-ratio prime (on some cpus the
 * multiply is fast, on others gcc will lower it to shifts) and keep
 * the top @bits bits, which are the best mixed.
 */
static inline u32 hash_32(u32 val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}
static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) |
{ |
return hash_long((unsigned long)ptr, bits); |
} |
static inline u32 hash32_ptr(const void *ptr) |
{ |
unsigned long val = (unsigned long)ptr; |
#if BITS_PER_LONG == 64 |
val ^= (val >> 32); |
#endif |
return (u32)val; |
} |
#endif /* _LINUX_HASH_H */ |
/drivers/include/linux/i2c.h |
---|
55,7 → 55,6 |
* struct i2c_driver - represent an I2C device driver |
* @class: What kind of i2c device we instantiate (for detect) |
* @attach_adapter: Callback for bus addition (deprecated) |
* @detach_adapter: Callback for bus removal (deprecated) |
* @probe: Callback for device binding |
* @remove: Callback for device unbinding |
* @shutdown: Callback for device shutdown |
92,12 → 91,10 |
struct i2c_driver { |
unsigned int class; |
/* Notifies the driver that a new bus has appeared or is about to be |
* removed. You should avoid using this, it will be removed in a |
* near future. |
/* Notifies the driver that a new bus has appeared. You should avoid |
* using this, it will be removed in a near future. |
*/ |
int (*attach_adapter)(struct i2c_adapter *) __deprecated; |
int (*detach_adapter)(struct i2c_adapter *) __deprecated; |
/* Standard driver model interfaces */ |
int (*probe)(struct i2c_client *, const struct i2c_device_id *); |
192,9 → 189,6 |
unsigned short addr; |
void *platform_data; |
struct dev_archdata *archdata; |
#ifdef CONFIG_OF |
struct device_node *of_node; |
#endif |
int irq; |
}; |
/drivers/include/linux/idr.h |
---|
48,6 → 48,7 |
struct idr_layer *id_free; |
int layers; /* only valid w/o concurrent changes */ |
int id_free_cnt; |
int cur; /* current pos for cyclic allocation */ |
spinlock_t lock; |
}; |
79,10 → 80,9 |
*/ |
void *idr_find_slowpath(struct idr *idp, int id); |
int idr_pre_get(struct idr *idp, gfp_t gfp_mask); |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
void idr_preload(gfp_t gfp_mask); |
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); |
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); |
int idr_for_each(struct idr *idp, |
int (*fn)(int id, void *p, void *data), void *data); |
void *idr_get_next(struct idr *idp, int *nextid); |
105,7 → 105,7 |
/** |
* idr_find - return pointer for given id |
* @idp: idr handle |
* @idr: idr handle |
* @id: lookup key |
* |
* Return the pointer given the id it has been registered with. A %NULL |
126,31 → 126,69 |
} |
/** |
* idr_get_new - allocate new idr entry |
* idr_for_each_entry - iterate over an idr's elements of a given type |
* @idp: idr handle |
* @entry: the type * to use as cursor |
* @id: id entry's key |
* |
* @entry and @id do not need to be initialized before the loop, and |
* after normal termination @entry is left with the value NULL. This |
* is convenient for a "not found" value. |
*/ |
#define idr_for_each_entry(idp, entry, id) \ |
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) |
/* |
* Don't use the following functions. These exist only to suppress |
* deprecated warnings on EXPORT_SYMBOL()s. |
*/ |
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); |
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
void __idr_remove_all(struct idr *idp); |
/** |
* idr_pre_get - reserve resources for idr allocation |
* @idp: idr handle |
* @gfp_mask: memory allocation flags |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
return __idr_pre_get(idp, gfp_mask); |
} |
/** |
* idr_get_new_above - allocate new idr entry above or equal to a start id |
* @idp: idr handle |
* @ptr: pointer you want associated with the id |
* @starting_id: id to start search at |
* @id: pointer to the allocated handle |
* |
* Simple wrapper around idr_get_new_above() w/ @starting_id of zero. |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int idr_get_new(struct idr *idp, void *ptr, int *id) |
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, |
int starting_id, int *id) |
{ |
return idr_get_new_above(idp, ptr, 0, id); |
return __idr_get_new_above(idp, ptr, starting_id, id); |
} |
/** |
* idr_for_each_entry - iterate over an idr's elements of a given type |
* idr_get_new - allocate new idr entry |
* @idp: idr handle |
* @entry: the type * to use as cursor |
* @id: id entry's key |
* @ptr: pointer you want associated with the id |
* @id: pointer to the allocated handle |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
#define idr_for_each_entry(idp, entry, id) \ |
for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \ |
entry != NULL; \ |
++id, entry = (typeof(entry))idr_get_next((idp), &(id))) |
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) |
{ |
return __idr_get_new_above(idp, ptr, 0, id); |
} |
void __idr_remove_all(struct idr *idp); /* don't use */ |
/** |
* idr_remove_all - remove all ids from the given idr tree |
* @idp: idr handle |
193,8 → 231,22 |
void ida_destroy(struct ida *ida); |
void ida_init(struct ida *ida); |
void __init idr_init_cache(void); |
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, |
gfp_t gfp_mask); |
void ida_simple_remove(struct ida *ida, unsigned int id); |
/** |
* ida_get_new - allocate new ID |
* @ida: idr handle |
* @p_id: pointer to the allocated handle |
* |
* Simple wrapper around ida_get_new_above() w/ @starting_id of zero. |
*/ |
static inline int ida_get_new(struct ida *ida, int *p_id) |
{ |
return ida_get_new_above(ida, 0, p_id); |
} |
void __init idr_init_cache(void); |
#endif /* __IDR_H__ */ |
/drivers/include/linux/jiffies.h |
---|
130,6 → 130,10 |
((__s64)(a) - (__s64)(b) >= 0)) |
#define time_before_eq64(a,b) time_after_eq64(b,a) |
#define time_in_range64(a, b, c) \ |
(time_after_eq64(a, b) && \ |
time_before_eq64(a, c)) |
/* |
* These four macros compare jiffies and 'a' for convenience. |
*/ |
/drivers/include/linux/list.h |
---|
361,22 → 361,22 |
list_entry((ptr)->next, type, member) |
/** |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
*/ |
#define list_for_each(pos, head) \ |
for (pos = (head)->next; pos != (head); pos = pos->next) |
#define list_first_entry_or_null(ptr, type, member) \ |
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) |
/** |
* __list_for_each - iterate over a list |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
* |
* This variant doesn't differ from list_for_each() any more. |
* We don't do prefetching in either case. |
*/ |
#define __list_for_each(pos, head) \ |
#define list_for_each(pos, head) \ |
for (pos = (head)->next; pos != (head); pos = pos->next) |
/** |
665,54 → 665,51 |
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
pos = n) |
#define hlist_entry_safe(ptr, type, member) \ |
({ typeof(ptr) ____ptr = (ptr); \ |
____ptr ? hlist_entry(____ptr, type, member) : NULL; \ |
}) |
/** |
* hlist_for_each_entry - iterate over list of given type |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry(tpos, pos, head, member) \ |
for (pos = (head)->first; \ |
pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry(pos, head, member) \ |
for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ |
pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_continue(tpos, pos, member) \ |
for (pos = (pos)->next; \ |
pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry_continue(pos, member) \ |
for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ |
pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_from - iterate over a hlist continuing from current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from(tpos, pos, member) \ |
for (; pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry_from(pos, member) \ |
for (; pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @n: another &struct hlist_node to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ |
for (pos = (head)->first; \ |
pos && ({ n = pos->next; 1; }) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = n) |
#define hlist_for_each_entry_safe(pos, n, head, member) \ |
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ |
pos && ({ n = pos->member.next; 1; }); \ |
pos = hlist_entry_safe(n, typeof(*pos), member)) |
#endif |
/drivers/include/linux/math64.h |
---|
7,6 → 7,7 |
#if BITS_PER_LONG == 64 |
#define div64_long(x,y) div64_s64((x),(y)) |
#define div64_ul(x, y) div64_u64((x), (y)) |
/** |
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder |
48,6 → 49,7 |
#elif BITS_PER_LONG == 32 |
#define div64_long(x,y) div_s64((x),(y)) |
#define div64_ul(x, y) div_u64((x), (y)) |
#ifndef div_u64_rem |
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) |
/drivers/include/linux/mod_devicetable.h |
---|
33,8 → 33,7 |
__u32 model_id; |
__u32 specifier_id; |
__u32 version; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
147,8 → 146,7 |
__u16 group; |
__u32 vendor; |
__u32 product; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
/* s390 CCW devices */ |
172,8 → 170,6 |
struct ap_device_id { |
__u16 match_flags; /* which fields to match against */ |
__u8 dev_type; /* device type */ |
__u8 pad1; |
__u32 pad2; |
kernel_ulong_t driver_info; |
}; |
183,13 → 179,10 |
struct css_device_id { |
__u8 match_flags; |
__u8 type; /* subchannel type */ |
__u16 pad2; |
__u32 pad3; |
kernel_ulong_t driver_data; |
}; |
#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ |
/* to workaround crosscompile issues */ |
#define ACPI_ID_LEN 9 |
struct acpi_device_id { |
__u8 id[ACPI_ID_LEN]; |
230,11 → 223,7 |
char name[32]; |
char type[32]; |
char compatible[128]; |
#ifdef __KERNEL__ |
const void *data; |
#else |
kernel_ulong_t data; |
#endif |
}; |
/* VIO */ |
259,24 → 248,14 |
/* for pseudo multi-function devices */ |
__u8 device_no; |
__u32 prod_id_hash[4] |
__attribute__((aligned(sizeof(__u32)))); |
__u32 prod_id_hash[4]; |
/* not matched against in kernelspace*/ |
#ifdef __KERNEL__ |
const char * prod_id[4]; |
#else |
kernel_ulong_t prod_id[4] |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
#endif |
/* not matched against */ |
kernel_ulong_t driver_info; |
#ifdef __KERNEL__ |
char * cisfile; |
#else |
kernel_ulong_t cisfile; |
#endif |
}; |
#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001 |
372,8 → 351,7 |
__u8 class; /* Standard interface or SDIO_ANY_ID */ |
__u16 vendor; /* Vendor or SDIO_ANY_ID */ |
__u16 device; /* Device ID or SDIO_ANY_ID */ |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* SSB core, see drivers/ssb/ */ |
381,7 → 359,8 |
__u16 vendor; |
__u16 coreid; |
__u8 revision; |
}; |
__u8 __pad; |
} __attribute__((packed, aligned(2))); |
#define SSB_DEVICE(_vendor, _coreid, _revision) \ |
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, } |
#define SSB_DEVTABLE_END \ |
397,7 → 376,7 |
__u16 id; |
__u8 rev; |
__u8 class; |
}; |
} __attribute__((packed,aligned(2))); |
#define BCMA_CORE(_manuf, _id, _rev, _class) \ |
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } |
#define BCMA_CORETABLE_END \ |
414,6 → 393,23 |
}; |
#define VIRTIO_DEV_ANY_ID 0xffffffff |
/* |
* For Hyper-V devices we use the device guid as the id. |
*/ |
struct hv_vmbus_device_id { |
__u8 guid[16]; |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* rpmsg */ |
#define RPMSG_NAME_SIZE 32 |
#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s" |
struct rpmsg_device_id { |
char name[RPMSG_NAME_SIZE]; |
}; |
/* i2c */ |
#define I2C_NAME_SIZE 20 |
421,8 → 417,7 |
struct i2c_device_id { |
char name[I2C_NAME_SIZE]; |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* spi */ |
432,8 → 427,7 |
struct spi_device_id { |
char name[SPI_NAME_SIZE]; |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* dmi */ |
461,20 → 455,12 |
}; |
struct dmi_strmatch { |
unsigned char slot; |
unsigned char slot:7; |
unsigned char exact_match:1; |
char substr[79]; |
}; |
#ifndef __KERNEL__ |
struct dmi_system_id { |
kernel_ulong_t callback; |
kernel_ulong_t ident; |
struct dmi_strmatch matches[4]; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
}; |
#else |
struct dmi_system_id { |
int (*callback)(const struct dmi_system_id *); |
const char *ident; |
struct dmi_strmatch matches[4]; |
487,9 → 473,9 |
* error: storage size of '__mod_dmi_device_table' isn't known |
*/ |
#define dmi_device_id dmi_system_id |
#endif |
#define DMI_MATCH(a, b) { a, b } |
#define DMI_MATCH(a, b) { .slot = a, .substr = b } |
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 } |
#define PLATFORM_NAME_SIZE 20 |
#define PLATFORM_MODULE_PREFIX "platform:" |
496,8 → 482,7 |
struct platform_device_id { |
char name[PLATFORM_NAME_SIZE]; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
#define MDIO_MODULE_PREFIX "mdio:" |
542,4 → 527,74 |
kernel_ulong_t driver_data; /* data private to the driver */ |
}; |
/** |
* struct amba_id - identifies a device on an AMBA bus |
* @id: The significant bits of the hardware device ID |
* @mask: Bitmask specifying which bits of the id field are significant when |
* matching. A driver binds to a device when ((hardware device ID) & mask) |
* == id. |
* @data: Private data used by the driver. |
*/ |
struct amba_id { |
unsigned int id; |
unsigned int mask; |
void *data; |
}; |
/* |
* Match x86 CPUs for CPU specific drivers. |
* See documentation of "x86_match_cpu" for details. |
*/ |
struct x86_cpu_id { |
__u16 vendor; |
__u16 family; |
__u16 model; |
__u16 feature; /* bit index */ |
kernel_ulong_t driver_data; |
}; |
#define X86_FEATURE_MATCH(x) \ |
{ X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } |
#define X86_VENDOR_ANY 0xffff |
#define X86_FAMILY_ANY 0 |
#define X86_MODEL_ANY 0 |
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ |
#define IPACK_ANY_FORMAT 0xff |
#define IPACK_ANY_ID (~0) |
struct ipack_device_id { |
__u8 format; /* Format version or IPACK_ANY_ID */ |
__u32 vendor; /* Vendor ID or IPACK_ANY_ID */ |
__u32 device; /* Device ID or IPACK_ANY_ID */ |
}; |
#define MEI_CL_MODULE_PREFIX "mei:" |
#define MEI_CL_NAME_SIZE 32 |
struct mei_cl_device_id { |
char name[MEI_CL_NAME_SIZE]; |
kernel_ulong_t driver_info; |
}; |
/* RapidIO */ |
#define RIO_ANY_ID 0xffff |
/** |
* struct rio_device_id - RIO device identifier |
* @did: RapidIO device ID |
* @vid: RapidIO vendor ID |
* @asm_did: RapidIO assembly device ID |
* @asm_vid: RapidIO assembly vendor ID |
* |
* Identifies a RapidIO device based on both the device/vendor IDs and |
* the assembly device/vendor IDs. |
*/ |
struct rio_device_id { |
__u16 did, vid; |
__u16 asm_did, asm_vid; |
}; |
#endif /* LINUX_MOD_DEVICETABLE_H */ |
/drivers/include/linux/rculist.h |
---|
0,0 → 1,526 |
#ifndef _LINUX_RCULIST_H |
#define _LINUX_RCULIST_H |
#ifdef __KERNEL__ |
/* |
* RCU-protected list version |
*/ |
#include <linux/list.h> |
//#include <linux/rcupdate.h> |
/* |
* Why is there no list_empty_rcu()? Because list_empty() serves this |
* purpose. The list_empty() function fetches the RCU-protected pointer |
* and compares it to the address of the list head, but neither dereferences |
* this pointer itself nor provides this pointer to the caller. Therefore, |
* it is not necessary to use rcu_dereference(), so that list_empty() can |
* be used anywhere you would want to use a list_empty_rcu(). |
*/ |
/* |
* return the ->next pointer of a list_head in an rcu safe |
* way, we must not access it directly |
*/ |
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) |
/* |
* Insert a new entry between two known consecutive entries. |
* |
* This is only for internal list manipulation where we know |
* the prev/next entries already! |
*/ |
#ifndef CONFIG_DEBUG_LIST |
static inline void __list_add_rcu(struct list_head *new, |
struct list_head *prev, struct list_head *next) |
{ |
new->next = next; |
new->prev = prev; |
rcu_assign_pointer(list_next_rcu(prev), new); |
next->prev = new; |
} |
#else |
extern void __list_add_rcu(struct list_head *new, |
struct list_head *prev, struct list_head *next); |
#endif |
/** |
* list_add_rcu - add a new entry to rcu-protected list |
* @new: new entry to be added |
* @head: list head to add it after |
* |
* Insert a new entry after the specified head. |
* This is good for implementing stacks. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_add_rcu() |
* or list_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
*/ |
static inline void list_add_rcu(struct list_head *new, struct list_head *head) |
{ |
__list_add_rcu(new, head, head->next); |
} |
/** |
* list_add_tail_rcu - add a new entry to rcu-protected list |
* @new: new entry to be added |
* @head: list head to add it before |
* |
* Insert a new entry before the specified head. |
* This is useful for implementing queues. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_add_tail_rcu() |
* or list_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
*/ |
static inline void list_add_tail_rcu(struct list_head *new, |
struct list_head *head) |
{ |
__list_add_rcu(new, head->prev, head); |
} |
/** |
* list_del_rcu - deletes entry from list without re-initialization |
* @entry: the element to delete from the list. |
* |
* Note: list_empty() on entry does not return true after this, |
* the entry is in an undefined state. It is useful for RCU based |
* lockfree traversal. |
* |
* In particular, it means that we can not poison the forward |
* pointers that may still be used for walking the list. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_del_rcu() |
* or list_add_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
* |
* Note that the caller is not permitted to immediately free |
* the newly deleted entry. Instead, either synchronize_rcu() |
* or call_rcu() must be used to defer freeing until an RCU |
* grace period has elapsed. |
*/ |
static inline void list_del_rcu(struct list_head *entry) |
{ |
__list_del_entry(entry); |
entry->prev = LIST_POISON2; |
} |
/** |
* hlist_del_init_rcu - deletes entry from hash list with re-initialization |
* @n: the element to delete from the hash list. |
* |
* Note: list_unhashed() on the node returns true after this. It is |
* useful for RCU based read lockfree traversal if the writer side |
* must know if the list entry is still hashed or already unhashed. |
* |
* In particular, it means that we can not poison the forward pointers |
* that may still be used for walking the hash list and we can only |
* zero the pprev pointer so list_unhashed() will return true after |
* this. |
* |
* The caller must take whatever precautions are necessary (such as |
* holding appropriate locks) to avoid racing with another |
* list-mutation primitive, such as hlist_add_head_rcu() or |
* hlist_del_rcu(), running on this same list. However, it is |
* perfectly legal to run concurrently with the _rcu list-traversal |
* primitives, such as hlist_for_each_entry_rcu(). |
*/ |
static inline void hlist_del_init_rcu(struct hlist_node *n) |
{ |
if (!hlist_unhashed(n)) { |
__hlist_del(n); |
n->pprev = NULL; |
} |
} |
/** |
* list_replace_rcu - replace old entry by new one |
* @old : the element to be replaced |
* @new : the new element to insert |
* |
* The @old entry will be replaced with the @new entry atomically. |
* Note: @old should not be empty. |
*/ |
static inline void list_replace_rcu(struct list_head *old, |
struct list_head *new) |
{ |
new->next = old->next; |
new->prev = old->prev; |
rcu_assign_pointer(list_next_rcu(new->prev), new); |
new->next->prev = new; |
old->prev = LIST_POISON2; |
} |
/** |
* list_splice_init_rcu - splice an RCU-protected list into an existing list. |
* @list: the RCU-protected list to splice |
* @head: the place in the list to splice the first list into |
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... |
* |
* @head can be RCU-read traversed concurrently with this function. |
* |
* Note that this function blocks. |
* |
* Important note: the caller must take whatever action is necessary to |
* prevent any other updates to @head. In principle, it is possible |
* to modify the list as soon as sync() begins execution. |
* If this sort of thing becomes necessary, an alternative version |
* based on call_rcu() could be created. But only if -really- |
* needed -- there is no shortage of RCU API members. |
*/ |
static inline void list_splice_init_rcu(struct list_head *list, |
struct list_head *head, |
void (*sync)(void)) |
{ |
struct list_head *first = list->next; |
struct list_head *last = list->prev; |
struct list_head *at = head->next; |
if (list_empty(list)) |
return; |
/* "first" and "last" tracking list, so initialize it. */ |
INIT_LIST_HEAD(list); |
/* |
* At this point, the list body still points to the source list. |
* Wait for any readers to finish using the list before splicing |
* the list body into the new list. Any new readers will see |
* an empty list. |
*/ |
sync(); |
/* |
* Readers are finished with the source list, so perform splice. |
* The order is important if the new list is global and accessible |
* to concurrent RCU readers. Note that RCU readers are not |
* permitted to traverse the prev pointers without excluding |
* this function. |
*/ |
last->next = at; |
rcu_assign_pointer(list_next_rcu(head), first); |
first->prev = head; |
at->prev = last; |
} |
/** |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_entry_rcu(ptr, type, member) \ |
({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ |
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ |
}) |
/** |
* Where are list_empty_rcu() and list_first_entry_rcu()? |
* |
* Implementing those functions following their counterparts list_empty() and |
* list_first_entry() is not advisable because they lead to subtle race |
* conditions as the following snippet shows: |
* |
* if (!list_empty_rcu(mylist)) { |
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); |
* do_something(bar); |
* } |
* |
* The list may not be empty when list_empty_rcu checks it, but it may be when |
* list_first_entry_rcu rereads the ->next pointer. |
* |
* Rereading the ->next pointer is not a problem for list_empty() and |
* list_first_entry() because they would be protected by a lock that blocks |
* writers. |
* |
* See list_first_or_null_rcu for an alternative. |
*/ |
/** |
* list_first_or_null_rcu - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_first_or_null_rcu(ptr, type, member) \ |
({struct list_head *__ptr = (ptr); \ |
struct list_head __rcu *__next = list_next_rcu(__ptr); \ |
likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \ |
}) |
/** |
* list_for_each_entry_rcu - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* This list-traversal primitive may safely run concurrently with |
* the _rcu list-mutation primitives such as list_add_rcu() |
* as long as the traversal is guarded by rcu_read_lock(). |
*/ |
#define list_for_each_entry_rcu(pos, head, member) \ |
for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
/** |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
*/ |
#define list_for_each_entry_continue_rcu(pos, head, member) \ |
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
/** |
* hlist_del_rcu - deletes entry from hash list without re-initialization |
* @n: the element to delete from the hash list. |
* |
* Note: list_unhashed() on entry does not return true after this, |
* the entry is in an undefined state. It is useful for RCU based |
* lockfree traversal. |
* |
* In particular, it means that we can not poison the forward |
* pointers that may still be used for walking the hash list. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as hlist_add_head_rcu() |
* or hlist_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* hlist_for_each_entry(). |
*/ |
static inline void hlist_del_rcu(struct hlist_node *n) |
{ |
__hlist_del(n); |
n->pprev = LIST_POISON2; |
} |
/** |
* hlist_replace_rcu - replace old entry by new one |
* @old : the element to be replaced |
* @new : the new element to insert |
* |
* The @old entry will be replaced with the @new entry atomically. |
*/ |
static inline void hlist_replace_rcu(struct hlist_node *old, |
struct hlist_node *new) |
{ |
struct hlist_node *next = old->next; |
new->next = next; |
new->pprev = old->pprev; |
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); |
if (next) |
new->next->pprev = &new->next; |
old->pprev = LIST_POISON2; |
} |
/* |
* return the first or the next element in an RCU protected hlist |
*/ |
#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) |
#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) |
#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) |
/** |
* hlist_add_head_rcu |
* @n: the element to add to the hash list. |
* @h: the list to add to. |
* |
* Description: |
* Adds the specified element to the specified hlist, |
* while permitting racing traversals. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as hlist_add_head_rcu() |
* or hlist_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* hlist_for_each_entry_rcu(), used to prevent memory-consistency |
* problems on Alpha CPUs. Regardless of the type of CPU, the |
* list-traversal primitive must be guarded by rcu_read_lock(). |
*/ |
static inline void hlist_add_head_rcu(struct hlist_node *n, |
struct hlist_head *h) |
{ |
struct hlist_node *first = h->first; |
n->next = first; |
n->pprev = &h->first; |
rcu_assign_pointer(hlist_first_rcu(h), n); |
if (first) |
first->pprev = &n->next; |
} |
/** |
* hlist_add_before_rcu |
* @n: the new element to add to the hash list. |
* @next: the existing element to add the new element before. |
* |
* Description: |
* Adds the specified element to the specified hlist |
* before the specified node while permitting racing traversals. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as hlist_add_head_rcu() |
* or hlist_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* hlist_for_each_entry_rcu(), used to prevent memory-consistency |
* problems on Alpha CPUs. |
*/ |
static inline void hlist_add_before_rcu(struct hlist_node *n, |
struct hlist_node *next) |
{ |
n->pprev = next->pprev; |
n->next = next; |
rcu_assign_pointer(hlist_pprev_rcu(n), n); |
next->pprev = &n->next; |
} |
/** |
* hlist_add_after_rcu |
* @prev: the existing element to add the new element after. |
* @n: the new element to add to the hash list. |
* |
* Description: |
* Adds the specified element to the specified hlist |
* after the specified node while permitting racing traversals. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as hlist_add_head_rcu() |
* or hlist_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* hlist_for_each_entry_rcu(), used to prevent memory-consistency |
* problems on Alpha CPUs. |
*/ |
static inline void hlist_add_after_rcu(struct hlist_node *prev, |
struct hlist_node *n) |
{ |
n->next = prev->next; |
n->pprev = &prev->next; |
rcu_assign_pointer(hlist_next_rcu(prev), n); |
if (n->next) |
n->next->pprev = &n->next; |
} |
/*
 * __hlist_for_each_rcu - iterate over the raw hlist_node chain of an
 * RCU-protected hlist.
 * @pos: struct hlist_node * loop cursor.
 * @head: the hlist_head of the list.
 *
 * Every link is fetched with rcu_dereference(), so the traversal must
 * be guarded by rcu_read_lock().  @pos is NULL when the loop ends.
 */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * Links are fetched with rcu_dereference_raw(); hlist_entry_safe()
 * maps a NULL node to a NULL cursor, so @pos is NULL once the end of
 * the list has been reached.
 */
#define hlist_for_each_entry_rcu(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it uses
 * rcu_dereference_raw_notrace() and therefore does not do any RCU
 * debugging or tracing, making it usable from the tracing code itself.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * Links are fetched with rcu_dereference_bh(), i.e. the bottom-half
 * flavor of RCU (presumably for traversals under rcu_read_lock_bh() —
 * confirm against the callers' locking).
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * Iteration starts at the element following @pos; @pos must therefore
 * point to a valid entry on entry (its (pos)->member.next link is
 * dereferenced immediately).  @pos is NULL when the loop terminates.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * Same as hlist_for_each_entry_continue_rcu(), but links are fetched
 * with the bottom-half flavor rcu_dereference_bh().  @pos must point
 * to a valid entry on entry.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member))
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/slab.h |
---|
1,3 → 1,14 |
/* |
* Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). |
* |
* (C) SGI 2006, Christoph Lameter |
* Cleaned up and restructured to ease the addition of alternative |
* implementations of SLAB allocators. |
*/ |
#ifndef _LINUX_SLAB_H |
#define _LINUX_SLAB_H |
#include <errno.h> |
// stub |
#endif /* _LINUX_SLAB_H */ |
/drivers/include/linux/spinlock_up.h |
---|
14,7 → 14,10 |
* In the debug case, 1 means unlocked, 0 means locked. (the values |
* are inverted, to catch initialization bugs) |
* |
* No atomicity anywhere, we are on UP. |
* No atomicity anywhere, we are on UP. However, we still need |
* the compiler barriers, because we do not want the compiler to |
* move potentially faulting instructions (notably user accesses) |
* into the locked sequence, resulting in non-atomic execution. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
/drivers/include/linux/string.h |
---|
142,4 → 142,15 |
extern size_t memweight(const void *ptr, size_t bytes); |
/**
 * kbasename - return the last component of a pathname.
 * @path: path to extract the filename from.
 *
 * Returns a pointer into @path just past the final '/', or @path
 * itself when it contains no '/' at all.  The result aliases @path;
 * nothing is copied.
 */
static inline const char *kbasename(const char *path)
{
	const char *slash = strrchr(path, '/');

	if (slash)
		return slash + 1;
	return path;
}
#endif /* _LINUX_STRING_H_ */ |
/drivers/include/linux/time.h |
---|
0,0 → 1,270 |
#ifndef _LINUX_TIME_H |
#define _LINUX_TIME_H |
//# include <linux/cache.h> |
//# include <linux/seqlock.h> |
# include <linux/math64.h> |
//#include <uapi/linux/time.h> |
extern struct timezone sys_tz; |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
/*
 * timespec_equal - return 1 when both second and nanosecond fields
 * of @a and @b match, 0 otherwise.
 */
static inline int timespec_equal(const struct timespec *a,
				 const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return 0;
	return a->tv_nsec == b->tv_nsec;
}
/*
 * Three-way comparison of two (normalized) timespecs:
 *   lhs <  rhs: return <0
 *   lhs == rhs: return 0
 *   lhs >  rhs: return >0
 * When the seconds match, the result is the nanosecond difference.
 */
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
	if (lhs->tv_sec != rhs->tv_sec)
		return (lhs->tv_sec < rhs->tv_sec) ? -1 : 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}
/*
 * Three-way comparison of two timevals: <0, 0 or >0 as with
 * timespec_compare(); the microsecond difference breaks second ties.
 */
static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
{
	if (lhs->tv_sec != rhs->tv_sec)
		return (lhs->tv_sec < rhs->tv_sec) ? -1 : 1;
	return lhs->tv_usec - rhs->tv_usec;
}
extern unsigned long mktime(const unsigned int year, const unsigned int mon, |
const unsigned int day, const unsigned int hour, |
const unsigned int min, const unsigned int sec); |
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
/*
 * timespec_add_safe assumes both values are positive and checks
 * for overflow. It will return TIME_T_MAX if the return would be
 * smaller than either of the arguments.
 */
extern struct timespec timespec_add_safe(const struct timespec lhs, |
const struct timespec rhs); |
static inline struct timespec timespec_add(struct timespec lhs, |
struct timespec rhs) |
{ |
struct timespec ts_delta; |
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, |
lhs.tv_nsec + rhs.tv_nsec); |
return ts_delta; |
} |
/* |
* sub = lhs - rhs, in normalized form |
*/ |
static inline struct timespec timespec_sub(struct timespec lhs, |
struct timespec rhs) |
{ |
struct timespec ts_delta; |
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, |
lhs.tv_nsec - rhs.tv_nsec); |
return ts_delta; |
} |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#if (BITS_PER_LONG == 64) |
# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#else |
# define KTIME_SEC_MAX LONG_MAX |
#endif |
/* |
* Returns true if the timespec is norm, false if denorm: |
*/ |
static inline bool timespec_valid(const struct timespec *ts) |
{ |
/* Dates before 1970 are bogus */ |
if (ts->tv_sec < 0) |
return false; |
/* Can't have more nanoseconds then a second */ |
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
return false; |
return true; |
} |
static inline bool timespec_valid_strict(const struct timespec *ts) |
{ |
if (!timespec_valid(ts)) |
return false; |
/* Disallow values that could overflow ktime_t */ |
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) |
return false; |
return true; |
} |
/* Flag set elsewhere when a persistent clock is available. */
extern bool persistent_clock_exist;

/* Reports whether a persistent clock was found (see flag above). */
static inline bool has_persistent_clock(void)
{
	return persistent_clock_exist;
}
extern void read_persistent_clock(struct timespec *ts); |
extern void read_boot_clock(struct timespec *ts); |
extern int persistent_clock_is_local; |
extern int update_persistent_clock(struct timespec now); |
void timekeeping_init(void); |
extern int timekeeping_suspended; |
unsigned long get_seconds(void); |
struct timespec current_kernel_time(void); |
struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
struct timespec get_monotonic_coarse(void); |
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
struct timespec *wtom, struct timespec *sleep); |
void timekeeping_inject_sleeptime(struct timespec *delta); |
#define CURRENT_TIME (current_kernel_time()) |
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
/* Some architectures do not supply their own clocksource.
 * This is mainly the case in architectures that get their
 * inter-tick times by reading the counter on their interval
 * timer. Since these timers wrap every tick, they're not really
 * useful as clocksources. Wrapping them to act like one is possible
 * but not very efficient. So we provide a callout these arches
 * can implement for use with the jiffies clocksource to provide
 * finer than tick granular time.
 */
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
extern u32 (*arch_gettimeoffset)(void); |
#endif |
extern void do_gettimeofday(struct timeval *tv); |
extern int do_settimeofday(const struct timespec *tv); |
extern int do_sys_settimeofday(const struct timespec *tv, |
const struct timezone *tz); |
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct itimerval; |
extern int do_setitimer(int which, struct itimerval *value, |
struct itimerval *ovalue); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern int do_getitimer(int which, struct itimerval *value); |
extern int __getnstimeofday(struct timespec *tv); |
extern void getnstimeofday(struct timespec *tv); |
extern void getrawmonotonic(struct timespec *ts); |
extern void getnstime_raw_and_real(struct timespec *ts_raw, |
struct timespec *ts_real); |
extern void getboottime(struct timespec *ts); |
extern void monotonic_to_bootbased(struct timespec *ts); |
extern void get_monotonic_boottime(struct timespec *ts); |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern int timekeeping_valid_for_hres(void); |
extern u64 timekeeping_max_deferment(void); |
extern int timekeeping_inject_offset(struct timespec *ts); |
extern s32 timekeeping_get_tai_offset(void); |
extern void timekeeping_set_tai_offset(s32 tai_offset); |
extern void timekeeping_clocktai(struct timespec *ts); |
struct tms; |
extern void do_sys_times(struct tms *); |
/*
 * Similar to the struct tm in userspace <time.h>, but it needs to be here so
 * that the kernel source is self contained.
 */
struct tm {
	/*
	 * the number of seconds after the minute, normally in the range
	 * 0 to 59, but can be up to 60 to allow for leap seconds
	 */
	int tm_sec;
	/* the number of minutes after the hour, in the range 0 to 59 */
	int tm_min;
	/* the number of hours past midnight, in the range 0 to 23 */
	int tm_hour;
	/* the day of the month, in the range 1 to 31 */
	int tm_mday;
	/* the number of months since January, in the range 0 to 11 */
	int tm_mon;
	/* the number of years since 1900 (long here, unlike userspace's int) */
	long tm_year;
	/* the number of days since Sunday, in the range 0 to 6 */
	int tm_wday;
	/* the number of days since January 1, in the range 0 to 365 */
	int tm_yday;
};
void time_to_tm(time_t totalsecs, int offset, struct tm *result); |
/** |
* timespec_to_ns - Convert timespec to nanoseconds |
* @ts: pointer to the timespec variable to be converted |
* |
* Returns the scalar nanosecond representation of the timespec |
* parameter. |
*/ |
static inline s64 timespec_to_ns(const struct timespec *ts) |
{ |
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
} |
/**
 * timeval_to_ns - Convert timeval to nanoseconds
 * @tv: pointer to the timeval variable to be converted
 *
 * Returns the scalar nanosecond representation of the timeval
 * parameter.
 */
static inline s64 timeval_to_ns(const struct timeval *tv)
{
	return ((s64) tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}
/** |
* ns_to_timespec - Convert nanoseconds to timespec |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timespec representation of the nsec parameter. |
*/ |
extern struct timespec ns_to_timespec(const s64 nsec); |
/** |
* ns_to_timeval - Convert nanoseconds to timeval |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timeval representation of the nsec parameter. |
*/ |
extern struct timeval ns_to_timeval(const s64 nsec); |
/**
 * timespec_add_ns - Adds nanoseconds to a timespec
 * @a: pointer to timespec to be incremented
 * @ns: unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
{
	/*
	 * __iter_div_u64_rem() returns the whole-second quotient of
	 * (tv_nsec + ns) and leaves the sub-second remainder back in
	 * @ns, keeping the result normalized.
	 */
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
#endif |