Subversion Repositories Kolibri OS

Compare Revisions

Rev 4065 → Rev 3880

/drivers/ddk/linux/idr.c
175,7 → 175,20
}
}
 
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* This function should be called prior to calling the idr_get_new* functions.
* It preallocates enough memory to satisfy the worst possible allocation. The
* caller should pass in GFP_KERNEL if possible. This of course requires that
* no spinning locks be held.
*
* If the system is REALLY out of memory this function returns %0,
* otherwise %1.
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
186,12 → 199,13
}
return 1;
}
EXPORT_SYMBOL(__idr_pre_get);
EXPORT_SYMBOL(idr_pre_get);
 
/**
* sub_alloc - try to allocate an id without growing the tree depth
* @idp: idr handle
* @starting_id: id to start search at
* @id: pointer to the allocated handle
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
* @gfp_mask: allocation mask for idr_layer_alloc()
* @layer_idr: optional idr passed to idr_layer_alloc()
353,7 → 367,25
idr_mark_full(pa, id);
}
 
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
*
* If allocation from IDR's private freelist fails, idr_get_new_above() will
* return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
* IDR's preallocation and then retry the idr_get_new_above() call.
*
* If the idr is full idr_get_new_above() will return %-ENOSPC.
*
* @id returns a value in the range @starting_id ... %0x7fffffff
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
366,7 → 398,7
*id = rv;
return 0;
}
EXPORT_SYMBOL(__idr_get_new_above);
EXPORT_SYMBOL(idr_get_new_above);
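For reference, the retry protocol described in the two comments above looks
like this from a caller's side; a minimal sketch, assuming caller-provided
my_idr, my_lock and ptr:

	int id, ret;

again:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;		/* really out of memory */
	spin_lock(&my_lock);
	ret = idr_get_new_above(&my_idr, ptr, 1, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto again;		/* preallocation raced away, refill */
	else if (ret)
		return ret;		/* -ENOSPC: idr is full */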
 
/**
* idr_preload - preload for idr_alloc()
464,7 → 496,9
 
static void idr_remove_warning(int id)
{
WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
printk(KERN_WARNING
"idr_remove called for id=%d which is not allocated.\n", id);
// dump_stack();
}
 
static void sub_remove(struct idr *idp, int shift, int id)
514,7 → 548,8
struct idr_layer *p;
struct idr_layer *to_free;
 
if (id < 0)
/* see comment in idr_find_slowpath() */
if (WARN_ON_ONCE(id < 0))
return;
 
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
611,7 → 646,15
int n;
struct idr_layer *p;
 
if (id < 0)
/*
* If @id is negative, idr_find() used to ignore the sign bit and
* perform the lookup with the rest of the bits, which is weird and can
* lead to very obscure bugs. We're now returning NULL for all
* negative IDs but just in case somebody was depending on the sign
* bit being ignored, let's trigger WARN_ON_ONCE() so that they can
* be detected and fixed. WARN_ON_ONCE() can later be removed.
*/
if (WARN_ON_ONCE(id < 0))
return NULL;
 
p = rcu_dereference_raw(idp->top);
761,7 → 804,8
int n;
struct idr_layer *p, *old_p;
 
if (id < 0)
/* see comment in idr_find_slowpath() */
if (WARN_ON_ONCE(id < 0))
return ERR_PTR(-EINVAL);
 
p = idp->top;
794,7 → 838,7
#endif
 
 
void __init idr_init_cache(void)
void idr_init_cache(void)
{
//idr_layer_cache = kmem_cache_create("idr_layer_cache",
// sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
814,6 → 858,7
}
EXPORT_SYMBOL(idr_init);
 
#if 0
 
/**
* DOC: IDA description
957,7 → 1002,7
if (ida->idr.id_free_cnt || ida->free_bitmap) {
struct idr_layer *p = get_from_free_list(&ida->idr);
if (p)
kfree(p);
kmem_cache_free(idr_layer_cache, p);
}
 
return 0;
1007,7 → 1052,8
return;
 
err:
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
printk(KERN_WARNING
"ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
 
1038,7 → 1084,9
EXPORT_SYMBOL(ida_init);
 
 
#endif
 
 
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
/drivers/include/drm/drm_memory.h
File deleted
/drivers/include/drm/vmwgfx_drm.h
File deleted
/drivers/include/drm/drm_rect.h
File deleted
/drivers/include/drm/drmP.h
388,7 → 388,6
spinlock_t read_lock;
spinlock_t write_lock;
};
#endif
 
struct drm_freelist {
int initialized; /**< Freelist in use */
401,6 → 400,7
atomic_t wfh; /**< If waiting for high mark */
spinlock_t lock;
};
#endif
 
typedef struct drm_dma_handle {
dma_addr_t busaddr;
408,6 → 408,7
size_t size;
} drm_dma_handle_t;
 
#if 0
/**
* Buffer entry. There is one of this for each buffer size order.
*/
437,6 → 438,7
struct list_head head;
struct mutex lock;
};
#endif
 
/** File private data */
struct drm_file {
1025,7 → 1027,7
struct drm_info_node {
struct list_head list;
struct drm_minor *minor;
const struct drm_info_list *info_ent;
struct drm_info_list *info_ent;
struct dentry *dent;
};
 
1224,6 → 1226,12
 
#if 0
 
#ifdef __alpha__
#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif
 
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
{
1238,28 → 1246,39
{
return drm_core_check_feature(dev, DRIVER_USE_MTRR);
}
#else
#define drm_core_has_MTRR(dev) (0)
#endif
 
static inline void drm_device_set_unplugged(struct drm_device *dev)
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
 
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
unsigned int flags)
{
smp_wmb();
atomic_set(&dev->unplugged, 1);
return mtrr_add(offset, size, flags, 1);
}
 
static inline int drm_device_is_unplugged(struct drm_device *dev)
static inline int drm_mtrr_del(int handle, unsigned long offset,
unsigned long size, unsigned int flags)
{
int ret = atomic_read(&dev->unplugged);
smp_rmb();
return ret;
return mtrr_del(handle, offset, size);
}
 
static inline bool drm_modeset_is_locked(struct drm_device *dev)
#else
#define drm_core_has_MTRR(dev) (0)
 
#define DRM_MTRR_WC 0
 
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
unsigned int flags)
{
return mutex_is_locked(&dev->mode_config.mutex);
return 0;
}
 
static inline int drm_mtrr_del(int handle, unsigned long offset,
unsigned long size, unsigned int flags)
{
return 0;
}
#endif
 
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
1509,7 → 1528,8
extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
 
/* Proc support (drm_proc.h) */
extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
extern int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root);
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
 
/* Debugfs support */
/drivers/include/drm/ttm/ttm_execbuf_util.h
57,7 → 57,6
/**
* function ttm_eu_backoff_reservation
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
*
* Undoes all buffer validation reservations for bos pointed to by
64,13 → 63,11
* the list entries.
*/
 
extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list);
extern void ttm_eu_backoff_reservation(struct list_head *list);
 
/**
* function ttm_eu_reserve_buffers
*
* @ticket: [out] ww_acquire_ctx returned by call.
* @list: thread private list of ttm_validate_buffer structs.
*
* Tries to reserve bos pointed to by the list entries for validation.
93,13 → 90,11
* has failed.
*/
 
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list);
extern int ttm_eu_reserve_buffers(struct list_head *list);
 
/**
* function ttm_eu_fence_buffer_objects.
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
* @sync_obj: The new sync object for the buffers.
*
109,7 → 104,6
*
*/
 
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list, void *sync_obj);
extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
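The three functions above are normally used together in an execbuf path; a
hedged sketch against the ticket-less API on the right-hand side of this diff
(my_list, my_sync_obj and the validation step are placeholders):

	ret = ttm_eu_reserve_buffers(&my_list);
	if (unlikely(ret != 0))
		return ret;
	ret = validate_buffers(&my_list);	/* hypothetical driver step */
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation(&my_list);	/* undo all reservations */
	else
		ttm_eu_fence_buffer_objects(&my_list, my_sync_obj);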
 
#endif
/drivers/include/drm/ttm/ttm_object.h
40,7 → 40,7
#include <linux/list.h>
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
//#include <linux/rcupdate.h>
#include <linux/rcupdate.h>
#include <ttm/ttm_memory.h>
 
/**
/drivers/include/drm/ttm/ttm_bo_api.h
44,11 → 44,7
 
struct drm_mm_node;
 
struct reservation_object {
struct mutex lock;
};
 
 
/**
* struct ttm_placement
*
157,6 → 153,7
* Lru lists may keep one refcount, the delayed delete list, and kref != 0
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
167,6 → 164,12
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @val_seq: Sequence of the validation holding the @reserved lock.
* Used to avoid starvation when many processes compete to validate the
* buffer. This member is protected by the bo_device::lru_lock.
* @seq_valid: The value of @val_seq is valid. This value is protected by
* the bo_device::lru_lock.
* @reserved: Deadlock-free lock used for synchronization state transitions.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vm_rb: Rb node for the vm rb tree.
206,9 → 209,10
 
struct kref kref;
struct kref list_kref;
wait_queue_head_t event_queue;
 
/**
* Members protected by the bo::resv::reserved lock.
* Members protected by the bo::reserved lock.
*/
 
struct ttm_mem_reg mem;
230,8 → 234,17
struct list_head ddestroy;
struct list_head swap;
struct list_head io_reserve_lru;
uint32_t val_seq;
bool seq_valid;
 
/**
* Members protected by the bdev::lru_lock
* only when written to.
*/
 
atomic_t reserved;
 
/**
* Members protected by struct buffer_object_device::fence_lock
* In addition, setting sync_obj to anything else
* than NULL requires bo::reserved to be held. This allows for
259,9 → 272,6
uint32_t cur_placement;
 
struct sg_table *sg;
 
struct reservation_object *resv;
struct reservation_object ttm_resv;
};
 
/**
715,4 → 725,18
 
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
/**
* ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
*
* @bo: The buffer object to check.
*
* This function returns an indication of whether a bo is reserved or not, and
* should only be used to print an error when it unexpectedly is not (i.e. from
* incorrect API usage), since there's no guarantee that it is the caller that
* is holding the reservation.
*/
static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
{
return atomic_read(&bo->reserved);
}
 
#endif
/drivers/include/drm/ttm/ttm_bo_driver.h
33,7 → 33,6
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
//#include <linux/workqueue.h>
40,8 → 39,6
//#include <linux/fs.h>
#include <linux/spinlock.h>
 
struct ww_acquire_ctx;
 
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
781,8 → 778,8
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_ticket: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @ticket->stamp is older.
* @use_sequence: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @sequence < (@bo)->sequence.
*
* Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
793,10 → 790,19
* to make room for a buffer already reserved. (Buffers are reserved before
* they are evicted). The following algorithm prevents such deadlocks from
* occurring:
* Processes attempting to reserve multiple buffers other than for eviction,
* 1) Buffers are reserved with the lru spinlock held. Upon successful
* reservation they are removed from the lru list. This stops a reserved buffer
* from being evicted. However the lru spinlock is released between the time
* a buffer is selected for eviction and the time it is reserved.
* Therefore a check is made when a buffer is reserved for eviction, that it
* is still the first buffer in the lru list, before it is removed from the
* list. @check_lru == 1 forces this check. If it fails, the function returns
* -EINVAL, and the caller should then choose a new buffer to evict and repeat
* the procedure.
* 2) Processes attempting to reserve multiple buffers other than for eviction,
* (typically execbuf), should first obtain a unique 32-bit
* validation sequence number,
* and call this function with @use_ticket == 1 and @ticket->stamp == the unique
* and call this function with @use_sequence == 1 and @sequence == the unique
* sequence number. If upon call of this function, the buffer object is already
* reserved, the validation sequence is checked against the validation
* sequence of the process currently reserving the buffer,
811,102 → 817,84
* will eventually succeed, preventing both deadlocks and starvation.
*
* Returns:
* -EDEADLK: The reservation may cause a deadlock.
* -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
* -EBUSY: The function needed to sleep, but @no_wait was true
* -EALREADY: Bo already reserved using @ticket. This error code will only
* be returned if @use_ticket is set to true.
* -EDEADLK: Bo already reserved using @sequence. This error code will only
* be returned if @use_sequence is set to true.
*/
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_ticket,
struct ww_acquire_ctx *ticket)
{
int ret;
bool no_wait, bool use_sequence, uint32_t sequence);
 
WARN_ON(!atomic_read(&bo->kref.refcount));
 
ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
 
return ret;
}
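A hedged sketch of the backoff protocol the comment above describes, using the
sequence-based variant on the right-hand side of this diff (bo, my_val_seq and
the release step are placeholders):

	ret = ttm_bo_reserve(bo, true, false, true, my_val_seq);
	if (ret == -EAGAIN) {
		/* an older reservation holds bo: drop everything we hold, */
		/* wait for bo to become free, then restart the whole pass */
		/* with the same sequence number */
		release_all_my_reservations();		/* hypothetical */
		ret = ttm_bo_wait_unreserved(bo, true);
	}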
 
/**
* ttm_bo_reserve_slowpath:
* ttm_bo_reserve_locked:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @sequence: Set (@bo)->sequence to this value after lock
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_sequence: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @sequence < (@bo)->sequence.
*
* This is called after ttm_bo_reserve returns -EAGAIN and we backed off
* from all our other reservations. Because there are no other reservations
* held by us, this function cannot deadlock any more.
* Must be called with struct ttm_bo_global::lru_lock held,
* and will not remove reserved buffers from the lru lists.
* The function may release the LRU spinlock if it needs to sleep.
* Otherwise identical to ttm_bo_reserve.
*
* Returns:
* -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
* -EBUSY: The function needed to sleep, but @no_wait was true
* -EDEADLK: Bo already reserved using @sequence. This error code will only
* be returned if @use_sequence is set to true.
*/
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
bool interruptible,
struct ww_acquire_ctx *ticket)
{
int ret = 0;
bool no_wait, bool use_sequence,
uint32_t sequence);
 
WARN_ON(!atomic_read(&bo->kref.refcount));
 
if (interruptible)
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket);
else
ww_mutex_lock_slow(&bo->resv->lock, ticket);
 
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
else if (ret == -EINTR)
ret = -ERESTARTSYS;
 
return ret;
}
 
/**
* ttm_bo_unreserve_ticket
* ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
* @ticket: ww_acquire_ctx used for reserving
*
* Unreserve a previous reservation of @bo made with @ticket.
* Unreserve a previous reservation of @bo.
*/
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
struct ww_acquire_ctx *t)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bo->glob->lru_lock);
}
ww_mutex_unlock(&bo->resv->lock);
}
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
/**
* ttm_bo_unreserve
* ttm_bo_unreserve_locked
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
* Needs to be called with struct ttm_bo_global::lru_lock held.
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
ttm_bo_unreserve_ticket(bo, NULL);
}
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
 
/**
* ttm_bo_wait_unreserved
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Wait for a struct ttm_buffer_object to become unreserved.
* This is typically used in the execbuf code to relax cpu-usage when
* backing off from a potential deadlock condition.
*/
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
bool interruptible);
 
/*
* ttm_bo_util.c
*/
 
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
/**
* ttm_bo_move_ttm
*
/drivers/include/drm/drm_crtc.h
339,9 → 339,6
/* cursor controls */
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
 
/* Set gamma on the CRTC */
412,10 → 409,6
/* framebuffer the connector is currently bound to */
struct drm_framebuffer *fb;
 
/* Temporary tracking of the old fb while a modeset is ongoing. Used
* by drm_mode_set_config_internal to implement correct refcounting. */
struct drm_framebuffer *old_fb;
 
bool enabled;
 
/* Requested mode from modesetting. */
661,7 → 654,11
* @format_count: number of formats supported
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @gamma_size: size of gamma table
* @gamma_store: gamma correction table
* @enabled: enabled flag
* @funcs: helper functions
* @helper_private: storage for driver layer
* @properties: property tracking for this plane
*/
struct drm_plane {
677,7 → 674,14
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
 
/* CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
 
bool enabled;
 
const struct drm_plane_funcs *funcs;
void *helper_private;
 
struct drm_object_properties properties;
};
890,17 → 894,15
const uint32_t *formats, uint32_t format_count,
bool priv);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
 
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
extern const char *drm_get_connector_name(const struct drm_connector *connector);
extern const char *drm_get_connector_status_name(enum drm_connector_status status);
extern const char *drm_get_dpms_name(int val);
extern const char *drm_get_dvi_i_subconnector_name(int val);
extern const char *drm_get_dvi_i_select_name(int val);
extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val);
extern char *drm_get_connector_name(struct drm_connector *connector);
extern char *drm_get_dpms_name(int val);
extern char *drm_get_dvi_i_subconnector_name(int val);
extern char *drm_get_dvi_i_select_name(int val);
extern char *drm_get_tv_subconnector_name(int val);
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
992,7 → 994,7
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dithering_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
 
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
1020,8 → 1022,6
void *data, struct drm_file *file_priv);
extern int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addfb2(struct drm_device *dev,
1094,6 → 1094,5
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern const char *drm_get_format_name(uint32_t format);
 
#endif /* __DRM_CRTC_H__ */
/drivers/include/drm/drm_pciids.h
152,14 → 152,6
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
588,22 → 580,6
{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
/drivers/include/drm/drm_mm.h
177,6 → 177,17
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_color_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment, color,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
unsigned long size,
244,9 → 255,28
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
start, end, best_match);
}
 
extern void drm_mm_init(struct drm_mm *mm,
static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
{
return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
start, end, best_match);
}
extern int drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
/drivers/include/drm/drm_fixed.h
20,7 → 20,6
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Christian König
*/
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
66,95 → 65,4
tmp /= 2;
return lower_32_bits(tmp);
}
 
#define DRM_FIXED_POINT 32
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
 
static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
}
 
static inline int drm_fixp2int(int64_t a)
{
return ((s64)a) >> DRM_FIXED_POINT;
}
 
static inline unsigned drm_fixp_msbset(int64_t a)
{
unsigned shift, sign = (a >> 63) & 1;
 
for (shift = 62; shift > 0; --shift)
if (((a >> shift) & 1) != sign)
return shift;
 
return 0;
}
 
static inline s64 drm_fixp_mul(s64 a, s64 b)
{
unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
s64 result;
 
if (shift > 61) {
shift = shift - 61;
a >>= (shift >> 1) + (shift & 1);
b >>= shift >> 1;
} else
shift = 0;
 
result = a * b;
 
if (shift > DRM_FIXED_POINT)
return result << (shift - DRM_FIXED_POINT);
 
if (shift < DRM_FIXED_POINT)
return result >> (DRM_FIXED_POINT - shift);
 
return result;
}
 
static inline s64 drm_fixp_div(s64 a, s64 b)
{
unsigned shift = 62 - drm_fixp_msbset(a);
s64 result;
 
a <<= shift;
 
if (shift < DRM_FIXED_POINT)
b >>= (DRM_FIXED_POINT - shift);
 
result = div64_s64(a, b);
 
if (shift > DRM_FIXED_POINT)
return result >> (shift - DRM_FIXED_POINT);
 
return result;
}
 
static inline s64 drm_fixp_exp(s64 x)
{
s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
s64 sum = DRM_FIXED_ONE, term, y = x;
u64 count = 1;
 
if (x < 0)
y = -1 * x;
 
term = y;
 
while (term >= tolerance) {
sum = sum + term;
count = count + 1;
term = drm_fixp_mul(term, div64_s64(y, count));
}
 
if (x < 0)
sum = drm_fixp_div(DRM_FIXED_ONE, sum);
 
return sum;
}
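As a quick sanity check of the 32.32 format above (a worked example, not part
of the original header):

	s64 a = drm_int2fixp(5) >> 1;			/* 2.5 in 32.32 */
	s64 b = drm_int2fixp(4);			/* 4.0 */
	int r = drm_fixp2int(drm_fixp_mul(a, b));	/* r == 10 */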
 
#endif
/drivers/include/linux/time.h
File deleted
/drivers/include/linux/rculist.h
File deleted
/drivers/include/linux/hash.h
File deleted
/drivers/include/linux/jiffies.h
130,10 → 130,6
((__s64)(a) - (__s64)(b) >= 0))
#define time_before_eq64(a,b) time_after_eq64(b,a)
 
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
 
/*
* These four macros compare jiffies and 'a' for convenience.
*/
/drivers/include/linux/compiler-gcc4.h
13,7 → 13,7
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
 
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
 
/drivers/include/linux/idr.h
48,7 → 48,6
struct idr_layer *id_free;
int layers; /* only valid w/o concurrent changes */
int id_free_cnt;
int cur; /* current pos for cyclic allocation */
spinlock_t lock;
};
 
80,9 → 79,10
*/
 
void *idr_find_slowpath(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
105,7 → 105,7
 
/**
* idr_find - return pointer for given id
* @idr: idr handle
* @idp: idr handle
* @id: lookup key
*
* Return the pointer given the id it has been registered with. A %NULL
126,69 → 126,31
}
 
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
* @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
 
/*
* Don't use the following functions. These exist only to suppress
* deprecated warnings on EXPORT_SYMBOL()s.
*/
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);
 
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
return __idr_pre_get(idp, gfp_mask);
}
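The replacement interface mentioned above is used as a preload/alloc pair; a
minimal sketch, assuming the usual idr_preload_end() pairing and caller-provided
my_idr, my_lock and ptr:

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);	/* any free id >= 0 */
	spin_unlock(&my_lock);
	idr_preload_end();
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */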
 
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
* Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
*/
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
int starting_id, int *id)
static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, starting_id, id);
return idr_get_new_above(idp, ptr, 0, id);
}
 
/**
* idr_get_new - allocate new idr entry
* idr_for_each_entry - iterate over an idr's elements of a given type
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
* @entry: the type * to use as cursor
* @id: id entry's key
*/
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, 0, id);
}
#define idr_for_each_entry(idp, entry, id) \
for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
entry != NULL; \
++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
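Both forms of the macro are invoked the same way; a hedged usage sketch
(struct my_obj, my_idr and do_something() are placeholders):

	struct my_obj *obj;
	int id;

	idr_for_each_entry(&my_idr, obj, id)
		do_something(obj);	/* hypothetical per-entry work */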
 
void __idr_remove_all(struct idr *idp); /* don't use */
 
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
231,22 → 193,8
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
 
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
void __init idr_init_cache(void);
 
/**
* ida_get_new - allocate new ID
* @ida: idr handle
* @p_id: pointer to the allocated handle
*
* Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
*/
static inline int ida_get_new(struct ida *ida, int *p_id)
{
return ida_get_new_above(ida, 0, p_id);
}
 
void __init idr_init_cache(void);
 
#endif /* __IDR_H__ */
/drivers/include/linux/list.h
361,22 → 361,22
list_entry((ptr)->next, type, member)
 
/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*
* Note that if the list is empty, it returns NULL.
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_first_entry_or_null(ptr, type, member) \
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
#define list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
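The list_first_entry_or_null() helper above gives a convenient empty-list
sentinel; a minimal usage sketch (struct item, my_list and the node member are
placeholders):

	struct item *first = list_first_entry_or_null(&my_list, struct item, node);

	if (!first)
		return;		/* the list was empty */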
 
/**
* list_for_each - iterate over a list
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* This variant doesn't differ from list_for_each() any more.
* We don't do prefetching in either case.
*/
#define list_for_each(pos, head) \
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
 
/**
665,51 → 665,54
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
 
#define hlist_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? hlist_entry(____ptr, type, member) : NULL; \
})
 
/**
* hlist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(pos, head, member) \
for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
 
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
* @pos: the type * to use as a loop cursor.
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(pos, member) \
for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
 
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
* @pos: the type * to use as a loop cursor.
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(pos, member) \
for (; pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
 
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(pos, n, head, member) \
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
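The right-hand (older) forms of these macros take an extra struct hlist_node
cursor; a hedged usage sketch (struct my_item, my_head and the node member are
placeholders):

	struct my_item *it;
	struct hlist_node *pos;

	hlist_for_each_entry(it, pos, &my_head, node)
		pr_info("found %p\n", it);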
 
#endif
/drivers/include/linux/err.h
24,17 → 24,17
return (void *) error;
}
 
static inline long __must_check PTR_ERR(__force const void *ptr)
static inline long __must_check PTR_ERR(const void *ptr)
{
return (long) ptr;
}
 
static inline long __must_check IS_ERR(__force const void *ptr)
static inline long __must_check IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr)
static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
46,13 → 46,13
* Explicitly cast an error-valued pointer to another pointer type in such a
* way as to make it clear that's what's going on.
*/
static inline void * __must_check ERR_CAST(__force const void *ptr)
static inline void * __must_check ERR_CAST(const void *ptr)
{
/* cast away the const */
return (void *) ptr;
}
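For context, the calling convention these helpers implement; a minimal sketch
(foo_create() is a placeholder that returns a valid pointer or an ERR_PTR()):

	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the encoded errno */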
 
static inline int __must_check PTR_RET(__force const void *ptr)
static inline int __must_check PTR_RET(const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
/drivers/include/linux/i2c.h
55,6 → 55,7
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (deprecated)
* @detach_adapter: Callback for bus removal (deprecated)
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
91,10 → 92,12
struct i2c_driver {
unsigned int class;
 
/* Notifies the driver that a new bus has appeared. You should avoid
* using this, it will be removed in a near future.
/* Notifies the driver that a new bus has appeared or is about to be
* removed. You should avoid using this, it will be removed in a
* near future.
*/
int (*attach_adapter)(struct i2c_adapter *) __deprecated;
int (*detach_adapter)(struct i2c_adapter *) __deprecated;
 
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
189,6 → 192,9
unsigned short addr;
void *platform_data;
struct dev_archdata *archdata;
#ifdef CONFIG_OF
struct device_node *of_node;
#endif
int irq;
};
 
/drivers/include/linux/slab.h
1,14 → 1,3
/*
* Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
*
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
*/
 
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
 
#include <errno.h>
// stub
#endif /* _LINUX_SLAB_H */
/drivers/include/linux/mod_devicetable.h
33,7 → 33,8
__u32 model_id;
__u32 specifier_id;
__u32 version;
kernel_ulong_t driver_data;
kernel_ulong_t driver_data
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
 
146,7 → 147,8
__u16 group;
__u32 vendor;
__u32 product;
kernel_ulong_t driver_data;
kernel_ulong_t driver_data
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
/* s390 CCW devices */
170,6 → 172,8
struct ap_device_id {
__u16 match_flags; /* which fields to match against */
__u8 dev_type; /* device type */
__u8 pad1;
__u32 pad2;
kernel_ulong_t driver_info;
};
 
179,10 → 183,13
struct css_device_id {
__u8 match_flags;
__u8 type; /* subchannel type */
__u16 pad2;
__u32 pad3;
kernel_ulong_t driver_data;
};
 
#define ACPI_ID_LEN 9
#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */
/* to work around crosscompile issues */
 
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
223,7 → 230,11
char name[32];
char type[32];
char compatible[128];
#ifdef __KERNEL__
const void *data;
#else
kernel_ulong_t data;
#endif
};
 
/* VIO */
248,14 → 259,24
/* for pseudo multi-function devices */
__u8 device_no;
 
__u32 prod_id_hash[4];
__u32 prod_id_hash[4]
__attribute__((aligned(sizeof(__u32))));
 
/* not matched against in kernelspace*/
#ifdef __KERNEL__
const char * prod_id[4];
#else
kernel_ulong_t prod_id[4]
__attribute__((aligned(sizeof(kernel_ulong_t))));
#endif
 
/* not matched against */
kernel_ulong_t driver_info;
#ifdef __KERNEL__
char * cisfile;
#else
kernel_ulong_t cisfile;
#endif
};
 
#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
351,7 → 372,8
__u8 class; /* Standard interface or SDIO_ANY_ID */
__u16 vendor; /* Vendor or SDIO_ANY_ID */
__u16 device; /* Device ID or SDIO_ANY_ID */
kernel_ulong_t driver_data; /* Data private to the driver */
kernel_ulong_t driver_data /* Data private to the driver */
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
/* SSB core, see drivers/ssb/ */
359,8 → 381,7
__u16 vendor;
__u16 coreid;
__u8 revision;
__u8 __pad;
} __attribute__((packed, aligned(2)));
};
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
376,7 → 397,7
__u16 id;
__u8 rev;
__u8 class;
} __attribute__((packed,aligned(2)));
};
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
393,23 → 414,6
};
#define VIRTIO_DEV_ANY_ID 0xffffffff
 
/*
* For Hyper-V devices we use the device guid as the id.
*/
struct hv_vmbus_device_id {
__u8 guid[16];
kernel_ulong_t driver_data; /* Data private to the driver */
};
 
/* rpmsg */
 
#define RPMSG_NAME_SIZE 32
#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s"
 
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
};
 
/* i2c */
 
#define I2C_NAME_SIZE 20
417,7 → 421,8
 
struct i2c_device_id {
char name[I2C_NAME_SIZE];
kernel_ulong_t driver_data; /* Data private to the driver */
kernel_ulong_t driver_data /* Data private to the driver */
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
/* spi */
427,7 → 432,8
 
struct spi_device_id {
char name[SPI_NAME_SIZE];
kernel_ulong_t driver_data; /* Data private to the driver */
kernel_ulong_t driver_data /* Data private to the driver */
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
/* dmi */
455,12 → 461,20
};
 
struct dmi_strmatch {
unsigned char slot:7;
unsigned char exact_match:1;
unsigned char slot;
char substr[79];
};
 
#ifndef __KERNEL__
struct dmi_system_id {
kernel_ulong_t callback;
kernel_ulong_t ident;
struct dmi_strmatch matches[4];
kernel_ulong_t driver_data
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
#else
struct dmi_system_id {
int (*callback)(const struct dmi_system_id *);
const char *ident;
struct dmi_strmatch matches[4];
473,9 → 487,9
* error: storage size of '__mod_dmi_device_table' isn't known
*/
#define dmi_device_id dmi_system_id
#endif
 
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
#define DMI_MATCH(a, b) { a, b }
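DMI_MATCH() entries are normally collected into a dmi_system_id table; a
hedged sketch (the ident and match strings are placeholders):

	static const struct dmi_system_id my_quirk_table[] = {
		{
			.ident = "Example Board",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
			},
		},
		{ }	/* terminating entry */
	};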
 
#define PLATFORM_NAME_SIZE 20
#define PLATFORM_MODULE_PREFIX "platform:"
482,7 → 496,8
 
struct platform_device_id {
char name[PLATFORM_NAME_SIZE];
kernel_ulong_t driver_data;
kernel_ulong_t driver_data
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
 
#define MDIO_MODULE_PREFIX "mdio:"
527,74 → 542,4
kernel_ulong_t driver_data; /* data private to the driver */
};
 
/**
* struct amba_id - identifies a device on an AMBA bus
* @id: The significant bits if the hardware device ID
* @mask: Bitmask specifying which bits of the id field are significant when
* matching. A driver binds to a device when ((hardware device ID) & mask)
* == id.
* @data: Private data used by the driver.
*/
struct amba_id {
unsigned int id;
unsigned int mask;
void *data;
};
 
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
*/
 
struct x86_cpu_id {
__u16 vendor;
__u16 family;
__u16 model;
__u16 feature; /* bit index */
kernel_ulong_t driver_data;
};
 
#define X86_FEATURE_MATCH(x) \
{ X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
 
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
 
#define IPACK_ANY_FORMAT 0xff
#define IPACK_ANY_ID (~0)
struct ipack_device_id {
__u8 format; /* Format version or IPACK_ANY_ID */
__u32 vendor; /* Vendor ID or IPACK_ANY_ID */
__u32 device; /* Device ID or IPACK_ANY_ID */
};
 
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
 
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
kernel_ulong_t driver_info;
};
 
/* RapidIO */
 
#define RIO_ANY_ID 0xffff
 
/**
* struct rio_device_id - RIO device identifier
* @did: RapidIO device ID
* @vid: RapidIO vendor ID
* @asm_did: RapidIO assembly device ID
* @asm_vid: RapidIO assembly vendor ID
*
* Identifies a RapidIO device based on both the device/vendor IDs and
* the assembly device/vendor IDs.
*/
struct rio_device_id {
__u16 did, vid;
__u16 asm_did, asm_vid;
};
 
#endif /* LINUX_MOD_DEVICETABLE_H */
/drivers/include/linux/string.h
142,15 → 142,4
 
extern size_t memweight(const void *ptr, size_t bytes);
 
/**
* kbasename - return the last part of a pathname.
*
* @path: path to extract the filename from.
*/
static inline const char *kbasename(const char *path)
{
const char *tail = strrchr(path, '/');
return tail ? tail + 1 : path;
}
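For example (a worked one-liner, not part of the original header):

	const char *name = kbasename("/drivers/include/drm/drmP.h");	/* "drmP.h" */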
 
#endif /* _LINUX_STRING_H_ */
/drivers/include/linux/math64.h
7,7 → 7,6
#if BITS_PER_LONG == 64
 
#define div64_long(x,y) div64_s64((x),(y))
#define div64_ul(x, y) div64_u64((x), (y))
 
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
49,7 → 48,6
#elif BITS_PER_LONG == 32
 
#define div64_long(x,y) div_s64((x),(y))
#define div64_ul(x, y) div_u64((x), (y))
 
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/drivers/include/linux/ctype.h
61,10 → 61,4
return c | 0x20;
}
 
/* Fast check for octal digit */
static inline int isodigit(const char c)
{
return c >= '0' && c <= '7';
}
 
#endif
/drivers/include/linux/spinlock_up.h
14,10 → 14,7
* In the debug case, 1 means unlocked, 0 means locked. (the values
* are inverted, to catch initialization bugs)
*
* No atomicity anywhere, we are on UP. However, we still need
* the compiler barriers, because we do not want the compiler to
* move potentially faulting instructions (notably user accesses)
* into the locked sequence, resulting in non-atomic execution.
* No atomicity anywhere, we are on UP.
*/
 
#ifdef CONFIG_DEBUG_SPINLOCK