Subversion Repositories Kolibri OS

Compare Revisions

Rev 3242 → Rev 3243

/drivers/include/drm/drmP.h
60,6 → 60,7
//#include <linux/file.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/irqreturn.h>
//#include <linux/smp_lock.h> /* For (un)lock_kernel */
//#include <linux/dma-mapping.h>
//#include <linux/mm.h>
170,7 → 171,7
/** \name Begin the DRM... */
/*@{*/
 
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
#define DRM_DEBUG_CODE 0 /**< Include debugging code if > 1, then
also include looping detection. */
 
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
965,6 → 966,15
 
#endif
 
#define DRM_IRQ_ARGS int irq, void *arg
 
struct drm_driver {
    irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
    void (*irq_preinstall) (struct drm_device *dev);
    int (*irq_postinstall) (struct drm_device *dev);
};
 
 
#define DRM_MINOR_UNASSIGNED 0
#define DRM_MINOR_LEGACY 1
#define DRM_MINOR_CONTROL 2
1172,7 → 1182,7
// struct drm_sigdata sigdata; /**< For block_all_signals */
// sigset_t sigmask;
 
// struct drm_driver *driver;
struct drm_driver *driver;
// struct drm_local_map *agp_buffer_map;
// unsigned int agp_buffer_token;
// struct drm_minor *control; /**< Control node for card */
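Taken together, these drmP.h hunks re-enable dev->driver and route interrupts through the three new hooks. A minimal sketch of how a driver could wire them up; all my_* names are hypothetical, not part of this revision:

static irqreturn_t my_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = arg;   /* arg comes from DRM_IRQ_ARGS */
    /* read and acknowledge the hardware interrupt source here */
    return IRQ_HANDLED;
}

static void my_irq_preinstall(struct drm_device *dev)
{
    /* mask all interrupt sources before the handler goes live */
}

static int my_irq_postinstall(struct drm_device *dev)
{
    /* unmask the sources the driver actually services */
    return 0;
}

static struct drm_driver my_driver = {
    .irq_handler     = my_irq_handler,
    .irq_preinstall  = my_irq_preinstall,
    .irq_postinstall = my_irq_postinstall,
};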
/drivers/include/drm/drm_global.h
0,0 → 1,53
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#ifndef _DRM_GLOBAL_H_
#define _DRM_GLOBAL_H_
enum drm_global_types {
    DRM_GLOBAL_TTM_MEM = 0,
    DRM_GLOBAL_TTM_BO,
    DRM_GLOBAL_TTM_OBJECT,
    DRM_GLOBAL_NUM
};

struct drm_global_reference {
    enum drm_global_types global_type;
    size_t size;
    void *object;
    int (*init) (struct drm_global_reference *);
    void (*release) (struct drm_global_reference *);
};
 
extern void drm_global_init(void);
extern void drm_global_release(void);
extern int drm_global_item_ref(struct drm_global_reference *ref);
extern void drm_global_item_unref(struct drm_global_reference *ref);
 
#endif
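For context, the usual calling pattern mirrors what TTM drivers do in Linux; the my_* names and the size value are illustrative assumptions, not declared in this header:

static int my_global_init(struct drm_global_reference *ref)
{
    /* construct the shared object behind ref->object once */
    return 0;
}

static void my_global_release(struct drm_global_reference *ref)
{
    /* destroy the shared object when the last user unrefs it */
}

static struct drm_global_reference mem_ref = {
    .global_type = DRM_GLOBAL_TTM_MEM,
    .size        = 128,   /* sizeof() of the shared object in real code */
    .init        = my_global_init,
    .release     = my_global_release,
};

/* drm_global_item_ref(&mem_ref) creates the object on first use and
 * refcounts it afterwards; drm_global_item_unref(&mem_ref) drops it. */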
/drivers/include/drm/intel-gtt.h
19,6 → 19,7
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
struct page *scratch_page;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
32,7 → 33,8
bool intel_enable_gtt(void);
 
void intel_gtt_chipset_flush(void);
void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start,
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 
/drivers/include/linux/asm/scatterlist.h
0,0 → 1,34
#ifndef __ASM_GENERIC_SCATTERLIST_H
#define __ASM_GENERIC_SCATTERLIST_H
 
#include <linux/types.h>
 
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
    unsigned long sg_magic;
#endif
    unsigned long page_link;
    unsigned int offset;
    unsigned int length;
    dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
    unsigned int dma_length;
#endif
};
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
 
#endif /* __ASM_GENERIC_SCATTERLIST_H */
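A sketch of the access pattern the comment above prescribes, assuming an unchained list already mapped by pci_map_sg()/dma_map_sg(); program_segment() is a hypothetical consumer:

static void walk_mapped_sg(struct scatterlist *sgl, int mapped_nents)
{
    struct scatterlist *sg = sgl;   /* sg++ is valid only without chaining */
    int i;

    for (i = 0; i < mapped_nents; i++, sg++) {
        if (sg_dma_len(sg) == 0)
            break;                  /* the alternative stop condition */
        /* program_segment(sg_dma_address(sg), sg_dma_len(sg)); */
    }
}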
/drivers/include/linux/compiler-gcc4.h
63,3 → 63,13
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if __GNUC_MINOR__ >= 4
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif
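These defines let byte-swap helpers pick the compiler builtin when GCC is new enough. An illustrative fallback pattern (my_swab32 is not from this revision):

static inline u32 my_swab32(u32 x)
{
#ifdef __HAVE_BUILTIN_BSWAP32__
    return __builtin_bswap32(x);    /* a single bswap insn on x86 */
#else
    return (x << 24) | ((x & 0x0000ff00) << 8) |
           ((x & 0x00ff0000) >> 8) | (x >> 24);
#endif
}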
/drivers/include/linux/compiler.h
10,6 → 10,7
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
33,6 → 34,7
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
42,6 → 44,10
# define __rcu
#endif
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
 
#ifdef __KERNEL__
 
#ifdef __GNUC__
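Stepping back from the hunks above: the new __must_hold(x) annotation tells sparse that a function is entered and exited with lock x held, complementing __acquires()/__releases() for the asymmetric cases. A hypothetical example (struct my_dev is invented for illustration):

struct my_dev {
    spinlock_t lock;
    int count;
};

/* sparse warns if a caller can reach this without md->lock held */
static void counter_bump(struct my_dev *md) __must_hold(&md->lock)
{
    md->count++;
}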
/drivers/include/linux/err.h
0,0 → 1,65
#ifndef _LINUX_ERR_H
#define _LINUX_ERR_H
 
#include <linux/compiler.h>
 
#include <errno.h>
 
/*
* Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a dentry
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
* error and pointer decisions.
*/
#define MAX_ERRNO 4095
 
#ifndef __ASSEMBLY__
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
 
static inline long __must_check PTR_ERR(const void *ptr)
{
return (long) ptr;
}
 
static inline long __must_check IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
 
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
*
* Explicitly cast an error-valued pointer to another pointer type in such a
* way as to make it clear that's what's going on.
*/
static inline void * __must_check ERR_CAST(const void *ptr)
{
/* cast away the const */
return (void *) ptr;
}
 
static inline int __must_check PTR_RET(const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
 
#endif
 
#endif /* _LINUX_ERR_H */
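A minimal sketch of the convention these helpers encode: a function returns either a real pointer or a negated errno packed into the pointer value (my_lookup() and some_object are hypothetical):

static int some_object;

static void *my_lookup(int id)
{
    if (id < 0)
        return ERR_PTR(-EINVAL);    /* the errno travels in the pointer */
    return &some_object;
}

static int use_it(void)
{
    void *p = my_lookup(7);

    if (IS_ERR(p))
        return PTR_ERR(p);          /* recover the errno */
    /* ... p is safe to dereference here ... */
    return 0;
}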
/drivers/include/linux/i2c.h
172,6 → 172,7
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @acpi_node: ACPI device node
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
/drivers/include/linux/kernel.h
331,47 → 331,15
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
 
 
struct scatterlist {
    unsigned long page_link;
    unsigned int offset;
    unsigned int length;
    dma_addr_t dma_address;
    unsigned int dma_length;
};

struct sg_table {
    struct scatterlist *sgl;        /* the list */
    unsigned int nents;             /* number of mapped entries */
    unsigned int orig_nents;        /* original size of list */
};
 
#define SG_MAX_SINGLE_ALLOC (4096 / sizeof(struct scatterlist))
 
struct scatterlist *sg_next(struct scatterlist *sg);
 
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
 
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
static inline addr_t sg_page(struct scatterlist *sg)
{
return (addr_t)((sg)->page_link & ~0x3);
}
 
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
 
 
struct page
{
    unsigned int addr;
};
 
#define page_to_phys(page) ((dma_addr_t)(page))
 
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
390,5 → 358,9
unsigned int nents;
};
 
#define page_cache_release(page) FreePage((addr_t)(page))
 
#define alloc_page(gfp_mask) (struct page*)AllocPage()
 
#endif
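The two new macros route Linux-style page calls onto KolibriOS primitives, so ported code compiles unchanged. Illustrative use (the shim ignores gfp_mask; GFP_KERNEL is assumed to be defined elsewhere in the port):

static void page_shim_demo(void)
{
    struct page *pg = alloc_page(GFP_KERNEL);   /* expands to AllocPage() */

    /* page_to_phys(pg) yields the page's physical address */
    page_cache_release(pg);                     /* expands to FreePage() */
}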
 
/drivers/include/linux/lockdep.h
498,14 → 498,17
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif
/drivers/include/linux/mm.h
0,0 → 1,0
 
/drivers/include/linux/module.h
9,6 → 9,7
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
 
 
 
/drivers/include/linux/rbtree.h
0,0 → 1,84
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
 
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
linux/include/linux/rbtree.h
 
To use rbtrees you'll have to implement your own insert and search cores.
This avoids callbacks, which would hurt performance dramatically.
It's not the cleanest way, but it's how you get both performance and
genericity in C (as opposed to C++)...
 
See Documentation/rbtree.txt for documentation and samples.
*/
 
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
 
#include <linux/kernel.h>
#include <linux/stddef.h>
 
struct rb_node {
    unsigned long __rb_parent_color;
    struct rb_node *rb_right;
    struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
    struct rb_node *rb_node;
};
 
 
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
 
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
((node)->__rb_parent_color = (unsigned long)(node))
 
 
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
 
 
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
 
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
 
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
 
*rb_link = node;
}
 
#endif /* _LINUX_RBTREE_H */
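The insert core the header comment asks each user to supply, adapted from the kernel's Documentation/rbtree.txt (struct mytype and my_insert() are illustrative):

struct mytype {
    struct rb_node node;
    int key;
};

static int my_insert(struct rb_root *root, struct mytype *data)
{
    struct rb_node **new = &root->rb_node, *parent = NULL;

    /* descend to the leaf position that keeps the tree ordered */
    while (*new) {
        struct mytype *this = rb_entry(*new, struct mytype, node);

        parent = *new;
        if (data->key < this->key)
            new = &(*new)->rb_left;
        else if (data->key > this->key)
            new = &(*new)->rb_right;
        else
            return 0;               /* duplicate key: refuse */
    }

    /* link the new node, then rebalance/recolor */
    rb_link_node(&data->node, parent, new);
    rb_insert_color(&data->node, root);
    return 1;
}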
/drivers/include/linux/rwlock.h
0,0 → 1,125
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
 
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
 
/*
* rwlock related methods
*
* split out from spinlock.h
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
 
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key);
# define rwlock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key); \
} while (0)
#else
# define rwlock_init(lock) \
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
#endif
 
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_lock_flags(lock, flags) \
do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_lock_flags(lock, flags) \
do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
 
#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
 
/*
* Define the various rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
 
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_write_lock_irqsave(lock); \
} while (0)
 
#else
 
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_read_lock_irqsave(lock, flags); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_write_lock_irqsave(lock, flags); \
} while (0)
 
#endif
 
#define read_lock_irq(lock) _raw_read_lock_irq(lock)
#define read_lock_bh(lock) _raw_read_lock_bh(lock)
#define write_lock_irq(lock) _raw_write_lock_irq(lock)
#define write_lock_bh(lock) _raw_write_lock_bh(lock)
#define read_unlock(lock) _raw_read_unlock(lock)
#define write_unlock(lock) _raw_write_unlock(lock)
#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
 
#define read_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_read_unlock_irqrestore(lock, flags); \
} while (0)
#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
 
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_write_unlock_irqrestore(lock, flags); \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
 
#define write_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
 
#endif /* __LINUX_RWLOCK_H */
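A minimal usage sketch under the many-readers/one-writer model these macros implement (my_lock and the helpers are illustrative):

static rwlock_t my_lock;    /* run rwlock_init(&my_lock) once at setup */
static int shared_value;

static int read_value(void)
{
    int v;

    read_lock(&my_lock);    /* many readers may hold the lock at once */
    v = shared_value;
    read_unlock(&my_lock);
    return v;
}

static void write_value(int v)
{
    write_lock(&my_lock);   /* a writer excludes readers and writers */
    shared_value = v;
    write_unlock(&my_lock);
}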
/drivers/include/linux/scatterlist.h
0,0 → 1,274
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H
 
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>
 
#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
 
struct sg_table {
    struct scatterlist *sgl;        /* the list */
    unsigned int nents;             /* number of mapped entries */
    unsigned int orig_nents;        /* original size of list */
};
 
/*
* Notes on SG table design.
*
* Architectures must provide an unsigned long page_link field in the
* scatterlist struct. We use that to place the page pointer AND encode
* information about the sg table as well. The two lower bits are reserved
* for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
*
* If bit 1 is set, then this sg entry is the last element in a list.
*
* See sg_next().
*
*/
 
#define SG_MAGIC 0x87654321
 
/*
* We overload the LSB of the page pointer to indicate whether it's
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
/**
* sg_assign_page - Assign a given page to an SG entry
* @sg: SG entry
* @page: The page
*
* Description:
* Assign page to sg entry. Also see sg_set_page(), the most commonly used
* variant.
*
**/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
unsigned long page_link = sg->page_link & 0x3;
 
/*
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
}
 
/**
* sg_set_page - Set sg entry to point at given page
* @sg: SG entry
* @page: The page
* @len: Length of data
* @offset: Offset into page
*
* Description:
* Use this function to set an sg entry pointing at a page, never assign
* the page directly. We encode sg table information in the lower bits
* of the page pointer. See sg_page() for looking up the page belonging
* to an sg entry.
*
**/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
}
 
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~0x3);
}
 
/**
* sg_set_buf - Set sg entry to point at given data
* @sg: SG entry
* @buf: Data
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
*/
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
* @prv_nents: Number of entries in prv
* @sgl: Second scatterlist
*
* Description:
* Links @prv@ and @sgl@ together, to form a longer scatterlist.
*
**/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
BUG();
#endif
 
/*
* offset and length are unused for chain entry. Clear them.
*/
prv[prv_nents - 1].offset = 0;
prv[prv_nents - 1].length = 0;
 
/*
* Set lowest bit to indicate a link pointer, and make sure to clear
* the termination bit if it happens to be set.
*/
prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
 
/**
* sg_mark_end - Mark the end of the scatterlist
* @sg: SG entry
*
* Description:
* Marks the passed in sg entry as the termination point for the sg
* table. A call to sg_next() on this entry will return NULL.
*
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
/*
* Set termination bit, clear potential chain bit
*/
sg->page_link |= 0x02;
sg->page_link &= ~0x01;
}
 
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
*
* Description:
* This calls page_to_phys() on the page in this sg entry, and adds the
* sg offset. The caller must know that it is legal to call page_to_phys()
* on the sg page.
*
**/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
return page_to_phys(sg_page(sg)) + sg->offset;
}
 
/**
* sg_virt - Return virtual address of an sg entry
* @sg: SG entry
*
* Description:
* This calls page_address() on the page in this sg entry, and adds the
* sg offset. The caller must know that the sg page has a valid virtual
* mapping.
*
**/
//static inline void *sg_virt(struct scatterlist *sg)
//{
// return page_address(sg_page(sg)) + sg->offset;
//}
 
int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
 
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
 
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.
*/
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
 
 
/*
* Mapping sg iterator
*
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
* long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
* can be adjusted if the user can't consume all the bytes in one go.
* Also, a stopped iteration can be resumed by calling next on it.
* This is useful when iteration needs to release all resources and
* continue later (e.g. at the next interrupt).
*/
 
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
 
struct sg_mapping_iter {
    /* the following three fields can be accessed directly */
    struct page *page;              /* currently mapped page */
    void *addr;                     /* pointer to the mapped area */
    size_t length;                  /* length of the mapped area */
    size_t consumed;                /* number of consumed bytes */

    /* these are internal states, keep away */
    struct scatterlist *__sg;       /* current entry */
    unsigned int __nents;           /* nr of remaining entries */
    unsigned int __offset;          /* offset within sg */
    unsigned int __flags;
};
 
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
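A sketch of the start/next/stop protocol described above, copying out of a scatterlist roughly the way sg_copy_to_buffer() does internally (copy_out() is hypothetical; memcpy comes from linux/string.h):

static void copy_out(struct scatterlist *sgl, unsigned int nents,
                     void *dst, size_t len)
{
    struct sg_mapping_iter miter;
    size_t copied = 0;

    sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
    while (copied < len && sg_miter_next(&miter)) {
        size_t n = miter.length;

        if (n > len - copied)
            n = len - copied;       /* partial consumption is allowed */
        memcpy((char *)dst + copied, miter.addr, n);
        miter.consumed = n;         /* tell the iterator what we took */
        copied += n;
    }
    sg_miter_stop(&miter);
}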
 
#endif /* _LINUX_SCATTERLIST_H */
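And building a table over an array of pages, mirroring (minus the contiguous-page merging) what sg_alloc_table_from_pages() automates; build_table() is hypothetical, with PAGE_SIZE and GFP_KERNEL assumed from the port:

static int build_table(struct sg_table *st, struct page **pages, int n)
{
    struct scatterlist *sg;
    int i, ret;

    ret = sg_alloc_table(st, n, GFP_KERNEL);    /* also marks the end */
    if (ret)
        return ret;

    for_each_sg(st->sgl, sg, n, i)
        sg_set_page(sg, pages[i], PAGE_SIZE, 0);

    return 0;
}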
/drivers/include/linux/slab.h
1,2 → 1,3
 
#include <errno.h>
// stub
/drivers/include/linux/spinlock_types.h
17,7 → 17,7
 
#include <linux/lockdep.h>
 
typedef struct {
typedef struct spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
/drivers/include/syscall.h
516,11 → 516,11
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 
static void init_rwsem(struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
//static void init_rwsem(struct rw_semaphore *sem)
//{
// sem->count = RWSEM_UNLOCKED_VALUE;
// spin_lock_init(&sem->wait_lock);
// INIT_LIST_HEAD(&sem->wait_list);
//}
 
#endif