Subversion Repositories Kolibri OS

kernel.h: diff between Rev 3747 and Rev 4103 (the two revisions are shown merged below; the hunks that differ are marked inline).
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H

/*
 * 'kernel.h' contains some often-used function prototypes etc
 */

#ifdef __KERNEL__

#include 
#include 
#include 
#include 
#include 

#include 

#define __init

#define USHRT_MAX	((u16)(~0U))
#define SHRT_MAX	((s16)(USHRT_MAX>>1))
#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
#define INT_MAX     ((int)(~0U>>1))
#define INT_MIN     (-INT_MAX - 1)
#define UINT_MAX    (~0U)
#define LONG_MAX    ((long)(~0UL>>1))
#define LONG_MIN    (-LONG_MAX - 1)
#define ULONG_MAX   (~0UL)
#define LLONG_MAX   ((long long)(~0ULL>>1))
#define LLONG_MIN   (-LLONG_MAX - 1)
#define ULLONG_MAX  (~0ULL)
#define SIZE_MAX	(~(size_t)0)

#define ALIGN(x,a)      __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask)    (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a)     ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)
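
/*
 * Usage sketch (illustrative, not from the original header): ALIGN() rounds a
 * value up to the next multiple of a power-of-two alignment, e.g.
 * ALIGN(13, 8) == 16, and IS_ALIGNED(16, 8) is true.  PTR_ALIGN() applies the
 * same rounding to a pointer by going through unsigned long; the helper below
 * is hypothetical.
 */
static inline void *align_ptr_sketch(void *p)
{
    /* round p up to the next 16-byte boundary */
    return PTR_ALIGN(p, 16);
}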


#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)

/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
#define roundup(x, y) (                                 \
{                                                       \
        const typeof(y) __y = y;                        \
        (((x) + (__y - 1)) / __y) * __y;                \
}                                                       \
)
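
/*
 * Usage sketch (illustrative only): round_up() assumes a power-of-two 'y' and
 * works with a mask, e.g. round_up(100, 16) == 112, while roundup() accepts
 * any divisor at the cost of a division, e.g. roundup(100, 24) == 120.  The
 * helper below is hypothetical.
 */
static inline unsigned long pad_to_records_sketch(unsigned long bytes)
{
    /* pad a byte count to whole 24-byte records (24 is not a power of two) */
    return roundup(bytes, 24);
}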

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* DIV_ROUND_UP_ULL added in Rev 4103: */
#define DIV_ROUND_UP_ULL(ll,d) \
        ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
#define DIV_ROUND_CLOSEST(x, divisor)(                  \
{                                                       \
         typeof(divisor) __divisor = divisor;            \
         (((x) + ((__divisor) / 2)) / (__divisor));      \
}                                                       \
)
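
/*
 * Usage sketch (illustrative only): DIV_ROUND_UP() always rounds up, e.g.
 * DIV_ROUND_UP(10, 4) == 3, while DIV_ROUND_CLOSEST() rounds to the nearest
 * integer, e.g. DIV_ROUND_CLOSEST(10, 4) == 3 but DIV_ROUND_CLOSEST(9, 4) == 2.
 * DIV_ROUND_UP_ULL() is the 64-bit variant built on do_div().  The helper
 * below is hypothetical.
 */
static inline unsigned int pages_for_bytes_sketch(unsigned int bytes)
{
    /* number of 4096-byte pages needed to cover 'bytes' */
    return DIV_ROUND_UP(bytes, 4096);
}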


/* clamp_t() added in Rev 4103: */
#define clamp_t(type, val, min, max) ({         \
        type __val = (val);                     \
        type __min = (min);                     \
        type __max = (max);                     \
        __val = __val < __min ? __min: __val;   \
        __val > __max ? __max: __val; })


/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)(n))
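
/*
 * Usage sketch (illustrative only, assuming the u32/u64 typedefs come from
 * the includes above): the double 16-bit shift in upper_32_bits() avoids a
 * ">> 32" that would be undefined when the argument happens to be 32-bit.
 */
static inline void split_u64_sketch(u64 addr, u32 *hi, u32 *lo)
{
    *hi = upper_32_bits(addr);   /* bits 63..32 */
    *lo = lower_32_bits(addr);   /* bits 31..0  */
}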

#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */
extern const char hex_asc[];
#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]

static inline char *pack_hex_byte(char *buf, u8 byte)
{
	*buf++ = hex_asc_hi(byte);
	*buf++ = hex_asc_lo(byte);
	return buf;
}
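
/*
 * Usage sketch (illustrative only, assuming the usual lowercase hex_asc
 * table): pack_hex_byte() emits the high nibble first and returns the
 * advanced buffer pointer, so calls can be chained.
 */
static inline void hex_byte_sketch(char out[3])
{
    char *p = out;
    p = pack_hex_byte(p, 0x3f);   /* out[0] = '3', out[1] = 'f' */
    *p = '\0';                    /* out now holds the string "3f" */
}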

enum {
    DUMP_PREFIX_NONE,
    DUMP_PREFIX_ADDRESS,
    DUMP_PREFIX_OFFSET
};

int hex_to_bin(char ch);
int hex2bin(u8 *dst, const char *src, size_t count);


//int printk(const char *fmt, ...);

#define printk(fmt, arg...)    dbgprintf(fmt , ##arg)


/*
 * min()/max()/clamp() macros that also do
 * strict type-checking.. See the
 * "unnecessary" pointer comparison.
 */
#define min(x, y) ({                \
    typeof(x) _min1 = (x);          \
    typeof(y) _min2 = (y);          \
    (void) (&_min1 == &_min2);      \
    _min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({                \
    typeof(x) _max1 = (x);          \
    typeof(y) _max2 = (y);          \
    (void) (&_max1 == &_max2);      \
    _max1 > _max2 ? _max1 : _max2; })
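
/*
 * Note (illustrative only): the (void)(&_min1 == &_min2) line is the
 * "unnecessary" pointer comparison mentioned above -- comparing pointers to
 * two different types makes the compiler emit a warning, so mixing, say, an
 * int with an unsigned long in min()/max() is caught at build time.  With
 * matching types the comparison is optimised away.
 */
static inline int min_sketch(int a, int b)
{
    return min(a, b);   /* both operands are int, so no warning is produced */
}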

#define min3(x, y, z) ({			\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	typeof(z) _min3 = (z);			\
	(void) (&_min1 == &_min2);		\
	(void) (&_min1 == &_min3);		\
	_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
		(_min2 < _min3 ? _min2 : _min3); })

#define max3(x, y, z) ({			\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	typeof(z) _max3 = (z);			\
	(void) (&_max1 == &_max2);		\
	(void) (&_max1 == &_max3);		\
	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
		(_max2 > _max3 ? _max2 : _max3); })

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({			\
	typeof(x) __x = (x);			\
	typeof(y) __y = (y);			\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })

/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @min: minimum allowable value
 * @max: maximum allowable value
 *
 * This macro does strict typechecking of min/max to make sure they are of the
 * same type as val.  See the unnecessary pointer comparisons.
 */
#define clamp(val, min, max) ({			\
	typeof(val) __val = (val);		\
	typeof(min) __min = (min);		\
	typeof(max) __max = (max);		\
	(void) (&__val == &__min);		\
	(void) (&__val == &__max);		\
	__val = __val < __min ? __min: __val;	\
	__val > __max ? __max: __val; })
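
/*
 * Usage sketch (illustrative only): clamp() requires the value and both
 * bounds to share one type, while clamp_t() (added in Rev 4103) first casts
 * everything to the named type and skips the check.  The helper below is
 * hypothetical.
 */
static inline int clamp_level_sketch(int level)
{
    int lo = 10, hi = 200;
    return clamp(level, lo, hi);   /* all three operands are int */
}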

/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
#define min_t(type, x, y) ({            \
    type __min1 = (x);          \
    type __min2 = (y);          \
    __min1 < __min2 ? __min1: __min2; })

#define max_t(type, x, y) ({            \
    type __max1 = (x);          \
    type __max2 = (y);          \
    __max1 > __max2 ? __max1: __max2; })

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:    the pointer to the member.
 * @type:   the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({          \
    const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
    (type *)( (char *)__mptr - offsetof(type,member) );})
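
/*
 * Usage sketch (illustrative only; struct item_sketch and the helper are
 * hypothetical): container_of() recovers the enclosing object from a pointer
 * to one of its members by subtracting the member's offset within the type.
 */
struct item_sketch {
    int              value;
    struct list_head link;
};

static inline struct item_sketch *item_from_link_sketch(struct list_head *l)
{
    return container_of(l, struct item_sketch, link);
}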


static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
        if (n != 0 && size > ULONG_MAX / n)
                return NULL;
        return kzalloc(n * size, 0);
}
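
/*
 * Note (illustrative only): the ULONG_MAX / n test above is an overflow
 * guard -- without it a large 'n' could make n * size wrap around and
 * silently allocate far less memory than requested.  The helper below is
 * hypothetical.
 */
static inline void *alloc_entries_sketch(size_t entries)
{
    /* zeroed array of 64-byte entries, or NULL if the size would overflow */
    return kcalloc(entries, 64, 0);
}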


void free (void *ptr);

#endif /* __KERNEL__ */

typedef unsigned long   pgprotval_t;

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

struct file
{
    struct page  **pages;         /* physical memory backend */
    unsigned int   count;
    unsigned int   allocated;
    void           *vma;
};

struct vm_area_struct {};
struct address_space {};

struct device
{
    struct device   *parent;
    void            *driver_data;
};

static inline void dev_set_drvdata(struct device *dev, void *data)
{
    dev->driver_data = data;
}

static inline void *dev_get_drvdata(struct device *dev)
{
    return dev->driver_data;
}

#define preempt_disable()       do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable()        do { } while (0)
#define preempt_check_resched()     do { } while (0)

#define preempt_disable_notrace()       do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace()        do { } while (0)

#define in_dbg_master() (0)

#define HZ 100

/* time_after() is present in Rev 3747 only; removed in Rev 4103: */
#define time_after(a,b)         \
        (typecheck(unsigned long, a) && \
        typecheck(unsigned long, b) && \
        ((long)(b) - (long)(a) < 0))
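
/*
 * Note (illustrative only, assuming 'jiffies' is the global tick counter):
 * time_after() compares tick counts through a signed subtraction, so it keeps
 * giving the right answer even after the unsigned counter wraps around.  The
 * usual pattern looks like:
 *
 *     unsigned long timeout = jiffies + HZ;      // one second from now
 *     if (time_after(jiffies, timeout))
 *             ;                                  // the deadline has passed
 */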

struct tvec_base;

struct timer_list {
         struct list_head entry;
         unsigned long expires;

         void (*function)(unsigned long);
         unsigned long data;

//         struct tvec_base *base;
};

struct timespec {
    long tv_sec;                 /* seconds */
    long tv_nsec;                /* nanoseconds */
};


#define build_mmio_read(name, size, type, reg, barrier)     \
static inline type name(const volatile void __iomem *addr)  \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret)      \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

static inline __u64 readq(const volatile void __iomem *addr)
{
        const volatile u32 __iomem *p = addr;
        u32 low, high;

        low = readl(p);
        high = readl(p + 1);

        return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr+4);
}
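
/*
 * Usage sketch (illustrative only; the register offset is hypothetical and
 * the __iomem base is assumed to come from a mapping made elsewhere):
 * build_mmio_read()/build_mmio_write() expand into the readb/readw/readl and
 * writeb/writew/writel accessors above, and readq()/writeq() emulate 64-bit
 * access as two 32-bit halves, low word first.
 */
static inline u64 read_counter_sketch(void __iomem *regs)
{
    /* read a 64-bit counter register at offset 0x10 as two 32-bit loads */
    return readq(regs + 0x10);
}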

#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)


#define mmiowb() barrier()

#define dev_err(dev, format, arg...)            \
        printk("Error %s " format, __func__ , ## arg)

#define dev_warn(dev, format, arg...)            \
        printk("Warning %s " format, __func__ , ## arg)

#define dev_info(dev, format, arg...)       \
        printk("Info %s " format , __func__, ## arg)

//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON(condition)

struct page
{
    unsigned int addr;
};

#define page_to_phys(page)    ((dma_addr_t)(page))

struct vm_fault {
    unsigned int flags;             /* FAULT_FLAG_xxx flags */
    pgoff_t pgoff;                  /* Logical page offset based on vma */
    void __user *virtual_address;   /* Faulting virtual address */

    struct page *page;              /* ->fault handlers should return a
                                     * page here, unless VM_FAULT_NOPAGE
                                     * is set (which is also implied by
                                     * VM_FAULT_ERROR).
                                     */
};

struct pagelist {
    dma_addr_t    *page;
    unsigned int   nents;
};

#define page_cache_release(page)        FreePage(page_to_phys(page))

#define alloc_page(gfp_mask) (struct page*)AllocPage()

#define __free_page(page) FreePage(page_to_phys(page))

#define get_page(a)
#define put_page(a)
#define set_pages_uc(a,b)
#define set_pages_wb(a,b)

#define pci_map_page(dev, page, offset, size, direction) \
        (dma_addr_t)( (offset)+page_to_phys(page))

#define pci_unmap_page(dev, dma_address, size, direction)

#define GFP_TEMPORARY  0
#define __GFP_NOWARN   0
#define __GFP_NORETRY  0
#define GFP_NOWAIT     0

#define IS_ENABLED(a)  0


#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define RCU_INIT_POINTER(p, v) \
        do { \
                p = (typeof(*v) __force __rcu *)(v); \
        } while (0)


#define rcu_dereference_raw(p)  ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                (_________p1); \
                                })
#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                (p) = (v); \
        })


unsigned int hweight16(unsigned int w);

#define cpufreq_quick_get_max(x) GetCpuFreq()

extern unsigned int tsc_khz;

#define on_each_cpu(func,info,wait)             \
        ({                                      \
                func(info);                     \
                0;                              \
        })


#endif