Subversion Repositories Kolibri OS


Rev 5271 → Rev 6104
#include 
#include 
#include 
#include 
#include "radeon.h"

int x86_clflush_size;
unsigned int tsc_khz;

struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if(unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    };

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
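
/*
 * Example (illustrative sketch): memchr_inv() is the inverse of memchr() and
 * is handy for verifying that a buffer is entirely filled with one value,
 * e.g. checking that a scratch buffer is still zeroed:
 *
 *     u8 buf[64] = { 0 };
 *
 *     if (memchr_inv(buf, 0, sizeof(buf)) != NULL)
 *         printk("buffer is not all zeroes\n");
 */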


#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}


//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character represents hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
    if ((ch >= '0') && (ch <= '9'))
        return ch - '0';
    ch = tolower(ch);
    if ((ch >= 'a') && (ch <= 'f'))
        return ch - 'a' + 10;
    return -1;
}
EXPORT_SYMBOL(hex_to_bin);

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if ((hi < 0) || (lo < 0))
            return -1;

        *dst++ = (hi << 4) | lo;
    }
    return 0;
}
EXPORT_SYMBOL(hex2bin);
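
/*
 * Example (illustrative sketch): decoding a 6-byte MAC address from its
 * 12-character hex string with hex2bin(); @count is the number of output
 * bytes, so two input characters are consumed per byte.
 *
 *     u8 mac[6];
 *
 *     if (hex2bin(mac, "0123456789ab", sizeof(mac)) == 0)
 *         printk("%02x..%02x\n", mac[0], mac[5]);    prints "01..ab"
 */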

/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *          linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 */
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
               char *linebuf, size_t linebuflen, bool ascii)
{
    const u8 *ptr = buf;
    int ngroups;
    u8 ch;
    int j, lx = 0;
    int ascii_column;
    int ret;

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    if (len > rowsize)      /* limit to one line at a time */
        len = rowsize;
    if (!is_power_of_2(groupsize) || groupsize > 8)
        groupsize = 1;
    if ((len % groupsize) != 0) /* no mixed size output */
        groupsize = 1;

    ngroups = len / groupsize;
    ascii_column = rowsize * 2 + rowsize / groupsize + 1;

    if (!linebuflen)
        goto overflow1;

    if (!len)
        goto nil;

    if (groupsize == 8) {
        const u64 *ptr8 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%16.16llx", j ? " " : "",
                       (unsigned long long)*(ptr8 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 4) {
        const u32 *ptr4 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%8.8x", j ? " " : "",
                       *(ptr4 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 2) {
        const u16 *ptr2 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%4.4x", j ? " " : "",
                       *(ptr2 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else {
        for (j = 0; j < len; j++) {
            if (linebuflen < lx + 3)
                goto overflow2;
            ch = ptr[j];
            linebuf[lx++] = hex_asc_hi(ch);
            linebuf[lx++] = hex_asc_lo(ch);
            linebuf[lx++] = ' ';
        }
        if (j)
            lx--;
    }
    if (!ascii)
        goto nil;

    while (lx < ascii_column) {
        if (linebuflen < lx + 2)
            goto overflow2;
        linebuf[lx++] = ' ';
    }
    for (j = 0; j < len; j++) {
        if (linebuflen < lx + 2)
            goto overflow2;
        ch = ptr[j];
        linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
    }
nil:
    linebuf[lx] = '\0';
    return lx;
overflow2:
    linebuf[lx++] = '\0';
overflow1:
    return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
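
/*
 * Example (illustrative sketch, reusing the frame/linebuf names from the
 * comment above): in this revision hex_dump_to_buffer() returns the number
 * of characters written to @linebuf (or the length that would have been
 * needed, on overflow) instead of returning void.
 *
 *     char linebuf[32 * 3 + 2 + 32 + 1];
 *     int  n;
 *
 *     n = hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *                            linebuf, sizeof(linebuf), true);
 *     printk("dumped %d chars: %s\n", n, linebuf);
 */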

/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *          16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
            int rowsize, int groupsize,
            const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                   linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                       buf, len, true);
}

#define KMAP_MAX    256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init()
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if(kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
};

void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);

    return vaddr;
};

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    void *vaddr;
    int   i;

    MutexLock(&kmap_mutex);

    for(i = 0; i < KMAP_MAX; i++)
    {
        if(kmap_table[i] == page)
        {
            kmap_av++;
            if(i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr,0,0);
            break;
        };
    };

    MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr,0,0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if(i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}
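
/*
 * Example (illustrative sketch, assuming a destination buffer tmp): the
 * helpers above keep a KMAP_MAX-page window of kernel address space; kmap()
 * keeps retrying until a free slot appears and maps the page into it, and
 * kunmap() unmaps the page and releases the slot again.
 *
 *     void *vaddr = kmap(page);
 *
 *     memcpy(tmp, vaddr, 4096);
 *     kunmap(page);
 */
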
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

};


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}


inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), ""
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);
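
/*
 * Worked example (illustrative): the shifts and masks above fold the bit
 * count through 2-, 4-, 8-, 16- and 32-bit lanes, so e.g.
 * _sw_hweight32(0xF5) yields 6, since 0xF5 = 1111 0101b has six bits set.
 */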


void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);


void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}

void cpu_detect1()
{

    u32 junk, tfms, cap0, misc;
    int i;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
    {
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);

    printf("\n%s\n\n",cpuinfo.model_name);

    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);

    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);

    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };

    unsigned int cr0, cr3, cr4, eflags;

    eflags = safe_cli();

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    cr0 = read_cr0() | (1<<30);
    write_cr0(cr0);
    wbinvd();

    cr4 = read_cr4();
    write_cr4(cr4 & ~(1<<7));

    cr3 = read_cr3();
    write_cr3(cr3);

    /* Save MTRR state */
    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Disable MTRRs, and set the default type to uncached */
    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    wbinvd();

    i = 0;
    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);

    for(; i < cpuinfo.var_mtrr_count; i++)
        set_mtrr(i,0,0,0);

    write_cr3(cr3);

    /* Intel (P6) standard MTRRs */
    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Enable caches */
    write_cr0(read_cr0() & ~(1<<30));

    /* Restore value of CR4 */
    write_cr4(cr4);

    safe_sti(eflags);

    printf("\nnew MTRR map\n\n");

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };
#endif

    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}


static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    amount of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
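
/*
 * Example (illustrative sketch, with a hypothetical ring[] array): a driver
 * with four rings can reserve one fence context per ring in a single call.
 *
 *     unsigned base = fence_context_alloc(4);
 *
 *     ring[0].fence_context = base;
 *     ring[3].fence_context = base + 3;
 */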


int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

//        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);

int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        }// else
//                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);


void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
//                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);



signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

//        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
//        trace_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

//        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);


static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        u32 i;

        fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);

        for (i = 0; i < fobj->shared_count; ++i) {
                struct fence *old_fence;

                old_fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(obj));

                if (old_fence->context == fence->context) {
                        /* memory barrier is added by write_seqcount_begin */
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                        write_seqcount_end(&obj->seq);
                        preempt_enable();

                        fence_put(old_fence);
                        return;
                }
        }

        /*
         * memory barrier is added by write_seqcount_begin,
         * fobj->shared_count is protected by this lock too
         */
        RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
        fobj->shared_count++;

        write_seqcount_end(&obj->seq);
//        preempt_enable();
}



static void
reservation_object_add_shared_replace(struct reservation_object *obj,
                                      struct reservation_object_list *old,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        unsigned i;
        struct fence *old_fence = NULL;

        fence_get(fence);

        if (!old) {
                RCU_INIT_POINTER(fobj->shared[0], fence);
                fobj->shared_count = 1;
                goto done;
        }

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        fobj->shared_count = old->shared_count;

        for (i = 0; i < old->shared_count; ++i) {
                struct fence *check;

                check = rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj));

                if (!old_fence && check->context == fence->context) {
                        old_fence = check;
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                } else
                        RCU_INIT_POINTER(fobj->shared[i], check);
        }
        if (!old_fence) {
                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
                fobj->shared_count++;
        }

done:
//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /*
         * RCU_INIT_POINTER can be used here,
         * seqcount provides the necessary barriers
         */
        RCU_INIT_POINTER(obj->fence, fobj);
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        if (old)
                kfree_rcu(old, rcu);

        if (old_fence)
                fence_put(old_fence);
}


int reservation_object_reserve_shared(struct reservation_object *obj)
{
        struct reservation_object_list *fobj, *old;
        u32 max;

        old = reservation_object_get_list(obj);

        if (old && old->shared_max) {
                if (old->shared_count < old->shared_max) {
                        /* perform an in-place update */
                        kfree(obj->staged);
                        obj->staged = NULL;
                        return 0;
                } else
                        max = old->shared_max * 2;
        } else
                max = 4;

        /*
         * resize obj->staged or allocate if it doesn't exist,
         * noop if already correct size
         */
        fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
                        GFP_KERNEL);
        if (!fobj)
                return -ENOMEM;

        obj->staged = fobj;
        fobj->shared_max = max;
        return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
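
/*
 * Allocation-size sketch (illustration only): the
 * offsetof(typeof(*fobj), shared[max]) idiom above sizes a structure that
 * ends in a flexible array.  struct example_list and EXAMPLE_LIST_SIZE()
 * below are hypothetical and only mirror the idea, not the real layout of
 * struct reservation_object_list.
 */
struct example_list {
        u32 count;
        struct fence __rcu *slots[];
};

/* bytes needed for the header plus n pointer slots */
#define EXAMPLE_LIST_SIZE(n)  offsetof(struct example_list, slots[n])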

void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct fence *fence)
{
        struct reservation_object_list *old, *fobj = obj->staged;

        old = reservation_object_get_list(obj);
        obj->staged = NULL;

        if (!fobj) {
                BUG_ON(old->shared_count >= old->shared_max);
                reservation_object_add_shared_inplace(obj, old, fence);
        } else
                reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
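
/*
 * Caller-side sketch (an assumption, not taken from this file): a typical
 * user reserves a shared slot and then publishes the fence while holding
 * the reservation lock.  example_attach_fence() is hypothetical.
 */
static int example_attach_fence(struct reservation_object *obj,
                                struct fence *fence)
{
        int ret;

        ww_mutex_lock(&obj->lock, NULL);    /* satisfies reservation_object_held() */
        ret = reservation_object_reserve_shared(obj);
        if (!ret)
                reservation_object_add_shared_fence(obj, fence);
        ww_mutex_unlock(&obj->lock);

        return ret;
}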


void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
{
        struct fence *old_fence = reservation_object_get_excl(obj);
        struct reservation_object_list *old;
        u32 i = 0;

        old = reservation_object_get_list(obj);
        if (old)
                i = old->shared_count;

        if (fence)
                fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        /* inplace update, no shared fences */
        while (i--)
                fence_put(rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj)));

        if (old_fence)
                fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

//        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
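
/*
 * Minimal fence_ops sketch (an assumption, for illustration): fence_init()
 * above insists on wait, enable_signaling, get_driver_name and
 * get_timeline_name, so the smallest usable ops table looks roughly like
 * this.  The example_* names are hypothetical and fence_default_wait() is
 * assumed to be provided by the fence core.
 */
static const char *example_get_driver_name(struct fence *fence)
{
        return "example";
}

static const char *example_get_timeline_name(struct fence *fence)
{
        return "example-timeline";
}

static bool example_enable_signaling(struct fence *fence)
{
        return true;    /* treat signaling as always enabled */
}

static const struct fence_ops example_fence_ops = {
        .get_driver_name   = example_get_driver_name,
        .get_timeline_name = example_get_timeline_name,
        .enable_signaling  = example_enable_signaling,
        .wait              = fence_default_wait,
};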


#include 

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
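
/*
 * Usage sketch (an assumption, not part of the original file): callers embed
 * a struct rcu_head in their object and free it from the callback once the
 * grace period has passed.  struct example_node and example_node_free() are
 * hypothetical.
 */
struct example_node {
        int value;
        struct rcu_head rcu;
};

static void example_node_free(struct rcu_head *rcu)
{
        struct example_node *node = container_of(rcu, struct example_node, rcu);

        kfree(node);
}

/* after unlinking 'node' from every RCU-visible structure:   */
/*         call_rcu_sched(&node->rcu, example_node_free);     */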

int fb_get_options(const char *name, char **option)
{
    return 1;
}

ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}
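
/*
 * Timing sketch (an assumption): with ktime_get() backed by GetClockNs(),
 * an elapsed interval can be measured with the usual ktime helpers,
 * provided ktime_sub()/ktime_to_ns() are available in this port.
 * example_elapsed_ns() is hypothetical.
 */
static s64 example_elapsed_ns(ktime_t start)
{
        return ktime_to_ns(ktime_sub(ktime_get(), start));
}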

void radeon_cursor_reset(struct drm_crtc *crtc)
{

}

/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
        unsigned long r;

        if (a < b)
                swap(a, b);

        if (!b)
                return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
        }
        return b;
}
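
/*
 * Usage sketch (an assumption): gcd() is typically paired with a least
 * common multiple helper, e.g. for clock and timing calculations.
 * Dividing before multiplying keeps the intermediate value small.
 * example_lcm() is hypothetical; it mirrors the usual lib/lcm.c pattern.
 */
static unsigned long example_lcm(unsigned long a, unsigned long b)
{
        if (a && b)
                return (a / gcd(a, b)) * b;
        return 0;
}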