#include 
#include 
#include 
#include 
#include "radeon.h"

int x86_clflush_size;
unsigned int tsc_khz;

struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if(unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    };

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
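
/*
 * Usage sketch (illustrative, not part of the original file): memchr_inv()
 * makes "is this buffer entirely X?" checks cheap by scanning a word at a
 * time. The helper below is a hypothetical example of validating that a
 * region's padding bytes are still zero.
 */
static inline bool padding_is_clear(const void *pad, size_t len)
{
    return memchr_inv(pad, 0, len) == NULL;
}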


#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}


//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character representing a hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
    if ((ch >= '0') && (ch <= '9'))
        return ch - '0';
    ch = tolower(ch);
    if ((ch >= 'a') && (ch <= 'f'))
        return ch - 'a' + 10;
    return -1;
}
EXPORT_SYMBOL(hex_to_bin);
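
/*
 * Worked examples (illustrative, not from the original source):
 *   hex_to_bin('0') == 0,  hex_to_bin('9') == 9,
 *   hex_to_bin('a') == 10, hex_to_bin('F') == 15,
 *   hex_to_bin('x') == -1  (bad input)
 * The negative return doubles as validation, which hex2bin() below relies on.
 */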

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if ((hi < 0) || (lo < 0))
            return -1;

        *dst++ = (hi << 4) | lo;
    }
    return 0;
}
EXPORT_SYMBOL(hex2bin);
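
/*
 * Usage sketch (hypothetical, not from the original source): decoding a
 * 6-byte MAC address from its 12-digit hex string. Note that @count is
 * the *result* length in bytes, so 6 here consumes 12 input characters.
 */
static inline int parse_mac(u8 mac[6], const char *hex12)
{
    return hex2bin(mac, hex12, 6);
}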

/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *          linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 */
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
               char *linebuf, size_t linebuflen, bool ascii)
{
    const u8 *ptr = buf;
    int ngroups;
    u8 ch;
    int j, lx = 0;
    int ascii_column;
    int ret;

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    if (len > rowsize)      /* limit to one line at a time */
        len = rowsize;
    if (!is_power_of_2(groupsize) || groupsize > 8)
        groupsize = 1;
    if ((len % groupsize) != 0) /* no mixed size output */
        groupsize = 1;

    ngroups = len / groupsize;
    ascii_column = rowsize * 2 + rowsize / groupsize + 1;

    if (!linebuflen)
        goto overflow1;

    if (!len)
        goto nil;

    if (groupsize == 8) {
        const u64 *ptr8 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%16.16llx", j ? " " : "",
                       (unsigned long long)*(ptr8 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 4) {
        const u32 *ptr4 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%8.8x", j ? " " : "",
                       *(ptr4 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 2) {
        const u16 *ptr2 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%4.4x", j ? " " : "",
                       *(ptr2 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else {
        for (j = 0; j < len; j++) {
            if (linebuflen < lx + 3)
                goto overflow2;
            ch = ptr[j];
            linebuf[lx++] = hex_asc_hi(ch);
            linebuf[lx++] = hex_asc_lo(ch);
            linebuf[lx++] = ' ';
        }
        if (j)
            lx--;
    }
    if (!ascii)
        goto nil;

    while (lx < ascii_column) {
        if (linebuflen < lx + 2)
            goto overflow2;
        linebuf[lx++] = ' ';
    }
    for (j = 0; j < len; j++) {
        if (linebuflen < lx + 2)
            goto overflow2;
        ch = ptr[j];
        linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
    }
nil:
    linebuf[lx] = '\0';
    return lx;
overflow2:
    linebuf[lx++] = '\0';
overflow1:
    return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
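
/*
 * Illustrative sketch (not part of the original file): dumping one row in
 * 4-byte groups. With rowsize = 16 and groupsize = 4 the hex part needs
 * 16*2 digits plus 3 separators, so the ASCII column starts at
 * 16*2 + 16/4 + 1 = 37. The buffer name and size are assumptions.
 */
static inline void demo_dump_row(const u8 *data)
{
    char line[80];

    hex_dump_to_buffer(data, 16, 16, 4, line, sizeof(line), true);
    printk("row: %s\n", line);
}
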
/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *          16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
            int rowsize, int groupsize,
            const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                   linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                       buf, len, true);
}
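
/*
 * Usage sketch (hypothetical): dumping an indirect buffer while debugging.
 * print_hex_dump_bytes() is the convenience wrapper: KERN_DEBUG level,
 * 16-byte rows, 1-byte groups, ASCII column enabled.
 *
 *     print_hex_dump_bytes("ib: ", DUMP_PREFIX_OFFSET, ib, 64);
 */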

#define KMAP_MAX    256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init()
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if(kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
};

void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);

    return vaddr;
};

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    void *vaddr;
    int   i;

    MutexLock(&kmap_mutex);

    for(i = 0; i < KMAP_MAX; i++)
    {
        if(kmap_table[i] == page)
        {
            kmap_av++;
            if(i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr,0,0);
            break;
        };
    };

    MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr,0,0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if(i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}
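
/*
 * Usage sketch (hypothetical, mirrors the Linux idiom): temporarily map a
 * page into the KMAP_MAX-slot kernel window to copy data out of it, then
 * release the slot. Note this port's kmap() spins until a slot is free.
 */
static inline void demo_copy_from_page(void *dst, struct page *page, size_t len)
{
    void *vaddr = kmap(page);

    memcpy(dst, vaddr, len);
    kunmap(page);
}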

void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

};


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}


inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), "0"
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
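
/*
 * Worked numbers (illustrative): 0x10c7 = 4295 ~= 2**32 / 10**6, so
 * __udelay(usecs) forms usecs/10**6 in 32.32 fixed point. __const_udelay()
 * then multiplies by loops_per_jiffy * HZ (split as *4 and HZ/4 to avoid
 * overflow) and keeps the high 32 bits of the mull result as the loop
 * count. E.g. at 10**9 delay loops per second, __udelay(100) spins
 * roughly 100000 loops.
 */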

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);
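
/*
 * Worked example (illustrative): _sw_hweight32(0xF0F00001) == 9.
 * Each step folds counts in parallel: 1-bit values become 2-bit pair
 * sums, then 4-bit nibble sums, then byte sums, then the total lands in
 * the low byte (or, with a fast multiplier, is summed into the top byte
 * by the *0x01010101 multiply and shifted down).
 */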


void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);


void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}

void cpu_detect1()
{

    u32 junk, tfms, cap0, misc;
    int i;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
    {
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);

    printf("\n%s\n\n",cpuinfo.model_name);

    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);

    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);

    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };

    unsigned int cr0, cr3, cr4, eflags;

    eflags = safe_cli();

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    cr0 = read_cr0() | (1<<30);
    write_cr0(cr0);
    wbinvd();

    cr4 = read_cr4();
    write_cr4(cr4 & ~(1<<7));

    cr3 = read_cr3();
    write_cr3(cr3);

    /* Save MTRR state */
    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Disable MTRRs, and set the default type to uncached */
    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    wbinvd();

    i = 0;
    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);

    for(; i < cpuinfo.var_mtrr_count; i++)
        set_mtrr(i,0,0,0);

    write_cr3(cr3);

    /* Intel (P6) standard MTRRs */
    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Enable caches */
    write_cr0(read_cr0() & ~(1<<30));

    /* Restore value of CR4 */
    write_cr4(cr4);

    safe_sti(eflags);

    printf("\nnew MTRR map\n\n");

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };
#endif

    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}


static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    amount of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
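
/*
 * Usage sketch (hypothetical): a driver with N rings reserves one context
 * per ring, then hands out consecutive numbers:
 *
 *     base = fence_context_alloc(num_rings);
 *     ring[i].fence_context = base + i;
 *
 * Fences sharing a context are ordered by seqno; see
 * reservation_object_add_shared_inplace() below, which replaces the old
 * fence when contexts match.
 */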


int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

//        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);

int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        }// else
//                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);


void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
//                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);



signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

//        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
//        trace_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

//        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);


static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        u32 i;

        fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);

        for (i = 0; i < fobj->shared_count; ++i) {
                struct fence *old_fence;

                old_fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(obj));

                if (old_fence->context == fence->context) {
                        /* memory barrier is added by write_seqcount_begin */
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                        write_seqcount_end(&obj->seq);
//                        preempt_enable();

                        fence_put(old_fence);
                        return;
                }
        }

        /*
         * memory barrier is added by write_seqcount_begin,
         * fobj->shared_count is protected by this lock too
         */
        RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
        fobj->shared_count++;

        write_seqcount_end(&obj->seq);
//        preempt_enable();
}



static void
reservation_object_add_shared_replace(struct reservation_object *obj,
                                      struct reservation_object_list *old,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        unsigned i;
        struct fence *old_fence = NULL;

        fence_get(fence);

        if (!old) {
                RCU_INIT_POINTER(fobj->shared[0], fence);
                fobj->shared_count = 1;
                goto done;
        }

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        fobj->shared_count = old->shared_count;

        for (i = 0; i < old->shared_count; ++i) {
                struct fence *check;

                check = rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj));

                if (!old_fence && check->context == fence->context) {
                        old_fence = check;
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                } else
                        RCU_INIT_POINTER(fobj->shared[i], check);
        }
        if (!old_fence) {
                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
                fobj->shared_count++;
        }

done:
//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /*
         * RCU_INIT_POINTER can be used here,
         * seqcount provides the necessary barriers
         */
        RCU_INIT_POINTER(obj->fence, fobj);
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        if (old)
                kfree_rcu(old, rcu);

        if (old_fence)
                fence_put(old_fence);
}


int reservation_object_reserve_shared(struct reservation_object *obj)
{
        struct reservation_object_list *fobj, *old;
        u32 max;

        old = reservation_object_get_list(obj);

        if (old && old->shared_max) {
                if (old->shared_count < old->shared_max) {
                        /* perform an in-place update */
                        kfree(obj->staged);
                        obj->staged = NULL;
                        return 0;
                } else
                        max = old->shared_max * 2;
        } else
                max = 4;

        /*
         * resize obj->staged or allocate if it doesn't exist,
         * noop if already correct size
         */
        fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
                        GFP_KERNEL);
        if (!fobj)
                return -ENOMEM;

        obj->staged = fobj;
        fobj->shared_max = max;
        return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct fence *fence)
{
        struct reservation_object_list *old, *fobj = obj->staged;

        old = reservation_object_get_list(obj);
        obj->staged = NULL;

        if (!fobj) {
                BUG_ON(old->shared_count >= old->shared_max);
                reservation_object_add_shared_inplace(obj, old, fence);
        } else
                reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);


void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
{
        struct fence *old_fence = reservation_object_get_excl(obj);
        struct reservation_object_list *old;
        u32 i = 0;

        old = reservation_object_get_list(obj);
        if (old)
                i = old->shared_count;

        if (fence)
                fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        /* inplace update, no shared fences */
        while (i--)
                fence_put(rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj)));

        if (old_fence)
                fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
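
/*
 * Usage sketch (hypothetical, follows the Linux reservation idiom): the
 * caller holds the object's ww_mutex, reserves a slot, then publishes the
 * fence. The reserve step is what lets add_shared_fence() never fail:
 *
 *     ww_mutex_lock(&obj->lock, NULL);
 *     if (!reservation_object_reserve_shared(obj))
 *             reservation_object_add_shared_fence(obj, fence);
 *     ww_mutex_unlock(&obj->lock);
 */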

void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

//        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);


#include 

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}

int fb_get_options(const char *name, char **option)
{
    return 1;
}

ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}

void radeon_cursor_reset(struct drm_crtc *crtc)
{

}

/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
        unsigned long r;

        if (a < b)
                swap(a, b);

        if (!b)
                return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
        }
        return b;
}
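
/*
 * Worked example (illustrative): gcd(54, 24) -> 54 % 24 = 6, 24 % 6 = 0,
 * so the result is 6. A typical driver use is reducing a clock ratio to
 * lowest terms:
 *
 *     unsigned long g = gcd(num, den);
 *     num /= g;
 *     den /= g;
 */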