#include 
#include 
#include 
#include 
#include "i915_drv.h"
#include "intel_drv.h"
#include 
#include 
#include 

struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if(unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    };

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}

struct page *shmem_read_mapping_page_gfp(struct file *filep,
                                         pgoff_t index, gfp_t gfp)
{
    struct page *page;

    if(unlikely(index >= filep->count))
        return ERR_PTR(-EINVAL);

    page = filep->pages[index];

    if(unlikely(page == NULL))
    {
        page = (struct page *)AllocPage();

        if(unlikely(page == NULL))
            return ERR_PTR(-ENOMEM);

        filep->pages[index] = page;
//        printf("file %p index %d page %x\n", filep, index, page);
//        delay(1);

    };

    return page;
};

unsigned long vm_mmap(struct file *file, unsigned long addr,
         unsigned long len, unsigned long prot,
         unsigned long flag, unsigned long offset)
{
    char *mem, *ptr;
    int i;

    if (unlikely(offset + PAGE_ALIGN(len) < offset))
        return -EINVAL;
    if (unlikely(offset & ~PAGE_MASK))
        return -EINVAL;

    mem = UserAlloc(len);
    if(unlikely(mem == NULL))
        return -ENOMEM;

    for(i = offset, ptr = mem; i < offset+len; i+= 4096, ptr+= 4096)
    {
        struct page *page;

        page = shmem_read_mapping_page_gfp(file, i/PAGE_SIZE,0);

        if (unlikely(IS_ERR(page)))
            goto err;

        MapPage(ptr, (addr_t)page, PG_SHARED|PG_UW);
    }

    return (unsigned long)mem;
err:
    UserFree(mem);
    return -ENOMEM;
};

void shmem_file_delete(struct file *filep)
{
//    printf("%s file %p pages %p count %d\n",
//            __FUNCTION__, filep, filep->pages, filep->count);

    if(filep->pages)
        kfree(filep->pages);
}
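
/*
 * Usage sketch (illustrative, not taken from the driver sources): the GEM
 * object code typically drives this small shmem shim roughly as below;
 * "size" is a hypothetical object size.
 *
 *   struct file *filp = shmem_file_setup("i915", size, 0);
 *   if (IS_ERR(filp))
 *           return PTR_ERR(filp);
 *
 *   struct page *page = shmem_read_mapping_page_gfp(filp, 0, 0);
 *   unsigned long uaddr = vm_mmap(filp, 0, size, 0, 0, 0);
 *
 *   shmem_file_delete(filp);   // frees only the page array, not the pages
 */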

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
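
/*
 * Example (illustrative, not part of the original file): check that a
 * buffer is still entirely zero-filled.
 *
 *   u8 buf[64] = { 0 };
 *
 *   if (memchr_inv(buf, 0, sizeof(buf)) != NULL)
 *           printk("buffer was modified\n");
 */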

int dma_map_sg(struct device *dev, struct scatterlist *sglist,
                           int nelems, int dir)
{
    struct scatterlist *s;
    int i;

    for_each_sg(sglist, s, nelems, i) {
        s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        s->dma_length  = s->length;
#endif
    }

    return nelems;
}
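
/*
 * Example (illustrative): in this port dma_map_sg() is a 1:1 mapping, so
 * after the call each entry's dma_address is simply its physical address.
 * "dev", "sglist", "count" and program_descriptor() are hypothetical names.
 *
 *   struct scatterlist *s;
 *   int i, nents = dma_map_sg(dev, sglist, count, 0);
 *
 *   for_each_sg(sglist, s, nents, i)
 *           program_descriptor(s->dma_address, s->length);
 */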

#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}


//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character represents hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
    if ((ch >= '0') && (ch <= '9'))
        return ch - '0';
    ch = tolower(ch);
    if ((ch >= 'a') && (ch <= 'f'))
        return ch - 'a' + 10;
    return -1;
}
EXPORT_SYMBOL(hex_to_bin);

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if ((hi < 0) || (lo < 0))
            return -1;

        *dst++ = (hi << 4) | lo;
    }
    return 0;
}
EXPORT_SYMBOL(hex2bin);
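
/*
 * Example (illustrative): parse a 12-digit hex string into 6 raw bytes.
 *
 *   u8 mac[6];
 *
 *   if (hex2bin(mac, "001122aabbcc", sizeof(mac)) == 0)
 *           printk("%02x\n", mac[5]);        // prints "cc"
 *
 *   // hex_to_bin('f') == 15, hex_to_bin('g') == -1
 */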

/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *          linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 */
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
               char *linebuf, size_t linebuflen, bool ascii)
{
    const u8 *ptr = buf;
    int ngroups;
    u8 ch;
    int j, lx = 0;
    int ascii_column;
    int ret;

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    if (len > rowsize)      /* limit to one line at a time */
        len = rowsize;
    if (!is_power_of_2(groupsize) || groupsize > 8)
        groupsize = 1;
    if ((len % groupsize) != 0) /* no mixed size output */
        groupsize = 1;

    ngroups = len / groupsize;
    ascii_column = rowsize * 2 + rowsize / groupsize + 1;

    if (!linebuflen)
        goto overflow1;

    if (!len)
        goto nil;

    if (groupsize == 8) {
        const u64 *ptr8 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%16.16llx", j ? " " : "",
                       (unsigned long long)*(ptr8 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 4) {
        const u32 *ptr4 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%8.8x", j ? " " : "",
                       *(ptr4 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else if (groupsize == 2) {
        const u16 *ptr2 = buf;

        for (j = 0; j < ngroups; j++) {
            ret = snprintf(linebuf + lx, linebuflen - lx,
                       "%s%4.4x", j ? " " : "",
                       *(ptr2 + j));
            if (ret >= linebuflen - lx)
                goto overflow1;
            lx += ret;
        }
    } else {
        for (j = 0; j < len; j++) {
            if (linebuflen < lx + 3)
                goto overflow2;
            ch = ptr[j];
            linebuf[lx++] = hex_asc_hi(ch);
            linebuf[lx++] = hex_asc_lo(ch);
            linebuf[lx++] = ' ';
        }
        if (j)
            lx--;
    }
    if (!ascii)
        goto nil;

    while (lx < ascii_column) {
        if (linebuflen < lx + 2)
            goto overflow2;
        linebuf[lx++] = ' ';
    }
    for (j = 0; j < len; j++) {
        if (linebuflen < lx + 2)
            goto overflow2;
        ch = ptr[j];
        linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
    }
nil:
    linebuf[lx] = '\0';
    return lx;
overflow2:
    linebuf[lx++] = '\0';
overflow1:
    return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}

/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *          16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
            int rowsize, int groupsize,
            const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                   linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                       buf, len, true);
}

void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}
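
/*
 * Example (illustrative): take a private copy of caller-supplied data;
 * "blob" and "blob_len" are hypothetical names, and 0 is passed for the gfp
 * flags in the same way as the other allocations in this file.
 *
 *   void *copy = kmemdup(blob, blob_len, 0);
 *   if (copy == NULL)
 *           return -ENOMEM;
 */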

#define KMAP_MAX    256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init()
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if(kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
};

void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);

    return vaddr;
};

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    void *vaddr;
    int   i;

    MutexLock(&kmap_mutex);

    for(i = 0; i < KMAP_MAX; i++)
    {
        if(kmap_table[i] == page)
        {
            kmap_av++;
            if(i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr,0,0);
            break;
        };
    };

    MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr,0,0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if(i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}
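
/*
 * Example (illustrative): temporarily map a backing page to fill it, then
 * release the slot; "src" is hypothetical. Note that kunmap() takes the
 * page, while kunmap_atomic() takes the virtual address.
 *
 *   void *vaddr = kmap(page);
 *   memcpy(vaddr, src, 4096);
 *   kunmap(page);
 *
 *   vaddr = kmap_atomic(page);
 *   memcpy(vaddr, src, 4096);
 *   kunmap_atomic(vaddr);
 */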

size_t strlcat(char *dest, const char *src, size_t count)
{
        size_t dsize = strlen(dest);
        size_t len = strlen(src);
        size_t res = dsize + len;

        /* This would be a bug */
        BUG_ON(dsize >= count);

        dest += dsize;
        count -= dsize;
        if (len >= count)
                len = count-1;
        memcpy(dest, src, len);
        dest[len] = 0;
        return res;
}
EXPORT_SYMBOL(strlcat);
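
/*
 * Example (illustrative): append to a fixed-size buffer. The return value
 * is the length the concatenated string would have had, so a result >=
 * sizeof(buf) indicates truncation; "mode_name" is hypothetical.
 *
 *   char buf[32] = "i915: ";
 *
 *   if (strlcat(buf, mode_name, sizeof(buf)) >= sizeof(buf))
 *           printk("mode name truncated\n");
 */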

void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

};


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}

inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), "0"
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
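
/*
 * Choosing a delay primitive in this port (illustrative sketch): msleep()
 * forwards to the system Delay() call, which appears to tick in 10 ms
 * units, so requests shorter than 10 ms still sleep for about one tick;
 * udelay() and friends busy-wait instead.
 *
 *   udelay(20);                 // short busy-wait around register access
 *   msleep(50);                 // coarse sleep, rounded to 10 ms ticks
 *   usleep_range(1000, 2000);   // in this port simply udelay(max)
 */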

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);
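
/*
 * Worked example (illustrative): _sw_hweight32() returns the number of set
 * bits in its argument, e.g.
 *
 *   _sw_hweight32(0x00000000) == 0
 *   _sw_hweight32(0x000000ff) == 8
 *   _sw_hweight32(0xf0f0f0f0) == 16
 */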

void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}


unsigned long round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, 0, true) - j0;
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
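
/*
 * Example (illustrative): round a relative timeout so that periodic work
 * expires on a whole-second (HZ) boundary and coalesces with other timers;
 * the rounding is applied to the absolute expiry time and then converted
 * back to a relative count. schedule_delayed_work() and "retire_work" are
 * assumed here, not defined in this file.
 *
 *   unsigned long delay = round_jiffies_up_relative(HZ, 0);
 *   schedule_delayed_work(&retire_work, delay);
 */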

#include 

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
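
/*
 * Example (illustrative): defer freeing an object until after a grace
 * period. The callback receives the rcu_head, so container_of() recovers
 * the enclosing object; "struct foo" is hypothetical. kfree_rcu(), used by
 * fence_free() below, is built on the same mechanism.
 *
 *   static void foo_rcu_free(struct rcu_head *rcu)
 *   {
 *           struct foo *f = container_of(rcu, struct foo, rcu);
 *           kfree(f);
 *   }
 *
 *   call_rcu_sched(&f->rcu, foo_rcu_free);
 */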

int seq_puts(struct seq_file *m, const char *s)
{
    return 0;
};

__printf(2, 3) int seq_printf(struct seq_file *m, const char *f, ...)
{
    return 0;
}


signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

//        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
//        trace_fence_wait_end(fence);
        return ret;
}

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

//        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
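
/*
 * Example (illustrative): wait on a fence with a timeout, then drop the
 * reference. fence_put() and msecs_to_jiffies() are assumed to be provided
 * by the port's headers; fence_put() ends up in fence_release() once the
 * refcount drops to zero.
 *
 *   signed long left = fence_wait_timeout(fence, false, msecs_to_jiffies(500));
 *   if (left == 0)
 *           printk("fence timed out\n");
 *   else if (left < 0)
 *           printk("wait failed: %ld\n", left);
 *   fence_put(fence);
 */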

ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}