Subversion Repositories Kolibri OS

Rev

Rev 6131 | Rev 6660 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
3263 Serge 1
#include 
2
#include 
3260 Serge 3
#include 
4
#include 
5
#include "i915_drv.h"
6
#include "intel_drv.h"
3480 Serge 7
#include 
6084 serge 8
#include 
9
#include 
6088 serge 10
#include "i915_kos32.h"
3260 Serge 11
 
12
/*
 * Create a pseudo shmem file: a struct file holding a lazily-populated
 * array of page pointers.  @name and @flags are accepted for interface
 * compatibility but unused here.
 *
 * Returns the new file, or ERR_PTR(-ENOMEM) on allocation failure.
 *
 * Fix: the page count was computed as size / PAGE_SIZE, which truncates
 * and leaves the final partial page without a slot when @size is not
 * page-aligned; round up instead.
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));
    if (unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    /* Round up so a non-page-aligned size still covers its tail page. */
    count = (size + PAGE_SIZE - 1) / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if (unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    }

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

    return filep;
}
40
 
41
struct page *shmem_read_mapping_page_gfp(struct file *filep,
42
                                         pgoff_t index, gfp_t gfp)
43
{
44
    struct page *page;
45
 
46
    if(unlikely(index >= filep->count))
47
        return ERR_PTR(-EINVAL);
48
 
49
    page = filep->pages[index];
50
 
51
    if(unlikely(page == NULL))
52
    {
53
        page = (struct page *)AllocPage();
54
 
55
        if(unlikely(page == NULL))
56
            return ERR_PTR(-ENOMEM);
57
 
58
        filep->pages[index] = page;
4246 Serge 59
//        printf("file %p index %d page %x\n", filep, index, page);
60
//        delay(1);
61
 
3260 Serge 62
    };
63
 
64
    return page;
65
};
3263 Serge 66
 
67
unsigned long vm_mmap(struct file *file, unsigned long addr,
68
         unsigned long len, unsigned long prot,
69
         unsigned long flag, unsigned long offset)
70
{
71
    char *mem, *ptr;
72
    int i;
73
 
74
    if (unlikely(offset + PAGE_ALIGN(len) < offset))
75
        return -EINVAL;
76
    if (unlikely(offset & ~PAGE_MASK))
77
        return -EINVAL;
78
 
79
    mem = UserAlloc(len);
80
    if(unlikely(mem == NULL))
81
        return -ENOMEM;
82
 
83
    for(i = offset, ptr = mem; i < offset+len; i+= 4096, ptr+= 4096)
84
    {
85
        struct page *page;
86
 
87
        page = shmem_read_mapping_page_gfp(file, i/PAGE_SIZE,0);
88
 
89
        if (unlikely(IS_ERR(page)))
90
            goto err;
91
 
92
        MapPage(ptr, (addr_t)page, PG_SHARED|PG_UW);
93
    }
94
 
95
    return (unsigned long)mem;
96
err:
97
    UserFree(mem);
98
    return -ENOMEM;
99
};
100
 
3290 Serge 101
void shmem_file_delete(struct file *filep)
102
{
3298 Serge 103
//    printf("%s file %p pages %p count %d\n",
104
//            __FUNCTION__, filep, filep->pages, filep->count);
3263 Serge 105
 
3290 Serge 106
    if(filep->pages)
107
        kfree(filep->pages);
108
}
3480 Serge 109
 
110
 
111
 
112
/*
 * Byte-wise scan: return a pointer to the first byte in [start,
 * start+bytes) that differs from @value, or NULL if all match.
 */
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        unsigned int i;

        for (i = 0; i < bytes; i++) {
                if (start[i] != value)
                        return (void *)(start + i);
        }
        return NULL;
}
122
 
123
/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        /* Small buffers: a plain byte scan is cheaper than the setup below. */
        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        /* Replicate the byte into all eight lanes of a 64-bit word. */
        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        /* Handle leading bytes up to the first 8-byte boundary so the
         * word loop below only does aligned loads. */
        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        /* Compare whole words; on mismatch, rescan that word byte-wise
         * to pinpoint the offending byte. */
        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        /* Tail bytes after the last whole word. */
        return check_bytes8(start, value, bytes % 8);
}
176
 
177
 
178
 
179
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
180
                           int nelems, int dir)
181
{
182
    struct scatterlist *s;
183
    int i;
184
 
185
    for_each_sg(sglist, s, nelems, i) {
186
        s->dma_address = (dma_addr_t)sg_phys(s);
187
#ifdef CONFIG_NEED_SG_DMA_LENGTH
188
        s->dma_length  = s->length;
189
#endif
190
    }
191
 
192
    return nelems;
193
}
194
 
195
 
196
 
197
/*
 * Minimal ctype replacement driven by the Linux-style _ctype[]
 * classification table: each table byte is a bitmask of the flags below.
 */
#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

/* Classification table, defined elsewhere; indexed by unsigned byte value. */
extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

/* ASCII-only helpers: 7-bit range test and 7-bit truncation. */
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
225
 
226
/* Lower-case an ASCII letter; other bytes pass through unchanged. */
static inline unsigned char __tolower(unsigned char c)
{
    return isupper(c) ? c - 'A' + 'a' : c;
}
232
 
233
/* Upper-case an ASCII letter; other bytes pass through unchanged. */
static inline unsigned char __toupper(unsigned char c)
{
    return islower(c) ? c - 'a' + 'A' : c;
}
239
 
240
/* Map the standard names onto the table-driven helpers above. */
#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)
242
 
243
/*
 * Fast implementation of tolower() for internal usage. Do not use in
 * your code: it sets bit 5 unconditionally, so it is only correct when
 * the input is already known to be an ASCII letter.
 */
static inline char _tolower(const char c)
{
    return (char)(c | 0x20);
}
251
 
252
 
4104 Serge 253
/*
 * Allocate @len bytes with @gfp and copy @src into them.
 * Returns the copy, or NULL if allocation fails.
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *copy = kmalloc(len, gfp);

    if (copy != NULL)
        memcpy(copy, src, len);
    return copy;
}
262
 
263
 
5354 serge 264
#define KMAP_MAX    256                      /* number of 4 KiB slots in the kmap window */

static struct mutex kmap_mutex;              /* guards all kmap_* state below */
static struct page* kmap_table[KMAP_MAX];    /* slot -> mapped page (NULL = slot free) */
static int kmap_av;                          /* number of free slots */
static int kmap_first;                       /* lowest index that may be free (search hint) */
static void* kmap_base;                      /* start of the reserved kernel VA window */
271
 
272
 
273
int kmap_init()
274
{
275
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
276
    if(kmap_base == NULL)
277
        return -1;
278
 
279
    kmap_av = KMAP_MAX;
280
    MutexInit(&kmap_mutex);
281
    return 0;
282
};
283
 
5060 serge 284
/*
 * Map @page into a free slot of the kmap VA window and return its
 * virtual address.  If no slot is free the function spins, re-checking
 * under the mutex, until one is released — so kmap_init() must have
 * succeeded before this is called.
 */
void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            /* Scan upward from the lowest possibly-free slot. */
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);   /* slot i -> base + i*4096 */
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);   /* busy-wait until a slot frees up */

    return vaddr;
};
312
 
313
void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));
314
 
315
void kunmap(struct page *page)
316
{
5060 serge 317
    void *vaddr;
5354 serge 318
    int   i;
5060 serge 319
 
5354 serge 320
    MutexLock(&kmap_mutex);
5060 serge 321
 
5354 serge 322
    for(i = 0; i < KMAP_MAX; i++)
323
    {
324
        if(kmap_table[i] == page)
325
        {
326
            kmap_av++;
327
            if(i < kmap_first)
328
                kmap_first = i;
329
            kmap_table[i] = NULL;
330
            vaddr = kmap_base + (i<<12);
331
            MapPage(vaddr,0,0);
332
            break;
333
        };
334
    };
335
 
336
    MutexUnlock(&kmap_mutex);
337
};
338
 
339
void kunmap_atomic(void *vaddr)
340
{
341
    int i;
342
 
343
    MapPage(vaddr,0,0);
344
 
345
    i = (vaddr - kmap_base) >> 12;
346
 
347
    MutexLock(&kmap_mutex);
348
 
349
    kmap_av++;
350
    if(i < kmap_first)
351
        kmap_first = i;
352
    kmap_table[i] = NULL;
353
 
354
    MutexUnlock(&kmap_mutex);
5060 serge 355
}
356
 
5354 serge 357
/*
 * Sleep for roughly @msecs milliseconds via the KolibriOS Delay
 * service.  The value is divided by 10 first (Delay presumably ticks
 * in 10 ms units — confirm against the OS API), with a minimum of one
 * tick.
 */
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     /* Tell the compiler ebx was clobbered by the call above. */
     __asm__ __volatile__ (
     "":::"ebx");

};
369
 
370
 
371
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        /* Count %eax down to zero; the .align padding and jump chain
         * shape the loop for stable timing on x86. */
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}
391
 
392
 
393
/* Active delay backend; only the loop-based implementation exists here. */
static void (*delay_fn)(unsigned long) = delay_loop;

/* Busy-wait for @loops iterations using the selected backend. */
void __delay(unsigned long loops)
{
        delay_fn(loops);
}
399
 
400
 
401
/*
 * Scale @xloops (a 2^32-based fraction) by loops_per_jiffy*(HZ/4)
 * using a 32x32->64 multiply — "mull" leaves the high 32 bits in
 * %edx, which becomes the loop count — then busy-wait that many
 * iterations.
 */
inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), ""
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}
413
 
414
/* Busy-wait for @usecs microseconds via the calibrated delay loop. */
void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
418
 
419
/*
 * Software population count: number of set bits in @w.
 * Classic SWAR reduction; the multiplier variant is used when the
 * platform advertises a fast multiplier.
 */
unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int v = w;

        v = v - ((v >> 1) & 0x55555555);              /* 2-bit sums */
        v = (v & 0x33333333) + ((v >> 2) & 0x33333333); /* 4-bit sums */
        v = (v + (v >> 4)) & 0x0F0F0F0F;              /* 8-bit sums */
        v += v >> 8;
        v += v >> 16;
        return v & 0x000000FF;
#endif
}
434
EXPORT_SYMBOL(_sw_hweight32);
435
 
436
 
437
/*
 * No hrtimers in this port: busy-wait for the upper bound of the
 * requested range; @min is ignored.
 */
void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
441
EXPORT_SYMBOL(usleep_range);
442
 
443
 
444
static unsigned long round_jiffies_common(unsigned long j, int cpu,
445
                bool force_up)
446
{
447
        int rem;
448
        unsigned long original = j;
449
 
450
        /*
451
         * We don't want all cpus firing their timers at once hitting the
452
         * same lock or cachelines, so we skew each extra cpu with an extra
453
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
454
         * already did this.
455
         * The skew is done by adding 3*cpunr, then round, then subtract this
456
         * extra offset again.
457
         */
458
        j += cpu * 3;
459
 
460
        rem = j % HZ;
461
 
462
        /*
463
         * If the target jiffie is just after a whole second (which can happen
464
         * due to delays of the timer irq, long irq off times etc etc) then
465
         * we should round down to the whole second, not up. Use 1/4th second
466
         * as cutoff for this rounding as an extreme upper bound for this.
467
         * But never round down if @force_up is set.
468
         */
469
        if (rem < HZ/4 && !force_up) /* round down */
470
                j = j - rem;
471
        else /* round up */
472
                j = j - rem + HZ;
473
 
474
        /* now that we have rounded, subtract the extra skew again */
475
        j -= cpu * 3;
476
 
477
        /*
478
         * Make sure j is still in the future. Otherwise return the
479
         * unmodified value.
480
         */
481
        return time_is_after_jiffies(j) ? j : original;
482
}
483
 
484
 
485
unsigned long round_jiffies_up_relative(unsigned long j, int cpu)
486
{
487
        unsigned long j0 = jiffies;
488
 
489
        /* Use j0 because jiffies might change while we run */
490
        return round_jiffies_common(j + j0, 0, true) - j0;
491
}
492
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
493
 
494
 
495
#include 
496
 
497
/* Singly-linked callback list for the tiny (single-CPU) RCU below. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
/* Both tails start out pointing at the (empty) list head. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};
514
 
515
/*
 * Append callback @head (to be invoked as @func) to @rcp's list.
 * The list splice is done with local interrupts disabled so an irq
 * cannot observe a half-updated tail.
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;          /* link at current tail */
        rcp->curtail = &head->next;    /* advance tail to the new node */
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}
531
 
532
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
541
 
6084 serge 542
/* Stub: seq_file output is not supported in this port; report success. */
int seq_puts(struct seq_file *m, const char *s)
{
    (void)m;
    (void)s;
    return 0;
}
5354 serge 546
 
6084 serge 547
/* Stub: formatted seq_file output is discarded; report success. */
__printf(2, 3) int seq_printf(struct seq_file *m, const char *f, ...)
{
    (void)m;
    (void)f;
    return 0;
}
5354 serge 551
 
6084 serge 552
ktime_t ktime_get(void)
553
{
554
    ktime_t t;
555
 
556
    t.tv64 = GetClockNs();
557
 
558
    return t;
559
}
560
 
6088 serge 561
/*
 * Duplicate the NUL-terminated string @str into freshly allocated
 * memory.  Returns NULL if the allocation fails.
 */
char *strdup(const char *str)
{
    size_t n = strlen(str) + 1;   /* include the terminator */
    char *dup = __builtin_malloc(n);

    if (dup != NULL)
        memcpy(dup, str, n);
    return dup;
}
571
 
572
/*
 * Split @cmdline into whitespace-separated arguments, honouring
 * double-quoted sections and \" escapes.  If @argv is non-NULL, the
 * arguments are unescaped in place (quote characters stripped) and
 * argv[] is filled with pointers into @cmdline; if @argv is NULL only
 * the argument count is computed, without modifying @cmdline.
 * Returns the number of arguments found.
 */
int split_cmdline(char *cmdline, char **argv)
{
    enum quote_state
    {
        QUOTE_NONE,         /* no " active in current parm       */
        QUOTE_DELIMITER,    /* " was first char and must be last */
        QUOTE_STARTED       /* " was seen, look for a match      */
    };

    enum quote_state state;
    unsigned int argc;
    char *p = cmdline;
    char *new_arg, *start;      /* write cursor / start of current arg */

    argc = 0;

    for(;;)
    {
        /* skip over spaces and tabs */
        if ( *p )
        {
            while (*p == ' ' || *p == '\t')
                ++p;
        }

        if (*p == '\0')
            break;

        state = QUOTE_NONE;
        /* Argument opens with a quote: it must also close with one. */
        if( *p == '\"' )
        {
            p++;
            state = QUOTE_DELIMITER;
        }
        new_arg = start = p;
        /* Scan one argument, toggling quote state and copying bytes. */
        for (;;)
        {
            if( *p == '\"' )
            {
                p++;
                if( state == QUOTE_NONE )
                {
                    state = QUOTE_STARTED;
                }
                else
                {
                    state = QUOTE_NONE;
                }
                continue;       /* quote chars are never copied */
            }

            /* Whitespace ends the argument only outside quotes. */
            if( *p == ' ' || *p == '\t' )
            {
                if( state == QUOTE_NONE )
                {
                    break;
                }
            }

            if( *p == '\0' )
                break;

            /* \" becomes a literal quote, unless preceded by another
             * backslash (i.e. \\" keeps the backslash literal). */
            if( *p == '\\' )
            {
                if( p[1] == '\"' )
                {
                    ++p;
                    if( p[-2] == '\\' )
                    {
                        continue;
                    }
                }
            }
            if( argv )
            {
                *(new_arg++) = *p;   /* compact the arg in place */
            }
            ++p;
        };

        if( argv )
        {
            argv[ argc ] = start;
            ++argc;

            /*
              The *new = '\0' is req'd in case there was a \" to "
              translation. It must be after the *p check against
              '\0' because new and p could point to the same char
              in which case the scan would be terminated too soon.
            */

            if( *p == '\0' )
            {
                *new_arg = '\0';
                break;
            }
            *new_arg = '\0';
            ++p;
        }
        else
        {
            /* Counting-only pass: nothing is written back. */
            ++argc;
            if( *p == '\0' )
            {
                break;
            }
            ++p;
        }
    }

    return argc;
};
685
 
686
 
687
fb_get_options(const char *name, char **option)
688
{
689
    char *opt, *options = NULL;
690
    int retval = 1;
691
    int name_len;
692
 
693
    if(i915.cmdline_mode == NULL)
694
        return 1;
695
 
696
    name_len = __builtin_strlen(name);
697
 
698
    if (name_len )
699
    {
700
        opt = i915.cmdline_mode;
701
        if (!__builtin_strncmp(name, opt, name_len) &&
702
             opt[name_len] == ':')
703
        {
704
             options = opt + name_len + 1;
705
             retval = 0;
706
        }
707
    }
708
 
709
    if (option)
710
        *option = options;
711
 
712
    return retval;
713
}