Subversion Repositories Kolibri OS


Comparing Rev 5060 with Rev 5354

@@ line 10, both revisions @@
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
 {
     struct file *filep;
     int count;
 
-    filep = malloc(sizeof(*filep));
+    filep = __builtin_malloc(sizeof(*filep));
 
     if(unlikely(filep == NULL))
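The only change in this hunk swaps the bare malloc() call for GCC's
__builtin_malloc(). The builtin still lowers to an ordinary call to malloc(),
but the compiler supplies the prototype and the malloc attribute itself, so
the shim compiles without a libc header in scope. A minimal illustration, not
from the file:

/* Compiles without <stdlib.h>: the builtin carries its own prototype
 * and tells the optimizer the result doesn't alias existing memory. */
static void *alloc_scratch(void)          /* hypothetical helper */
{
    return __builtin_malloc(4096);        /* emits a call to malloc(4096) */
}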
@@ line 246, both revisions @@
 {
     return c | 0x20;
 }
-
 
 
 //const char hex_asc[] = "0123456789abcdef";
 
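The hunk above only drops a blank line, but the helper it closes deserves a
note: c | 0x20 sets bit 5 of the character, which in ASCII maps 'A'..'Z'
(0x41..0x5A) onto 'a'..'z' (0x61..0x7A). A sketch of the trick, assuming the
enclosing function guards against non-letters (the guard sits outside this
hunk; the function name below is hypothetical):

/* ASCII-only lowercasing: upper and lower case letters differ only in
 * bit 5 ('A' = 0x41, 'a' = 0x61), so OR-ing in 0x20 lowercases a letter.
 * Applied to arbitrary bytes it can change non-letters too, e.g.
 * '@' (0x40) becomes '`' (0x60), hence the range check before the OR. */
static inline char ascii_tolower_sketch(char c)
{
    return (c >= 'A' && c <= 'Z') ? (c | 0x20) : c;
}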
@@ Rev 5060 line 476 / Rev 5354 line 475 @@
         memcpy(p, src, len);
     return p;
 }

@@ Rev 5354 lines 478-498: added @@
+
+
+#define KMAP_MAX    256
+
+static struct mutex kmap_mutex;
+static struct page* kmap_table[KMAP_MAX];
+static int kmap_av;
+static int kmap_first;
+static void* kmap_base;
+
+
+int kmap_init()
+{
+    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
+    if(kmap_base == NULL)
+        return -1;
+
+    kmap_av = KMAP_MAX;
+    MutexInit(&kmap_mutex);
+    return 0;
+};
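kmap_init() reserves a contiguous KMAP_MAX*4096 = 1 MiB window of kernel
address space once and marks all 256 page slots free; everything after it only
remaps pages into that window. A plausible call site, purely an assumption
about bring-up order (drv_entry is a hypothetical name):

/* Hypothetical bring-up: the window must exist before any kmap() caller. */
int drv_entry(void)
{
    if (kmap_init() != 0)
        return -1;      /* AllocKernelSpace() failed: no address space */
    /* ... rest of driver initialization ... */
    return 0;
}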
@@ Rev 5060 lines 482-489 / Rev 5354 lines 500-527: kmap() rewritten @@
 
-void *kmap(struct page *page)
-{
-    void *vaddr;
-
-    vaddr = (void*)MapIoMem(page_to_phys(page), 4096, PG_SW);
-
-    return vaddr;
-}
+void *kmap(struct page *page)
+{
+    void *vaddr = NULL;
+    int i;
+
+    do
+    {
+        MutexLock(&kmap_mutex);
+        if(kmap_av != 0)
+        {
+            for(i = kmap_first; i < KMAP_MAX; i++)
+            {
+                if(kmap_table[i] == NULL)
+                {
+                    kmap_av--;
+                    kmap_first = i;
+                    kmap_table[i] = page;
+                    vaddr = kmap_base + (i<<12);
+                    MapPage(vaddr,(addr_t)page,3);
+                    break;
+                };
+            };
+        };
+        MutexUnlock(&kmap_mutex);
+    }while(vaddr == NULL);
+
+    return vaddr;
+};
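The rewrite replaces a per-call MapIoMem() mapping with slot reuse in the
preallocated window: find a free slot, map the page there with
MapPage(vaddr, (addr_t)page, 3) (flag 3 is presumably Present|Writable in the
x86 page-table sense), and hand out kmap_base + slot*4096. Note that the new
code passes (addr_t)page where the old code used page_to_phys(page), which
suggests struct page* in this port is effectively the physical address itself.
One design consequence worth flagging: when all KMAP_MAX slots are busy the
do/while spins, re-taking the mutex with no sleep in between, so a starved
caller burns CPU until someone calls kunmap().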
@@ Rev 5354 lines 528-553: added @@
+
+void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));
+
+void kunmap(struct page *page)
+{
+    void *vaddr;
+    int   i;
+
+    MutexLock(&kmap_mutex);
+
+    for(i = 0; i < KMAP_MAX; i++)
+    {
+        if(kmap_table[i] == page)
+        {
+            kmap_av++;
+            if(i < kmap_first)
+                kmap_first = i;
+            kmap_table[i] = NULL;
+            vaddr = kmap_base + (i<<12);
+            MapPage(vaddr,0,0);
+            break;
+        };
+    };
+
+    MutexUnlock(&kmap_mutex);
+};
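Aliasing kmap_atomic() straight to kmap() is a shortcut to be aware of: in
Linux, kmap_atomic() must never sleep, while this version can block on
kmap_mutex and even spin waiting for a free slot. Single-CPU KolibriOS
presumably tolerates that, but ported code relying on the atomic guarantee
should know it is gone. Note also that kunmap() releases by linear search for
the page pointer, so unmapping a page that was never mapped silently does
nothing.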
@@ Rev 5354 lines 554-571: added @@
+
+void kunmap_atomic(void *vaddr)
+{
+    int i;
+
+    MapPage(vaddr,0,0);
+
+    i = (vaddr - kmap_base) >> 12;
+
+    MutexLock(&kmap_mutex);
+
+    kmap_av++;
+    if(i < kmap_first)
+        kmap_first = i;
+    kmap_table[i] = NULL;
+
+    MutexUnlock(&kmap_mutex);
+}
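kunmap_atomic() is the O(1) counterpart: since the caller hands back the
virtual address, the slot index is just (vaddr - kmap_base) >> 12, with no
table scan. A minimal round trip through the pool, as a sketch (zero_page and
its page source are assumptions, not part of the file):

/* Sketch: map a page from the pool, clear it, release the slot. */
static void zero_page(struct page *pg)
{
    void *va = kmap(pg);          /* may block until a slot is free */
    memset(va, 0, 4096);          /* the window is an ordinary mapping */
    kunmap(pg);                   /* frees the slot by page lookup; or,
                                   * keeping va: kunmap_atomic(va) is O(1) */
}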
@@ Rev 5354 lines 572-590: added @@
+
+size_t strlcat(char *dest, const char *src, size_t count)
+{
+        size_t dsize = strlen(dest);
+        size_t len = strlen(src);
+        size_t res = dsize + len;
+
+        /* This would be a bug */
+        BUG_ON(dsize >= count);
+
+        dest += dsize;
+        count -= dsize;
+        if (len >= count)
+                len = count-1;
+        memcpy(dest, src, len);
+        dest[len] = 0;
+        return res;
+}
+EXPORT_SYMBOL(strlcat);
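strlcat() always NUL-terminates within count bytes and returns the length the
concatenation would have had, so truncation is detected by comparing the
return value against the buffer size. A small usage sketch (values are
illustrative):

/* dest holds "/sys" (4 chars) in a 16-byte buffer. */
char path[16] = "/sys";
if (strlcat(path, "/devices/pci0", sizeof(path)) >= sizeof(path))
{
    /* 4 + 13 = 17 >= 16: truncated; path is now "/sys/devices/pc" */
}

Note the BUG_ON: where the BSD original returns gracefully when dest is
already longer than the buffer, this (Linux-derived) version treats that
state as a hard bug.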
@@ Rev 5354 lines 591-603: added @@
+
+void msleep(unsigned int msecs)
+{
+    msecs /= 10;
+    if(!msecs) msecs = 1;
+
+     __asm__ __volatile__ (
+     "call *__imp__Delay"
+     ::"b" (msecs));
+     __asm__ __volatile__ (
+     "":::"ebx");
+
+};
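The msecs /= 10 conversion implies the KolibriOS Delay service counts in
10 ms ticks (1/100 s, the system timer granularity), and the clamp to 1 means
msleep() never returns immediately. Worked values under that assumption:

/* msleep(1)  -> 1/10  = 0 -> clamped to 1 tick = 10 ms actual
 * msleep(25) -> 25/10 = 2 ticks                = 20 ms actual
 * msleep(30) -> 30/10 = 3 ticks                = 30 ms actual */

The trailing empty asm with an "ebx" clobber appears to be a workaround: the
call is issued with msecs pinned in EBX via the "b" constraint, and the extra
statement tells GCC the register no longer holds that value afterwards.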
@@ Rev 5354 lines 604-625: added @@
+
+
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+        asm volatile(
+                "       test %0,%0      \n"
+                "       jz 3f           \n"
+                "       jmp 1f          \n"
+
+                ".align 16              \n"
+                "1:     jmp 2f          \n"
+
+                ".align 16              \n"
+                "2:     dec %0          \n"
+                "       jnz 2b          \n"
+                "3:     dec %0          \n"
+
+                : /* we don't need output */
+                :"a" (loops)
+        );
+}
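This matches the classic calibrated-delay loop from Linux's x86 arch code: the
aligned jmp staircase lands the hot dec/jnz pair on its own 16-byte boundary
so the per-iteration cost stays stable regardless of where the function itself
was placed, and each pass of label 2 burns one "loop" unit of time. The stray
dec %0 at label 3 runs once more than strictly needed, which is harmless since
the register is dead afterwards.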
@@ Rev 5060 lines 491-505: removed @@
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
-        const unsigned long *p = addr;
-        unsigned long result = 0;
-        unsigned long tmp;
-
-        while (size & ~(BITS_PER_LONG-1)) {
-                if (~(tmp = *(p++)))
-                        goto found;
-                result += BITS_PER_LONG;
-                size -= BITS_PER_LONG;
-        }
-        if (!size)
-                return result;
-
(the rest of the removed function, including its found: label, lies beyond the
lines shown in this diff)
@@ Rev 5354 lines 626-652: added @@
+
+
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void __delay(unsigned long loops)
+{
+        delay_fn(loops);
+}
+
+
+inline void __const_udelay(unsigned long xloops)
+{
+        int d0;
+
+        xloops *= 4;
+        asm("mull %%edx"
+                : "=d" (xloops), "=&a" (d0)
+                : "1" (xloops), ""
+                (loops_per_jiffy * (HZ/4)));
+
+        __delay(++xloops);
+}
+
+void __udelay(unsigned long usecs)
+{
+        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
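The microsecond delay is fixed-point arithmetic in disguise: 0x10c7 = 4295 is
about 2^32/10^6, so usecs * 0x10c7 holds usecs/10^6 scaled by 2^32.
__const_udelay then multiplies by loops_per_jiffy * HZ (split as *4 and
*(HZ/4) to keep the constant operand from overflowing), and mull's high half
in %edx is exactly the product divided by 2^32. Worked numbers, assuming
HZ = 100 and loops_per_jiffy = 1,000,000 (illustrative values only):

/* udelay(10):
 *   xloops = 10 * 0x10c7 = 42,950;  *4 -> 171,800
 *   171,800 * (1,000,000 * 25) = 4.295e12;  >> 32 -> ~1000 loops
 * and 1000 loops is exactly 10 us at this calibration
 * (loops_per_jiffy * HZ / 10^6 = 100 loops per microsecond). */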
@@ Rev 5354 lines 653-669: added @@
+
+unsigned int _sw_hweight32(unsigned int w)
+{
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+        w -= (w >> 1) & 0x55555555;
+        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
+        w =  (w + (w >> 4)) & 0x0f0f0f0f;
+        return (w * 0x01010101) >> 24;
+#else
+        unsigned int res = w - ((w >> 1) & 0x55555555);
+        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+        res = (res + (res >> 4)) & 0x0F0F0F0F;
+        res = res + (res >> 8);
+        return (res + (res >> 16)) & 0x000000FF;
+#endif
+}
+EXPORT_SYMBOL(_sw_hweight32);
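Both branches are the standard SWAR population count: fold adjacent bits into
2-bit sums, then 4-bit, then bytes; the CONFIG_ARCH_HAS_FAST_MULTIPLIER
variant finishes by letting w * 0x01010101 add all four byte counts into the
top byte. A worked trace (illustration):

/* w = 0xF0F0F0F0 (16 bits set):
 *   pair fold:   0xA0A0A0A0   (each 2-bit field holds its own popcount)
 *   nibble fold: 0x40404040   (each byte holds 4 in its high nibble)
 *   byte mask:   0x04040404   (each byte holds its count, 4)
 *   0x04040404 * 0x01010101 = 0x100C0804 -> top byte 0x10 = 16. */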
@@ Rev 5354 lines 670-676: added @@
+
+
+void usleep_range(unsigned long min, unsigned long max)
+{
+    udelay(max);
+}
+EXPORT_SYMBOL(usleep_range);
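usleep_range() here simply busy-waits for the upper bound and ignores min;
without Linux's hrtimer machinery that is the safe direction to round, but
every call costs max microseconds of spinning.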
@@ Rev 5354 lines 677-727: added @@
+
+
+static unsigned long round_jiffies_common(unsigned long j, int cpu,
+                bool force_up)
+{
+        int rem;
+        unsigned long original = j;
+
+        /*
+         * We don't want all cpus firing their timers at once hitting the
+         * same lock or cachelines, so we skew each extra cpu with an extra
+         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
+         * already did this.
+         * The skew is done by adding 3*cpunr, then round, then subtract this
+         * extra offset again.
+         */
+        j += cpu * 3;
+
+        rem = j % HZ;
+
+        /*
+         * If the target jiffie is just after a whole second (which can happen
+         * due to delays of the timer irq, long irq off times etc etc) then
+         * we should round down to the whole second, not up. Use 1/4th second
+         * as cutoff for this rounding as an extreme upper bound for this.
+         * But never round down if @force_up is set.
+         */
+        if (rem < HZ/4 && !force_up) /* round down */
+                j = j - rem;
+        else /* round up */
+                j = j - rem + HZ;
+
+        /* now that we have rounded, subtract the extra skew again */
+        j -= cpu * 3;
+
+        /*
+         * Make sure j is still in the future. Otherwise return the
+         * unmodified value.
+         */
+        return time_is_after_jiffies(j) ? j : original;
+}
+
+
+unsigned long round_jiffies_up_relative(unsigned long j, int cpu)
+{
+        unsigned long j0 = jiffies;
+
+        /* Use j0 because jiffies might change while we run */
+        return round_jiffies_common(j + j0, 0, true) - j0;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
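Two port-specific quirks in this hunk: round_jiffies_up_relative() passes
cpu = 0 to round_jiffies_common() regardless of its argument (the per-CPU skew
is pointless on one CPU), and the export names __round_jiffies_up_relative
although the function defined above has no leading underscores; if
EXPORT_SYMBOL_GPL expands to nothing in this shim the mismatch is invisible,
otherwise it would not link. A worked rounding example, assuming HZ = 100:

/* j = 112 jiffies: rem = 12, and 12 < HZ/4 = 25, so:
 *   force_up == false -> round down to 100
 *   force_up == true  -> round up   to 200
 * j = 130: rem = 30 >= 25, always rounds up to 200. */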
@@ Rev 5354 lines 728-765: added @@
+
+
+#include 
+
+struct rcu_ctrlblk {
+        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
+        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
+        struct rcu_head **curtail;      /* ->next pointer of last CB. */
+//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
+//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
+//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
+//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
+//        RCU_TRACE(const char *name);    /* Name of RCU type. */
+};
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_sched_ctrlblk = {
+        .donetail       = &rcu_sched_ctrlblk.rcucblist,
+        .curtail        = &rcu_sched_ctrlblk.rcucblist,
+//        RCU_TRACE(.name = "rcu_sched")
+};
+
+static void __call_rcu(struct rcu_head *head,
+                       void (*func)(struct rcu_head *rcu),
+                       struct rcu_ctrlblk *rcp)
+{
+        unsigned long flags;
+
+//        debug_rcu_head_queue(head);
+        head->func = func;
+        head->next = NULL;
+
+        local_irq_save(flags);
+        *rcp->curtail = head;
+        rcp->curtail = &head->next;
+//        RCU_TRACE(rcp->qlen++);
+        local_irq_restore(flags);
+}
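This is Tiny RCU's single-list queueing: with interrupts off, the new callback
is linked in at *curtail and curtail advances to its ->next, so rcucblist
stays a FIFO that a later quiescent point can drain. A minimal caller in the
Tiny-RCU style (sketch: my_node, my_node_free and the use of container_of are
assumptions; only __call_rcu and rcu_sched_ctrlblk come from the hunk above):

struct my_node {                       /* hypothetical RCU-protected node */
    int value;
    struct rcu_head rcu;
};

static void my_node_free(struct rcu_head *rcu)
{
    struct my_node *n = container_of(rcu, struct my_node, rcu);
    free(n);                           /* runs only after the grace period */
}

static void my_node_retire(struct my_node *n)
{
    /* queue onto the single rcu_sched list defined above */
    __call_rcu(&n->rcu, my_node_free, &rcu_sched_ctrlblk);
}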
@@ Rev 5354 lines 766-771: added @@
+
+/*
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
+ * period.  But since we have but one CPU, that would be after any
+ * quiescent state.
+ */