Subversion Repositories Kolibri OS


Diff of Rev 5078 against Rev 5271 ('-' = removed in Rev 5271, '+' = added in Rev 5271)

Line 1 (Rev 5078) ... Line 1 (Rev 5271)

#include 
#include 
-#include 
#include 
#include 
+#include "radeon.h"

int x86_clflush_size;
unsigned int tsc_khz;

struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

-    filep = malloc(sizeof(*filep));
+    filep = __builtin_malloc(sizeof(*filep));
Line 157 (Rev 5078) ... Line 157 (Rev 5271)

{
    return c | 0x20;
}
-


//const char hex_asc[] = "0123456789abcdef";

Line 376 (Rev 5078) ... Line 375 (Rev 5271)

{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                       buf, len, true);
}
+
+void msleep(unsigned int msecs)
+{
+    msecs /= 10;
+    if(!msecs) msecs = 1;
+
+     __asm__ __volatile__ (
+     "call *__imp__Delay"
+     ::"b" (msecs));
+     __asm__ __volatile__ (
+     "":::"ebx");
+
+};


-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
-                unsigned int *ecx, unsigned int *edx)
-{
-    /* ecx is often an input as well as an output. */
-    asm volatile("cpuid"
-        : "=a" (*eax),
-          "=b" (*ebx),
-          "=c" (*ecx),
-          "=d" (*edx)
-        : "0" (*eax), "2" (*ecx)
-        : "memory");
-}
-
-static inline void cpuid(unsigned int op,
-                         unsigned int *eax, unsigned int *ebx,
-                         unsigned int *ecx, unsigned int *edx)
-{
-        *eax = op;
-        *ecx = 0;
-        __cpuid(eax, ebx, ecx, edx);
-}
-
-void cpu_detect()
-{
-    u32 junk, tfms, cap0, misc;
-
-    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-
-    if (cap0 & (1<<19))
-    {
-        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+        asm volatile(
+                "       test %0,%0      \n"
+                "       jz 3f           \n"
+                "       jmp 1f          \n"
+
+                ".align 16              \n"
+                "1:     jmp 2f          \n"
+
+                ".align 16              \n"
+                "2:     dec %0          \n"
+                "       jnz 2b          \n"
+                "3:     dec %0          \n"
+
+                : /* we don't need output */
+                :"a" (loops)
+        );
+}
+
+
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void __delay(unsigned long loops)
+{
+        delay_fn(loops);
+}
+
+
+inline void __const_udelay(unsigned long xloops)
+{
+        int d0;
+
+        xloops *= 4;
+        asm("mull %%edx"
+                : "=d" (xloops), "=&a" (d0)
+                : "1" (xloops), ""
+                (loops_per_jiffy * (HZ/4)));
+
+        __delay(++xloops);
+}
+
+void __udelay(unsigned long usecs)
+{
+        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+
+unsigned int _sw_hweight32(unsigned int w)
+{
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+        w -= (w >> 1) & 0x55555555;
+        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
+        w =  (w + (w >> 4)) & 0x0f0f0f0f;
+        return (w * 0x01010101) >> 24;
+#else
+        unsigned int res = w - ((w >> 1) & 0x55555555);
+        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+        res = (res + (res >> 4)) & 0x0F0F0F0F;
+        res = res + (res >> 8);
+        return (res + (res >> 16)) & 0x000000FF;
+#endif
+}
+EXPORT_SYMBOL(_sw_hweight32);
+
+
Line 425 (Rev 5078) ... Line 472 (Rev 5271)

    if (p)
        memcpy(p, src, len);
    return p;
}
+
+void cpu_detect1()
+{
+
+    u32 junk, tfms, cap0, misc;
+    int i;
+
+    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+
+    if (cap0 & (1<<19))
+    {
+        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+    }
+
+#if 0
+    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
+          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
+    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
+          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
+    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
+          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);
+
+    printf("\n%s\n\n",cpuinfo.model_name);
+
+    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
+    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);
+
+    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);
+
+    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;
+
+    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
+    {
+        u64_t mtrr_base;
+        u64_t mtrr_mask;
+
+        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
+        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
+
+        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
+               cpuinfo.var_mtrr[i].base,
+               cpuinfo.var_mtrr[i].mask);
+    };
+
+    unsigned int cr0, cr3, cr4, eflags;
+
+    eflags = safe_cli();
+
+    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+    cr0 = read_cr0() | (1<<30);
+    write_cr0(cr0);
+    wbinvd();
+
+    cr4 = read_cr4();
+    write_cr4(cr4 & ~(1<<7));
+
+    cr3 = read_cr3();
+    write_cr3(cr3);
+
+    /* Save MTRR state */
+    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+
+    /* Disable MTRRs, and set the default type to uncached */
+    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+    wbinvd();
+
+    i = 0;
+    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
+    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
+    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
+    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
+    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
+    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);
+
+    for(; i < cpuinfo.var_mtrr_count; i++)
+        set_mtrr(i,0,0,0);
+
+    write_cr3(cr3);
+
+    /* Intel (P6) standard MTRRs */
+    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+
+    /* Enable caches */
+    write_cr0(read_cr0() & ~(1<<30));
+
+    /* Restore value of CR4 */
+    write_cr4(cr4);
+
+    safe_sti(eflags);
+
+    printf("\nnew MTRR map\n\n");
+
+    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
+    {
+        u64_t mtrr_base;
+        u64_t mtrr_mask;
+
+        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
+        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
+
+        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
+               cpuinfo.var_mtrr[i].base,
+               cpuinfo.var_mtrr[i].mask);
+    };
+#endif
+
+    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
+}
-
-
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
-        const unsigned long *p = addr;
-        unsigned long result = 0;
-        unsigned long tmp;
-
-        while (size & ~(BITS_PER_LONG-1)) {
-                if (~(tmp = *(p++)))
-                        goto found;
-                result += BITS_PER_LONG;
-                size -= BITS_PER_LONG;
-        }
-        if (!size)
-                return result;
-
-        tmp = (*p) | (~0UL << size);
-        if (tmp == ~0UL)        /* Are any bits zero? */
-                return result + size;   /* Nope. */
-found:
+
+
+static atomic_t fence_context_counter = ATOMIC_INIT(0);
+
+/**
+ * fence_context_alloc - allocate an array of fence contexts
+ * @num:        [in]    amount of contexts to allocate
+ *
+ * This function will return the first index of the number of fences allocated.
+ * The fence context is used for setting fence->context to a unique number.
+ */
+unsigned fence_context_alloc(unsigned num)
+{
+        BUG_ON(!num);
+        return atomic_add_return(num, &fence_context_counter) - num;
+}
+EXPORT_SYMBOL(fence_context_alloc);
+
+
+int fence_signal(struct fence *fence)
+{
+        unsigned long flags;
+
+        if (!fence)
+                return -EINVAL;
+
+//        if (!ktime_to_ns(fence->timestamp)) {
+//                fence->timestamp = ktime_get();
+//                smp_mb__before_atomic();
+//        }
+
+        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+                return -EINVAL;
+
+//        trace_fence_signaled(fence);
+
+        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+                struct fence_cb *cur, *tmp;
+
+                spin_lock_irqsave(fence->lock, flags);
+                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+                        list_del_init(&cur->node);
+                        cur->func(fence, cur);
+                }
+                spin_unlock_irqrestore(fence->lock, flags);
+        }
+        return 0;
+}
+EXPORT_SYMBOL(fence_signal);
+
+int fence_signal_locked(struct fence *fence)
+{
+        struct fence_cb *cur, *tmp;
+        int ret = 0;
+
+        if (WARN_ON(!fence))
+                return -EINVAL;
+
+//        if (!ktime_to_ns(fence->timestamp)) {
+//                fence->timestamp = ktime_get();
+//                smp_mb__before_atomic();
+//        }
+
+        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+                ret = -EINVAL;
+
+                /*
+                 * we might have raced with the unlocked fence_signal,
+                 * still run through all callbacks
+                 */
+        }// else
+//                trace_fence_signaled(fence);
+
+        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+                list_del_init(&cur->node);
+                cur->func(fence, cur);
+        }
+        return ret;
+}
+EXPORT_SYMBOL(fence_signal_locked);
+
+
+void fence_enable_sw_signaling(struct fence *fence)
+{
+        unsigned long flags;
+
+        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
+            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+//                trace_fence_enable_signal(fence);
+
+                spin_lock_irqsave(fence->lock, flags);
+
+                if (!fence->ops->enable_signaling(fence))
+                        fence_signal_locked(fence);
+
+                spin_unlock_irqrestore(fence->lock, flags);
+        }
+}
+EXPORT_SYMBOL(fence_enable_sw_signaling);
+
+
+
+signed long
+fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+{
+        signed long ret;
+
+        if (WARN_ON(timeout < 0))
+                return -EINVAL;
+
+//        trace_fence_wait_start(fence);
+        ret = fence->ops->wait(fence, intr, timeout);
+//        trace_fence_wait_end(fence);
+        return ret;
+}
+EXPORT_SYMBOL(fence_wait_timeout);
+
+void fence_release(struct kref *kref)
+{
+        struct fence *fence =
+                        container_of(kref, struct fence, refcount);
+
+//        trace_fence_destroy(fence);
+
+        BUG_ON(!list_empty(&fence->cb_list));
+
+        if (fence->ops->release)
+                fence->ops->release(fence);
+        else
+                fence_free(fence);
+}
+EXPORT_SYMBOL(fence_release);
+
+void fence_free(struct fence *fence)
+{
+        kfree_rcu(fence, rcu);
+}
+EXPORT_SYMBOL(fence_free);
+
+
+reservation_object_add_shared_inplace(struct reservation_object *obj,
+                                      struct reservation_object_list *fobj,
+                                      struct fence *fence)
+{
+        u32 i;
+
+        fence_get(fence);
+
+//        preempt_disable();
+        write_seqcount_begin(&obj->seq);
+
+        for (i = 0; i < fobj->shared_count; ++i) {
+                struct fence *old_fence;
+
+                old_fence = rcu_dereference_protected(fobj->shared[i],
+                                                reservation_object_held(obj));
+
+                if (old_fence->context == fence->context) {
+                        /* memory barrier is added by write_seqcount_begin */
+                        RCU_INIT_POINTER(fobj->shared[i], fence);
+                        write_seqcount_end(&obj->seq);
+                        preempt_enable();
+
+                        fence_put(old_fence);
+                        return;
+                }
+        }
+
+        /*
+         * memory barrier is added by write_seqcount_begin,
+         * fobj->shared_count is protected by this lock too
+         */
+        RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+        fobj->shared_count++;
+
+        write_seqcount_end(&obj->seq);
+//        preempt_enable();
+}
+
+
+
+static void
+reservation_object_add_shared_replace(struct reservation_object *obj,
+                                      struct reservation_object_list *old,
+                                      struct reservation_object_list *fobj,
+                                      struct fence *fence)
+{
+        unsigned i;
+        struct fence *old_fence = NULL;
+
+        fence_get(fence);
+
+        if (!old) {
+                RCU_INIT_POINTER(fobj->shared[0], fence);
+                fobj->shared_count = 1;
+                goto done;
+        }
+
+        /*
+         * no need to bump fence refcounts, rcu_read access
+         * requires the use of kref_get_unless_zero, and the
+         * references from the old struct are carried over to
+         * the new.
+         */
+        fobj->shared_count = old->shared_count;
+
+        for (i = 0; i < old->shared_count; ++i) {
+                struct fence *check;
+
+                check = rcu_dereference_protected(old->shared[i],
+                                                reservation_object_held(obj));
+
+                if (!old_fence && check->context == fence->context) {
+                        old_fence = check;
+                        RCU_INIT_POINTER(fobj->shared[i], fence);
+                } else
+                        RCU_INIT_POINTER(fobj->shared[i], check);
+        }
+        if (!old_fence) {
+                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+                fobj->shared_count++;
+        }
+
+done:
+//        preempt_disable();
+        write_seqcount_begin(&obj->seq);
+        /*
+         * RCU_INIT_POINTER can be used here,
+         * seqcount provides the necessary barriers
+         */
+        RCU_INIT_POINTER(obj->fence, fobj);
+        write_seqcount_end(&obj->seq);
+//        preempt_enable();
+
+        if (old)
+                kfree_rcu(old, rcu);
+
+        if (old_fence)
+                fence_put(old_fence);
+}
+
+
+int reservation_object_reserve_shared(struct reservation_object *obj)
+{
+        struct reservation_object_list *fobj, *old;
+        u32 max;
+
+        old = reservation_object_get_list(obj);
+
+        if (old && old->shared_max) {
+                if (old->shared_count < old->shared_max) {
+                        /* perform an in-place update */
+                        kfree(obj->staged);
+                        obj->staged = NULL;
+                        return 0;
+                } else
+                        max = old->shared_max * 2;
+        } else
+                max = 4;
+
+        /*
+         * resize obj->staged or allocate if it doesn't exist,
+         * noop if already correct size
+         */
+        fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
+                        GFP_KERNEL);
+        if (!fobj)
+                return -ENOMEM;
+
+        obj->staged = fobj;
+        fobj->shared_max = max;
+        return 0;
+}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
+
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+                                         struct fence *fence)
+{
+        struct reservation_object_list *old, *fobj = obj->staged;
+
+        old = reservation_object_get_list(obj);
+        obj->staged = NULL;
+
+        if (!fobj) {
+                BUG_ON(old->shared_count >= old->shared_max);
+                reservation_object_add_shared_inplace(obj, old, fence);
+        } else
+                reservation_object_add_shared_replace(obj, old, fobj, fence);
+}
+EXPORT_SYMBOL(reservation_object_add_shared_fence);
+
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+                                       struct fence *fence)
+{
+        struct fence *old_fence = reservation_object_get_excl(obj);
+        struct reservation_object_list *old;
+        u32 i = 0;
+
+        old = reservation_object_get_list(obj);
+        if (old)
+                i = old->shared_count;
+
+        if (fence)
+                fence_get(fence);
+
+//        preempt_disable();
+        write_seqcount_begin(&obj->seq);
+        /* write_seqcount_begin provides the necessary memory barrier */
+        RCU_INIT_POINTER(obj->fence_excl, fence);
+        if (old)
+                old->shared_count = 0;
+        write_seqcount_end(&obj->seq);
+//        preempt_enable();
+
+        /* inplace update, no shared fences */
+        while (i--)
+                fence_put(rcu_dereference_protected(old->shared[i],
+                                                reservation_object_held(obj)));
+
+        if (old_fence)
+                fence_put(old_fence);
+}
+EXPORT_SYMBOL(reservation_object_add_excl_fence);
+
+void
+fence_init(struct fence *fence, const struct fence_ops *ops,
+             spinlock_t *lock, unsigned context, unsigned seqno)
+{
+        BUG_ON(!lock);
+        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
+               !ops->get_driver_name || !ops->get_timeline_name);
+
+        kref_init(&fence->refcount);
+        fence->ops = ops;
+        INIT_LIST_HEAD(&fence->cb_list);
+        fence->lock = lock;
+        fence->context = context;
+        fence->seqno = seqno;
+        fence->flags = 0UL;
+
+//        trace_fence_init(fence);
+}
+EXPORT_SYMBOL(fence_init);
+
+
+#include 
+
+struct rcu_ctrlblk {
+        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
+        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
+        struct rcu_head **curtail;      /* ->next pointer of last CB. */
+//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
+//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
+//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
+//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
+//        RCU_TRACE(const char *name);    /* Name of RCU type. */
+};
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_sched_ctrlblk = {
+        .donetail       = &rcu_sched_ctrlblk.rcucblist,
+        .curtail        = &rcu_sched_ctrlblk.rcucblist,
+//        RCU_TRACE(.name = "rcu_sched")
+};
+
+static void __call_rcu(struct rcu_head *head,
+                       void (*func)(struct rcu_head *rcu),
+                       struct rcu_ctrlblk *rcp)
+{
+        unsigned long flags;
+
+//        debug_rcu_head_queue(head);
+        head->func = func;
+        head->next = NULL;
+
+        local_irq_save(flags);
+        *rcp->curtail = head;
+        rcp->curtail = &head->next;
+//        RCU_TRACE(rcp->qlen++);
+        local_irq_restore(flags);
+}
+
+/*
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
+ * period.  But since we have but one CPU, that would be after any
+ * quiescent state.
+ */
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+        __call_rcu(head, func, &rcu_sched_ctrlblk);