Subversion Repositories Kolibri OS


#include <ddk.h>
#include <linux/mm.h>
#include <drm/drmP.h>
#include <linux/hdmi.h>
#include "radeon.h"

int x86_clflush_size;
unsigned int tsc_khz;

struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if(unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    };

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}
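/*
 * Note: this KolibriOS stand-in for the Linux shmem_file_setup() only sizes
 * the page-pointer table (filep->allocated == 0); the backing pages are
 * presumably populated later by the GEM code.  Illustrative use (caller
 * assumed, not part of this file):
 *
 *     struct file *filp = shmem_file_setup("gem object", 16 * PAGE_SIZE, 0);
 *     if (IS_ERR(filp))
 *         return PTR_ERR(filp);   // room for 16 page pointers, none mapped yet
 */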

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
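/*
 * Example (illustrative only): memchr_inv() is the natural way to verify
 * that a buffer is uniformly filled with one byte value:
 *
 *     u8 buf[64] = {0};
 *     memchr_inv(buf, 0, sizeof(buf));   // NULL: every byte is 0
 *     buf[13] = 0xAA;
 *     memchr_inv(buf, 0, sizeof(buf));   // now returns &buf[13]
 */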



#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}
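/*
 * Illustration (assumes ASCII input): _tolower() just ORs in bit 0x20, so it
 * is only correct for letters: _tolower('A') == 'a', but _tolower('@') == '`'
 * and _tolower('[') == '{'.  Use tolower() above for arbitrary bytes and keep
 * _tolower() for characters already known to be alphabetic.
 */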



#define KMAP_MAX    256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init()
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if(kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
};

void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);

    return vaddr;
};

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    void *vaddr;
    int   i;

    MutexLock(&kmap_mutex);

    for(i = 0; i < KMAP_MAX; i++)
    {
        if(kmap_table[i] == page)
        {
            kmap_av++;
            if(i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr,0,0);
            break;
        };
    };

    MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr,0,0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if(i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}
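/*
 * Typical pairing (illustrative only, caller assumed):
 *
 *     void *vaddr = kmap(page);          // waits for a free slot in kmap_table
 *     memcpy(vaddr, data, PAGE_SIZE);
 *     kunmap(page);                      // or kunmap_atomic(vaddr)
 *
 * kmap() spins on the table under kmap_mutex until a slot frees up, so every
 * mapping must be balanced by kunmap()/kunmap_atomic() or the KMAP_MAX slots
 * are eventually exhausted and kmap() blocks forever.
 */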
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

};
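/*
 * msleep() converts milliseconds into the 1/100 s (10 ms) ticks that the
 * kernel's Delay() service works in (hence the division by 10), so the
 * effective resolution is 10 ms: msleep(1)..msleep(10) all wait one tick,
 * msleep(25) waits two.
 */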


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}


inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), ""
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
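/*
 * Why 0x000010c7: it is 2^32 / 10^6 rounded up, so __const_udelay() ends up
 * computing usecs * loops_per_jiffy * HZ / 10^6 via the 32x32->64 bit "mull",
 * i.e. the calibrated number of delay_loop() iterations per microsecond.
 * Example (assumed calibration): with loops_per_jiffy * HZ == 10^9 loops/s,
 * __udelay(5) spins for roughly 5000 iterations.
 */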

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);
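/*
 * _sw_hweight32() is the classic SWAR population count: each step folds
 * adjacent bit groups into wider partial sums.  For instance
 * _sw_hweight32(0x000000FF) == 8 and _sw_hweight32(0xF0F0F0F0) == 16.
 */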


void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);


void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}

void cpu_detect1()
{

    u32 junk, tfms, cap0, misc;
    int i;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
    {
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);

    printf("\n%s\n\n",cpuinfo.model_name);

    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);

    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);

    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };

    unsigned int cr0, cr3, cr4, eflags;

    eflags = safe_cli();

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    cr0 = read_cr0() | (1<<30);
    write_cr0(cr0);
    wbinvd();

    cr4 = read_cr4();
    write_cr4(cr4 & ~(1<<7));

    cr3 = read_cr3();
    write_cr3(cr3);

    /* Save MTRR state */
    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Disable MTRRs, and set the default type to uncached */
    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    wbinvd();

    i = 0;
    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);

    for(; i < cpuinfo.var_mtrr_count; i++)
        set_mtrr(i,0,0,0);

    write_cr3(cr3);

    /* Intel (P6) standard MTRRs */
    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Enable caches */
    write_cr0(read_cr0() & ~(1<<30));

    /* Restore value of CR4 */
    write_cr4(cr4);

    safe_sti(eflags);

    printf("\nnew MTRR map\n\n");

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };
#endif

    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}


static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    amount of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
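/*
 * Example (illustrative only): a driver with several rings typically grabs a
 * contiguous block of contexts once at init time,
 *
 *     unsigned base = fence_context_alloc(RADEON_NUM_RINGS);
 *     // ring i then stamps its fences with context (base + i)
 *
 * so fences emitted by different rings are never treated as belonging to the
 * same timeline.
 */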


int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

//        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);

int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        }// else
//                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);


void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
//                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);



signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

//        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
//        trace_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

//        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);


static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        u32 i;

        fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);

        for (i = 0; i < fobj->shared_count; ++i) {
                struct fence *old_fence;

                old_fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(obj));

                if (old_fence->context == fence->context) {
                        /* memory barrier is added by write_seqcount_begin */
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                        write_seqcount_end(&obj->seq);
//                        preempt_enable();

                        fence_put(old_fence);
                        return;
                }
        }

        /*
         * memory barrier is added by write_seqcount_begin,
         * fobj->shared_count is protected by this lock too
         */
        RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
        fobj->shared_count++;

        write_seqcount_end(&obj->seq);
//        preempt_enable();
}



static void
reservation_object_add_shared_replace(struct reservation_object *obj,
                                      struct reservation_object_list *old,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        unsigned i;
        struct fence *old_fence = NULL;

        fence_get(fence);

        if (!old) {
                RCU_INIT_POINTER(fobj->shared[0], fence);
                fobj->shared_count = 1;
                goto done;
        }

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        fobj->shared_count = old->shared_count;

        for (i = 0; i < old->shared_count; ++i) {
                struct fence *check;

                check = rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj));

                if (!old_fence && check->context == fence->context) {
                        old_fence = check;
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                } else
                        RCU_INIT_POINTER(fobj->shared[i], check);
        }
        if (!old_fence) {
                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
                fobj->shared_count++;
        }

done:
//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /*
         * RCU_INIT_POINTER can be used here,
         * seqcount provides the necessary barriers
         */
        RCU_INIT_POINTER(obj->fence, fobj);
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        if (old)
                kfree_rcu(old, rcu);

        if (old_fence)
                fence_put(old_fence);
}


int reservation_object_reserve_shared(struct reservation_object *obj)
{
        struct reservation_object_list *fobj, *old;
        u32 max;

        old = reservation_object_get_list(obj);

        if (old && old->shared_max) {
                if (old->shared_count < old->shared_max) {
                        /* perform an in-place update */
                        kfree(obj->staged);
                        obj->staged = NULL;
                        return 0;
                } else
                        max = old->shared_max * 2;
        } else
                max = 4;

        /*
         * resize obj->staged or allocate if it doesn't exist,
         * noop if already correct size
         */
        fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
                        GFP_KERNEL);
        if (!fobj)
                return -ENOMEM;

        obj->staged = fobj;
        fobj->shared_max = max;
        return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
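/*
 * The staged list grows geometrically: the first reservation makes room for
 * 4 shared fences and every time the list fills up the capacity doubles
 * (4 -> 8 -> 16 ...), with reservation_object_add_shared_fence() below
 * swapping the larger array in.
 */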

void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct fence *fence)
{
        struct reservation_object_list *old, *fobj = obj->staged;

        old = reservation_object_get_list(obj);
        obj->staged = NULL;

        if (!fobj) {
                BUG_ON(old->shared_count >= old->shared_max);
                reservation_object_add_shared_inplace(obj, old, fence);
        } else
                reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);


void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
{
        struct fence *old_fence = reservation_object_get_excl(obj);
        struct reservation_object_list *old;
        u32 i = 0;

        old = reservation_object_get_list(obj);
        if (old)
                i = old->shared_count;

        if (fence)
                fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        /* inplace update, no shared fences */
        while (i--)
                fence_put(rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj)));

        if (old_fence)
                fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

//        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);


#include <linux/rcupdate.h>

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}

/* stub: no fb command-line option handling in this port */
int fb_get_options(const char *name, char **option)
{
    return 1;

}

ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}

void radeon_cursor_reset(struct drm_crtc *crtc)
{

}

/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
        unsigned long r;

        if (a < b)
                swap(a, b);

        if (!b)
                return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
        }
        return b;
}
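/*
 * Example: gcd(12, 18) == 6 and gcd(17, 5) == 1.  Typically used by the
 * mode-setting/PLL code to reduce clock ratios to lowest terms.
 */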

void vfree(const void *addr)
{
    KernelFree(addr);
}
  894.