/* KolibriOS SVN, rev 6321 */

#include <ddk.h>
#include <linux/mm.h>
#include <drm/drmP.h>
#include <linux/hdmi.h>
#include "radeon.h"

int x86_clflush_size;
unsigned int tsc_khz;

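/*
 * shmem_file_setup() below is a minimal KolibriOS stand-in for the Linux
 * shmem API: it only builds a pseudo-file carrying an array of page
 * pointers sized for @size, with the pages themselves left unallocated
 * (allocated = 0) for later backing-store allocation.
 */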
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if(unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    }

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}

static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find a character in an area of memory that does not match.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * Returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
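
/*
 * Typical use (a sketch, not taken from this driver): verify that a buffer
 * is completely filled with a single byte value, e.g. all zeroes:
 *
 *     if (memchr_inv(buf, 0, len) != NULL)
 *             printf("buffer is not zeroed\n");
 */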



#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}



void msleep(unsigned int msecs)
{
    /* the native Delay() service counts in 10 ms ticks: convert from
     * milliseconds and sleep for at least one tick */
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

}


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}

/*
 * xloops is a fixed-point fraction of a second (2^32 units = 1 s).  The
 * 32x32->64 bit mull leaves the high half of xloops * (loops_per_jiffy * HZ)
 * in %edx, i.e. the number of delay_loop() iterations for that interval.
 */
inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), ""
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

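/*
 * Software population count (number of set bits in a 32-bit word), used when
 * no hardware popcnt is available; the classic parallel bit-count reduction,
 * with a multiply-based final step on machines with a fast multiplier.
 */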
unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);


void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);


void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}

void cpu_detect1(void)
{

    u32 junk, tfms, cap0, misc;
    int i;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
    {
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0   /* disabled: MTRR reprogramming / CPU model dump experiment */
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);

    printf("\n%s\n\n",cpuinfo.model_name);

    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);

    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);

    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    }

    unsigned int cr0, cr3, cr4, eflags;

    eflags = safe_cli();

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    cr0 = read_cr0() | (1<<30);
    write_cr0(cr0);
    wbinvd();

    cr4 = read_cr4();
    write_cr4(cr4 & ~(1<<7));

    cr3 = read_cr3();
    write_cr3(cr3);

    /* Save MTRR state */
    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Disable MTRRs, and set the default type to uncached */
    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    wbinvd();

    i = 0;
    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);

    for(; i < cpuinfo.var_mtrr_count; i++)
        set_mtrr(i,0,0,0);

    write_cr3(cr3);

    /* Intel (P6) standard MTRRs */
    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Enable caches */
    write_cr0(read_cr0() & ~(1<<30));

    /* Restore value of CR4 */
    write_cr4(cr4);

    safe_sti(eflags);

    printf("\nnew MTRR map\n\n");

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    }
#endif

    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}


static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    number of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);


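/*
 * fence_signal - mark the fence as signalled and run any registered
 * callbacks.  Returns -EINVAL if the fence is NULL or was already
 * signalled.  Takes fence->lock itself when callbacks need to run; use
 * fence_signal_locked() when the lock is already held.
 */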
int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

//        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);

int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

//        if (!ktime_to_ns(fence->timestamp)) {
//                fence->timestamp = ktime_get();
//                smp_mb__before_atomic();
//        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal,
                 * still run through all callbacks
                 */
        }// else
//                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);


void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
//                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);



signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

//        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
//        trace_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

//        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);


static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        u32 i;

        fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);

        for (i = 0; i < fobj->shared_count; ++i) {
                struct fence *old_fence;

                old_fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(obj));

                if (old_fence->context == fence->context) {
                        /* memory barrier is added by write_seqcount_begin */
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                        write_seqcount_end(&obj->seq);
//                        preempt_enable();

                        fence_put(old_fence);
                        return;
                }
        }

        /*
         * memory barrier is added by write_seqcount_begin,
         * fobj->shared_count is protected by this lock too
         */
        RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
        fobj->shared_count++;

        write_seqcount_end(&obj->seq);
//        preempt_enable();
}



static void
reservation_object_add_shared_replace(struct reservation_object *obj,
                                      struct reservation_object_list *old,
                                      struct reservation_object_list *fobj,
                                      struct fence *fence)
{
        unsigned i;
        struct fence *old_fence = NULL;

        fence_get(fence);

        if (!old) {
                RCU_INIT_POINTER(fobj->shared[0], fence);
                fobj->shared_count = 1;
                goto done;
        }

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        fobj->shared_count = old->shared_count;

        for (i = 0; i < old->shared_count; ++i) {
                struct fence *check;

                check = rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj));

                if (!old_fence && check->context == fence->context) {
                        old_fence = check;
                        RCU_INIT_POINTER(fobj->shared[i], fence);
                } else
                        RCU_INIT_POINTER(fobj->shared[i], check);
        }
        if (!old_fence) {
                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
                fobj->shared_count++;
        }

done:
//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /*
         * RCU_INIT_POINTER can be used here,
         * seqcount provides the necessary barriers
         */
        RCU_INIT_POINTER(obj->fence, fobj);
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        if (old)
                kfree_rcu(old, rcu);

        if (old_fence)
                fence_put(old_fence);
}


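/*
 * reservation_object_reserve_shared - make sure obj->staged has room for at
 * least one more shared fence, doubling the capacity (starting at 4) when
 * the current list is full.  Called with the reservation object held,
 * before reservation_object_add_shared_fence().
 */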
int reservation_object_reserve_shared(struct reservation_object *obj)
{
        struct reservation_object_list *fobj, *old;
        u32 max;

        old = reservation_object_get_list(obj);

        if (old && old->shared_max) {
                if (old->shared_count < old->shared_max) {
                        /* perform an in-place update */
                        kfree(obj->staged);
                        obj->staged = NULL;
                        return 0;
                } else
                        max = old->shared_max * 2;
        } else
                max = 4;

        /*
         * resize obj->staged or allocate if it doesn't exist,
         * noop if already correct size
         */
        fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
                        GFP_KERNEL);
        if (!fobj)
                return -ENOMEM;

        obj->staged = fobj;
        fobj->shared_max = max;
        return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct fence *fence)
{
        struct reservation_object_list *old, *fobj = obj->staged;

        old = reservation_object_get_list(obj);
        obj->staged = NULL;

        if (!fobj) {
                BUG_ON(old->shared_count >= old->shared_max);
                reservation_object_add_shared_inplace(obj, old, fence);
        } else
                reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);


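/*
 * reservation_object_add_excl_fence - install @fence as the new exclusive
 * fence of @obj and drop all shared fences; their references are released
 * after the seqcount write section ends.
 */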
void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
{
        struct fence *old_fence = reservation_object_get_excl(obj);
        struct reservation_object_list *old;
        u32 i = 0;

        old = reservation_object_get_list(obj);
        if (old)
                i = old->shared_count;

        if (fence)
                fence_get(fence);

//        preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);
//        preempt_enable();

        /* inplace update, no shared fences */
        while (i--)
                fence_put(rcu_dereference_protected(old->shared[i],
                                                reservation_object_held(obj)));

        if (old_fence)
                fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

//        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);

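/*
 * Typical driver usage (a sketch with hypothetical names, not code from this
 * file): the driver supplies a fence_ops providing at least the callbacks
 * checked by fence_init() above, then initialises each fence with a context
 * obtained from fence_context_alloc():
 *
 *     static const struct fence_ops my_fence_ops = {
 *             .get_driver_name   = my_get_driver_name,
 *             .get_timeline_name = my_get_timeline_name,
 *             .enable_signaling  = my_enable_signaling,
 *             .wait              = my_fence_wait,
 *     };
 *
 *     context = fence_context_alloc(1);
 *     fence_init(&f->base, &my_fence_ops, &f->lock, context, ++f->seqno);
 */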

#include <linux/rcupdate.h>

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}

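/*
 * Example (a sketch with a hypothetical callback, not code from this file):
 *
 *     static void my_obj_free_rcu(struct rcu_head *head)
 *     {
 *             kfree(container_of(head, struct my_obj, rcu));
 *     }
 *
 *     call_rcu_sched(&obj->rcu, my_obj_free_rcu);
 *
 * kfree_rcu(), used by fence_free() above, is effectively shorthand for
 * this pattern.
 */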
int fb_get_options(const char *name, char **option)
{
    return 1;
}

ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}

/* no-op stub in this port */
void radeon_cursor_reset(struct drm_crtc *crtc)
{

}

/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
        unsigned long r;

        if (a < b)
                swap(a, b);

        if (!b)
                return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
        }
        return b;
}

void vfree(const void *addr)
{
    KernelFree(addr);
}


/* stubs: page attribute changes are not performed in this port */
int set_memory_uc(unsigned long addr, int numpages)
{
    return 0;
}

int set_memory_wb(unsigned long addr, int numpages)
{
    return 0;
}