Subversion Repositories Kolibri OS

Rev

Rev 6938 | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. #include <ddk.h>
  2. #include <linux/mm.h>
  3. #include <linux/scatterlist.h>
  4. #include <linux/dma-mapping.h>
  5. #include <drm/drmP.h>
  6. #include <drm/i915_drm.h>
  7. #include "i915_drv.h"
  8. #include "intel_drv.h"
  9. #include <linux/hdmi.h>
  10. #include <linux/seq_file.h>
  11. #include <linux/fence.h>
  12. #include "i915_kos32.h"
  13.  
  14. struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
  15. {
  16.     struct file *filep;
  17.     int count;
  18.  
  19.     filep = __builtin_malloc(sizeof(*filep));
  20.  
  21.     if(unlikely(filep == NULL))
  22.         return ERR_PTR(-ENOMEM);
  23.  
  24.     count = size / PAGE_SIZE;
  25.  
  26.     filep->pages = kzalloc(sizeof(struct page *) * count, 0);
  27.     if(unlikely(filep->pages == NULL))
  28.     {
  29.         kfree(filep);
  30.         return ERR_PTR(-ENOMEM);
  31.     };
  32.  
  33.     filep->count     = count;
  34.     filep->allocated = 0;
  35.     filep->vma       = NULL;
  36.  
  37. //    printf("%s file %p pages %p count %d\n",
  38. //              __FUNCTION__,filep, filep->pages, count);
  39.  
  40.     return filep;
  41. }
  42.  
  43. struct page *shmem_read_mapping_page_gfp(struct file *filep,
  44.                                          pgoff_t index, gfp_t gfp)
  45. {
  46.     struct page *page;
  47.  
  48.     if(unlikely(index >= filep->count))
  49.         return ERR_PTR(-EINVAL);
  50.  
  51.     page = filep->pages[index];
  52.  
  53.     if(unlikely(page == NULL))
  54.     {
  55.         page = (struct page *)AllocPage();
  56.  
  57.         if(unlikely(page == NULL))
  58.             return ERR_PTR(-ENOMEM);
  59.  
  60.         filep->pages[index] = page;
  61. //        printf("file %p index %d page %x\n", filep, index, page);
  62. //        delay(1);
  63.  
  64.     };
  65.  
  66.     return page;
  67. };
  68.  
  69. unsigned long vm_mmap(struct file *file, unsigned long addr,
  70.          unsigned long len, unsigned long prot,
  71.          unsigned long flag, unsigned long offset)
  72. {
  73.     char *mem, *ptr;
  74.     int i;
  75.  
  76.     if (unlikely(offset + PAGE_ALIGN(len) < offset))
  77.         return -EINVAL;
  78.     if (unlikely(offset & ~PAGE_MASK))
  79.         return -EINVAL;
  80.  
  81.     mem = UserAlloc(len);
  82.     if(unlikely(mem == NULL))
  83.         return -ENOMEM;
  84.  
  85.     for(i = offset, ptr = mem; i < offset+len; i+= 4096, ptr+= 4096)
  86.     {
  87.         struct page *page;
  88.  
  89.         page = shmem_read_mapping_page_gfp(file, i/PAGE_SIZE,0);
  90.  
  91.         if (unlikely(IS_ERR(page)))
  92.             goto err;
  93.  
  94.         MapPage(ptr, (addr_t)page, PG_SHARED|PG_UW);
  95.     }
  96.  
  97.     return (unsigned long)mem;
  98. err:
  99.     UserFree(mem);
  100.     return -ENOMEM;
  101. };
  102.  
/*
 * Release the page-pointer array of a shmem pseudo-file.
 *
 * NOTE(review): only the pages[] index array is freed here — neither the
 * physical pages recorded in it nor the struct file itself are released.
 * Presumably the caller frees those (or the pages are shared with a GEM
 * object); confirm against the callers, otherwise this leaks.
 */
void shmem_file_delete(struct file *filep)
{
//    printf("%s file %p pages %p count %d\n",
//            __FUNCTION__, filep, filep->pages, filep->count);

    if(filep->pages)
        kfree(filep->pages);
}
  111.  
  112.  
  113.  
  114. static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
  115. {
  116.         while (bytes) {
  117.                 if (*start != value)
  118.                         return (void *)start;
  119.                 start++;
  120.                 bytes--;
  121.         }
  122.         return NULL;
  123. }
  124.  
/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        /* Small regions: a plain byte loop beats the setup cost below. */
        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        /* Replicate the byte into all 8 lanes of a 64-bit word. */
        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        /* Byte-compare up to the first 8-byte-aligned address so the
         * word loop below only does aligned loads. */
        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        /* Compare 8 bytes at a time; on mismatch, re-scan that word
         * byte-wise to locate the exact offending byte. */
        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        /* Remaining tail of fewer than 8 bytes. */
        return check_bytes8(start, value, bytes % 8);
}
  178.  
  179.  
/*
 * Trivial scatterlist "mapping" for a system without an IOMMU: the DMA
 * address of each segment is simply its physical address.
 * Returns @nents unchanged (entries are never coalesced).
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
    struct scatterlist *s;
    int i;

    for_each_sg(sg, s, nents, i) {
        s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        s->dma_length  = s->length;
#endif
    }

    return nents;
}
  195.  
/* No-op: dma_map_sg() above performs no bookkeeping that needs undoing. */
void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
};
  201.  
  202.  
/*
 * Linux-style <ctype.h> replacement: characters are classified via a
 * 256-entry attribute table (_ctype, defined elsewhere), one flag bit
 * per character class.
 */
#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

/* Cast through unsigned char first so negative chars index safely. */
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
  231.  
  232. static inline unsigned char __tolower(unsigned char c)
  233. {
  234.     if (isupper(c))
  235.         c -= 'A'-'a';
  236.     return c;
  237. }
  238.  
  239. static inline unsigned char __toupper(unsigned char c)
  240. {
  241.     if (islower(c))
  242.         c -= 'a'-'A';
  243.     return c;
  244. }
  245.  
  246. #define tolower(c) __tolower(c)
  247. #define toupper(c) __toupper(c)
  248.  
  249. /*
  250.  * Fast implementation of tolower() for internal usage. Do not use in your
  251.  * code.
  252.  */
  253. static inline char _tolower(const char c)
  254. {
  255.     return c | 0x20;
  256. }
  257.  
  258.  
  259. void *kmemdup(const void *src, size_t len, gfp_t gfp)
  260. {
  261.     void *p;
  262.  
  263.     p = kmalloc(len, gfp);
  264.     if (p)
  265.         memcpy(p, src, len);
  266.     return p;
  267. }
  268.  
  269.  
/*
 * Sleep for roughly @msecs milliseconds.
 *
 * KolibriOS's Delay() service counts in 1/100 s ticks, hence the /10
 * conversion; sub-tick requests are rounded up to one tick.  The call
 * goes through the import table with the tick count in EBX, and the
 * second empty asm declares EBX clobbered after the call.
 */
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

     __asm__ __volatile__ (
     "call *__imp__Delay"
     ::"b" (msecs));
     __asm__ __volatile__ (
     "":::"ebx");

};
  282.  
  283.  
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        /* Decrement the count in EAX until it hits zero.  The jmp/.align
         * padding mirrors the x86 Linux delay_loop, keeping the hot loop
         * body aligned to a 16-byte boundary for stable timing. */
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}
  304.  
  305.  
/* Indirection point so a calibrated (e.g. TSC-based) delay routine could
 * be installed later; only the busy loop is ever used here. */
static void (*delay_fn)(unsigned long) = delay_loop;

/* Busy-wait for @loops iterations of the current delay backend. */
void __delay(unsigned long loops)
{
        delay_fn(loops);
}
  312.  
  313.  
/*
 * Scale a precomputed 2^32-based fraction by the calibrated CPU speed and
 * busy-wait.  The mull produces the 64-bit product
 * xloops*4 * (loops_per_jiffy * HZ/4); its high 32 bits (left in EDX)
 * are the loop count.  Same trick as the x86 Linux __const_udelay.
 */
inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), ""
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}
  326.  
/* Busy-wait for @usecs microseconds (converted to the 2^32 fraction
 * expected by __const_udelay). */
void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
  331.  
  332. unsigned int _sw_hweight32(unsigned int w)
  333. {
  334. #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
  335.         w -= (w >> 1) & 0x55555555;
  336.         w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
  337.         w =  (w + (w >> 4)) & 0x0f0f0f0f;
  338.         return (w * 0x01010101) >> 24;
  339. #else
  340.         unsigned int res = w - ((w >> 1) & 0x55555555);
  341.         res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
  342.         res = (res + (res >> 4)) & 0x0F0F0F0F;
  343.         res = res + (res >> 8);
  344.         return (res + (res >> 16)) & 0x000000FF;
  345. #endif
  346. }
  347. EXPORT_SYMBOL(_sw_hweight32);
  348.  
  349.  
/*
 * NOTE(review): unlike Linux, this does not sleep — it busy-waits for the
 * full upper bound @max microseconds; @min is ignored.
 */
void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
  354. EXPORT_SYMBOL(usleep_range);
  355.  
  356.  
/*
 * Round jiffies value @j to the nearest whole second, skewed by @cpu so
 * timers on different CPUs do not all expire in the same tick.
 * @force_up: when set, always round up, never down.
 * Returns the rounded value, or @j unchanged if rounding would land in
 * the past.
 */
static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}
  396.  
  397.  
  398. unsigned long round_jiffies_up_relative(unsigned long j)
  399. {
  400.         unsigned long j0 = jiffies;
  401.  
  402.         /* Use j0 because jiffies might change while we run */
  403.         return round_jiffies_common(j + j0, 0, true) - j0;
  404. }
  405. EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
  406.  
  407.  
  408. #include <linux/rcupdate.h>
  409.  
/* Single-CPU ("tiny") RCU control block: a singly linked list of pending
 * callbacks plus tail pointers marking the end of the "done" portion and
 * the end of the whole list. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};
  420.  
/* Definition for rcupdate control block. */
/* Both tail pointers start at the head of the (empty) callback list. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};
  427.  
/*
 * Append @head, with callback @func, to @rcp's pending-callback list.
 * Interrupts are disabled around the two-step tail update so an IRQ
 * cannot observe the list half-linked (single-CPU assumption, as in
 * Linux tiny RCU).
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}
  444.  
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 *
 * NOTE(review): nothing in this file drains the queued list — confirm
 * the callbacks are actually invoked elsewhere in the port.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
  454.  
/* Stub: seq_file output is discarded in this port; always reports success. */
int seq_puts(struct seq_file *m, const char *s)
{
    return 0;
};
  459.  
/* Stub: formatted seq_file output is discarded; always reports success. */
__printf(2, 3) int seq_printf(struct seq_file *m, const char *f, ...)
{
    return 0;
}
  464.  
/*
 * Current time as a ktime_t, read from the KolibriOS GetClockNs()
 * service.  NOTE(review): assumes GetClockNs() returns a monotonic
 * 64-bit nanosecond count — confirm against the DDK.
 */
ktime_t ktime_get(void)
{
    ktime_t t;

    t.tv64 = GetClockNs();

    return t;
}
  473.  
  474. char *strdup(const char *str)
  475. {
  476.     size_t len = strlen(str) + 1;
  477.     char *copy = __builtin_malloc(len);
  478.     if (copy)
  479.     {
  480.         memcpy (copy, str, len);
  481.     }
  482.     return copy;
  483. }
  484.  
/*
 * Split @cmdline into whitespace-separated arguments, honouring double
 * quotes and \" escapes.
 *
 * When @argv is non-NULL, arguments are unescaped in place (quotes
 * stripped, \" reduced to "), NUL-terminated, and their start pointers
 * stored into argv[].  When @argv is NULL only the count is computed —
 * call once with NULL to size argv, then again to fill it.
 *
 * Returns the number of arguments found.
 */
int split_cmdline(char *cmdline, char **argv)
{
    enum quote_state
    {
        QUOTE_NONE,         /* no " active in current parm       */
        QUOTE_DELIMITER,    /* " was first char and must be last */
        QUOTE_STARTED       /* " was seen, look for a match      */
    };

    enum quote_state state;
    unsigned int argc;
    char *p = cmdline;
    char *new_arg, *start;

    argc = 0;

    for(;;)
    {
        /* skip over spaces and tabs */
        if ( *p )
        {
            while (*p == ' ' || *p == '\t')
                ++p;
        }

        if (*p == '\0')
            break;

        state = QUOTE_NONE;
        if( *p == '\"' )
        {
            p++;
            state = QUOTE_DELIMITER;
        }
        new_arg = start = p;
        /* scan one argument; new_arg trails p, compacting out quote
         * characters as they are consumed */
        for (;;)
        {
            if( *p == '\"' )
            {
                p++;
                if( state == QUOTE_NONE )
                {
                    state = QUOTE_STARTED;
                }
                else
                {
                    state = QUOTE_NONE;
                }
                continue;
            }

            /* unquoted whitespace terminates the argument */
            if( *p == ' ' || *p == '\t' )
            {
                if( state == QUOTE_NONE )
                {
                    break;
                }
            }

            if( *p == '\0' )
                break;

            /* \" becomes a literal quote, unless the backslash itself
             * was escaped */
            if( *p == '\\' )
            {
                if( p[1] == '\"' )
                {
                    ++p;
                    if( p[-2] == '\\' )
                    {
                        continue;
                    }
                }
            }
            if( argv )
            {
                *(new_arg++) = *p;
            }
            ++p;
        };

        if( argv )
        {
            argv[ argc ] = start;
            ++argc;

            /*
              The *new = '\0' is req'd in case there was a \" to "
              translation. It must be after the *p check against
              '\0' because new and p could point to the same char
              in which case the scan would be terminated too soon.
            */

            if( *p == '\0' )
            {
                *new_arg = '\0';
                break;
            }
            *new_arg = '\0';
            ++p;
        }
        else
        {
            ++argc;
            if( *p == '\0' )
            {
                break;
            }
            ++p;
        }
    }

    return argc;
};
  598.  
  599.  
  600. int fb_get_options(const char *name, char **option)
  601. {
  602.     char *opt, *options = NULL;
  603.     int retval = 1;
  604.     int name_len;
  605.  
  606.     if(i915.cmdline_mode == NULL)
  607.         return 1;
  608.  
  609.     name_len = __builtin_strlen(name);
  610.  
  611.     if (name_len )
  612.     {
  613.         opt = i915.cmdline_mode;
  614.         if (!__builtin_strncmp(name, opt, name_len) &&
  615.              opt[name_len] == ':')
  616.         {
  617.              options = opt + name_len + 1;
  618.              retval = 0;
  619.         }
  620.     }
  621.  
  622.     if (option)
  623.         *option = options;
  624.  
  625.     return retval;
  626. }
  627.  
  628. void *vmap(struct page **pages, unsigned int count,
  629.            unsigned long flags, pgprot_t prot)
  630. {
  631.     void *vaddr;
  632.     char *tmp;
  633.     int i;
  634.  
  635.     vaddr = AllocKernelSpace(count << 12);
  636.     if(vaddr == NULL)
  637.         return NULL;
  638.  
  639.     for(i = 0, tmp = vaddr; i < count; i++)
  640.     {
  641.         MapPage(tmp, page_to_phys(pages[i]), PG_SW);
  642.         tmp+= 4096;
  643.     };
  644.  
  645.     return vaddr;
  646. };
  647.  
/* Release a kernel-space mapping created by vmap(). */
void vunmap(const void *addr)
{
    FreeKernelSpace((void*)addr);
}
  652.  
/* Map MMIO at physical @offset uncached.  The extra 0x100 flag is passed
 * straight to MapIoMem — presumably a Kolibri-specific page attribute;
 * confirm against the DDK. */
void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size)
{
    return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
  657.  
/* Intended write-combining mapping; the PG_WRITEC attribute is disabled
 * (see the commented line), so this currently maps with default caching. */
void __iomem *ioremap_wc(resource_size_t offset, unsigned long size)
{
//    return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
    return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100);
}
  663.  
/* Release an MMIO mapping created by ioremap_nocache()/ioremap_wc(). */
void iounmap(volatile void __iomem *addr)
{
    FreeKernelSpace((void*)addr);
}
  668.  
/*
 * Copy @n bytes from user space into @to.  The access_ok() validation
 * done by Linux is commented out — user pointers are trusted in this
 * port.  Returns the number of bytes NOT copied (0 on full success).
 */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
//    if (access_ok(VERIFY_READ, from, n))
        n = __copy_from_user(to, from, n);
//    else
//        memset(to, 0, n);
    return n;
}
  677.