#include <ddk.h>
#include <linux/mm.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/hdmi.h>

/*
 * Minimal stand-in for the Linux shmem_file_setup(): allocate a struct
 * file that tracks an array of lazily populated pages. @size is assumed
 * to be a multiple of PAGE_SIZE; @name and @flags are accepted only for
 * API compatibility.
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
    struct file *filep;
    int count;

    filep = __builtin_malloc(sizeof(*filep));
    if (unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);

    count = size / PAGE_SIZE;

    filep->pages = kzalloc(sizeof(struct page *) * count, 0);
    if (unlikely(filep->pages == NULL))
    {
        kfree(filep);
        return ERR_PTR(-ENOMEM);
    }

    filep->count     = count;
    filep->allocated = 0;
    filep->vma       = NULL;

//    printf("%s file %p pages %p count %d\n",
//              __FUNCTION__,filep, filep->pages, count);

    return filep;
}

/*
 * Return the page backing @index, allocating it on first access. The
 * @gfp argument is ignored; pages come straight from AllocPage().
 */
struct page *shmem_read_mapping_page_gfp(struct file *filep,
                                         pgoff_t index, gfp_t gfp)
{
    struct page *page;

    if (unlikely(index >= filep->count))
        return ERR_PTR(-EINVAL);

    page = filep->pages[index];

    if (unlikely(page == NULL))
    {
        page = (struct page *)AllocPage();
        if (unlikely(page == NULL))
            return ERR_PTR(-ENOMEM);

        filep->pages[index] = page;
//        printf("file %p index %d page %x\n", filep, index, page);
//        delay(1);
    }

    return page;
}

/*
 * Map @len bytes of @file, starting at @offset, into the calling
 * process and return the user-space address (or a negative errno).
 * @addr, @prot and @flag are ignored by this port.
 */
unsigned long vm_mmap(struct file *file, unsigned long addr,
         unsigned long len, unsigned long prot,
         unsigned long flag, unsigned long offset)
{
    char *mem, *ptr;
    unsigned long i;

    if (unlikely(offset + PAGE_ALIGN(len) < offset))
        return -EINVAL;
    if (unlikely(offset & ~PAGE_MASK))
        return -EINVAL;

    mem = UserAlloc(len);
    if (unlikely(mem == NULL))
        return -ENOMEM;

    for (i = offset, ptr = mem; i < offset + len; i += 4096, ptr += 4096)
    {
        struct page *page;

        page = shmem_read_mapping_page_gfp(file, i/PAGE_SIZE, 0);
        if (unlikely(IS_ERR(page)))
            goto err;

        MapPage(ptr, (addr_t)page, PG_SHARED|PG_UW);
    }

    return (unsigned long)mem;
err:
    UserFree(mem);
    return -ENOMEM;
}

/*
 * Free the page-pointer array. The pages themselves are not released
 * here; the caller is responsible for returning them to the allocator.
 */
void shmem_file_delete(struct file *filep)
{
//    printf("%s file %p pages %p count %d\n",
//            __FUNCTION__, filep, filep->pages, filep->count);

    if (filep->pages)
        kfree(filep->pages);
}
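
/*
 * Usage sketch (illustrative only, not part of the driver): create a
 * 16-page backing file, map it into the calling process, then tear it
 * down. Note that shmem_file_delete() frees only the page array; the
 * lazily allocated pages themselves are released elsewhere.
 */
#if 0
static int shmem_example(void)
{
    struct file  *filep;
    unsigned long uaddr;

    filep = shmem_file_setup("dummy", 16 * PAGE_SIZE, 0);
    if (IS_ERR(filep))
        return PTR_ERR(filep);

    uaddr = vm_mmap(filep, 0, 16 * PAGE_SIZE, 0, 0, 0);
    if (IS_ERR_VALUE(uaddr)) {
        shmem_file_delete(filep);
        return (int)uaddr;
    }

    /* ... use the mapping ... */

    UserFree((void *)uaddr);
    shmem_file_delete(filep);
    return 0;
}
#endif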


/* Byte-by-byte scan helper used by memchr_inv() below. */
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
        while (bytes) {
                if (*start != value)
                        return (void *)start;
                start++;
                bytes--;
        }
        return NULL;
}

/**
 * memchr_inv - Find an unmatching character in an area of memory.
 * @start: The memory area
 * @c: Find a character other than c
 * @bytes: The size of the area.
 *
 * returns the address of the first character other than @c, or %NULL
 * if the whole buffer contains just @c.
 */
void *memchr_inv(const void *start, int c, size_t bytes)
{
        u8 value = c;
        u64 value64;
        unsigned int words, prefix;

        if (bytes <= 16)
                return check_bytes8(start, value, bytes);

        value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
        value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
        value64 *= 0x01010101;
        value64 |= value64 << 32;
#else
        value64 |= value64 << 8;
        value64 |= value64 << 16;
        value64 |= value64 << 32;
#endif

        prefix = (unsigned long)start % 8;
        if (prefix) {
                u8 *r;

                prefix = 8 - prefix;
                r = check_bytes8(start, value, prefix);
                if (r)
                        return r;
                start += prefix;
                bytes -= prefix;
        }

        words = bytes / 8;

        while (words) {
                if (*(u64 *)start != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                words--;
        }

        return check_bytes8(start, value, bytes % 8);
}
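
/*
 * Usage sketch (illustrative only): check that a buffer is entirely
 * zero-filled; memchr_inv() returns NULL when every byte matches.
 */
#if 0
static bool buffer_is_zeroed(const void *buf, size_t len)
{
    return memchr_inv(buf, 0, len) == NULL;
}
#endif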



/*
 * Trivial scatterlist "mapping": no IOMMU is involved here, so the DMA
 * address of each segment is simply its physical address.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
                           int nelems, int dir)
{
    struct scatterlist *s;
    int i;

    for_each_sg(sglist, s, nelems, i) {
        s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        s->dma_length  = s->length;
#endif
    }

    return nelems;
}
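
/*
 * Usage sketch (illustrative only; assumes an sg_table that was already
 * filled in, e.g. by sg_alloc_table()):
 */
#if 0
static void dma_map_example(struct device *dev, struct sg_table *st)
{
    struct scatterlist *sg;
    int i, nents;

    nents = dma_map_sg(dev, st->sgl, st->nents, 0 /* direction ignored */);

    for_each_sg(st->sgl, sg, nents, i)
        printk("seg %d: dma %08x len %u\n",
               i, (unsigned int)sg_dma_address(sg), sg_dma_len(sg));
}
#endif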


#define _U  0x01    /* upper */
#define _L  0x02    /* lower */
#define _D  0x04    /* digit */
#define _C  0x08    /* cntrl */
#define _P  0x10    /* punct */
#define _S  0x20    /* white space (space/lf/tab) */
#define _X  0x40    /* hex digit */
#define _SP 0x80    /* hard space (0x20) */

extern const unsigned char _ctype[];

#define __ismask(x) (_ctype[(int)(unsigned char)(x)])

#define isalnum(c)  ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)  ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)  ((__ismask(c)&(_C)) != 0)
#define isdigit(c)  ((__ismask(c)&(_D)) != 0)
#define isgraph(c)  ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)  ((__ismask(c)&(_L)) != 0)
#define isprint(c)  ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c)  ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c)  ((__ismask(c)&(_S)) != 0)
#define isupper(c)  ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)

#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)

static inline unsigned char __tolower(unsigned char c)
{
    if (isupper(c))
        c -= 'A'-'a';
    return c;
}

static inline unsigned char __toupper(unsigned char c)
{
    if (islower(c))
        c -= 'a'-'A';
    return c;
}

#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)

/*
 * Fast implementation of tolower() for internal usage. Do not use in your
 * code.
 */
static inline char _tolower(const char c)
{
    return c | 0x20;
}


//const char hex_asc[] = "0123456789abcdef";

/**
 * hex_to_bin - convert a hex digit to its real value
 * @ch: ascii character represents hex digit
 *
 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
 * input.
 */
int hex_to_bin(char ch)
{
    if ((ch >= '0') && (ch <= '9'))
        return ch - '0';
    ch = tolower(ch);
    if ((ch >= 'a') && (ch <= 'f'))
        return ch - 'a' + 10;
    return -1;
}
EXPORT_SYMBOL(hex_to_bin);

/**
 * hex2bin - convert an ascii hexadecimal string to its binary representation
 * @dst: binary result
 * @src: ascii hexadecimal string
 * @count: result length
 *
 * Return 0 on success, -1 in case of bad input.
 */
int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if ((hi < 0) || (lo < 0))
            return -1;

        *dst++ = (hi << 4) | lo;
    }
    return 0;
}
EXPORT_SYMBOL(hex2bin);
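
/*
 * Usage sketch (illustrative only): parse an 8-character hex string into
 * four raw bytes; hex2bin() returns 0 on success, -1 on a bad digit.
 */
#if 0
static int parse_example(void)
{
    u8 bytes[4];

    if (hex2bin(bytes, "deadbeef", sizeof(bytes)) < 0)
        return -EINVAL;

    /* bytes[] is now { 0xde, 0xad, 0xbe, 0xef } */
    return 0;
}
#endif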

/**
 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @linebuf: where to put the converted data
 * @linebuflen: total size of @linebuf, including space for terminating NUL
 * @ascii: include ASCII after the hex output
 *
 * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 *
 * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
 * to a hex + ASCII dump at the supplied memory location.
 * The converted output is always NUL-terminated.
 *
 * E.g.:
 *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
 *          linebuf, sizeof(linebuf), true);
 *
 * example output buffer:
 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 */
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
            int groupsize, char *linebuf, size_t linebuflen,
            bool ascii)
{
    const u8 *ptr = buf;
    u8 ch;
    int j, lx = 0;
    int ascii_column;

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    if (!len)
        goto nil;
    if (len > rowsize)      /* limit to one line at a time */
        len = rowsize;
    if ((len % groupsize) != 0) /* no mixed size output */
        groupsize = 1;

    switch (groupsize) {
    case 8: {
        const u64 *ptr8 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                    "%s%16.16llx", j ? " " : "",
                    (unsigned long long)*(ptr8 + j));
        ascii_column = 17 * ngroups + 2;
        break;
    }

    case 4: {
        const u32 *ptr4 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                    "%s%8.8x", j ? " " : "", *(ptr4 + j));
        ascii_column = 9 * ngroups + 2;
        break;
    }

    case 2: {
        const u16 *ptr2 = buf;
        int ngroups = len / groupsize;

        for (j = 0; j < ngroups; j++)
            lx += scnprintf(linebuf + lx, linebuflen - lx,
                    "%s%4.4x", j ? " " : "", *(ptr2 + j));
        ascii_column = 5 * ngroups + 2;
        break;
    }

    default:
        for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
            ch = ptr[j];
            linebuf[lx++] = hex_asc_hi(ch);
            linebuf[lx++] = hex_asc_lo(ch);
            linebuf[lx++] = ' ';
        }
        if (j)
            lx--;

        ascii_column = 3 * rowsize + 2;
        break;
    }
    if (!ascii)
        goto nil;

    while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
        linebuf[lx++] = ' ';
    for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
        ch = ptr[j];
        linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
    }
nil:
    linebuf[lx++] = '\0';
}

/**
 * print_hex_dump - print a text hex dump to syslog for a binary blob of data
 * @level: kernel log level (e.g. KERN_DEBUG)
 * @prefix_str: string to prefix each line with;
 *  caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether prefix of an offset, address, or none
 *  is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
 * @rowsize: number of bytes to print per line; must be 16 or 32
 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
 * @buf: data blob to dump
 * @len: number of bytes in the @buf
 * @ascii: include ASCII after the hex output
 *
 * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
 * to the kernel log at the specified kernel log level, with an optional
 * leading prefix.
 *
 * print_hex_dump() works on one "line" of output at a time, i.e.,
 * 16 or 32 bytes of input data converted to hex + ASCII output.
 * print_hex_dump() iterates over the entire input @buf, breaking it into
 * "line size" chunks to format and print.
 *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
 *          16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
 * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
            int rowsize, int groupsize,
            const void *buf, size_t len, bool ascii)
{
    const u8 *ptr = buf;
    int i, linelen, remaining = len;
    unsigned char linebuf[32 * 3 + 2 + 32 + 1];

    if (rowsize != 16 && rowsize != 32)
        rowsize = 16;

    for (i = 0; i < len; i += rowsize) {
        linelen = min(remaining, rowsize);
        remaining -= rowsize;

        hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
                   linebuf, sizeof(linebuf), ascii);

        switch (prefix_type) {
        case DUMP_PREFIX_ADDRESS:
            printk("%s%s%p: %s\n",
                   level, prefix_str, ptr + i, linebuf);
            break;
        case DUMP_PREFIX_OFFSET:
            printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
            break;
        default:
            printk("%s%s%s\n", level, prefix_str, linebuf);
            break;
        }
    }
}

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len)
{
    print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
                       buf, len, true);
}
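
/*
 * Usage sketch (illustrative only): dump a small register block while
 * debugging; prints 16-byte rows in hex + ASCII at KERN_DEBUG level.
 */
#if 0
static void dump_example(void)
{
    u8 regs[32] = { 0x40, 0x41, 0x42, 0x43 };

    print_hex_dump_bytes("regs: ", DUMP_PREFIX_OFFSET, regs, sizeof(regs));
}
#endif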

/*
 * kmemdup - duplicate a region of memory into a freshly kmalloc'ed
 * buffer; returns NULL if the allocation fails.
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}
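
/*
 * Usage sketch (illustrative only): take a private copy of an EDID block
 * so the source buffer can be reused. EDID_LENGTH (128) comes from the
 * DRM headers.
 */
#if 0
static u8 *edid_dup_example(const u8 *edid)
{
    return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
}
#endif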


#define KMAP_MAX    256

/*
 * Small kmap emulation: a 1 MiB window of kernel address space is
 * reserved at init and pages are mapped into its 4 KiB slots on demand.
 * kmap_first caches the lowest slot that might be free.
 */
static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init(void)
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if (kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
}

/*
 * Map @page into a free slot of the kmap window. If all slots are busy
 * the function spins until another thread releases one.
 */
void *kmap(struct page *page)
{
    void *vaddr = NULL;
    int i;

    do
    {
        MutexLock(&kmap_mutex);
        if (kmap_av != 0)
        {
            for (i = kmap_first; i < KMAP_MAX; i++)
            {
                if (kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr, (addr_t)page, 3);  /* present + writable */
                    break;
                }
            }
        }
        MutexUnlock(&kmap_mutex);
    } while (vaddr == NULL);

    return vaddr;
}

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    void *vaddr;
    int   i;

    MutexLock(&kmap_mutex);

    for (i = 0; i < KMAP_MAX; i++)
    {
        if (kmap_table[i] == page)
        {
            kmap_av++;
            if (i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr, 0, 0);
            break;
        }
    }

    MutexUnlock(&kmap_mutex);
}

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr, 0, 0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if (i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}
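
/*
 * Usage sketch (illustrative only): map a page into the kmap window just
 * long enough to zero it, then release the slot for other users.
 */
#if 0
static void zero_page_example(struct page *page)
{
    void *vaddr = kmap(page);

    memset(vaddr, 0, PAGE_SIZE);
    kunmap(page);
}
#endif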

size_t strlcat(char *dest, const char *src, size_t count)
{
        size_t dsize = strlen(dest);
        size_t len = strlen(src);
        size_t res = dsize + len;

        /* This would be a bug */
        BUG_ON(dsize >= count);

        dest += dsize;
        count -= dsize;
        if (len >= count)
                len = count-1;
        memcpy(dest, src, len);
        dest[len] = 0;
        return res;
}
EXPORT_SYMBOL(strlcat);
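
/*
 * Usage sketch (illustrative only): append to a fixed-size buffer;
 * strlcat() truncates rather than overflowing, and returns the length
 * it tried to create.
 */
#if 0
static void strlcat_example(void)
{
    char name[16] = "HDMI";

    strlcat(name, "-A-1", sizeof(name));    /* name is now "HDMI-A-1" */
}
#endif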

/*
 * msleep() is built on the KolibriOS Delay() service, which counts in
 * 10 ms timer ticks; the requested time is truncated to whole ticks,
 * with a minimum of one tick.
 */
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if (!msecs)
        msecs = 1;

    __asm__ __volatile__ (
    "call *__imp__Delay"
    ::"b" (msecs));
    __asm__ __volatile__ (
    "":::"ebx");
}


/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}


static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
        delay_fn(loops);
}


inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        /* high word of xloops * loops_per_jiffy*(HZ/4) = loops to spin */
        asm("mull %%edx"
                : "=d" (xloops), "=&a" (d0)
                : "1" (xloops), "0"
                (loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
        w -= (w >> 1) & 0x55555555;
        w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
        w =  (w + (w >> 4)) & 0x0f0f0f0f;
        return (w * 0x01010101) >> 24;
#else
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);


/*
 * usleep_range() degenerates to a busy wait for the upper bound of the
 * requested range.
 */
void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);


static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}


unsigned long round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
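
/*
 * Usage sketch (illustrative only; mod_timer() and the timer itself are
 * assumed): arm periodic work about one second out, rounded up so that
 * wakeups from different callers can batch on whole-second boundaries.
 */
#if 0
static void arm_timer_example(struct timer_list *timer)
{
    unsigned long delay = round_jiffies_up_relative(HZ, 0);

    mod_timer(timer, jiffies + delay);
}
#endif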


#include <linux/rcupdate.h>

struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
//        RCU_TRACE(long qlen);           /* Number of pending CBs. */
//        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//        RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
//        RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

//        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
//        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
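
/*
 * Usage sketch (illustrative only): defer freeing an object until after
 * a grace period by embedding a struct rcu_head in it.
 */
#if 0
struct obj {
    struct rcu_head rcu;
    int payload;
};

static void obj_free_rcu(struct rcu_head *head)
{
    kfree(container_of(head, struct obj, rcu));
}

static void obj_release(struct obj *o)
{
    call_rcu_sched(&o->rcu, obj_free_rcu);
}
#endif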