Subversion Repositories Kolibri OS

Rev 1631

/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
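/*
 * Illustration (editor's sketch, not part of the original source; the block
 * size is a made-up example value): with 64-byte blocks, a freshly
 * initialised page chains its blocks as
 *
 *     *(int *)(vaddr +   0) =  64
 *     *(int *)(vaddr +  64) = 128
 *     ...
 *
 * i.e. the first word of every free block stores the offset of the next
 * free block, and a stored offset that reaches pool->allocation marks the
 * end of the chain (page full).  page->offset points at the first free
 * block, so dma_pool_alloc() pops from the head of this list and
 * dma_pool_free() pushes blocks back onto it.
 */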
#include <ddk.h>
#include <linux/mutex.h>
#include <syscall.h>


struct dma_pool {       /* the pool */
    struct list_head page_list;
    struct mutex lock;
    size_t size;
    size_t allocation;
    size_t boundary;
    struct list_head pools;
};

struct dma_page {       /* cacheable header for 'allocation' bytes */
    struct list_head page_list;
    void *vaddr;
    dma_addr_t dma;
    unsigned int in_use;
    unsigned int offset;
};


static DEFINE_MUTEX(pools_lock);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                 size_t size, size_t align, size_t boundary)
{
    struct dma_pool *retval;
    size_t allocation;

    if (align == 0) {
        align = 1;
    } else if (align & (align - 1)) {
        return NULL;
    }

    if (size == 0) {
        return NULL;
    } else if (size < 4) {
        size = 4;
    }

    if ((size % align) != 0)
        size = ALIGN(size, align);

    allocation = max_t(size_t, size, PAGE_SIZE);

    /* round the per-page allocation up to a 32 KiB (0x8000) multiple */
    allocation = (allocation + 0x7FFF) & ~0x7FFF;

    if (!boundary) {
        boundary = allocation;
    } else if ((boundary < size) || (boundary & (boundary - 1))) {
        return NULL;
    }

    retval = kmalloc(sizeof(*retval), GFP_KERNEL);

    if (!retval)
        return retval;

    INIT_LIST_HEAD(&retval->page_list);

    /* The pool lock is not used in this port: dma_pool_alloc() and
     * dma_pool_free() serialise by disabling interrupts with
     * safe_cli()/safe_sti() instead. */
//    spin_lock_init(&retval->lock);

    retval->size = size;
    retval->boundary = boundary;
    retval->allocation = allocation;

    INIT_LIST_HEAD(&retval->pools);

    return retval;
}

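/*
 * Usage sketch (editor's illustration, not part of the original file; the
 * pool name, the 'dev' pointer and the error handling are hypothetical):
 *
 *     struct dma_pool *td_pool;
 *
 *     td_pool = dma_pool_create("example_td", dev, 64, 16, 4096);
 *     if (!td_pool)
 *         return -ENOMEM;
 *
 * This creates a pool of 64-byte, 16-byte-aligned blocks that never cross
 * a 4 KiB boundary.
 */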
/*
 * Chain every block of a freshly allocated page into the in-page free
 * list: the first word of each free block stores the offset of the next
 * one, respecting the pool's boundary constraint.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;
        offset = next;
    } while (offset < pool->allocation);
}

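/*
 * Worked example (editor's illustration, not part of the original source;
 * the sizes are made-up values): with size = 96 and boundary = 256,
 * pool_initialise_page() builds the chain
 *
 *     0 -> 96 -> 256 -> 352 -> 512 -> ...
 *
 * The block that would start at offset 192 is skipped because it would end
 * at 288, past the 256-byte boundary; the (next + pool->size) >=
 * next_boundary test jumps the chain forward to the boundary in that case.
 */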
/* Allocate, map and initialise one new page for the pool. */
static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
    struct dma_page *page;

    page = malloc(sizeof(*page));
    if (!page)
        return NULL;
    page->vaddr = (void*)KernelAlloc(pool->allocation);

    dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr);

    if (page->vaddr)
    {
        page->dma = GetPgAddr(page->vaddr);

        dbgprintf("dma 0x%0x\n", page->dma);

        pool_initialise_page(pool, page);
        list_add(&page->page_list, &pool->page_list);
        page->in_use = 0;
        page->offset = 0;
    } else {
        free(page);
        page = NULL;
    }
    return page;
}

static inline int is_page_busy(struct dma_page *page)
{
    return page->in_use != 0;
}

/* Return a page's memory to the kernel and unlink it from the pool. */
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
    KernelFree(page->vaddr);
    list_del(&page->page_list);
    free(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
    mutex_lock(&pools_lock);
    list_del(&pool->pools);
    mutex_unlock(&pools_lock);

    while (!list_empty(&pool->page_list)) {
        struct dma_page *page;
        page = list_entry(pool->page_list.next,
                  struct dma_page, page_list);
        if (is_page_busy(page))
        {
            printk(KERN_ERR "dma_pool_destroy %p busy\n",
                   page->vaddr);
            /* leak the still-in-use consistent memory */
            list_del(&page->page_list);
            free(page);
        } else
            pool_free_page(pool, page);
    }

    kfree(pool);
}

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
             dma_addr_t *handle)
{
    u32_t   efl;
    struct  dma_page *page;
    size_t  offset;
    void   *retval;

    /* serialise against dma_pool_free() by disabling interrupts */
    efl = safe_cli();

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (page->offset < pool->allocation)
            goto ready;
    }
    page = pool_alloc_page(pool);
    if (!page)
    {
        retval = NULL;
        goto done;
    }

 ready:
    page->in_use++;
    offset = page->offset;
    page->offset = *(int *)(page->vaddr + offset);
    retval = offset + page->vaddr;
    *handle = offset + page->dma;
 done:
    safe_sti(efl);
    return retval;
}
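/*
 * Usage sketch (editor's illustration, not part of the original file;
 * 'td_pool', 'td' and 'td_dma' are the hypothetical names from the sketch
 * above):
 *
 *     dma_addr_t td_dma;
 *     void *td = dma_pool_alloc(td_pool, GFP_KERNEL, &td_dma);
 *     if (!td)
 *         return -ENOMEM;
 *     // program the device with td_dma, touch td from the CPU side
 *
 * Note that mem_flags is accepted for Linux API compatibility but is not
 * examined by this port.
 */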
/*
 * Find the pool page that contains the given dma address, or return NULL
 * if the address does not belong to this pool.
 */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
    struct dma_page *page;
    u32_t  efl;

    efl = safe_cli();

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (dma < page->dma)
            continue;
        if (dma < (page->dma + pool->allocation))
            goto done;
    }
    page = NULL;
 done:
    safe_sti(efl);

    return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
    struct dma_page *page;
    unsigned int offset;
    u32_t efl;

    page = pool_find_page(pool, dma);
    if (!page) {
        printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
               vaddr, (unsigned long)dma);
        return;
    }

    offset = vaddr - page->vaddr;

    efl = safe_cli();
    {
        page->in_use--;
        /* push the block back onto the head of the page's free list */
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
    }
    safe_sti(efl);
}
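/*
 * Teardown sketch (editor's illustration, not part of the original file;
 * 'td', 'td_dma' and 'td_pool' are the hypothetical names used in the
 * sketches above):
 *
 *     dma_pool_free(td_pool, td, td_dma);   // return every block first
 *     dma_pool_destroy(td_pool);            // then destroy the pool
 *
 * dma_pool_destroy() expects all blocks to have been freed already: pages
 * still marked in_use are reported via printk() and their memory is leaked.
 */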