/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
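
/*
 * Free-list layout (sketch): the free list is embedded in the page itself.
 * Each free block stores, in its first 4 bytes, the offset of the next free
 * block within the page, and dma_page.offset names the first free block.
 * An offset >= pool->allocation terminates the chain.  pool_initialise_page()
 * builds this chain; dma_pool_alloc() pops from it and dma_pool_free()
 * pushes back onto it.
 */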

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <syscall.h>

struct dma_pool {               /* the pool */
        struct list_head page_list;
        spinlock_t lock;
        size_t size;
        struct device *dev;
        size_t allocation;
        size_t boundary;
        char name[32];
        struct list_head pools;
};

struct dma_page {               /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
        unsigned int in_use;
        unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct dma_pool *retval;
        size_t allocation;

        if (align == 0)
                align = 1;
        else if (align & (align - 1))
                return NULL;

        if (size == 0)
                return NULL;
        else if (size < 4)
                size = 4;

        if ((size % align) != 0)
                size = ALIGN(size, align);

        allocation = max_t(size_t, size, PAGE_SIZE);

        /* KolibriOS port: round the per-page allocation up to a 32 KiB multiple */
        allocation = (allocation + 0x7FFF) & ~0x7FFF;

        if (!boundary)
                boundary = allocation;
        else if ((boundary < size) || (boundary & (boundary - 1)))
                return NULL;

        retval = kmalloc(sizeof(*retval), GFP_KERNEL);
        if (!retval)
                return retval;

        strlcpy(retval->name, name, sizeof(retval->name));

        retval->dev = dev;

        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;

        INIT_LIST_HEAD(&retval->pools);

        return retval;
}
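
/*
 * Usage sketch (illustrative only, guarded out of the build): a driver that
 * needs 64-byte, 16-byte-aligned descriptors which must not cross a 4 KiB
 * boundary could create its pool like this.  'my_dev' and the pool name are
 * hypothetical.
 */
#if 0
static struct dma_pool *example_create_pool(struct device *my_dev)
{
        struct dma_pool *pool;

        pool = dma_pool_create("example-desc", my_dev, 64, 16, 4096);
        if (!pool)
                return NULL;            /* out of memory or bad parameters */
        return pool;
}
#endif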

/*
 * Build the page's embedded free list: each free block's first bytes hold
 * the offset of the next free block within the page.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
        unsigned int offset = 0;
        unsigned int next_boundary = pool->boundary;

        do {
                unsigned int next = offset + pool->size;
                if (unlikely((next + pool->size) >= next_boundary)) {
                        next = next_boundary;
                        next_boundary += pool->boundary;
                }
                *(int *)(page->vaddr + offset) = next;
                offset = next;
        } while (offset < pool->allocation);
}
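
/*
 * Worked example (illustrative numbers): with size = 256, boundary = 4096
 * and allocation = 32768, the chain in the first boundary segment runs
 * 0 -> 256 -> ... -> 3584, and the block at offset 3584 links straight to
 * 4096, so the last block before each boundary (3840 here) is never handed
 * out.  The final link in the page points at 'allocation' itself, which
 * dma_pool_alloc() treats as "page full".
 */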

/* Allocate and initialise one 'allocation'-sized page for the pool. */
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;

        page = kmalloc(sizeof(*page), mem_flags);
        if (!page)
                return NULL;

        page->vaddr = (void *)KernelAlloc(pool->allocation);

        dbgprintf("%s 0x%0x ", __FUNCTION__, page->vaddr);

        if (page->vaddr) {
#ifdef  DMAPOOL_DEBUG
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                page->dma = GetPgAddr(page->vaddr);

                dbgprintf("dma 0x%0x\n", page->dma);

                pool_initialise_page(pool, page);
                page->in_use = 0;
                page->offset = 0;
        } else {
                kfree(page);
                page = NULL;
        }
        return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
        return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
#ifdef  DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        KernelFree(page->vaddr);
        list_del(&page->page_list);
        kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
        if (unlikely(!pool))
                return;

        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        list_del(&pool->pools);
        mutex_unlock(&pools_lock);
        mutex_unlock(&pools_reg_lock);

        while (!list_empty(&pool->page_list)) {
                struct dma_page *page;
                page = list_entry(pool->page_list.next,
                                  struct dma_page, page_list);
                if (is_page_busy(page)) {
                        printk(KERN_ERR "dma_pool_destroy %p busy\n",
                               page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del(&page->page_list);
                        kfree(page);
                } else
                        pool_free_page(pool, page);
        }

        kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
        unsigned long flags;
        struct dma_page *page;
        size_t offset;
        void *retval;

        spin_lock_irqsave(&pool->lock, flags);
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }

        /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
        spin_unlock_irqrestore(&pool->lock, flags);

        page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
        if (!page)
                return NULL;

        spin_lock_irqsave(&pool->lock, flags);

        list_add(&page->page_list, &pool->page_list);
 ready:
        /* pop the first free block off this page's embedded free list */
        page->in_use++;
        offset = page->offset;
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef  DMAPOOL_DEBUG
        {
                int i;
                u8 *data = retval;
                /* page->offset is stored in first 4 bytes */
                for (i = sizeof(page->offset); i < pool->size; i++) {
                        if (data[i] == POOL_POISON_FREED)
                                continue;
                        if (pool->dev)
                                dev_err(pool->dev,
                                        "dma_pool_alloc %s, %p (corrupted)\n",
                                        pool->name, retval);
                        else
                                pr_err("dma_pool_alloc %s, %p (corrupted)\n",
                                        pool->name, retval);

                        /*
                         * Dump the first 4 bytes even if they are not
                         * POOL_POISON_FREED
                         */
                        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
                                        data, pool->size, 1);
                        break;
                }
        }
        if (!(mem_flags & __GFP_ZERO))
                memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
        spin_unlock_irqrestore(&pool->lock, flags);

        if (mem_flags & __GFP_ZERO)
                memset(retval, 0, pool->size);

        return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
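
/*
 * Usage sketch (illustrative only, guarded out of the build): allocate one
 * block from a pool, touch it from the CPU through the returned virtual
 * address and program the device with the bus address.  'example_pool' and
 * the descriptor layout are hypothetical.
 */
#if 0
static u32 *example_alloc_desc(struct dma_pool *example_pool, dma_addr_t *dma)
{
        u32 *desc;

        /* GFP_KERNEL: may sleep while a fresh page is allocated */
        desc = dma_pool_alloc(example_pool, GFP_KERNEL, dma);
        if (!desc)
                return NULL;

        desc[0] = 0;            /* CPU access goes through the virtual address */
        /* ... the hardware is programmed with *dma, not with 'desc' ... */
        return desc;
}
#endif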

/* Find the page whose 'allocation'-sized region covers the given dma address. */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_page *page;
        unsigned long flags;
        unsigned int offset;

        spin_lock_irqsave(&pool->lock, flags);
        page = pool_find_page(pool, dma);
        if (!page) {
                spin_unlock_irqrestore(&pool->lock, flags);
                printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
                       pool->name, vaddr, (unsigned long)dma);
                return;
        }

        offset = vaddr - page->vaddr;
#ifdef  DMAPOOL_DEBUG
        if ((dma - page->dma) != offset) {
                spin_unlock_irqrestore(&pool->lock, flags);
                if (pool->dev)
                        dev_err(pool->dev,
                                "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                                pool->name, vaddr, (unsigned long long)dma);
                else
                        printk(KERN_ERR
                               "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
                               pool->name, vaddr, (unsigned long long)dma);
                return;
        }
        {
                unsigned int chain = page->offset;
                while (chain < pool->allocation) {
                        if (chain != offset) {
                                chain = *(int *)(page->vaddr + chain);
                                continue;
                        }
                        spin_unlock_irqrestore(&pool->lock, flags);
                        if (pool->dev)
                                dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
                                        "already free\n", pool->name,
                                        (unsigned long long)dma);
                        else
                                printk(KERN_ERR "dma_pool_free %s, dma %Lx "
                                        "already free\n", pool->name,
                                        (unsigned long long)dma);
                        return;
                }
        }
        memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

        /* push the block back onto the page's free list */
        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
        spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
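
/*
 * Usage sketch (illustrative only, guarded out of the build), continuing the
 * allocation sketch after dma_pool_alloc(): every outstanding block must be
 * returned with dma_pool_free() before the pool is destroyed, otherwise
 * dma_pool_destroy() logs the busy page and leaks it.  'example_pool',
 * 'desc' and 'dma' are hypothetical.
 */
#if 0
static void example_teardown(struct dma_pool *example_pool,
                             u32 *desc, dma_addr_t dma)
{
        dma_pool_free(example_pool, desc, dma);         /* give the block back */
        dma_pool_destroy(example_pool);                 /* then kill the pool  */
}
#endif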

/*
 * Managed DMA pool
 */
/* devres release callback: destroy the pool when the owning device goes away */
static void dmam_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        dma_pool_destroy(pool);
}

/* devres match callback: identify the resource entry holding a given pool */
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}
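
/*
 * Sketch (illustrative only, guarded out of the build): in the upstream Linux
 * allocator, release/match callbacks of this shape are consumed by the devres
 * framework roughly as below.  devres_alloc()/devres_add()/devres_free() are
 * assumed to be available and are not part of this port as shown here.
 */
#if 0
static struct dma_pool *example_dmam_pool_create(const char *name,
                                                 struct device *dev,
                                                 size_t size, size_t align,
                                                 size_t allocation)
{
        struct dma_pool **ptr, *pool;

        /* devres-tracked slot holding a pointer to the pool */
        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
                devres_add(dev, ptr);   /* released automatically on detach */
        else
                devres_free(ptr);

        return pool;
}
#endif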