Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | Download | RSS feed

  1. /* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
  2. /*
  3.  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
  4.  *
  5.  * The Weather Channel (TM) funded Tungsten Graphics to develop the
  6.  * initial release of the Radeon 8500 driver under the XFree86 license.
  7.  * This notice must be preserved.
  8.  *
  9.  * Permission is hereby granted, free of charge, to any person obtaining a
  10.  * copy of this software and associated documentation files (the "Software"),
  11.  * to deal in the Software without restriction, including without limitation
  12.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13.  * and/or sell copies of the Software, and to permit persons to whom the
  14.  * Software is furnished to do so, subject to the following conditions:
  15.  *
  16.  * The above copyright notice and this permission notice (including the next
  17.  * paragraph) shall be included in all copies or substantial portions of the
  18.  * Software.
  19.  *
  20.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23.  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  26.  * DEALINGS IN THE SOFTWARE.
  27.  *
  28.  * Authors:
  29.  *    Keith Whitwell <keith@tungstengraphics.com>
  30.  */
  31.  
  32. #define USED_BLOCK  1
  33.  
  34. #define list_for_each(entry, head)        \
  35.    for (entry = (head)->next; entry != head; entry = (entry)->next)
  36.  
  37.  
  38. /* Very simple allocator for GART memory, working on a static range
  39.  * already mapped into each client's address space.
  40.  */
  41.  
/* One node in a circular doubly-linked list of address ranges that
 * partition the managed region.  The list head is a sentinel created
 * by init_heap() and is permanently flagged USED_BLOCK so it is never
 * allocated or merged away.
 * NOTE(review): bit 0 of `start` is overloaded as the USED_BLOCK
 * "allocated" flag; mask it off (`start & ~USED_BLOCK`) before using
 * the value as an address.
 */
struct mem_block
{
  struct mem_block *next;   /* next block in the circular list */
  struct mem_block *prev;   /* previous block in the circular list */
  addr_t            start;  /* range base; bit 0 = USED_BLOCK flag */
  size_t            size;   /* range length in bytes */
};
  49.  
  50. /* Initialize.  How to check for an uninitialized heap?
  51.  */
  52. static int init_heap(struct mem_block **heap, int start, int size)
  53. {
  54.     struct mem_block *blocks = malloc(sizeof(*blocks));
  55.  
  56.     if (!blocks)
  57.         return -1; //-ENOMEM;
  58.  
  59.     *heap = malloc(sizeof(**heap));
  60.     if (!*heap)
  61.     {
  62.         free(blocks);
  63.         return -1; //-ENOMEM;
  64.         }
  65.  
  66.     blocks->start = start;
  67.     blocks->size  = size;
  68.     blocks->next  = blocks->prev = *heap;
  69.  
  70.     __clear(*heap,sizeof(**heap));
  71.         (*heap)->next = (*heap)->prev = blocks;
  72.     (*heap)->start |= USED_BLOCK;
  73.         return 0;
  74. }
  75.  
  76. static struct mem_block **get_heap(RHDPtr rhdPtr, int region)
  77. {
  78.     switch (region)
  79.     {
  80.         case RHD_MEM_GART:
  81.             return &rhdPtr->gart_heap;
  82.         case RHD_MEM_FB:
  83.             return &rhdPtr->fb_heap;
  84.         default:
  85.         return NULL;
  86.         }
  87. }
  88.  
  89. static struct mem_block *split_block(struct mem_block *p, int size)
  90. {
  91.  
  92.         /* Maybe cut off the end of an existing block */
  93.     if (size < p->size)
  94.     {
  95.         struct mem_block *newblock = malloc(sizeof(*newblock));
  96.         if (!newblock)
  97.             goto out;
  98.         newblock->start = p->start + size;
  99.         newblock->size = p->size - size;
  100.         newblock->next = p->next;
  101.         newblock->prev = p;
  102.         p->next->prev = newblock;
  103.         p->next = newblock;
  104.         p->size = size;
  105.         p->start|=USED_BLOCK;
  106.     }
  107.  
  108. out:
  109.     return p;
  110. }
  111.  
  112. static struct mem_block *alloc_block(struct mem_block *heap, int size)
  113. {
  114.         struct mem_block *p;
  115.  
  116.     list_for_each(p, heap)
  117.     {
  118.         if ( !(p->start & USED_BLOCK) && size <= p->size)
  119.             return split_block(p, size);
  120.         }
  121.  
  122.         return NULL;
  123. }
  124.  
  125.  
  126. static struct mem_block *find_block(struct mem_block *heap, int start)
  127. {
  128.         struct mem_block *p;
  129.  
  130.         list_for_each(p, heap)
  131.     if ((p->start & ~USED_BLOCK) == start)
  132.                         return p;
  133.         return NULL;
  134. }
  135.  
/* Mark block `p` free and coalesce it with free neighbours.  Merge
 * order matters: the successor is folded into `p` first, then `p`
 * itself may be folded into a free predecessor (freeing `p`).  The
 * sentinel heap head is permanently USED_BLOCK, so these merges can
 * never consume it or walk off the managed range. */
static void free_block(struct mem_block *p)
{

        /* Assumes a single contiguous range.  Needs a special file_priv in
         * 'heap' to stop it being subsumed.
         */

    /* Clear the allocated flag; `p` is now a free block. */
    p->start &= ~USED_BLOCK;

    /* If the next block is free, absorb it into `p`. */
    if ( !(p->next->start & USED_BLOCK))
    {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
        free(q);
        }

    /* If the previous block is free, absorb `p` into it.  `p` is
     * freed here, so it must not be touched afterwards. */
    if ( !(p->prev->start & USED_BLOCK))
    {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
        free(p);
        }
}
  163.  
  164. int rhdInitHeap(RHDPtr rhdPtr)
  165. {
  166.     int base = rhdPtr->FbFreeStart;
  167.  
  168.     return init_heap(&rhdPtr->fb_heap, base, rhdPtr->FbFreeSize);
  169. };
  170.  
  171. addr_t  rhd_mem_alloc(RHDPtr rhdPtr,int region, int size)
  172. {
  173.         struct mem_block *block, **heap;
  174.  
  175.     heap = get_heap(rhdPtr, region);
  176.     if (!heap || !*heap)
  177.         return 0;
  178.  
  179.         /* Make things easier on ourselves: all allocations at least
  180.          * 4k aligned.
  181.          */
  182.  
  183.     size = (size+4095) & ~4095;
  184.  
  185.     block = alloc_block(*heap, size);
  186.  
  187.         if (!block)
  188.         return 0;
  189.  
  190.     return (block->start & ~USED_BLOCK);
  191. }
  192.  
  193. int rhd_mem_free(RHDPtr rhdPtr, int region, addr_t offset)
  194. {
  195.         struct mem_block *block, **heap;
  196.  
  197.     heap = get_heap(rhdPtr, region);
  198.     if (!heap || !*heap)
  199.         return -1;
  200.  
  201.     block = find_block(*heap, (int)offset);
  202.         if (!block)
  203.         return -1;
  204.  
  205.     if ( !(block->start & USED_BLOCK))
  206.         return -1;
  207.  
  208.         free_block(block);
  209.         return 0;
  210. }
  211.  
  212. void dump_mem()
  213. {
  214.         struct mem_block *p;
  215.     struct mem_block **heap;
  216.  
  217.     heap = &rhd.fb_heap;
  218.  
  219.     list_for_each(p, *heap)
  220.     {
  221.         dbgprintf("block: %x  next: %x  prev: %x  start: %x  size:%x\n",
  222.               p,p->next,p->prev,p->start,p->size);
  223.         }
  224. }
  225.  
  226.