/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell
 */

/* The low bit of a block's start offset doubles as its "allocated"
 * flag; allocations are at least 4 KiB aligned, so the bit is free.
 */
#define USED_BLOCK  1

/* Walks the blocks on a heap, skipping the 'head' sentinel itself. */
#define list_for_each(entry, head)        \
	for (entry = (head)->next; entry != head; entry = (entry)->next)

/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */
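
/* Lifecycle sketch (illustrative): rhdInitHeap() seeds a heap with one
 * free block covering the whole range; rhd_mem_alloc() splits blocks
 * off it first-fit; rhd_mem_free() hands them back and coalesces free
 * neighbours.
 */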

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;
	int size;
};
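
/* A heap is a circular doubly linked list: a zero-sized sentinel (the
 * node stored in rhdPtr->gart_heap / rhdPtr->fb_heap) plus blocks that
 * tile the managed range without gaps or overlaps.
 */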

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks));

	if (!blocks)
		return -1;              /* -ENOMEM */

	*heap = kmalloc(sizeof(**heap));
	if (!*heap) {
		kfree(blocks);
		return -1;              /* -ENOMEM */
	}

	blocks->start = start;
	blocks->size  = size;
	blocks->next  = blocks->prev = *heap;

	__clear(*heap, sizeof(**heap));

	/* Mark the sentinel used so free_block() never merges it away. */
	(*heap)->start = USED_BLOCK;

	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
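
/* Illustrative post-init state: after init_heap(&h, 0, 65536), h is
 * the used-marked sentinel and h->next is one free block [0,65536).
 * The driver actually passes rhdPtr->FbFreeStart / FbFreeSize below.
 */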

static struct mem_block **get_heap(RHDPtr rhdPtr, int region)
{
	switch (region) {
	case RHD_MEM_GART:
		return &rhdPtr->gart_heap;
	case RHD_MEM_FB:
		return &rhdPtr->fb_heap;
	default:
		return NULL;
	}
}

static struct mem_block *split_block(struct mem_block *p, int size)
{
	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock));
		if (!newblock)
			goto out;
		newblock->start = p->start + size;
		newblock->size  = p->size - size;
		newblock->next  = p->next;
		newblock->prev  = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Tag the block used after the label, so a request that exactly
	 * fills an existing block is marked allocated as well.
	 */
	p->start |= USED_BLOCK;
	return p;
}

/* First-fit: return the first free block large enough, split down to
 * exactly 'size' bytes.
 */
static struct mem_block *alloc_block(struct mem_block *heap, int size)
{
	struct mem_block *p;

	list_for_each(p, heap) {
		if (!(p->start & USED_BLOCK) && size <= p->size)
			return split_block(p, size);
	}

	return NULL;
}
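
/* Illustrative walk-through: with one free block [0,65536), a 4096
 * byte request returns a used block [0,4096) (start tagged with
 * USED_BLOCK) and leaves a free remainder [4096,65536) on the list.
 */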

static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	list_for_each(p, heap)
		if ((p->start & ~USED_BLOCK) == start)
			return p;

	return NULL;
}

static void free_block(struct mem_block *p)
{
	/* Assumes a single contiguous range.  The heap sentinel is
	 * marked used in init_heap() to stop it being subsumed here.
	 */

	p->start &= ~USED_BLOCK;

	/* Fold a free successor into this block... */
	if (!(p->next->start & USED_BLOCK)) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	/* ...then fold this block into a free predecessor. */
	if (!(p->prev->start & USED_BLOCK)) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}
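
/* Illustrative coalescing: freeing used block [4096,8192) while both
 * [0,4096) and [8192,65536) are free merges the successor first, then
 * folds the result into the predecessor, leaving one free block
 * [0,65536).
 */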

int rhdInitHeap(RHDPtr rhdPtr)
{
	int base = rhdPtr->FbFreeStart;

	return init_heap(&rhdPtr->fb_heap, base, rhdPtr->FbFreeSize);
}

void *rhd_mem_alloc(RHDPtr rhdPtr, int region, int size)
{
	struct mem_block *block, **heap;

	heap = get_heap(rhdPtr, region);
	if (!heap || !*heap)
		return NULL;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	size = (size + 4095) & ~4095;

	block = alloc_block(*heap, size);
	if (!block)
		return NULL;

	/* The caller gets the plain offset, without the used bit. */
	return (void*)(block->start & ~USED_BLOCK);
}

int rhd_mem_free(RHDPtr rhdPtr, int region, void *offset)
{
	struct mem_block *block, **heap;

	heap = get_heap(rhdPtr, region);
	if (!heap || !*heap)
		return -1;

	block = find_block(*heap, (int)offset);
	if (!block)
		return -1;

	/* Refuse offsets that are not currently allocated. */
	if (!(block->start & USED_BLOCK))
		return -1;

	free_block(block);
	return 0;
}
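
#if 0
/* Usage sketch (illustrative only, not part of the driver): carve a
 * 16 KiB region out of the framebuffer heap and hand it back.  Note
 * the return value is a byte offset cast to void*, not a CPU pointer.
 */
static void rhd_mem_example(RHDPtr rhdPtr)
{
	void *offset = rhd_mem_alloc(rhdPtr, RHD_MEM_FB, 16*1024);

	if (offset) {
		/* ... program the chip with (int)offset ... */
		rhd_mem_free(rhdPtr, RHD_MEM_FB, offset);
	}
}
#endif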