/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell
 */

#include "common.h"
#include "rhd.h"

#define USED_BLOCK  1

#define list_for_each(entry, head)        \
    for (entry = (head)->next; entry != head; entry = (entry)->next)


/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */

struct mem_block {
    struct mem_block *next;
    struct mem_block *prev;
    int start;
    int size;
};
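
/* Layout note (added commentary, not from the original driver): each heap is a
 * circular doubly linked list of mem_block entries, ordered by address and
 * headed by a dummy sentinel.  Free and used blocks together always cover the
 * whole managed range; USED_BLOCK lives in bit 0 of 'start', which is expected
 * to be free because allocations are rounded up to 4 KiB in rhd_mem_alloc().
 */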

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
    struct mem_block *blocks = malloc(sizeof(*blocks));

    if (!blocks)
        return -1; //-ENOMEM;

    *heap = malloc(sizeof(**heap));
    if (!*heap)
    {
        free(blocks);
        return -1; //-ENOMEM;
    }

    blocks->start = start;
    blocks->size  = size;
    blocks->next  = blocks->prev = *heap;

    __clear(*heap, sizeof(**heap));

    /* Mark the sentinel as used so free_block() never merges it away
     * (see the "special file_priv" note in free_block below). */
    (*heap)->start = USED_BLOCK;

    (*heap)->next = (*heap)->prev = blocks;
    return 0;
}
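
/* Illustrative helper, not part of the original driver: walks a heap with the
 * same list_for_each/USED_BLOCK conventions used throughout this file and
 * returns how many bytes are still free.  The name heap_free_bytes is made up
 * for this sketch; nothing else in the file calls it.
 */
static int heap_free_bytes(struct mem_block *heap)
{
    struct mem_block *p;
    int free_bytes = 0;

    list_for_each(p, heap)
    {
        if (!(p->start & USED_BLOCK))
            free_bytes += p->size;
    }

    return free_bytes;
}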

static struct mem_block **get_heap(RHDPtr rhdPtr, int region)
{
    switch (region)
    {
        case RHD_MEM_GART:
            return &rhdPtr->gart_heap;
        case RHD_MEM_FB:
            return &rhdPtr->fb_heap;
        default:
            return NULL;
    }
}

static struct mem_block *split_block(struct mem_block *p, int size)
{
    /* Maybe cut off the end of an existing block */
    if (size < p->size)
    {
        struct mem_block *newblock = malloc(sizeof(*newblock));
        if (!newblock)
            goto out;
        newblock->start = p->start + size;
        newblock->size  = p->size - size;
        newblock->next  = p->next;
        newblock->prev  = p;
        p->next->prev   = newblock;
        p->next         = newblock;
        p->size         = size;
    }

out:
    /* Mark the block as used whether it was split, an exact fit, or handed
     * out whole because the split allocation failed. */
    p->start |= USED_BLOCK;
    return p;
}

/* First fit over the free blocks; the caller rounds 'size' up to 4 KiB. */
static struct mem_block *alloc_block(struct mem_block *heap, int size)
{
    struct mem_block *p;

    list_for_each(p, heap)
    {
        if (!(p->start & USED_BLOCK) && size <= p->size)
            return split_block(p, size);
    }

    return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, int start)
{
    struct mem_block *p;

    list_for_each(p, heap)
        if ((p->start & ~USED_BLOCK) == start)
            return p;

    return NULL;
}

static void free_block(struct mem_block *p)
{
    /* Clear the used bit first so the block can be merged and reallocated. */
    p->start &= ~USED_BLOCK;

    /* Assumes a single contiguous range.  Needs a special file_priv in
     * 'heap' to stop it being subsumed.
     */
    if (!(p->next->start & USED_BLOCK))
    {
        struct mem_block *q = p->next;
        p->size += q->size;
        p->next = q->next;
        p->next->prev = p;
        free(q);
    }

    if (!(p->prev->start & USED_BLOCK))
    {
        struct mem_block *q = p->prev;
        q->size += p->size;
        q->next = p->next;
        q->next->prev = q;
        free(p);
    }
}

int rhdInitHeap(RHDPtr rhdPtr)
{
    int base = rhdPtr->FbBase + rhdPtr->FbFreeStart;

    return init_heap(&rhdPtr->fb_heap, base, rhdPtr->FbFreeSize);
}

void *rhd_mem_alloc(RHDPtr rhdPtr, int region, int size)
{
    struct mem_block *block, **heap;

    heap = get_heap(rhdPtr, region);
    if (!heap || !*heap)
        return NULL;

    /* Make things easier on ourselves: all allocations at least
     * 4k aligned.
     */
    size = (size + 4095) & ~4095;

    block = alloc_block(*heap, size);
    if (!block)
        return NULL;

    return (void*)(block->start & ~USED_BLOCK);
}

int rhd_mem_free(RHDPtr rhdPtr, int region, void *offset)
{
    struct mem_block *block, **heap;

    heap = get_heap(rhdPtr, region);
    if (!heap || !*heap)
        return -1;

    block = find_block(*heap, (int)offset);
    if (!block)
        return -1;

    if (!(block->start & USED_BLOCK))
        return -1;

    free_block(block);
    return 0;
}
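
/* Illustrative usage sketch, not part of the original driver: how a caller
 * might round-trip an allocation from the framebuffer heap after
 * rhdInitHeap() has run.  The function name and the 16 KiB size are made up
 * for the example; the request is rounded up to a 4 KiB multiple internally.
 */
static int rhd_mem_example(RHDPtr rhdPtr)
{
    void *surf = rhd_mem_alloc(rhdPtr, RHD_MEM_FB, 16*1024);
    if (!surf)
        return -1;                  /* heap missing or out of space */

    /* ... use the linear range starting at 'surf' here ... */

    return rhd_mem_free(rhdPtr, RHD_MEM_FB, surf);
}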