Kolibri OS Subversion: radeon_mem.c (Rev 808 -> Rev 817)
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell
 */

#define USED_BLOCK  1

#define list_for_each(entry, head)        \
    for (entry = (head)->next; entry != head; entry = (entry)->next)
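
/* The heap is a circular doubly linked list with a sentinel head:
 * list_for_each() starts at head->next and stops when it wraps back to
 * the head, so the sentinel itself is never visited.
 */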

/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */

struct mem_block {
    struct mem_block *next;
    struct mem_block *prev;
    int start;          /* block offset; the low bit doubles as the USED_BLOCK tag */
    int size;
};
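
/* A minimal sketch (hypothetical, not used by the driver) of walking the
 * heap with list_for_each(): it counts every block hanging off the sentinel.
 */
#if 0
static int count_blocks(struct mem_block *heap)
{
    struct mem_block *p;
    int n = 0;

    list_for_each(p, heap)
        n++;

    return n;
}
#endif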

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
    struct mem_block *blocks = kmalloc(sizeof(*blocks));

    if (!blocks)
        return -1; //-ENOMEM;

    *heap = kmalloc(sizeof(**heap));
    if (!*heap)
    {
        kfree(blocks);
        return -1; //-ENOMEM;
    }

    /* One free block covering the whole range, linked to a sentinel head. */
    blocks->start = start;
    blocks->size  = size;
    blocks->next  = blocks->prev = *heap;

    __clear(*heap, sizeof(**heap));
    (*heap)->start = USED_BLOCK;    /* tag the sentinel so free_block() never merges it away */
    (*heap)->next  = (*heap)->prev = blocks;
    return 0;
}
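
/* After a successful init_heap() the list is:
 *
 *     sentinel {start = USED_BLOCK, size = 0}  <->  free {start, size}
 *
 * i.e. the whole managed range is a single free block.
 */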

static struct mem_block **get_heap(RHDPtr rhdPtr, int region)
{
    switch (region)
    {
        case RHD_MEM_GART:
            return &rhdPtr->gart_heap;
        case RHD_MEM_FB:
            return &rhdPtr->fb_heap;
        default:
            return NULL;
    }
}

static struct mem_block *split_block(struct mem_block *p, int size)
{
    /* Maybe cut off the end of an existing block */
    if (size < p->size)
    {
        struct mem_block *newblock = kmalloc(sizeof(*newblock));
        if (!newblock)
            goto out;
        newblock->start = p->start + size;
        newblock->size  = p->size - size;
        newblock->next  = p->next;
        newblock->prev  = p;
        p->next->prev = newblock;
        p->next = newblock;
        p->size = size;
    }

out:
    /* Tag the returned block as used on the common exit path, so an
     * exact fit (size == p->size) is tagged as well.
     */
    p->start |= USED_BLOCK;
    return p;
}
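
/* Worked example (illustrative numbers): splitting a free block
 * {start = 0x10000, size = 0x10000} for a 0x4000-byte request leaves
 * {start = 0x10000 | USED_BLOCK, size = 0x4000} followed by a new free
 * block {start = 0x14000, size = 0xC000}.
 */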

static struct mem_block *alloc_block(struct mem_block *heap, int size)
{
    struct mem_block *p;

    /* First fit: take the first free block that is large enough. */
    list_for_each(p, heap)
    {
        if (!(p->start & USED_BLOCK) && size <= p->size)
            return split_block(p, size);
    }

    return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, int start)
{
    struct mem_block *p;

    list_for_each(p, heap)
        if ((p->start & ~USED_BLOCK) == start)
            return p;

    return NULL;
}

static void free_block(struct mem_block *p)
{
    /* Assumes a single contiguous range.  The sentinel head is tagged
     * USED_BLOCK (see init_heap) to stop it being subsumed here.
     */

    p->start &= ~USED_BLOCK;

    /* Coalesce with the following block if it is free. */
    if (!(p->next->start & USED_BLOCK))
    {
        struct mem_block *q = p->next;
        p->size += q->size;
        p->next = q->next;
        p->next->prev = p;
        kfree(q);
    }

    /* Coalesce with the preceding block if it is free. */
    if (!(p->prev->start & USED_BLOCK))
    {
        struct mem_block *q = p->prev;
        q->size += p->size;
        q->next = p->next;
        q->next->prev = q;
        kfree(p);
    }
}
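
/* Example: if the list is  sentinel <-> {0x0 | USED_BLOCK, 0x1000} <-> {0x1000, 0x3000},
 * freeing the first block clears its tag and merges the free 0x3000-byte
 * neighbour into it, leaving a single free block {0x0, 0x4000}.
 */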

int rhdInitHeap(RHDPtr rhdPtr)
{
-   int base = rhdPtr->FbBase + rhdPtr->FbFreeStart;    /* Rev 808 */
+   int base = rhdPtr->FbFreeStart;                     /* Rev 817 */

    return init_heap(&rhdPtr->fb_heap, base, rhdPtr->FbFreeSize);
}
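
/* The Rev 808 -> Rev 817 change above drops FbBase from the heap base, so
 * the allocator presumably hands out offsets relative to the framebuffer
 * start rather than aperture addresses (a reading of the diff, not
 * confirmed by the callers shown here).
 */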

void *rhd_mem_alloc(RHDPtr rhdPtr, int region, int size)
{
    struct mem_block *block, **heap;

    heap = get_heap(rhdPtr, region);
    if (!heap || !*heap)
        return NULL;

    /* Make things easier on ourselves: all allocations at least
     * 4k aligned.
     */
    size = (size + 4095) & ~4095;

    block = alloc_block(*heap, size);

    if (!block)
        return NULL;

    return (void*)(block->start & ~USED_BLOCK);
}

int rhd_mem_free(RHDPtr rhdPtr, int region, void *offset)
{
    struct mem_block *block, **heap;

    heap = get_heap(rhdPtr, region);
    if (!heap || !*heap)
        return -1;

    block = find_block(*heap, (int)offset);
    if (!block)
        return -1;

    if (!(block->start & USED_BLOCK))
        return -1;

    free_block(block);
    return 0;
}
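
/* Usage sketch (hypothetical caller, not part of this file): carve a 4 KiB
 * scratch area out of the framebuffer heap and release it again.  Note that
 * rhd_mem_alloc() returns an offset cast to void*, not a CPU pointer.
 */
#if 0
static void example_scratch(RHDPtr rhdPtr)
{
    void *ofs = rhd_mem_alloc(rhdPtr, RHD_MEM_FB, 4096);

    if (ofs)
        rhd_mem_free(rhdPtr, RHD_MEM_FB, ofs);
}
#endif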