#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "nouveau_winsys.h"
#include "nouveau_screen.h"
#include "nouveau_mm.h"

/* TODO: Higher orders can waste a lot of space for npot size buffers, should
 * add an extra cache for such buffer objects.
 *
 * HACK: Max order == 21 to accommodate TF2's 1.5 MiB, frequently reallocated
 * vertex buffer (VM flush (?) decreases performance dramatically).
 */

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 21

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
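
/* Slabs are grouped into buckets by chunk order (power-of-two chunk size).
 * Each bucket keeps its slabs on three lists by occupancy: free (no chunks
 * handed out), used (partially occupied) and full, so allocation can always
 * pick a slab with a vacant chunk in O(1).
 */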
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t domain;
   union nouveau_bo_config config;
   uint64_t allocated;
};

struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0]; /* occupancy bitmap: a set bit marks a free chunk */
};
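
/* Find the first free chunk in the slab's bitmap (first set bit), clear it
 * and return its chunk index, or -1 if the slab has no vacancy.
 */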
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

/* Mark chunk i as free again by setting its bit in the occupancy bitmap. */
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}
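
/* Bucket order for a request: ceil(log2(size)). For non-zero size,
 * __builtin_clz(size) ^ 31 is floor(log2(size)); round up when size is not
 * a power of two.
 */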
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}
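
/* Map an order to its bucket. Sizes above MM_MAX_SIZE are not cached and
 * yield NULL; sizes below MM_MIN_SIZE share the smallest bucket.
 */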
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}
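
/* Create a slab for the given chunk order: allocate the header plus one
 * occupancy bit per chunk, back it with a new bo, and put it on the
 * bucket's free list.
 */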
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;

   ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                        &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}
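
/* Suballocate size bytes: requests larger than MM_MAX_SIZE bypass the cache
 * and get a dedicated bo, otherwise a chunk is carved out of a slab from
 * the matching bucket (growing the bucket with a new slab when needed).
 */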
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                           bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n",
                      size, cache->config.nv50.memtype, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}
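
/* Return a chunk to its slab and move the slab between the bucket's lists:
 * back to "used" when it was full, and onto "free" once every chunk has
 * been returned.
 */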
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}
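
/* Thin wrapper matching a void * callback signature, so a free can be
 * handed to deferred-work machinery (e.g. run once the GPU is done with
 * the chunk).
 */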
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  union nouveau_bo_config *config)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->config = *config;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}
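
/* Typical usage (sketch only; the domain flag and bo config shown here are
 * illustrative and depend on the winsys headers):
 *
 *    struct nouveau_mman *mm = nouveau_mm_create(dev, NOUVEAU_BO_VRAM, &cfg);
 *    struct nouveau_bo *bo = NULL;
 *    uint32_t offset;
 *    struct nouveau_mm_allocation *a =
 *       nouveau_mm_allocate(mm, 4096, &bo, &offset);
 *    // ... use bo at byte offset "offset" ...
 *    if (a)
 *       nouveau_mm_free(a);   // or defer via nouveau_mm_free_work(a)
 *    nouveau_bo_ref(NULL, &bo);
 *    nouveau_mm_destroy(mm);
 */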

static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}