Rev 1066 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1066 | Rev 2971 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | #include |
2 | #include |
3 | #include |
3 | #include |
4 | #include |
4 | #include |
5 | #include |
5 | #include |
6 | 6 | ||
/* Boot-time bump-allocator watermark: first free physical address
 * after the kernel image (used below as the start of reservable RAM). */
extern u32_t pg_balloc;

/* Total amount of physical memory, in bytes (shifted by PAGE_WIDTH
 * in init_mm() to obtain the page count). */
extern u32_t mem_amount;

/* Boot-time allocator entry point (defined elsewhere). */
void __fastcall *balloc(size_t size);

/* The single physical-memory zone covering all of core RAM.
 * Guarded by z_core.lock; carved up by the buddy system below. */
static zone_t z_core;

/* Buddy-system implementation is textually included, operating on z_core. */
#include "buddy.inc"
15 | 15 | ||
/**
 * Page slab: a 512-page (2 MiB) buddy block whose first page holds this
 * descriptor and whose remaining 511 pages are handed out one at a time
 * by alloc_page().
 */
typedef struct
{
    link_t link;            /* list linkage; MUST stay first — alloc_page()
                             * casts partial_slabs.next directly to pslab_t* */
    SPINLOCK_DECLARE(lock); /* per-slab lock (slab lists are currently
                             * serialized by page_cache.lock instead) */
    u32_t state;            /* 1 = on partial list, 0 = on full list */
    void *parent;           /* owning cache (&page_cache) */
    count_t avail;          /* number of free pages remaining (max 511) */
    addr_t base;            /* physical address of first allocatable page
                             * (slab start + PAGE_SIZE) */
    index_t next;           /* index of the next free page within the slab */
    int list[512];          /* embedded free list: list[i] = index that
                             * becomes `next` after page i is taken */
}pslab_t;
27 | 27 | ||
/**
 * Cache of page slabs: slabs with free pages live on partial_slabs,
 * fully allocated slabs on full_slabs. Both lists and the counter are
 * protected by `lock`.
 */
typedef struct
{
    SPINLOCK_DECLARE(lock);

    count_t partial_count;        /* number of slabs on partial_slabs */

    link_t full_slabs;            /**< List of full slabs */
    link_t partial_slabs;         /**< List of partial slabs */
}pcache_t;
37 | 37 | ||
/* Global single-page allocator state used by alloc_page()/frame_free(). */
static pcache_t page_cache;

/* Carve a new 512-page slab out of the buddy system (NULL on failure). */
static pslab_t *create_page_slab();
41 | 41 | ||
42 | 42 | ||
43 | 43 | ||
44 | void init_mm() |
44 | void init_mm() |
45 | { |
45 | { |
46 | int i; |
46 | int i; |
47 | 47 | ||
48 | u32_t base; |
48 | u32_t base; |
49 | u32_t size; |
49 | u32_t size; |
50 | count_t pages; |
50 | count_t pages; |
51 | size_t conf_size; |
51 | size_t conf_size; |
52 | size_t core_size; |
52 | size_t core_size; |
53 | pslab_t *slab; |
53 | pslab_t *slab; |
54 | 54 | ||
55 | pages = mem_amount >> PAGE_WIDTH; |
55 | pages = mem_amount >> PAGE_WIDTH; |
56 | DBG("last page = %x total pages = %x\n",mem_amount, pages); |
56 | DBG("last page = %x total pages = %x\n",mem_amount, pages); |
57 | 57 | ||
58 | conf_size = pages*sizeof(frame_t); |
58 | conf_size = pages*sizeof(frame_t); |
59 | DBG("conf_size = %x free mem start =%x\n",conf_size, pg_balloc); |
59 | DBG("conf_size = %x free mem start =%x\n",conf_size, pg_balloc); |
60 | 60 | ||
61 | zone_create(&z_core, 0, pages); |
61 | zone_create(&z_core, 0, pages); |
62 | zone_release(&z_core, 0, pages); |
62 | zone_release(&z_core, 0, pages); |
63 | zone_reserve(&z_core, 0, pg_balloc >> PAGE_WIDTH); |
63 | zone_reserve(&z_core, 0, pg_balloc >> PAGE_WIDTH); |
64 | 64 | ||
65 | list_initialize(&page_cache.full_slabs); |
65 | list_initialize(&page_cache.full_slabs); |
66 | list_initialize(&page_cache.partial_slabs); |
66 | list_initialize(&page_cache.partial_slabs); |
67 | 67 | ||
68 | slab = create_page_slab(); |
68 | slab = create_page_slab(); |
69 | 69 | ||
70 | ASSERT(slab); |
70 | ASSERT(slab); |
71 | 71 | ||
72 | slab->parent = &page_cache; |
72 | slab->parent = &page_cache; |
73 | page_cache.partial_count++; |
73 | page_cache.partial_count++; |
74 | list_prepend(&slab->link, &page_cache.partial_slabs); |
74 | list_prepend(&slab->link, &page_cache.partial_slabs); |
75 | }; |
75 | }; |
76 | 76 | ||
77 | /** Return wasted space in slab */ |
77 | /** Return wasted space in slab */ |
78 | static unsigned int badness(index_t order, size_t size) |
78 | static unsigned int badness(index_t order, size_t size) |
79 | { |
79 | { |
80 | unsigned int objects; |
80 | unsigned int objects; |
81 | unsigned int ssize; |
81 | unsigned int ssize; |
82 | 82 | ||
83 | ssize = PAGE_SIZE << order; |
83 | ssize = PAGE_SIZE << order; |
84 | objects = (PAGE_SIZE << order) / size; |
84 | objects = (PAGE_SIZE << order) / size; |
85 | return ssize - objects * size; |
85 | return ssize - objects * size; |
86 | } |
86 | } |
87 | 87 | ||
/* Maximum tolerable waste per slab: one quarter of the slab size. */
#define SLAB_MAX_BADNESS(order) (((size_t) PAGE_SIZE << (order)) >> 2)
89 | 89 | ||
90 | 90 | ||
/**
 * Allocate a fresh page slab from the buddy system.
 *
 * Takes an order-9 block (2^9 = 512 contiguous page frames) from z_core.
 * The first page of the block is mapped (via PA2KA) and used to hold the
 * pslab_t descriptor itself; the remaining 511 pages become the slab's
 * allocatable pool, threaded onto the embedded free list.
 *
 * Returns the kernel-address pointer to the new slab, or NULL if the
 * buddy system has no order-9 block free.
 *
 * Locking: acquires and releases z_core.lock; caller links the slab into
 * page_cache (and sets parent/state) under page_cache.lock.
 */
static pslab_t *create_page_slab()
{
    pslab_t *slab;
    link_t *tmp;

    spinlock_lock(&z_core.lock);

    tmp = buddy_alloc(9);

    if( tmp != 0 )
    {
        frame_t *frame;
        int i;
        addr_t v;

        /* Update zone information. */
        z_core.free_count -= 512;
        z_core.busy_count += 512;

        spinlock_unlock(&z_core.lock);

        /* Frame will be actually a first frame of the block. */
        frame = (frame_t*)tmp;

        /* Head frame holds the descriptor, not a user page: parent = 0
         * makes frame_free() take the buddy path for it. */
        frame->parent = 0;

        /* Physical address of the block from its frame index in the zone. */
        v = (z_core.base + (index_t)(frame - z_core.frames)) << PAGE_WIDTH;

        slab = (pslab_t*)PA2KA(v);

        /* The other 511 frames point back at the slab so frame_free()
         * can route them to the slab free list. */
        for(i = 1; i < 512; i++)
            frame[i].parent = slab;

        /* First allocatable page starts right after the descriptor page. */
        slab->base = v + PAGE_SIZE;

        slab->avail = 511;
        slab->next = 0;

        /* Thread the free list: page i is followed by page i+1. */
        for(i = 0; i < 511; i++)
            slab->list[i] = i + 1;

    }
    else
    {
        spinlock_unlock(&z_core.lock);
        slab = NULL;
    };

    DBG("create page slab at %x\n", slab);

    return slab;
}
143 | 143 | ||
/**
 * Return a page slab's 512-page block to the buddy system.
 *
 * The slab descriptor lives in the block's first page, so KA2PA(slab)
 * recovers the block's physical address, from which the head frame is
 * located. The block is freed only when the head frame's refcount drops
 * to zero.
 *
 * Locking: takes z_core.lock around the buddy free and zone counters.
 * NOTE(review): the refcount decrement itself happens outside the lock —
 * presumably safe under the callers' interrupt-disable discipline; verify.
 */
static void destroy_page_slab(pslab_t *slab)
{
    u32_t order;
    count_t idx;
    frame_t *frame;


    /* Frame index of the slab's first page within the zone. */
    idx = (KA2PA(slab) >> PAGE_WIDTH)-z_core.base;

    frame = &z_core.frames[idx];

    /* remember frame order */
    order = frame->buddy_order;

    ASSERT(frame->refcount);

    if (!--frame->refcount)
    {
        spinlock_lock(&z_core.lock);

        buddy_system_free(&frame->buddy_link);

        /* Update zone information. */
        z_core.free_count += (1 << order);
        z_core.busy_count -= (1 << order);

        spinlock_unlock(&z_core.lock);
    }
}
173 | 173 | ||
174 | #if 0 |
174 | #if 0 |
175 | fslab_t *create_slab(index_t order, size_t size) |
175 | fslab_t *create_slab(index_t order, size_t size) |
176 | { |
176 | { |
177 | fslab_t *slab; |
177 | fslab_t *slab; |
178 | 178 | ||
179 | slab = (fslab_t*)PA2KA(frame_alloc(0)); |
179 | slab = (fslab_t*)PA2KA(frame_alloc(0)); |
180 | 180 | ||
181 | if( slab ) |
181 | if( slab ) |
182 | { |
182 | { |
183 | link_t *tmp; |
183 | link_t *tmp; |
184 | 184 | ||
185 | spinlock_lock(&z_core.lock); |
185 | spinlock_lock(&z_core.lock); |
186 | 186 | ||
187 | tmp = buddy_alloc(order); |
187 | tmp = buddy_alloc(order); |
188 | ASSERT(tmp); |
188 | ASSERT(tmp); |
189 | 189 | ||
190 | if( tmp ) |
190 | if( tmp ) |
191 | { |
191 | { |
192 | frame_t *frame; |
192 | frame_t *frame; |
193 | count_t objects; |
193 | count_t objects; |
194 | count_t i; |
194 | count_t i; |
195 | addr_t v; |
195 | addr_t v; |
196 | 196 | ||
197 | /* Update zone information. */ |
197 | /* Update zone information. */ |
198 | z_core.free_count -= (1 << order); |
198 | z_core.free_count -= (1 << order); |
199 | z_core.busy_count += (1 << order); |
199 | z_core.busy_count += (1 << order); |
200 | 200 | ||
201 | spinlock_unlock(&z_heap.lock); |
201 | spinlock_unlock(&z_heap.lock); |
202 | 202 | ||
203 | /* Frame will be actually a first frame of the block. */ |
203 | /* Frame will be actually a first frame of the block. */ |
204 | frame = (frame_t*)tmp; |
204 | frame = (frame_t*)tmp; |
205 | 205 | ||
206 | for(i = 0; i < (1U< |
206 | for(i = 0; i < (1U< |
207 | frame[i].parent = slab; |
207 | frame[i].parent = slab; |
208 | 208 | ||
209 | /* get frame address */ |
209 | /* get frame address */ |
210 | v = z_core.base + (index_t)(frame - z_core.frames); |
210 | v = z_core.base + (index_t)(frame - z_core.frames); |
211 | 211 | ||
212 | slab->base = (v << PAGE_WIDTH); |
212 | slab->base = (v << PAGE_WIDTH); |
213 | 213 | ||
214 | slab->avail = (PAGE_SIZE << order) / size; |
214 | slab->avail = (PAGE_SIZE << order) / size; |
215 | slab->next = 0; |
215 | slab->next = 0; |
216 | 216 | ||
217 | objects = (PAGE_SIZE << order) / size; |
217 | objects = (PAGE_SIZE << order) / size; |
218 | 218 | ||
219 | for(i = 0; i < objects; i++) |
219 | for(i = 0; i < objects; i++) |
220 | slab->list[i] = i + 1; |
220 | slab->list[i] = i + 1; |
221 | } |
221 | } |
222 | else |
222 | else |
223 | { |
223 | { |
224 | spinlock_unlock(&z_core.lock); |
224 | spinlock_unlock(&z_core.lock); |
225 | frame_free(KA2PA(slab)); |
225 | frame_free(KA2PA(slab)); |
226 | slab = NULL; |
226 | slab = NULL; |
227 | }; |
227 | }; |
228 | }; |
228 | }; |
229 | 229 | ||
230 | return slab; |
230 | return slab; |
231 | } |
231 | } |
232 | 232 | ||
233 | static void destroy_slab(fslab_t *slab) |
233 | static void destroy_slab(fslab_t *slab) |
234 | { |
234 | { |
235 | u32_t order; |
235 | u32_t order; |
236 | count_t idx; |
236 | count_t idx; |
237 | frame_t *frame; |
237 | frame_t *frame; |
238 | 238 | ||
239 | idx = (slab->base >> PAGE_WIDTH)-z_core.base; |
239 | idx = (slab->base >> PAGE_WIDTH)-z_core.base; |
240 | frame = &z_core.frames[idx]; |
240 | frame = &z_core.frames[idx]; |
241 | 241 | ||
242 | /* remember frame order */ |
242 | /* remember frame order */ |
243 | order = frame->buddy_order; |
243 | order = frame->buddy_order; |
244 | 244 | ||
245 | ASSERT(frame->refcount); |
245 | ASSERT(frame->refcount); |
246 | 246 | ||
247 | if (!--frame->refcount) |
247 | if (!--frame->refcount) |
248 | { |
248 | { |
249 | spinlock_lock(&z_core.lock); |
249 | spinlock_lock(&z_core.lock); |
250 | 250 | ||
251 | buddy_system_free(&frame->buddy_link); |
251 | buddy_system_free(&frame->buddy_link); |
252 | 252 | ||
253 | /* Update zone information. */ |
253 | /* Update zone information. */ |
254 | z_core.free_count += (1 << order); |
254 | z_core.free_count += (1 << order); |
255 | z_core.busy_count -= (1 << order); |
255 | z_core.busy_count -= (1 << order); |
256 | 256 | ||
257 | spinlock_unlock(&z_core.lock); |
257 | spinlock_unlock(&z_core.lock); |
258 | } |
258 | } |
259 | 259 | ||
260 | // slab_free(fslab, slab); |
260 | // slab_free(fslab, slab); |
261 | 261 | ||
262 | }; |
262 | }; |
263 | #endif |
263 | #endif |
264 | 264 | ||
/**
 * Allocate one physical page frame from the page cache.
 *
 * Pops a page off the first partial slab (creating a new slab if none
 * exist). A slab whose last page is taken is moved to the full list.
 *
 * Returns the physical address of the page, or 0 on allocation failure.
 *
 * Locking: disables interrupts (safe_cli) and holds page_cache.lock for
 * the whole operation; create_page_slab() additionally takes z_core.lock
 * inside that — lock order is page_cache.lock -> z_core.lock.
 */
addr_t alloc_page(void)
{
    eflags_t efl;
    pslab_t *slab;
    addr_t frame;

    efl = safe_cli();

    spinlock_lock(&page_cache.lock);

    if (list_empty(&page_cache.partial_slabs))
    {
        slab = create_page_slab();
        if (!slab)
        {
            spinlock_unlock(&page_cache.lock);
            safe_sti(efl);
            return 0;
        }
        slab->parent = &page_cache;
        slab->state = 1;                  /* on partial list */
        page_cache.partial_count++;
        list_prepend(&slab->link, &page_cache.partial_slabs);
    }
    else
        /* Valid only because link is the first member of pslab_t. */
        slab = (pslab_t*)page_cache.partial_slabs.next;

    /* Take page `next` and advance the embedded free list. */
    frame = slab->base + (slab->next << PAGE_WIDTH);
    slab->next = slab->list[slab->next];

    slab->avail--;
    if( slab->avail == 0 )
    {
        /* Slab exhausted: park it on the full list. */
        slab->state = 0;
        list_remove(&slab->link);
        list_prepend(&slab->link, &page_cache.full_slabs);
        page_cache.partial_count--;
        DBG("%s insert empty page slab\n", __FUNCTION__);
    };
    spinlock_unlock(&page_cache.lock);

//    DBG("alloc_page: %x remain %d\n", frame, slab->avail);

    safe_sti(efl);

    return frame;
}
312 | 312 | ||
313 | 313 | ||
314 | addr_t __fastcall frame_alloc(count_t count) |
314 | addr_t __fastcall frame_alloc(count_t count) |
315 | { |
315 | { |
316 | addr_t frame; |
316 | addr_t frame; |
317 | 317 | ||
318 | if ( count > 1) |
318 | if ( count > 1) |
319 | { |
319 | { |
320 | eflags_t efl; |
320 | eflags_t efl; |
321 | index_t order; |
321 | index_t order; |
322 | frame_t *tmp; |
322 | frame_t *tmp; |
323 | count_t i; |
323 | count_t i; |
324 | 324 | ||
325 | order = fnzb(count-1)+1; |
325 | order = fnzb(count-1)+1; |
326 | 326 | ||
327 | efl = safe_cli(); |
327 | efl = safe_cli(); |
328 | 328 | ||
329 | spinlock_lock(&z_core.lock); |
329 | spinlock_lock(&z_core.lock); |
330 | 330 | ||
331 | tmp = (frame_t*)buddy_alloc( order ); |
331 | tmp = (frame_t*)buddy_alloc( order ); |
332 | 332 | ||
333 | ASSERT(tmp); |
333 | ASSERT(tmp); |
334 | 334 | ||
335 | z_core.free_count -= (1 << order); |
335 | z_core.free_count -= (1 << order); |
336 | z_core.busy_count += (1 << order); |
336 | z_core.busy_count += (1 << order); |
337 | 337 | ||
338 | for(i = 0; i < (1 << order); i++) |
338 | for(i = 0; i < (1 << order); i++) |
339 | tmp[i].parent = NULL; |
339 | tmp[i].parent = NULL; |
340 | 340 | ||
341 | spinlock_unlock(&z_core.lock); |
341 | spinlock_unlock(&z_core.lock); |
342 | 342 | ||
343 | safe_sti(efl); |
343 | safe_sti(efl); |
344 | 344 | ||
345 | frame = (z_core.base + |
345 | frame = (z_core.base + |
346 | (index_t)(tmp - z_core.frames)) << PAGE_WIDTH; |
346 | (index_t)(tmp - z_core.frames)) << PAGE_WIDTH; |
347 | 347 | ||
348 | 348 | ||
349 | DBG("%s %x order %d remain %d\n", __FUNCTION__, |
349 | DBG("%s %x order %d remain %d\n", __FUNCTION__, |
350 | frame, order, z_core.free_count); |
350 | frame, order, z_core.free_count); |
351 | } |
351 | } |
352 | else |
352 | else |
353 | frame = alloc_page(); |
353 | frame = alloc_page(); |
354 | 354 | ||
355 | return frame; |
355 | return frame; |
356 | } |
356 | } |
357 | 357 | ||
/**
 * Free a physical frame (or frame block) previously allocated with
 * frame_alloc()/alloc_page().
 *
 * The frame's `parent` field decides the path: non-NULL means the page
 * belongs to a page slab and goes back on that slab's free list; NULL
 * means it is the head of a buddy block and is returned to the buddy
 * system (freeing the whole block of 2^buddy_order frames).
 *
 * Returns the number of frames released (1 for a slab page, 2^order for
 * a buddy block), or 0 if `addr` is outside the core zone.
 */
size_t __fastcall frame_free(addr_t addr)
{
    eflags_t efl;
    index_t idx;
    frame_t *frame;
    size_t frame_size;

    idx = addr >> PAGE_WIDTH;

    /* Reject addresses outside the zone's frame range. */
    if( (idx < z_core.base) ||
        (idx >= z_core.base+z_core.count)) {
        DBG("%s: invalid address %x\n", __FUNCTION__, addr);
        return 0;
    }

    efl = safe_cli();

    frame = &z_core.frames[idx-z_core.base];

    if( frame->parent != NULL )
    {
        /* Slab-owned page: push it back on the owning slab's free list. */
        pslab_t *slab;

        slab = frame->parent;

        spinlock_lock(&page_cache.lock);

        /* Page index within the slab's 511-page pool. */
        idx = (addr - slab->base) >> PAGE_WIDTH;

        ASSERT(idx < 512);

        slab->list[idx] = slab->next;
        slab->next = idx;

        slab->avail++;

        /* A full slab with a few pages back (hysteresis threshold 4)
         * is moved to the partial list so alloc_page() can see it. */
        if( (slab->state == 0 ) &&
            (slab->avail >= 4))
        {
            slab->state = 1;
            list_remove(&slab->link);
            list_prepend(&slab->link, &page_cache.partial_slabs);
            page_cache.partial_count++;

            DBG("%s: insert partial page slab\n", __FUNCTION__);
        }
        spinlock_unlock(&page_cache.lock);

        frame_size = 1;
    }
    else
    {
        /* Buddy-owned block: release all 2^order frames at once. */
        count_t order;

        order = frame->buddy_order;

        DBG("%s %x order %d\n", __FUNCTION__, addr, order);

        ASSERT(frame->refcount);

        spinlock_lock(&z_core.lock);

        if (!--frame->refcount)
        {
            buddy_system_free(&frame->buddy_link);

            /* Update zone information. */
            z_core.free_count += (1 << order);
            z_core.busy_count -= (1 << order);
        }
        spinlock_unlock(&z_core.lock);

        frame_size = 1 << order;
    };
    safe_sti(efl);

    return frame_size;
}
436 | 436 | ||
437 | count_t get_free_mem() |
437 | count_t get_free_mem() |
438 | { |
438 | { |
439 | return z_core.free_count; |
439 | return z_core.free_count; |
440 | }><>><>><>>>><>><>>><>><>><>><>><>>><>><>><> |
440 | }><>><>><>>>><>><>>><>><>><>><>><>>><>><>><> |
441 | 441 | ||
- | 442 | ||
- | 443 | ||
- | 444 |