/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Jerome Glisse
 *          Pauli Nieminen
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
//#include <linux/highmem.h>
//#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
//#include <linux/dma-mapping.h>

//#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
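
/* For example, with 4 KiB pages and 8-byte struct page pointers,
 * NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512 pages per alloc/free
 * batch, i.e. one page worth of pointers (illustrative values for a
 * typical LP64 configuration). */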

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

#if 0
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only
 * place where they may change is the sysfs store. They won't have an
 * immediate effect anyway, so forcing serialization to access them is
 * pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation
 * counts as small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};
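
/* The anonymous union lets the four pools be addressed either by name
 * (wc_pool, uc_pool, ...) or by index through pools[], which is what
 * ttm_get_pool() below relies on. */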

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);
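	/* e.g. with PAGE_SIZE == 4096, PAGE_SIZE >> 10 == 4 KiB per page,
	 * so writing "64" (KiB) through sysfs stores 64 / 4 = 16 pages
	 * (worked example assuming a 4 KiB page size). */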

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
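
/* The index computed above selects a member of the pools[] union in
 * struct ttm_pool_manager: 0 -> wc_pool, 1 -> uc_pool,
 * 2 -> wc_pool_dma32, 3 -> uc_pool_dma32 (bit 0 picks uc over wc,
 * bit 1 picks the DMA32 variant). */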

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free at most
 * NUM_PAGES_TO_ALLOC pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: maximum number of pages to free; FREE_ALL_PAGES frees
 * everything in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
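
/* Note: ttm_page_alloc_fini() drains each pool by passing FREE_ALL_PAGES,
 * while the shrinker passes sc->nr_to_scan and uses the returned remainder
 * to compute how many pages were actually freed. */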

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
		freed += nr_free - shrink_pages;
	}
	return freed;
}

static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}
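
/* These two callbacks form the Linux 3.12+ shrinker pair: count_objects
 * reports the total number of pooled pages, after which the core may call
 * scan_objects to free up to sc->nr_to_scan of them. */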

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Pages that did
 * change state successfully remain on the list and are returned to the pool
 * by the caller.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool;
			 * count the newly allocated pages, not the ones
			 * already in the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct list_head *pages,
		int ttm_flags,
		enum ttm_caching_state cstate,
		unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include in the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
#endif
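
/* Everything above is compiled out (#if 0) in this port; the simplified
 * ttm_get_pages()/ttm_put_pages() below bypass the pools and use the
 * native AllocPage()/FreePage() primitives directly. */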

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	for (i = 0; i < npages; i++) {
		if (pages[i]) {
//			if (page_count(pages[i]) != 1)
//				pr_err("Erroneous page count. Leaking pages.\n");
			FreePage(pages[i]);
			pages[i] = NULL;
		}
	}
	return;

#if 0
	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
#endif

}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
//	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	for (r = 0; r < npages; ++r) {
		p = AllocPage();
		if (!p) {
			pr_err("Unable to allocate page\n");
			return -ENOMEM;
		}

		pages[r] = p;
	}
	return 0;

#if 0
	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back
			 * to the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}
#endif

	return 0;
}

#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

#endif
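
/* In the Linux tree these entry points are typically called from a driver's
 * ttm_tt_populate()/ttm_tt_unpopulate() hooks (e.g. radeon); in this port
 * they are backed directly by the AllocPage()/FreePage() paths above. */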

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);