/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Jerome Glisse
 *          Pauli Nieminen
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
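
/* NOTE: in this port the pool machinery below is compiled out (#if 0);
 * ttm_get_pages()/ttm_put_pages() fall through to the host system page
 * allocator (AllocPage()/FreePage()) instead. */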

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
//#include <linux/highmem.h>
//#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

//#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
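
/* Note: NUM_PAGES_TO_ALLOC is one page worth of struct page pointers --
 * e.g. 1024 entries with 4 KiB pages and 4-byte pointers, or 512 with
 * 8-byte pointers. The same constant caps both refill and free batches
 * throughout this file. */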

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#if 0
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have immediate effect
 * anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};
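
/* The union above lets the shrinker walk the four pools by index
 * (pools[i]) while init code refers to them by name; both views alias
 * the same storage. */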

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);
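	/* e.g. with 4 KiB pages PAGE_SIZE >> 10 == 4, so a sysfs write of
	 * "1024" (KiB) stores 256 pages. */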

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
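/* Index encoding (a reading of the code below): bit 0 selects caching
 * (0 = wc, 1 = uc) and bit 1 is set for DMA32 pools, matching the order
 * of the named pools in struct ttm_pool_manager:
 *   0 -> wc_pool, 1 -> uc_pool, 2 -> wc_pool_dma32, 3 -> uc_pool_dma32 */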
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in
 * the pool. Returns how many of the requested pages could not be freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
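		/* ttm_page_pool_free() returns how many of nr_free it could
		 * not free, so the difference below is what this pool
		 * actually released. */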
		shrink_pages = ttm_page_pool_free(pool, nr_free);
		freed += nr_free - shrink_pages;
	}
	return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
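	/* seeks = 1 is half of the kernel's DEFAULT_SEEKS: pooled pages are
	 * cheap to recreate, so let the VM reclaim them relatively eagerly. */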
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. If any pages have
 * already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
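	/* max_cpages bounds each caching-change batch to one page worth of
	 * struct page pointers, mirroring NUM_PAGES_TO_ALLOC. */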

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool.
			 * Count the pages that did get allocated (walking
			 * new_pages, not the pool list) so npages stays
			 * accurate. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
#endif

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

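	/* In this port the pool machinery is compiled out (see #if 0 below),
	 * so pages go straight back to the system allocator; FreePage()
	 * stands in for __free_page(). */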
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
//			if (page_count(pages[i]) != 1)
//				pr_err("Erroneous page count. Leaking pages.\n");
			FreePage(pages[i]);
			pages[i] = NULL;
		}
	}
	return;

#if 0
	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
#endif

}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
//	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
//	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

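	/* Simplified allocation path for this port: AllocPage() stands in
	 * for alloc_page(), and the flags/caching arguments are ignored
	 * here (the original pool-backed path is kept under #if 0 below). */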
	for (r = 0; r < npages; ++r) {
		p = AllocPage();
		if (!p) {

			pr_err("Unable to allocate page\n");
			return -ENOMEM;
		}

		pages[r] = p;
	}
	return 0;

#if 0

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {

				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}
#endif

	return 0;
}

#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

#endif

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
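
/* Typical lifecycle, as a sketch: a driver's ttm_tt_populate hook calls
 * ttm_pool_populate() before a buffer is first bound, and its
 * ttm_tt_unpopulate hook calls ttm_pool_unpopulate() below when the
 * backing pages are released. */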

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);