Subversion Repositories Kolibri OS

Rev 4569 Rev 5078
Line 39... Line 39...
39
//#include 
39
//#include 
40
#include 
40
#include 
41
#include 
41
#include 
42
#include <linux/seq_file.h> /* for seq_printf */
42
#include <linux/seq_file.h> /* for seq_printf */
43
#include 
43
#include 
44
#include 
44
//#include 
Line 45... Line 45...
45
 
45
 
Line 46... Line 46...
46
//#include 
46
//#include 
47
 
47
 
Line 56... Line 56...
56
#define SMALL_ALLOCATION		16
56
#define SMALL_ALLOCATION		16
57
#define FREE_ALL_PAGES			(~0U)
57
#define FREE_ALL_PAGES			(~0U)
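/* Descriptive note: ~0U is UINT_MAX and serves as a sentinel meaning
 * "drain the pool completely"; ttm_page_alloc_fini() passes it to
 * ttm_page_pool_free(), whose loop only decrements nr_free when the
 * value is not FREE_ALL_PAGES. */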
58
/* times are in msecs */
58
/* times are in msecs */
59
#define PAGE_FREE_INTERVAL		1000
59
#define PAGE_FREE_INTERVAL		1000
Line 60... Line -...
60
 
-
 
61
#define pr_err(fmt, ...) \
-
 
62
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
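/* Descriptive note: a local stand-in for the kernel's pr_err() helper;
 * it forwards to printk() at KERN_ERR severity, with pr_fmt() being the
 * conventional hook for a per-file message prefix. */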
-
 
63
 
-
 
64
 
-
 
65
 
-
 
66
#if 0
60
 
67
/**
61
/**
68
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
62
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
69
 *
63
 *
70
 * @lock: Protects the shared pool from concurrent access. Must be used with
64
 * @lock: Protects the shared pool from concurrent access. Must be used with
Line 113... Line 107...
113
 *
107
 *
114
 * @pools: All pool objects in use.
108
 * @pools: All pool objects in use.
115
 **/
109
 **/
116
struct ttm_pool_manager {
110
struct ttm_pool_manager {
117
	struct kobject		kobj;
111
	struct kobject		kobj;
118
	struct shrinker		mm_shrink;
-
 
119
	struct ttm_pool_opts	options;
112
	struct ttm_pool_opts	options;
Line 120... Line 113...
120
 
113
 
121
	union {
114
	union {
122
		struct ttm_page_pool	pools[NUM_POOLS];
115
		struct ttm_page_pool	pools[NUM_POOLS];
Line 127... Line 120...
127
			struct ttm_page_pool	uc_pool_dma32;
120
			struct ttm_page_pool	uc_pool_dma32;
128
		} ;
121
		} ;
129
	};
122
	};
130
};
123
};
Line 131... Line -...
131
 
-
 
132
static struct attribute ttm_page_pool_max = {
-
 
133
	.name = "pool_max_size",
-
 
134
	.mode = S_IRUGO | S_IWUSR
-
 
135
};
-
 
136
static struct attribute ttm_page_pool_small = {
-
 
137
	.name = "pool_small_allocation",
-
 
138
	.mode = S_IRUGO | S_IWUSR
-
 
139
};
-
 
140
static struct attribute ttm_page_pool_alloc_size = {
-
 
141
	.name = "pool_allocation_size",
-
 
142
	.mode = S_IRUGO | S_IWUSR
-
 
143
};
-
 
144
 
-
 
145
static struct attribute *ttm_pool_attrs[] = {
-
 
146
	&ttm_page_pool_max,
-
 
147
	&ttm_page_pool_small,
-
 
148
	&ttm_page_pool_alloc_size,
-
 
149
	NULL
-
 
150
};
-
 
151
 
-
 
152
static void ttm_pool_kobj_release(struct kobject *kobj)
-
 
153
{
-
 
154
	struct ttm_pool_manager *m =
-
 
155
		container_of(kobj, struct ttm_pool_manager, kobj);
-
 
156
	kfree(m);
-
 
157
}
-
 
158
 
-
 
159
static ssize_t ttm_pool_store(struct kobject *kobj,
-
 
160
		struct attribute *attr, const char *buffer, size_t size)
-
 
161
{
-
 
162
	struct ttm_pool_manager *m =
-
 
163
		container_of(kobj, struct ttm_pool_manager, kobj);
-
 
164
	int chars;
-
 
165
	unsigned val;
-
 
166
	chars = sscanf(buffer, "%u", &val);
-
 
167
	if (chars == 0)
-
 
168
		return size;
-
 
169
 
-
 
170
	/* Convert kb to number of pages */
-
 
171
	val = val / (PAGE_SIZE >> 10);
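	/* Descriptive note (assuming 4 KiB pages): PAGE_SIZE >> 10 is the page
	 * size in KiB, i.e. 4, so a sysfs value of 64 KiB becomes 64 / 4 = 16
	 * pages; ttm_pool_show() multiplies by the same factor to convert back. */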
-
 
172
 
-
 
173
	if (attr == &ttm_page_pool_max)
-
 
174
		m->options.max_size = val;
-
 
175
	else if (attr == &ttm_page_pool_small)
-
 
176
		m->options.small = val;
-
 
177
	else if (attr == &ttm_page_pool_alloc_size) {
-
 
178
		if (val > NUM_PAGES_TO_ALLOC*8) {
-
 
179
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
-
 
180
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-
 
181
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-
 
182
			return size;
-
 
183
		} else if (val > NUM_PAGES_TO_ALLOC) {
-
 
184
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
-
 
185
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
-
 
186
		}
-
 
187
		m->options.alloc_size = val;
-
 
188
	}
-
 
189
 
-
 
190
	return size;
-
 
191
}
-
 
192
 
-
 
193
static ssize_t ttm_pool_show(struct kobject *kobj,
-
 
194
		struct attribute *attr, char *buffer)
-
 
195
{
-
 
196
	struct ttm_pool_manager *m =
-
 
197
		container_of(kobj, struct ttm_pool_manager, kobj);
-
 
198
	unsigned val = 0;
-
 
199
 
-
 
200
	if (attr == &ttm_page_pool_max)
-
 
201
		val = m->options.max_size;
-
 
202
	else if (attr == &ttm_page_pool_small)
-
 
203
		val = m->options.small;
-
 
204
	else if (attr == &ttm_page_pool_alloc_size)
-
 
205
		val = m->options.alloc_size;
-
 
206
 
-
 
207
	val = val * (PAGE_SIZE >> 10);
-
 
208
 
-
 
209
	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-
 
210
}
-
 
211
 
-
 
212
static const struct sysfs_ops ttm_pool_sysfs_ops = {
-
 
213
	.show = &ttm_pool_show,
-
 
214
	.store = &ttm_pool_store,
-
 
215
};
-
 
216
 
-
 
217
static struct kobj_type ttm_pool_kobj_type = {
-
 
218
	.release = &ttm_pool_kobj_release,
-
 
219
	.sysfs_ops = &ttm_pool_sysfs_ops,
-
 
220
	.default_attrs = ttm_pool_attrs,
-
 
Line 221... Line 124...
221
};
124
 
Line 222... Line -...
222
 
-
 
223
static struct ttm_pool_manager *_manager;
-
 
224
 
-
 
225
#ifndef CONFIG_X86
-
 
226
static int set_pages_array_wb(struct page **pages, int addrinarray)
-
 
227
{
-
 
228
#ifdef TTM_HAS_AGP
-
 
229
	int i;
-
 
230
 
-
 
231
	for (i = 0; i < addrinarray; i++)
-
 
232
		unmap_page_from_agp(pages[i]);
-
 
233
#endif
-
 
234
	return 0;
-
 
235
}
-
 
236
 
-
 
237
static int set_pages_array_wc(struct page **pages, int addrinarray)
-
 
238
{
-
 
239
#ifdef TTM_HAS_AGP
-
 
240
	int i;
-
 
241
 
-
 
242
	for (i = 0; i < addrinarray; i++)
-
 
243
		map_page_into_agp(pages[i]);
-
 
244
#endif
-
 
245
	return 0;
-
 
246
}
-
 
247
 
-
 
248
static int set_pages_array_uc(struct page **pages, int addrinarray)
-
 
249
{
-
 
250
#ifdef TTM_HAS_AGP
-
 
251
	int i;
-
 
252
 
-
 
253
	for (i = 0; i < addrinarray; i++)
-
 
254
		map_page_into_agp(pages[i]);
-
 
255
#endif
-
 
Line 256... Line 125...
256
	return 0;
125
 
257
}
126
static struct ttm_pool_manager *_manager;
258
#endif
127
 
259
 
128
 
Line 280... Line 149...
280
 
149
 
281
/* set memory back to wb and free the pages. */
150
/* set memory back to wb and free the pages. */
282
static void ttm_pages_put(struct page *pages[], unsigned npages)
151
static void ttm_pages_put(struct page *pages[], unsigned npages)
283
{
152
{
284
	unsigned i;
-
 
285
	if (set_pages_array_wb(pages, npages))
-
 
286
		pr_err("Failed to set %d pages to wb!\n", npages);
153
	unsigned i;
287
	for (i = 0; i < npages; ++i)
154
	for (i = 0; i < npages; ++i)
288
		__free_page(pages[i]);
155
		__free_page(pages[i]);
Line 289... Line 156...
289
}
156
}
Line 293... Line 160...
293
{
160
{
294
	pool->npages -= freed_pages;
161
	pool->npages -= freed_pages;
295
	pool->nfrees += freed_pages;
162
	pool->nfrees += freed_pages;
296
}
163
}
Line 297... Line -...
297
 
-
 
298
/**
-
 
299
 * Free pages from pool.
-
 
300
 *
-
 
301
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
-
 
302
 * number of pages in one go.
-
 
303
 *
-
 
304
 * @pool: to free the pages from
-
 
305
 * @free_all: If set to true will free all pages in pool
-
 
306
 **/
-
 
307
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
-
 
308
{
-
 
309
	unsigned long irq_flags;
-
 
310
	struct page *p;
-
 
311
	struct page **pages_to_free;
-
 
312
	unsigned freed_pages = 0,
-
 
313
		 npages_to_free = nr_free;
-
 
314
 
-
 
315
	if (NUM_PAGES_TO_ALLOC < nr_free)
-
 
316
		npages_to_free = NUM_PAGES_TO_ALLOC;
-
 
317
 
-
 
318
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-
 
319
			GFP_KERNEL);
-
 
320
	if (!pages_to_free) {
-
 
321
		pr_err("Failed to allocate memory for pool free operation\n");
-
 
322
		return 0;
-
 
323
	}
-
 
324
 
-
 
325
restart:
-
 
326
	spin_lock_irqsave(&pool->lock, irq_flags);
-
 
327
 
-
 
328
	list_for_each_entry_reverse(p, &pool->list, lru) {
-
 
329
		if (freed_pages >= npages_to_free)
-
 
330
			break;
-
 
331
 
-
 
332
		pages_to_free[freed_pages++] = p;
-
 
333
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
-
 
334
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
 
335
			/* remove range of pages from the pool */
-
 
336
			__list_del(p->lru.prev, &pool->list);
-
 
337
 
-
 
338
			ttm_pool_update_free_locked(pool, freed_pages);
-
 
339
			/**
-
 
340
			 * Because changing page caching is costly
-
 
341
			 * we unlock the pool to prevent stalling.
-
 
342
			 */
-
 
343
			spin_unlock_irqrestore(&pool->lock, irq_flags);
-
 
344
 
-
 
345
			ttm_pages_put(pages_to_free, freed_pages);
-
 
346
			if (likely(nr_free != FREE_ALL_PAGES))
-
 
347
				nr_free -= freed_pages;
-
 
348
 
-
 
349
			if (NUM_PAGES_TO_ALLOC >= nr_free)
-
 
350
				npages_to_free = nr_free;
-
 
351
			else
-
 
352
				npages_to_free = NUM_PAGES_TO_ALLOC;
-
 
353
 
-
 
354
			freed_pages = 0;
-
 
355
 
-
 
356
			/* free all so restart the processing */
-
 
357
			if (nr_free)
-
 
358
				goto restart;
-
 
359
 
-
 
360
			/* Not allowed to fall through or break because
-
 
361
			 * the code that follows runs under the spinlock while we are
-
 
362
			 * outside of it here.
-
 
363
			 */
-
 
364
			goto out;
-
 
365
 
-
 
366
		}
-
 
367
	}
-
 
368
 
-
 
369
	/* remove range of pages from the pool */
-
 
370
	if (freed_pages) {
-
 
371
		__list_del(&p->lru, &pool->list);
-
 
372
 
-
 
373
		ttm_pool_update_free_locked(pool, freed_pages);
-
 
374
		nr_free -= freed_pages;
-
 
Line 375... Line -...
375
	}
-
 
Line 376... Line -...
376
 
-
 
377
	spin_unlock_irqrestore(&pool->lock, irq_flags);
-
 
378
 
-
 
379
	if (freed_pages)
-
 
380
		ttm_pages_put(pages_to_free, freed_pages);
-
 
381
out:
-
 
382
	kfree(pages_to_free);
-
 
383
	return nr_free;
-
 
384
}
-
 
385
 
-
 
386
/**
-
 
387
 * Callback for mm to request the pool to reduce the number of pages held.
-
 
388
 *
-
 
389
 * XXX: (dchinner) Deadlock warning!
-
 
390
 *
-
 
391
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
-
 
392
 * this can deadlock when called with a sc->gfp_mask that is not equal to
-
 
393
 * GFP_KERNEL.
-
 
394
 *
-
 
395
 * This code is crying out for a shrinker per pool....
-
 
396
 */
-
 
397
static unsigned long
-
 
398
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-
 
399
{
-
 
400
	static atomic_t start_pool = ATOMIC_INIT(0);
-
 
401
	unsigned i;
-
 
402
	unsigned pool_offset = atomic_add_return(1, &start_pool);
-
 
403
	struct ttm_page_pool *pool;
-
 
404
	int shrink_pages = sc->nr_to_scan;
-
 
405
	unsigned long freed = 0;
-
 
406
 
-
 
407
	pool_offset = pool_offset % NUM_POOLS;
-
 
408
	/* select start pool in round robin fashion */
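	/* Descriptive note: start_pool is bumped atomically on every call, so
	 * with the four pools set up in ttm_page_alloc_init() successive scans
	 * start at index 1, 2, 3, 0, 1, ... and no single pool absorbs all of
	 * the shrinking. */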
-
 
409
	for (i = 0; i < NUM_POOLS; ++i) {
-
 
410
		unsigned nr_free = shrink_pages;
-
 
411
		if (shrink_pages == 0)
-
 
412
			break;
-
 
413
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-
 
414
		shrink_pages = ttm_page_pool_free(pool, nr_free);
-
 
415
		freed += nr_free - shrink_pages;
-
 
416
	}
-
 
417
	return freed;
-
 
418
}
-
 
419
 
-
 
420
 
-
 
421
static unsigned long
-
 
422
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-
 
423
{
-
 
424
	unsigned i;
-
 
425
	unsigned long count = 0;
-
 
426
 
-
 
427
	for (i = 0; i < NUM_POOLS; ++i)
-
 
428
		count += _manager->pools[i].npages;
-
 
429
 
-
 
430
	return count;
-
 
431
}
-
 
432
 
-
 
433
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-
 
434
{
-
 
435
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
-
 
436
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
-
 
437
	manager->mm_shrink.seeks = 1;
-
 
438
	register_shrinker(&manager->mm_shrink);
-
 
439
}
-
 
440
 
-
 
441
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-
 
442
{
-
 
443
	unregister_shrinker(&manager->mm_shrink);
-
 
444
}
-
 
445
 
-
 
446
static int ttm_set_pages_caching(struct page **pages,
-
 
447
		enum ttm_caching_state cstate, unsigned cpages)
-
 
448
{
-
 
449
	int r = 0;
-
 
450
	/* Set page caching */
-
 
451
	switch (cstate) {
-
 
452
	case tt_uncached:
-
 
453
		r = set_pages_array_uc(pages, cpages);
-
 
454
		if (r)
-
 
455
			pr_err("Failed to set %d pages to uc!\n", cpages);
-
 
456
		break;
-
 
457
	case tt_wc:
-
 
458
		r = set_pages_array_wc(pages, cpages);
-
 
459
		if (r)
-
 
460
			pr_err("Failed to set %d pages to wc!\n", cpages);
-
 
461
		break;
-
 
462
	default:
-
 
463
		break;
-
 
464
	}
-
 
465
	return r;
-
 
466
}
-
 
467
 
-
 
468
/**
-
 
469
 * Free the pages that failed to change the caching state. If there are
-
 
470
 * any pages that have already changed their caching state, put them back in the
-
 
471
 * pool.
-
 
472
 */
-
 
473
static void ttm_handle_caching_state_failure(struct list_head *pages,
-
 
474
		int ttm_flags, enum ttm_caching_state cstate,
-
 
475
		struct page **failed_pages, unsigned cpages)
-
 
476
{
-
 
477
	unsigned i;
-
 
478
	/* Failed pages have to be freed */
-
 
479
	for (i = 0; i < cpages; ++i) {
-
 
480
		list_del(&failed_pages[i]->lru);
-
 
481
		__free_page(failed_pages[i]);
-
 
482
	}
-
 
483
}
-
 
484
 
-
 
485
/**
-
 
486
 * Allocate new pages with correct caching.
-
 
487
 *
-
 
488
 * This function is reentrant if the caller updates count depending on the number of
-
 
489
 * pages returned in the pages array.
-
 
490
 */
-
 
491
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-
 
492
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
-
 
493
{
-
 
494
	struct page **caching_array;
-
 
495
	struct page *p;
-
 
496
	int r = 0;
-
 
497
	unsigned i, cpages;
-
 
498
	unsigned max_cpages = min(count,
-
 
499
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
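	/* Descriptive note (assuming 4 KiB pages and 8-byte pointers):
	 * PAGE_SIZE / sizeof(struct page *) is 512, so the caching_array
	 * allocated below never exceeds one page of pointer storage. */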
-
 
500
 
-
 
501
	/* allocate array for page caching change */
-
 
502
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
-
 
503
 
-
 
504
	if (!caching_array) {
-
 
505
		pr_err("Unable to allocate table for new pages\n");
-
 
506
		return -ENOMEM;
-
 
507
	}
-
 
508
 
-
 
509
	for (i = 0, cpages = 0; i < count; ++i) {
-
 
510
		p = alloc_page(gfp_flags);
-
 
511
 
-
 
512
		if (!p) {
-
 
513
			pr_err("Unable to get page %u\n", i);
-
 
514
 
-
 
515
			/* store already allocated pages in the pool after
-
 
516
			 * setting the caching state */
-
 
517
			if (cpages) {
-
 
518
				r = ttm_set_pages_caching(caching_array,
-
 
519
							  cstate, cpages);
-
 
520
				if (r)
-
 
521
					ttm_handle_caching_state_failure(pages,
-
 
522
						ttm_flags, cstate,
-
 
523
						caching_array, cpages);
-
 
524
			}
-
 
525
			r = -ENOMEM;
-
 
526
			goto out;
-
 
527
		}
-
 
528
 
-
 
529
#ifdef CONFIG_HIGHMEM
-
 
530
		/* gfp flags of a highmem page should never be dma32, so
-
 
531
		 * we should be fine in such case
-
 
532
		 */
-
 
533
		if (!PageHighMem(p))
-
 
534
#endif
-
 
535
		{
-
 
536
			caching_array[cpages++] = p;
-
 
537
			if (cpages == max_cpages) {
-
 
538
 
-
 
539
				r = ttm_set_pages_caching(caching_array,
-
 
540
						cstate, cpages);
-
 
541
				if (r) {
-
 
542
					ttm_handle_caching_state_failure(pages,
-
 
543
						ttm_flags, cstate,
-
 
544
						caching_array, cpages);
-
 
545
					goto out;
-
 
546
				}
-
 
547
				cpages = 0;
-
 
548
			}
-
 
549
		}
-
 
550
 
-
 
551
		list_add(&p->lru, pages);
-
 
552
	}
-
 
553
 
-
 
554
	if (cpages) {
-
 
555
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
-
 
556
		if (r)
-
 
557
			ttm_handle_caching_state_failure(pages,
-
 
558
					ttm_flags, cstate,
-
 
559
					caching_array, cpages);
-
 
560
	}
-
 
561
out:
-
 
562
	kfree(caching_array);
-
 
563
 
-
 
564
	return r;
-
 
565
}
-
 
566
 
-
 
567
/**
-
 
568
 * Fill the given pool if there aren't enough pages and the requested number of
-
 
569
 * pages is small.
-
 
570
 */
-
 
571
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
-
 
572
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
-
 
573
		unsigned long *irq_flags)
-
 
574
{
-
 
575
	struct page *p;
-
 
576
	int r;
-
 
577
	unsigned cpages = 0;
-
 
578
	/**
-
 
579
	 * Only allow one pool fill operation at a time.
-
 
580
	 * If the pool doesn't have enough pages for the allocation, new pages are
-
 
581
	 * allocated from outside the pool.
-
 
582
	 */
-
 
583
	if (pool->fill_lock)
-
 
584
		return;
-
 
585
 
-
 
586
	pool->fill_lock = true;
-
 
587
 
-
 
588
	/* If the allocation request is small and there are not enough
-
 
589
	 * pages in the pool, we fill it up first. */
-
 
590
	if (count < _manager->options.small
-
 
591
		&& count > pool->npages) {
-
 
592
		struct list_head new_pages;
-
 
593
		unsigned alloc_size = _manager->options.alloc_size;
-
 
594
 
-
 
595
		/**
-
 
596
		 * Can't change page caching if in irqsave context. We have to
-
 
597
		 * drop the pool->lock.
-
 
598
		 */
-
 
599
		spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
 
600
 
-
 
601
		INIT_LIST_HEAD(&new_pages);
-
 
602
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-
 
603
				cstate,	alloc_size);
-
 
604
		spin_lock_irqsave(&pool->lock, *irq_flags);
-
 
605
 
-
 
606
		if (!r) {
-
 
607
			list_splice(&new_pages, &pool->list);
-
 
608
			++pool->nrefills;
-
 
609
			pool->npages += alloc_size;
-
 
610
		} else {
-
 
611
			pr_err("Failed to fill pool (%p)\n", pool);
-
 
612
			/* If we have any pages left put them to the pool. */
-
 
613
			list_for_each_entry(p, &pool->list, lru) {
-
 
614
				++cpages;
-
 
615
			}
-
 
616
			list_splice(&new_pages, &pool->list);
-
 
617
			pool->npages += cpages;
-
 
618
		}
-
 
619
 
-
 
620
	}
-
 
621
	pool->fill_lock = false;
-
 
622
}
-
 
623
 
-
 
624
/**
-
 
625
 * Cut 'count' number of pages from the pool and put them on the return list.
-
 
626
 *
-
 
627
 * @return count of pages still required to fulfill the request.
-
 
628
 */
-
 
629
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-
 
630
					struct list_head *pages,
-
 
631
					int ttm_flags,
-
 
632
					enum ttm_caching_state cstate,
-
 
633
					unsigned count)
-
 
634
{
-
 
635
	unsigned long irq_flags;
-
 
636
	struct list_head *p;
-
 
637
	unsigned i;
-
 
638
 
-
 
639
	spin_lock_irqsave(&pool->lock, irq_flags);
-
 
640
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
-
 
641
 
-
 
642
	if (count >= pool->npages) {
-
 
643
		/* take all pages from the pool */
-
 
644
		list_splice_init(&pool->list, pages);
-
 
645
		count -= pool->npages;
-
 
646
		pool->npages = 0;
-
 
647
		goto out;
-
 
648
	}
-
 
649
	/* Find the last page to include for the requested number of pages. Walk
-
 
650
	 * the list from whichever end is closer to halve the search space. */
-
 
651
	if (count <= pool->npages/2) {
-
 
652
		i = 0;
-
 
653
		list_for_each(p, &pool->list) {
-
 
654
			if (++i == count)
-
 
655
				break;
-
 
656
		}
-
 
657
	} else {
-
 
658
		i = pool->npages + 1;
-
 
659
		list_for_each_prev(p, &pool->list) {
-
 
660
			if (--i == count)
-
 
661
				break;
-
 
662
		}
-
 
663
	}
-
 
664
	/* Cut 'count' number of pages from the pool */
-
 
665
	list_cut_position(pages, &pool->list, p);
-
 
666
	pool->npages -= count;
-
 
667
	count = 0;
-
 
668
out:
-
 
669
	spin_unlock_irqrestore(&pool->lock, irq_flags);
-
 
Line 670... Line 164...
670
	return count;
164
 
671
}
165
 
672
#endif
166
 
673
 
167
 
674
/* Put all pages in pages list to correct pool to wait for reuse */
168
/* Put all pages in pages list to correct pool to wait for reuse */
675
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
169
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
676
			  enum ttm_caching_state cstate)
170
			  enum ttm_caching_state cstate)
Line 677... Line -...
677
{
-
 
678
	unsigned long irq_flags;
-
 
679
//   struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-
 
680
	unsigned i;
-
 
681
 
-
 
682
    for (i = 0; i < npages; i++) {
-
 
683
        if (pages[i]) {
171
{
684
//            if (page_count(pages[i]) != 1)
-
 
685
//                pr_err("Erroneous page count. Leaking pages.\n");
-
 
686
            FreePage(pages[i]);
-
 
687
            pages[i] = NULL;
-
 
688
        }
-
 
689
    }
172
	unsigned long irq_flags;
690
    return;
173
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
691
 
174
	unsigned i;
692
#if 0
-
 
693
	if (pool == NULL) {
-
 
694
		/* No pool for this memory type so free the pages */
175
 
695
		for (i = 0; i < npages; i++) {
176
	if (1) {
696
			if (pages[i]) {
177
		/* No pool for this memory type so free the pages */
697
				if (page_count(pages[i]) != 1)
178
		for (i = 0; i < npages; i++) {
698
					pr_err("Erroneous page count. Leaking pages.\n");
179
			if (pages[i]) {
699
				__free_page(pages[i]);
180
				__free_page(pages[i]);
Line 700... Line -...
700
				pages[i] = NULL;
-
 
701
			}
-
 
702
		}
-
 
703
		return;
-
 
704
	}
-
 
705
 
-
 
706
	spin_lock_irqsave(&pool->lock, irq_flags);
-
 
707
	for (i = 0; i < npages; i++) {
-
 
708
		if (pages[i]) {
-
 
709
			if (page_count(pages[i]) != 1)
-
 
710
				pr_err("Erroneous page count. Leaking pages.\n");
-
 
711
			list_add_tail(&pages[i]->lru, &pool->list);
-
 
712
			pages[i] = NULL;
-
 
713
			pool->npages++;
-
 
714
		}
-
 
715
	}
-
 
716
	/* Check that we don't go over the pool limit */
-
 
717
	npages = 0;
-
 
718
	if (pool->npages > _manager->options.max_size) {
-
 
719
		npages = pool->npages - _manager->options.max_size;
-
 
720
		/* free at least NUM_PAGES_TO_ALLOC number of pages
-
 
721
		 * to reduce calls to set_memory_wb */
-
 
722
		if (npages < NUM_PAGES_TO_ALLOC)
-
 
723
			npages = NUM_PAGES_TO_ALLOC;
-
 
724
	}
181
				pages[i] = NULL;
Line 725... Line 182...
725
	spin_unlock_irqrestore(&pool->lock, irq_flags);
182
			}
726
	if (npages)
183
		}
727
		ttm_page_pool_free(pool, npages);
184
		return;
728
#endif
185
	}
729
 
186
 
730
}
187
}
731
 
188
 
732
/*
189
/*
733
 * On success pages list will hold count number of correctly
190
 * On success pages list will hold count number of correctly
734
 * cached pages.
191
 * cached pages.
735
 */
192
 */
736
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
193
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
737
			 enum ttm_caching_state cstate)
194
			 enum ttm_caching_state cstate)
Line 738... Line -...
738
{
-
 
739
//   struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-
 
740
	struct list_head plist;
-
 
741
	struct page *p = NULL;
-
 
742
//   gfp_t gfp_flags = GFP_USER;
-
 
743
	unsigned count;
-
 
744
	int r;
-
 
745
 
-
 
746
    for (r = 0; r < npages; ++r) {
-
 
747
        p = AllocPage();
-
 
748
        if (!p) {
-
 
749
 
-
 
750
            pr_err("Unable to allocate page\n");
-
 
751
            return -ENOMEM;
-
 
752
        }
-
 
753
 
-
 
754
        pages[r] = p;
-
 
755
    }
-
 
Line 756... Line 195...
756
    return 0;
195
{
757
 
196
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
758
#if 0
-
 
759
 
-
 
760
 
-
 
761
	/* set zero flag for page allocation if required */
-
 
Line 762... Line 197...
762
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
197
	struct list_head plist;
763
		gfp_flags |= __GFP_ZERO;
198
	struct page *p = NULL;
764
 
199
	gfp_t gfp_flags = 0;
Line 765... Line -...
765
	/* No pool for cached pages */
-
 
766
	if (pool == NULL) {
200
	unsigned count;
767
		if (flags & TTM_PAGE_FLAG_DMA32)
201
	int r;
Line 768... Line 202...
768
			gfp_flags |= GFP_DMA32;
202
 
769
		else
203
	
770
			gfp_flags |= GFP_HIGHUSER;
204
	/* No pool for cached pages */
771
 
205
	if (1) {
Line 772... Line -...
772
		for (r = 0; r < npages; ++r) {
-
 
773
			p = alloc_page(gfp_flags);
-
 
774
			if (!p) {
-
 
775
 
-
 
776
				pr_err("Unable to allocate page\n");
-
 
777
				return -ENOMEM;
-
 
778
			}
-
 
779
 
-
 
780
			pages[r] = p;
-
 
781
		}
-
 
782
		return 0;
-
 
783
	}
-
 
784
 
-
 
785
	/* combine zero flag to pool flags */
-
 
786
	gfp_flags |= pool->gfp_flags;
-
 
787
 
-
 
788
	/* First we take pages from the pool */
-
 
789
	INIT_LIST_HEAD(&plist);
-
 
790
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
-
 
791
	count = 0;
-
 
Line 792... Line -...
792
	list_for_each_entry(p, &plist, lru) {
-
 
793
		pages[count++] = p;
-
 
794
	}
-
 
795
 
-
 
796
	/* clear the pages coming from the pool if requested */
-
 
797
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-
 
798
		list_for_each_entry(p, &plist, lru) {
-
 
799
			if (PageHighMem(p))
-
 
800
				clear_highpage(p);
-
 
801
			else
-
 
802
				clear_page(page_address(p));
-
 
803
		}
-
 
804
	}
-
 
805
 
-
 
806
	/* If the pool didn't have enough pages, allocate new ones. */
-
 
807
	if (npages > 0) {
-
 
808
		/* ttm_alloc_new_pages doesn't reference pool so we can run
-
 
809
		 * multiple requests in parallel.
-
 
810
		 **/
-
 
Line 811... Line 206...
811
		INIT_LIST_HEAD(&plist);
206
 
812
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
207
		for (r = 0; r < npages; ++r) {
Line 813... Line -...
813
		list_for_each_entry(p, &plist, lru) {
-
 
814
			pages[count++] = p;
208
			p = alloc_page(gfp_flags);
815
		}
209
			if (!p) {
816
		if (r) {
210
 
817
			/* If there are any pages in the list, put them back in
211
				return -ENOMEM;
818
			 * the pool. */
212
			}
819
			pr_err("Failed to allocate extra pages for large request\n");
213
 
Line 842... Line 236...
842
{
236
{
843
	int ret;
237
	int ret;
Line 844... Line 238...
844
 
238
 
Line 845... Line -...
845
	WARN_ON(_manager);
-
 
846
 
-
 
847
	pr_info("Initializing pool allocator\n");
239
	WARN_ON(_manager);
Line 848... Line -...
848
 
-
 
849
	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
-
 
850
 
-
 
851
	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
-
 
852
 
-
 
853
	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
-
 
854
 
-
 
855
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
-
 
856
				  GFP_USER | GFP_DMA32, "wc dma");
-
 
857
 
-
 
858
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
240
 
859
				  GFP_USER | GFP_DMA32, "uc dma");
241
	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
860
 
242
 
Line 861... Line -...
861
	_manager->options.max_size = max_pages;
-
 
862
	_manager->options.small = SMALL_ALLOCATION;
-
 
863
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
 
864
 
-
 
865
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
-
 
866
				   &glob->kobj, "pool");
-
 
867
	if (unlikely(ret != 0)) {
-
 
868
		kobject_put(&_manager->kobj);
-
 
869
		_manager = NULL;
-
 
870
		return ret;
-
 
871
	}
243
	_manager->options.max_size = max_pages;
872
 
244
	_manager->options.small = SMALL_ALLOCATION;
Line 873... Line 245...
873
	ttm_pool_mm_shrink_init(_manager);
245
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
874
 
246
 
875
	return 0;
247
	return 0;
Line 876... Line -...
876
}
-
 
877
 
-
 
878
void ttm_page_alloc_fini(void)
-
 
879
{
-
 
880
	int i;
-
 
881
 
-
 
882
	pr_info("Finalizing pool allocator\n");
-
 
883
	ttm_pool_mm_shrink_fini(_manager);
248
}
884
 
249
 
Line 885... Line -...
885
	for (i = 0; i < NUM_POOLS; ++i)
-
 
886
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
-
 
887
 
250
void ttm_page_alloc_fini(void)
888
	kobject_put(&_manager->kobj);
251
{
889
	_manager = NULL;
252
	int i;
890
}
253
 
891
 
254
	_manager = NULL;
Line 906... Line 269...
906
				    ttm->caching_state);
269
				    ttm->caching_state);
907
		if (ret != 0) {
270
		if (ret != 0) {
908
			ttm_pool_unpopulate(ttm);
271
			ttm_pool_unpopulate(ttm);
909
			return -ENOMEM;
272
			return -ENOMEM;
910
		}
273
		}
911
 
-
 
912
	}
274
	}
913
 
-
 
914
	ttm->state = tt_unbound;
275
	ttm->state = tt_unbound;
915
	return 0;
276
	return 0;
916
}
277
}
917
EXPORT_SYMBOL(ttm_pool_populate);
278
EXPORT_SYMBOL(ttm_pool_populate);
Line 920... Line 281...
920
{
281
{
921
	unsigned i;
282
	unsigned i;
Line 922... Line 283...
922
 
283
 
923
	for (i = 0; i < ttm->num_pages; ++i) {
284
	for (i = 0; i < ttm->num_pages; ++i) {
924
		if (ttm->pages[i]) {
-
 
925
			ttm_mem_global_free_page(ttm->glob->mem_glob,
-
 
926
						 ttm->pages[i]);
285
		if (ttm->pages[i]) {
927
			ttm_put_pages(&ttm->pages[i], 1,
286
			ttm_put_pages(&ttm->pages[i], 1,
928
				      ttm->page_flags,
287
				      ttm->page_flags,
929
				      ttm->caching_state);
288
				      ttm->caching_state);
930
		}
289
		}