Subversion Repositories Kolibri OS

Rev 6321
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom 
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include 

#include 
//#include 
//#include 
#include 
#include 
//#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#undef CONFIG_X86
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

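/**
 * Allocates one block that holds the page pointers plus the matching
 * cpu_address and dma_address arrays, then points cpu_address and
 * dma_address at their slices of that block.
 */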
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address) +
					  sizeof(*ttm->cpu_address));
	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}

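/*
 * Switch the linear kernel mapping of a single page between the cached,
 * write-combined and uncached states. Only meaningful on x86; the
 * fallback below is a no-op.
 */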
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

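/**
 * ttm_tt_set_placement_caching - apply the caching state implied by
 * placement flags.
 *
 * TTM_PL_FLAG_WC selects write-combined, TTM_PL_FLAG_UNCACHED selects
 * uncached, anything else falls back to cached.
 */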
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

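/**
 * ttm_tt_destroy - unbind, unpopulate and free a ttm_tt.
 *
 * Safe to call with NULL; the final release is delegated to the
 * backend's destroy() hook.
 */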
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

//   if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
//       ttm->swap_storage)
//       fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

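/**
 * ttm_tt_init - initialize a ttm_tt for a buffer of @size bytes.
 *
 * Fills in the bookkeeping fields and allocates the page directory.
 * Returns 0 on success or -ENOMEM if the directory cannot be allocated.
 */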
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

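/**
 * ttm_tt_fini - free the page directory allocated by ttm_tt_init().
 */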
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

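/**
 * ttm_dma_tt_init - initialize a ttm_dma_tt.
 *
 * Same bookkeeping as ttm_tt_init(), plus the pages_list and the combined
 * page/cpu_address/dma_address directory. Returns 0 on success or -ENOMEM
 * on allocation failure.
 */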
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

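/**
 * ttm_dma_tt_fini - free the combined directory allocated by
 * ttm_dma_tt_init() and clear the address pointers into it.
 */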
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->cpu_address = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

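/**
 * ttm_tt_unbind - unbind through the backend's unbind() hook and mark the
 * ttm tt_unbound. A no-op if the ttm is not currently bound.
 */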
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

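/**
 * ttm_tt_bind - populate the ttm via the driver's ttm_tt_populate() hook,
 * then bind it to @bo_mem via the backend's bind() hook.
 *
 * Returns 0 on success (or if already bound), -EINVAL for a NULL ttm, or
 * the error from the populate/bind callbacks.
 */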
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

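/*
 * The swap-in/swap-out paths below are compiled out in this port; they
 * rely on shmem-backed swap storage, which this port does not appear to
 * provide.
 */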
#if 0
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = file_inode(swap_storage)->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = file_inode(swap_storage)->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
#endif

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	/* No-op in this port: ttm pages are not given a page->mapping
	 * here, so there is nothing to clear. */
	(void)ttm;
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}