Subversion Repositories Kolibri OS

Rev

Rev 4569 | Rev 5271 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4075 Serge 1
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
30
 
31
#define pr_fmt(fmt) "[TTM] " fmt
32
 
33
#include 
34
 
35
#include 
36
//#include 
37
//#include 
38
#include 
39
//#include 
40
//#include 
41
#include 
42
#include 
43
//#include 
44
#include 
45
#include 
46
#include 
47
#include 
48
#include 
49
 
50
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * On allocation failure ttm->pages is left NULL; the caller
 * (ttm_tt_init) checks for that and reports -ENOMEM.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
57
 
58
/*
 * Allocate the page directory plus the per-page CPU and DMA address
 * arrays of a ttm_dma_tt in one drm_calloc_large() call.
 *
 * Each of the num_pages "elements" is sized to hold one page pointer,
 * one CPU address and one DMA address, and the three logical arrays
 * are laid out back to back inside the single allocation:
 *
 *   pages[0..n-1] | cpu_address[0..n-1] | dma_address[0..n-1]
 *
 * Consequently only ttm->ttm.pages must ever be freed (see
 * ttm_dma_tt_fini).  On failure ttm->ttm.pages is NULL and the caller
 * reports -ENOMEM.
 */
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address) +
					  sizeof(*ttm->cpu_address));
	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}
67
 
68
#ifdef CONFIG_X86
69
static inline int ttm_tt_set_page_caching(struct page *p,
70
					  enum ttm_caching_state c_old,
71
					  enum ttm_caching_state c_new)
72
{
73
	int ret = 0;
74
 
75
	if (PageHighMem(p))
76
		return 0;
77
 
78
	if (c_old != tt_cached) {
79
		/* p isn't in the default caching state, set it to
80
		 * writeback first to free its current memtype. */
81
 
82
		ret = set_pages_wb(p, 1);
83
		if (ret)
84
			return ret;
85
	}
86
 
87
	if (c_new == tt_wc)
88
		ret = set_memory_wc((unsigned long) page_address(p), 1);
89
	else if (c_new == tt_uncached)
90
		ret = set_pages_uc(p, 1);
91
 
92
	return ret;
93
}
94
#else /* CONFIG_X86 */
95
static inline int ttm_tt_set_page_caching(struct page *p,
96
					  enum ttm_caching_state c_old,
97
					  enum ttm_caching_state c_new)
98
{
99
	return 0;
100
}
101
#endif /* CONFIG_X86 */
102
 
103
/*
104
 * Change caching policy for the linear kernel map
105
 * for range of pages in a ttm.
106
 */
107
 
108
static int ttm_tt_set_caching(struct ttm_tt *ttm,
109
			      enum ttm_caching_state c_state)
110
{
111
	int i, j;
112
	struct page *cur_page;
113
	int ret;
114
 
115
	if (ttm->caching_state == c_state)
116
		return 0;
117
 
118
	if (ttm->state == tt_unpopulated) {
119
		/* Change caching but don't populate */
120
		ttm->caching_state = c_state;
121
		return 0;
122
	}
123
 
5078 serge 124
	if (ttm->caching_state == tt_cached)
125
		drm_clflush_pages(ttm->pages, ttm->num_pages);
4075 Serge 126
 
127
	for (i = 0; i < ttm->num_pages; ++i) {
128
		cur_page = ttm->pages[i];
129
		if (likely(cur_page != NULL)) {
130
			ret = ttm_tt_set_page_caching(cur_page,
131
						      ttm->caching_state,
132
						      c_state);
133
			if (unlikely(ret != 0))
134
				goto out_err;
135
		}
136
	}
137
 
138
	ttm->caching_state = c_state;
139
 
140
	return 0;
141
 
142
out_err:
143
	for (j = 0; j < i; ++j) {
144
		cur_page = ttm->pages[j];
145
		if (likely(cur_page != NULL)) {
146
			(void)ttm_tt_set_page_caching(cur_page, c_state,
147
						      ttm->caching_state);
148
		}
149
	}
150
 
151
	return ret;
152
}
153
 
154
/*
 * Apply the caching state implied by @placement: WC takes precedence
 * over UNCACHED; anything else means fully cached.
 */
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state = tt_cached;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
168
 
169
void ttm_tt_destroy(struct ttm_tt *ttm)
170
{
171
	if (unlikely(ttm == NULL))
172
		return;
173
 
174
	if (ttm->state == tt_bound) {
175
		ttm_tt_unbind(ttm);
176
	}
177
 
5078 serge 178
	if (ttm->state == tt_unbound)
179
		ttm_tt_unpopulate(ttm);
4075 Serge 180
 
181
//   if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
182
//       ttm->swap_storage)
183
//       fput(ttm->swap_storage);
184
 
185
	ttm->swap_storage = NULL;
186
	ttm->func->destroy(ttm);
187
}
188
 
189
/**
 * ttm_tt_init - initialize a page-backed ttm_tt
 *
 * @ttm:             the ttm_tt to initialize
 * @bdev:            owning buffer-object device (its glob is cached here)
 * @size:            size in bytes, rounded up to whole pages
 * @page_flags:      TTM_PAGE_FLAG_* bits, stored verbatim
 * @dummy_read_page: page supplied by the driver; stored, not interpreted
 *
 * The ttm starts in the tt_unpopulated, tt_cached state with no swap
 * storage.  Returns 0 on success or -ENOMEM if the page directory
 * cannot be allocated, in which case the ttm is destroyed again.
 */
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		/* pr_err keeps the "[TTM] " pr_fmt prefix, unlike raw printf */
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
211
 
212
/*
 * Release the page directory allocated by ttm_tt_alloc_page_directory().
 */
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
218
 
219
/**
 * ttm_dma_tt_init - initialize a ttm_dma_tt with combined address arrays
 *
 * @ttm_dma:         the ttm_dma_tt to initialize
 * @bdev:            owning buffer-object device (its glob is cached here)
 * @size:            size in bytes, rounded up to whole pages
 * @page_flags:      TTM_PAGE_FLAG_* bits, stored verbatim
 * @dummy_read_page: page supplied by the driver; stored, not interpreted
 *
 * Same as ttm_tt_init() for the embedded ttm, plus the pages_list head
 * and the combined pages/cpu_address/dma_address allocation (see
 * ttm_dma_tt_alloc_page_directory).  Returns 0 on success or -ENOMEM
 * if that allocation fails, in which case the ttm is destroyed again.
 */
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		/* pr_err keeps the "[TTM] " pr_fmt prefix, unlike raw printf */
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
244
 
245
/*
 * Release the combined allocation of a ttm_dma_tt.  cpu_address and
 * dma_address point into the same drm_calloc_large() block as
 * ttm->pages (see ttm_dma_tt_alloc_page_directory), so only
 * ttm->pages is actually freed; the other two are just cleared.
 */
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->cpu_address = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
255
 
256
void ttm_tt_unbind(struct ttm_tt *ttm)
257
{
258
	int ret;
259
 
260
	if (ttm->state == tt_bound) {
261
		ret = ttm->func->unbind(ttm);
262
		BUG_ON(ret);
263
		ttm->state = tt_unbound;
264
	}
265
}
266
 
267
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
268
{
269
	int ret = 0;
270
 
271
	if (!ttm)
272
		return -EINVAL;
273
 
274
	if (ttm->state == tt_bound)
275
		return 0;
276
 
277
	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
278
	if (ret)
279
		return ret;
280
 
281
	ret = ttm->func->bind(ttm, bo_mem);
282
	if (unlikely(ret != 0))
283
		return ret;
284
 
285
	ttm->state = tt_bound;
286
 
287
	return 0;
288
}
289
EXPORT_SYMBOL(ttm_tt_bind);
290
 
5078 serge 291
#if 0
4075 Serge 292
int ttm_tt_swapin(struct ttm_tt *ttm)
293
{
294
	struct address_space *swap_space;
295
	struct file *swap_storage;
296
	struct page *from_page;
297
	struct page *to_page;
298
	int i;
299
	int ret = -ENOMEM;
300
 
301
	swap_storage = ttm->swap_storage;
302
	BUG_ON(swap_storage == NULL);
303
 
304
	swap_space = file_inode(swap_storage)->i_mapping;
305
 
306
	for (i = 0; i < ttm->num_pages; ++i) {
307
		from_page = shmem_read_mapping_page(swap_space, i);
308
		if (IS_ERR(from_page)) {
309
			ret = PTR_ERR(from_page);
310
			goto out_err;
311
		}
312
		to_page = ttm->pages[i];
313
		if (unlikely(to_page == NULL))
314
			goto out_err;
315
 
316
		copy_highpage(to_page, from_page);
317
		page_cache_release(from_page);
318
	}
319
 
320
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
321
		fput(swap_storage);
322
	ttm->swap_storage = NULL;
323
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
324
 
325
	return 0;
326
out_err:
327
	return ret;
328
}
329
 
330
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
331
{
332
	struct address_space *swap_space;
333
	struct file *swap_storage;
334
	struct page *from_page;
335
	struct page *to_page;
336
	int i;
337
	int ret = -ENOMEM;
338
 
339
	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
340
	BUG_ON(ttm->caching_state != tt_cached);
341
 
342
	if (!persistent_swap_storage) {
343
		swap_storage = shmem_file_setup("ttm swap",
344
						ttm->num_pages << PAGE_SHIFT,
345
						0);
346
		if (unlikely(IS_ERR(swap_storage))) {
347
			pr_err("Failed allocating swap storage\n");
348
			return PTR_ERR(swap_storage);
349
		}
350
	} else
351
		swap_storage = persistent_swap_storage;
352
 
353
	swap_space = file_inode(swap_storage)->i_mapping;
354
 
355
	for (i = 0; i < ttm->num_pages; ++i) {
356
		from_page = ttm->pages[i];
357
		if (unlikely(from_page == NULL))
358
			continue;
359
		to_page = shmem_read_mapping_page(swap_space, i);
360
		if (unlikely(IS_ERR(to_page))) {
361
			ret = PTR_ERR(to_page);
362
			goto out_err;
363
		}
364
		copy_highpage(to_page, from_page);
365
		set_page_dirty(to_page);
366
		mark_page_accessed(to_page);
367
		page_cache_release(to_page);
368
	}
369
 
4569 Serge 370
	ttm_tt_unpopulate(ttm);
4075 Serge 371
	ttm->swap_storage = swap_storage;
372
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
373
	if (persistent_swap_storage)
374
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
375
 
376
	return 0;
377
out_err:
378
	if (!persistent_swap_storage)
379
		fput(swap_storage);
380
 
381
	return ret;
382
}
5078 serge 383
#endif
4075 Serge 384
 
5078 serge 385
/*
 * Hook called from ttm_tt_unpopulate before the driver releases the
 * pages.  In this port there is no per-page mapping state to clear
 * (presumably the upstream version walks ttm->pages here — TODO
 * confirm against the reference driver), so the body is intentionally
 * empty.  The previous stub declared unused locals (i, page) that only
 * generated compiler warnings; they are removed.
 */
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	(void)ttm;	/* nothing to do; silence unused-parameter warning */
}
4075 Serge 391
 
5078 serge 392
void ttm_tt_unpopulate(struct ttm_tt *ttm)
393
{
394
	if (ttm->state == tt_unpopulated)
395
		return;
396
 
397
	ttm_tt_clear_mapping(ttm);
398
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
399
}