Subversion Repositories Kolibri OS

Rev

Rev 2997 | Rev 5078 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2997 Rev 3764
1
/*
1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
4
 * Copyright 2009 Jerome Glisse.
5
 *
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
12
 *
13
 * The above copyright notice and this permission notice shall be included in
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
14
 * all copies or substantial portions of the Software.
15
 *
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
23
 *
24
 * Authors: Dave Airlie
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
25
 *          Alex Deucher
26
 *          Jerome Glisse
26
 *          Jerome Glisse
27
 */
27
 */
28
#ifndef __RADEON_OBJECT_H__
28
#ifndef __RADEON_OBJECT_H__
29
#define __RADEON_OBJECT_H__
29
#define __RADEON_OBJECT_H__
30
 
30
 
#include <drm/radeon_drm.h>  /* NOTE(review): header name lost in extraction — confirm against upstream radeon_object.h */
#include "radeon.h"
33
 
33
 
34
struct sg_table;
34
struct sg_table;
35
 
35
 
36
/**
36
/**
37
 * radeon_mem_type_to_domain - return domain corresponding to mem_type
37
 * radeon_mem_type_to_domain - return domain corresponding to mem_type
38
 * @mem_type:	ttm memory type
38
 * @mem_type:	ttm memory type
39
 *
39
 *
40
 * Returns corresponding domain of the ttm mem_type
40
 * Returns corresponding domain of the ttm mem_type
41
 */
41
 */
42
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
42
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
43
{
43
{
44
	switch (mem_type) {
44
	switch (mem_type) {
45
	case TTM_PL_VRAM:
45
	case TTM_PL_VRAM:
46
		return RADEON_GEM_DOMAIN_VRAM;
46
		return RADEON_GEM_DOMAIN_VRAM;
47
	case TTM_PL_TT:
47
	case TTM_PL_TT:
48
		return RADEON_GEM_DOMAIN_GTT;
48
		return RADEON_GEM_DOMAIN_GTT;
49
	case TTM_PL_SYSTEM:
49
	case TTM_PL_SYSTEM:
50
		return RADEON_GEM_DOMAIN_CPU;
50
		return RADEON_GEM_DOMAIN_CPU;
51
	default:
51
	default:
52
		break;
52
		break;
53
	}
53
	}
54
	return 0;
54
	return 0;
55
}
55
}
56
 
56
 
57
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
57
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
58
 
58
 
59
/*
 * radeon_bo_unreserve - release a previously reserved bo.
 *
 * No-op in this port: the ttm_bo_unreserve() call is disabled (see the
 * commented-out line below, as in the original tree).
 */
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
	(void)bo;
   //    ttm_bo_unreserve(&bo->tbo);
}
63
 
63
 
64
/**
64
/**
65
 * radeon_bo_gpu_offset - return GPU offset of bo
65
 * radeon_bo_gpu_offset - return GPU offset of bo
66
 * @bo:	radeon object for which we query the offset
66
 * @bo:	radeon object for which we query the offset
67
 *
67
 *
68
 * Returns current GPU offset of the object.
68
 * Returns current GPU offset of the object.
69
 *
69
 *
70
 * Note: object should either be pinned or reserved when calling this
70
 * Note: object should either be pinned or reserved when calling this
71
 * function, it might be useful to add check for this for debugging.
71
 * function, it might be useful to add check for this for debugging.
72
 */
72
 */
73
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
73
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
74
{
74
{
75
	return bo->tbo.offset;
75
	return bo->tbo.offset;
76
}
76
}
77
 
77
 
78
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
78
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
79
{
79
{
80
	return bo->tbo.num_pages << PAGE_SHIFT;
80
	return bo->tbo.num_pages << PAGE_SHIFT;
81
}
81
}
82
 
82
 
83
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
83
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
84
{
84
{
-
 
85
#ifdef __TTM__
-
 
86
	return ttm_bo_is_reserved(&bo->tbo);
-
 
87
#else
85
	return !!atomic_read(&bo->tbo.reserved);
88
	return !!atomic_read(&bo->tbo.reserved);
-
 
89
#endif
86
}
90
}
87
 
91
 
88
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
92
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
89
{
93
{
90
	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
94
	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
91
}
95
}
92
 
96
 
93
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
97
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
94
{
98
{
95
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
99
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
96
}
100
}
97
 
101
 
98
/**
102
/**
99
 * radeon_bo_mmap_offset - return mmap offset of bo
103
 * radeon_bo_mmap_offset - return mmap offset of bo
100
 * @bo:	radeon object for which we query the offset
104
 * @bo:	radeon object for which we query the offset
101
 *
105
 *
102
 * Returns mmap offset of the object.
106
 * Returns mmap offset of the object.
103
 *
107
 *
104
 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
108
 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
105
 * by any lock.
109
 * by any lock.
106
 */
110
 */
107
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
111
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
108
{
112
{
109
	return bo->tbo.addr_space_offset;
113
	return bo->tbo.addr_space_offset;
110
}
114
}
111
 
115
 
112
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
116
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
113
			  bool no_wait);
117
			  bool no_wait);
114
 
118
 
115
extern int radeon_bo_create(struct radeon_device *rdev,
119
extern int radeon_bo_create(struct radeon_device *rdev,
116
				unsigned long size, int byte_align,
120
				unsigned long size, int byte_align,
117
				bool kernel, u32 domain,
121
				bool kernel, u32 domain,
118
			    struct sg_table *sg,
122
			    struct sg_table *sg,
119
				struct radeon_bo **bo_ptr);
123
				struct radeon_bo **bo_ptr);
120
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
124
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
121
extern void radeon_bo_kunmap(struct radeon_bo *bo);
125
extern void radeon_bo_kunmap(struct radeon_bo *bo);
122
extern void radeon_bo_unref(struct radeon_bo **bo);
126
extern void radeon_bo_unref(struct radeon_bo **bo);
123
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
127
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
124
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
128
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
125
				    u64 max_offset, u64 *gpu_addr);
129
				    u64 max_offset, u64 *gpu_addr);
126
extern int radeon_bo_unpin(struct radeon_bo *bo);
130
extern int radeon_bo_unpin(struct radeon_bo *bo);
127
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
131
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
128
extern void radeon_bo_force_delete(struct radeon_device *rdev);
132
extern void radeon_bo_force_delete(struct radeon_device *rdev);
129
extern int radeon_bo_init(struct radeon_device *rdev);
133
extern int radeon_bo_init(struct radeon_device *rdev);
130
extern void radeon_bo_fini(struct radeon_device *rdev);
134
extern void radeon_bo_fini(struct radeon_device *rdev);
131
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
135
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
132
				struct list_head *head);
136
				struct list_head *head);
133
extern int radeon_bo_list_validate(struct list_head *head);
137
extern int radeon_bo_list_validate(struct list_head *head, int ring);
134
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
138
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
135
				struct vm_area_struct *vma);
139
				struct vm_area_struct *vma);
136
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
140
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
137
				u32 tiling_flags, u32 pitch);
141
				u32 tiling_flags, u32 pitch);
138
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
142
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
139
				u32 *tiling_flags, u32 *pitch);
143
				u32 *tiling_flags, u32 *pitch);
140
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
144
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
141
				bool force_drop);
145
				bool force_drop);
142
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
146
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
143
					struct ttm_mem_reg *mem);
147
					struct ttm_mem_reg *mem);
144
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
148
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
145
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
149
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
146
 
150
 
147
/*
151
/*
148
 * sub allocation
152
 * sub allocation
149
 */
153
 */
150
 
154
 
151
static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
155
static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
152
{
156
{
153
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
157
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
154
}
158
}
155
 
159
 
156
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
160
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
157
{
161
{
158
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
162
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
159
}
163
}
160
 
164
 
161
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
165
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
162
				     struct radeon_sa_manager *sa_manager,
166
				     struct radeon_sa_manager *sa_manager,
163
				     unsigned size, u32 domain);
167
				     unsigned size, u32 domain);
164
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
168
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
165
				      struct radeon_sa_manager *sa_manager);
169
				      struct radeon_sa_manager *sa_manager);
166
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
170
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
167
				      struct radeon_sa_manager *sa_manager);
171
				      struct radeon_sa_manager *sa_manager);
168
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
172
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
169
					struct radeon_sa_manager *sa_manager);
173
					struct radeon_sa_manager *sa_manager);
170
extern int radeon_sa_bo_new(struct radeon_device *rdev,
174
extern int radeon_sa_bo_new(struct radeon_device *rdev,
171
			    struct radeon_sa_manager *sa_manager,
175
			    struct radeon_sa_manager *sa_manager,
172
			    struct radeon_sa_bo **sa_bo,
176
			    struct radeon_sa_bo **sa_bo,
173
			    unsigned size, unsigned align, bool block);
177
			    unsigned size, unsigned align, bool block);
174
extern void radeon_sa_bo_free(struct radeon_device *rdev,
178
extern void radeon_sa_bo_free(struct radeon_device *rdev,
175
			      struct radeon_sa_bo **sa_bo,
179
			      struct radeon_sa_bo **sa_bo,
176
			      struct radeon_fence *fence);
180
			      struct radeon_fence *fence);
177
#if defined(CONFIG_DEBUG_FS)
181
#if defined(CONFIG_DEBUG_FS)
178
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
182
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
179
					 struct seq_file *m);
183
					 struct seq_file *m);
180
#endif
184
#endif
181
 
185
 
182
 
186
 
183
#endif
187
#endif