Subversion Repositories Kolibri OS


Diff of ttm_execbuf_util.c (the TTM execbuf reservation helpers) between Rev 5078 and Rev 5271. Rev 5271 replaces the driver-opaque sync_obj fencing with reservation-object fences (struct fence) and reworks the reservation backoff path.
Both revisions open with the same VMware license header:

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
The include list is also unchanged between the revisions. The web viewer stripped the angle-bracketed file names; they are shown here as they appear in the upstream kernel copy of this file, which is an assumption, since only bare "#include" survived the export:

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

DEFINE_WW_CLASS(reservation_ww_class);

The first functional change replaces the flag-driven backoff helper with one that walks the list in reverse, starting just before the entry whose reservation failed. That range is exactly the set of buffers reserved so far, so the per-entry reserved/removed bookkeeping disappears and the LRU re-add moves into ttm_eu_backoff_reservation() itself.

Rev 5078:

static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

Rev 5271:

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}
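
The reverse-continue iterator is what makes the new helper stateless. Here is a minimal sketch of the same unwind shape, with a hypothetical struct item and unwind_before() standing in for the TTM types; only the list API is real:

#include <linux/list.h>

struct item {
	struct list_head head;
	bool locked;
};

/*
 * Release, in reverse order, everything that precedes "failed" in list
 * order, i.e. everything acquired before the failure. The iterator
 * starts at the entry *before* "failed", so the failed entry itself
 * (which was never acquired) is skipped.
 */
static void unwind_before(struct list_head *list, struct item *failed)
{
	struct item *it = failed;

	list_for_each_entry_continue_reverse(it, list, head)
		it->locked = false;
}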

ttm_eu_del_from_lru_locked() now drops the LRU references on the spot instead of parking the count in entry->put_count, so the separate ttm_eu_list_ref_sub() flush pass is deleted outright.

Rev 5078:

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

Rev 5271:

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}

ttm_eu_backoff_reservation() re-adds each buffer to the LRU and unreserves it inline, and it now finishes the ww_acquire context only after dropping the LRU lock (Rev 5078 called ww_acquire_fini() with lru_lock still held).

Rev 5078:

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

Rev 5271:

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
The comment above ttm_eu_reserve_buffers() is unchanged in both revisions:

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

ttm_eu_reserve_buffers() carries most of the rework. Rev 5078 tracked per-entry reserved/put_count/removed flags, always reserved interruptibly, and handled -EDEADLK by backing off the whole list, taking the contended lock on the slow path, and restarting from a retry: label. Rev 5271 adds an intr parameter and a dups list: buffers that come back -EALREADY (the same buffer appearing twice in the list) are moved onto dups instead of failing the call. It also reserves a shared fence slot when entry->shared is set, backs off in reverse from the point of failure, and after winning a slow-path reservation moves the entry to the head of the list so the loop re-reserves the remaining buffers with no per-entry state. Both versions still refuse buffers with pending CPU writers (-EBUSY); the new code checks immediately after reserving and unreserves on the spot.

Rev 5078:

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

Rev 5271:

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
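
The -EDEADLK / slow-path dance in both revisions is the standard wound/wait mutex protocol: the loser releases everything it holds, sleeps on the contended lock, then retries. A minimal self-contained sketch of that protocol for just two locks, assuming a hypothetical demo_ww_class and struct obj; this follows the generic ww_mutex pattern from the kernel's locking documentation, not TTM's list-based variant:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct obj {
	struct ww_mutex lock;	/* assumed initialized with ww_mutex_init() */
};

/* Take both locks in any order without deadlocking against a task
 * taking them in the opposite order. */
static int lock_pair(struct obj *a, struct obj *b)
{
	struct obj *objs[2] = { a, b };
	struct obj *contended = NULL;
	struct ww_acquire_ctx ctx;
	int i, ret;

	ww_acquire_init(&ctx, &demo_ww_class);
retry:
	for (i = 0; i < 2; i++) {
		struct obj *o = objs[i];

		if (o == contended) {	/* already taken on the slow path */
			contended = NULL;
			continue;
		}
		ret = ww_mutex_lock(&o->lock, &ctx);
		if (ret == -EDEADLK) {
			/* We lost: back off everything held so far... */
			while (i--)
				ww_mutex_unlock(&objs[i]->lock);
			if (contended)
				ww_mutex_unlock(&contended->lock);
			/* ...sleep until the winner is done, take the
			 * contended lock, and start over. */
			contended = o;
			ww_mutex_lock_slow(&o->lock, &ctx);
			goto retry;
		}
	}
	ww_acquire_done(&ctx);
	return 0;	/* caller unlocks both, then ww_acquire_fini(&ctx) */
}

ttm_eu_reserve_buffers() is this same loop generalized to a whole list, with the move-to-front trick replacing the explicit "contended" pointer.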

Finally, ttm_eu_fence_buffer_objects() takes a struct fence * instead of a driver-opaque void *sync_obj. Rather than swapping bo->sync_obj under bdev->fence_lock and unreferencing the old objects in a second pass, it attaches the fence to each buffer's reservation object, shared or exclusive according to entry->shared.

Rev 5078:

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);

Rev 5271:

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, struct fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
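
Putting the Rev 5271 entry points together, a driver's submission path would look roughly like this. Everything named foo_*, the tv_list, and the job's fence field are hypothetical and invented for illustration; only the ttm_eu_* and list calls are from the file above:

#include <drm/ttm/ttm_execbuf_util.h>

struct foo_job {
	struct fence *fence;		/* signals when the job completes */
	struct list_head tv_list;	/* pre-filled ttm_validate_buffer list */
};

static int foo_run_job(struct foo_job *job);	/* driver-specific submit */

static int foo_validate_and_submit(struct foo_job *job)
{
	struct ww_acquire_ctx ticket;
	struct list_head dups;
	int ret;

	INIT_LIST_HEAD(&dups);

	/* Each entry on job->tv_list has entry->bo set and entry->shared
	 * chosen (true = read-only use, gets a shared fence slot).
	 * Duplicate references end up on "dups" and share the reservation
	 * of their first occurrence. */
	ret = ttm_eu_reserve_buffers(&ticket, &job->tv_list, true, &dups);
	if (ret)
		return ret;		/* nothing left reserved on failure */

	ret = foo_run_job(job);
	if (ret) {
		/* Put the buffers back on the LRU and unreserve them. */
		ttm_eu_backoff_reservation(&ticket, &job->tv_list);
		return ret;
	}

	/* Attach the job's fence to every buffer (shared or exclusive,
	 * per entry->shared) and unreserve in one pass. */
	ttm_eu_fence_buffer_objects(&ticket, &job->tv_list, job->fence);
	return 0;
}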