/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

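/*
 * All TTM buffer object reservations share this ww_mutex class, so the
 * wait/wound machinery can consistently order contended reservations.
 */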
DEFINE_WW_CLASS(reservation_ww_class);

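/*
 * Release every reservation held on the list: put buffers that were taken
 * off the LRU lists back on them and unreserve them.
 * Caller must hold the global LRU lock.
 */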
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

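/*
 * Take every reserved buffer on the list off its LRU list, remembering in
 * put_count how many references have to be dropped later by
 * ttm_eu_list_ref_sub(). Caller must hold the global LRU lock.
 */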
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

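/*
 * Drop the LRU references recorded in put_count by
 * ttm_eu_del_from_lru_locked(); called after the LRU lock has been dropped.
 */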
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

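/*
 * Undo a (possibly partial) reservation of the buffers on the list: put
 * them back on the LRU lists, unreserve them and, if a ticket was used,
 * finish the ww_acquire context.
 */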
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

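	/*
	 * When the caller provides a ww_acquire ticket, contended buffers
	 * can be re-taken via the deadlock-avoiding slow path below; without
	 * a ticket each reservation is attempted without blocking.
	 */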
	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

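	/*
	 * All buffers are now reserved: mark the end of the acquire phase
	 * for the ww ticket, take the buffers off the LRU lists and drop
	 * the LRU references accumulated while doing so.
	 */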
	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

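/*
 * Attach a new sync object to every buffer on the list, return the buffers
 * to the LRU lists and unreserve them; the previously attached sync objects
 * are unreferenced once all locks have been dropped.
 */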
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

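	/*
	 * The old sync objects are only unreferenced after the spinlocks and
	 * the acquire context have been released.
	 */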
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);