Subversion Repositories KolibriOS

Diff: Rev 5078 → Rev 5271 (TTM execbuf utility code, ttm_eu_* reservation helpers)
Line 32... Line 32...

 #include 
 #include 
 
 DEFINE_WW_CLASS(reservation_ww_class);
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+					      struct ttm_validate_buffer *entry)
 {
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
+	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
 
-		entry->reserved = false;
-		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
-			entry->removed = false;
-		}
 		__ttm_bo_unreserve(bo);
 	}
 }
 
 static void ttm_eu_del_from_lru_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
-
-		if (!entry->removed) {
-			entry->put_count = ttm_bo_del_from_lru(bo);
-			entry->removed = true;
-		}
-	}
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		if (entry->put_count) {
-			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+		unsigned put_count = ttm_bo_del_from_lru(bo);
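
The rewritten helpers drop the per-entry reserved/removed/put_count bookkeeping. Buffers are now reserved strictly in list order, so on failure the unwind only has to walk backwards from the entry that failed, which is what list_for_each_entry_continue_reverse() provides (it starts at the element before the one passed in). A minimal userspace sketch of that reverse-unwind idiom, with toy types rather than TTM code:

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_buf { const char *name; bool locked; bool fail; };

	static bool toy_trylock(struct toy_buf *b)
	{
		if (b->fail)
			return false;
		b->locked = true;
		return true;
	}

	static void toy_unlock(struct toy_buf *b)
	{
		b->locked = false;
	}

	/* Lock bufs[0..n) in order; on failure at i, unwind i-1..0 in
	 * reverse, like ttm_eu_backoff_reservation_reverse() walking back
	 * from the entry that could not be reserved. */
	static int lock_all(struct toy_buf *bufs, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (!toy_trylock(&bufs[i])) {
				while (--i >= 0)	/* reverse unwind */
					toy_unlock(&bufs[i]);
				return -1;
			}
		}
		return 0;
	}

	int main(void)
	{
		struct toy_buf bufs[] = {
			{ "a", false, false },
			{ "b", false, false },
			{ "c", false, true },	/* locking "c" will fail */
		};

		if (lock_all(bufs, 3))
			printf("failed at c; a=%d b=%d after unwind\n",
			       bufs[0].locked, bufs[1].locked);
		return 0;
	}

The same shape appears below in ttm_eu_reserve_buffers(), where ttm_eu_backoff_reservation_reverse(list, entry) releases everything reserved before the contended entry.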
Line 91... Line 65...

 	if (list_empty(list))
 		return;
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
+
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
+	}
+	spin_unlock(&glob->lru_lock);
+
 	if (ticket)
 		ww_acquire_fini(ticket);
-	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
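For callers the contract is unchanged: ttm_eu_backoff_reservation() puts every buffer back on the LRU, unreserves it, and ends the ww acquire context. A hedged driver-side sketch against the rev 5271 signatures; my_validate() stands in for a hypothetical per-buffer validation step:

	#include <linux/list.h>
	#include <drm/ttm/ttm_execbuf_util.h>

	static int my_validate(struct list_head *list);	/* hypothetical */

	static int my_submit(struct list_head *list)
	{
		struct ww_acquire_ctx ticket;
		int ret;

		/* interruptible reservation; no duplicate handling (dups == NULL) */
		ret = ttm_eu_reserve_buffers(&ticket, list, true, NULL);
		if (ret)
			return ret;

		ret = my_validate(list);
		if (ret) {
			/* returns buffers to the LRU, unreserves, ends the context */
			ttm_eu_backoff_reservation(&ticket, list);
			return ret;
		}

		/* ... emit commands and fence the buffers (sketched after the
		 * last hunk) ... */
		return 0;
	}
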
Line 112... Line 93...

  * to become unreserved. This prevents deadlocks when validating multiple
  * buffers in different orders.
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-			   struct list_head *list)
+			   struct list_head *list, bool intr,
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
 
 	if (list_empty(list))
 		return 0;
 
-	list_for_each_entry(entry, list, head) {
-		entry->reserved = false;
-		entry->put_count = 0;
-		entry->removed = false;
-	}
-
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		/* already slowpath reserved? */
-		if (entry->reserved)
-			continue;
-
-		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
 				       ticket);
-
-		if (ret == -EDEADLK) {
-			/* uh oh, we lost out, drop every reservation and try
-			 * to only reserve this buffer, then start over if
-			 * this succeeds.
-			 */
-			BUG_ON(ticket == NULL);
-			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-							       ticket);
-			if (unlikely(ret != 0)) {
-				if (ret == -EINTR)
-					ret = -ERESTARTSYS;
-				goto err_fini;
-			}
-
-			entry->reserved = true;
-			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-				ret = -EBUSY;
-				goto err;
-			}
-			goto retry;
-		} else if (ret)
-			goto err;
-
-		entry->reserved = true;
-		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ret = -EBUSY;
-			goto err;
-		}
+		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			__ttm_bo_unreserve(bo);
+
+			ret = -EBUSY;
+
+		} else if (ret == -EALREADY && dups) {
+			struct ttm_validate_buffer *safe = entry;
+			entry = list_prev_entry(entry, head);
+			list_del(&safe->head);
+			list_add(&safe->head, dups);
+			continue;
+		}
+
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
+			if (!ret)
+				continue;
+		}
+
+		/* uh oh, we lost out, drop every reservation and try
+		 * to only reserve this buffer, then start over if
+		 * this succeeds.
+		 */
+		ttm_eu_backoff_reservation_reverse(list, entry);
+
+		if (ret == -EDEADLK && intr) {
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+		} else if (ret == -EDEADLK) {
+			ww_mutex_lock_slow(&bo->resv->lock, ticket);
+			ret = 0;
+		}
+
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
+
+		if (unlikely(ret != 0)) {
+			if (ret == -EINTR)
+				ret = -ERESTARTSYS;
+			if (ticket) {
+				ww_acquire_done(ticket);
+				ww_acquire_fini(ticket);
+			}
+			return ret;
+		}
+
+		/* move this item to the front of the list,
+		 * forces correct iteration of the loop without keeping track
+		 */
+		list_del(&entry->head);
+		list_add(&entry->head, list);
 	}
 
 	if (ticket)
 		ww_acquire_done(ticket);
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
 	return 0;
-
-err:
-	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
-err_fini:
-	if (ticket) {
-		ww_acquire_done(ticket);
-		ww_acquire_fini(ticket);
-	}
-	return ret;
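
The new loop body is the standard wait/wound mutex pattern from the kernel's ww-mutex documentation: take the locks in list order; on -EDEADLK release everything already held in reverse, acquire the contended lock on the slowpath (sleeping until its holder backs off), and restart with that lock pre-acquired. A condensed sketch of that documented pattern; struct obj and struct obj_entry are illustrative:

	#include <linux/list.h>
	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(ww_class);

	struct obj { struct ww_mutex lock; };
	struct obj_entry { struct list_head head; struct obj *obj; };

	static int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
	{
		struct obj *res_obj = NULL;
		struct obj_entry *contended_entry = NULL;
		struct obj_entry *entry;
		int ret;

		ww_acquire_init(ctx, &ww_class);
	retry:
		list_for_each_entry(entry, list, head) {
			if (entry->obj == res_obj) {	/* pre-locked on the slowpath */
				res_obj = NULL;
				continue;
			}
			ret = ww_mutex_lock(&entry->obj->lock, ctx);
			if (ret < 0) {
				contended_entry = entry;
				goto err;
			}
		}
		ww_acquire_done(ctx);
		return 0;

	err:
		/* unwind everything locked before the contended entry */
		list_for_each_entry_continue_reverse(entry, list, head)
			ww_mutex_unlock(&entry->obj->lock);
		if (res_obj)
			ww_mutex_unlock(&res_obj->lock);
		if (ret == -EDEADLK) {
			/* lost a stamp race: sleep until the lock is free, retry */
			ww_mutex_lock_slow(&contended_entry->obj->lock, ctx);
			res_obj = contended_entry->obj;
			goto retry;
		}
		ww_acquire_fini(ctx);
		return ret;
	}

ttm_eu_reserve_buffers() avoids the res_obj special case by rotating the just-slowpath-locked entry to the front of the list, so the restarted iteration never revisits it; that is what the "move this item to the front" comment refers to.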
Line 217... Line 197...

 	bdev = bo->bdev;
 	driver = bdev->driver;
 	glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj = bo->sync_obj;
-		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
-		entry->reserved = false;
 	}
-	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
-
-	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj)
-			driver->sync_obj_unref(&entry->old_sync_obj);
-	}
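
The sync_obj path (and the bdev->fence_lock around it) is gone: the fence for the submitted job is now attached to each buffer's reservation object, exclusive for writers or shared when entry->shared is set, and no old fence needs to be unreferenced afterwards. A hedged success-path sketch; my_emit_fence() is a hypothetical driver call returning the job's fence:

	#include <linux/fence.h>
	#include <drm/ttm/ttm_execbuf_util.h>

	static struct fence *my_emit_fence(void);	/* hypothetical */

	static void my_finish(struct ww_acquire_ctx *ticket, struct list_head *list)
	{
		struct fence *fence = my_emit_fence();	/* fence for the job */

		/* attaches 'fence' to each buffer's reservation object,
		 * exclusive for writers or shared when entry->shared is set,
		 * then unreserves everything and ends the acquire context */
		ttm_eu_fence_buffer_objects(ticket, list, fence);
	}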