/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

DEFINE_WW_CLASS(reservation_ww_class);

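/* Walk @list backwards from @entry (exclusive) towards the head and
 * unreserve every buffer object visited, undoing the reservations
 * taken so far when a reservation attempt has to be rolled back.
 */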
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}

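/* Remove every buffer on @list from the LRU lists. The caller must hold
 * glob->lru_lock; ttm_bo_del_from_lru() returns the number of list
 * references to drop, which are then released via ttm_bo_list_ref_sub().
 */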
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}

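/* Error-path counterpart to ttm_eu_reserve_buffers(): put each buffer
 * back on the LRU, unreserve it, and finish the ww acquire context.
 */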
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

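/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): a driver builds a list of struct ttm_validate_buffer entries,
 * reserves them all, validates and submits, then either fences the
 * buffers or backs off. my_driver_submit() and
 * my_driver_validate_and_emit() are hypothetical names.
 */
#if 0
static int my_driver_submit(struct list_head *validate_list,
			    struct fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/* Interruptible reserve of the whole list; no duplicates list. */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list, true, NULL);
	if (ret)
		return ret;

	ret = my_driver_validate_and_emit(validate_list);
	if (ret) {
		/* Drop all reservations and put buffers back on the LRU. */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/* Attach the fence to every buffer and release the reservations. */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
	return 0;
}
#endif
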
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

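/* Attach @fence to every buffer on @list, as a shared or exclusive fence
 * depending on entry->shared, put the buffers back on the LRU and drop
 * the reservations taken by ttm_eu_reserve_buffers().
 */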
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, struct fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);