33,6 → 33,7 |
#include <ttm/ttm_bo_api.h> |
#include <ttm/ttm_memory.h> |
#include <ttm/ttm_module.h> |
#include <ttm/ttm_placement.h> |
#include <drm/drm_mm.h> |
#include <drm/drm_global.h> |
//#include <linux/workqueue.h> |
39,6 → 40,8 |
//#include <linux/fs.h> |
#include <linux/spinlock.h> |
|
struct ww_acquire_ctx; |
|
struct ttm_backend_func { |
/** |
* struct ttm_backend_func member bind |
778,8 → 781,8 |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. |
* @use_sequence: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @sequence < (@bo)->sequence. |
* @use_ticket: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @ticket->stamp is older. |
* |
* Locks a buffer object for validation. (Or prevents other processes from |
* locking it for validation) and removes it from lru lists, while taking |
790,19 → 793,10 |
* to make room for a buffer already reserved. (Buffers are reserved before |
* they are evicted). The following algorithm prevents such deadlocks from |
* occurring: |
* 1) Buffers are reserved with the lru spinlock held. Upon successful |
* reservation they are removed from the lru list. This stops a reserved buffer |
* from being evicted. However the lru spinlock is released between the time |
* a buffer is selected for eviction and the time it is reserved. |
* Therefore a check is made when a buffer is reserved for eviction, that it |
* is still the first buffer in the lru list, before it is removed from the |
* list. @check_lru == 1 forces this check. If it fails, the function returns |
* -EINVAL, and the caller should then choose a new buffer to evict and repeat |
* the procedure. |
* 2) Processes attempting to reserve multiple buffers other than for eviction, |
* Processes attempting to reserve multiple buffers other than for eviction, |
* (typically execbuf), should first obtain a unique 32-bit |
* validation sequence number, |
* and call this function with @use_sequence == 1 and @sequence == the unique |
* and call this function with @use_ticket == 1 and @ticket->stamp == the unique |
* sequence number. If upon call of this function, the buffer object is already |
* reserved, the validation sequence is checked against the validation |
* sequence of the process currently reserving the buffer, |
817,84 → 811,102 |
* will eventually succeed, preventing both deadlocks and starvation. |
* |
* Returns: |
* -EAGAIN: The reservation may cause a deadlock. |
* -EDEADLK: The reservation may cause a deadlock. |
* Release all buffer reservations, wait for @bo to become unreserved and |
 * try again. (only if use_ticket == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
* a signal. Release all buffer reservations and return to user-space. |
* -EBUSY: The function needed to sleep, but @no_wait was true |
* -EDEADLK: Bo already reserved using @sequence. This error code will only |
* be returned if @use_sequence is set to true. |
* -EALREADY: Bo already reserved using @ticket. This error code will only |
* be returned if @use_ticket is set to true. |
*/ |
extern int ttm_bo_reserve(struct ttm_buffer_object *bo, |
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_ticket,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	/* Reserving an object whose refcount already hit zero is a caller bug. */
	WARN_ON(!atomic_read(&bo->kref.refcount));

	/* Take the reservation itself (the ww_mutex) without LRU bookkeeping. */
	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
				   ticket);
	if (likely(ret == 0))
		/* A reserved BO must not be picked for eviction: remove it
		 * (and any subobjects) from the LRU lists. */
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
|
/** |
* ttm_bo_reserve_locked: |
* |
* ttm_bo_reserve_slowpath: |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. |
* @use_sequence: If @bo is already reserved, Only sleep waiting for |
* it to become unreserved if @sequence < (@bo)->sequence. |
* @sequence: Set (@bo)->sequence to this value after lock |
* |
* Must be called with struct ttm_bo_global::lru_lock held, |
* and will not remove reserved buffers from the lru lists. |
* The function may release the LRU spinlock if it needs to sleep. |
* Otherwise identical to ttm_bo_reserve. |
* |
* Returns: |
* -EAGAIN: The reservation may cause a deadlock. |
* Release all buffer reservations, wait for @bo to become unreserved and |
* try again. (only if use_sequence == 1). |
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
* a signal. Release all buffer reservations and return to user-space. |
* -EBUSY: The function needed to sleep, but @no_wait was true |
* -EDEADLK: Bo already reserved using @sequence. This error code will only |
* be returned if @use_sequence is set to true. |
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
* from all our other reservations. Because there are no other reservations |
* held by us, this function cannot deadlock any more. |
*/ |
extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, |
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	/* Reserving an object whose refcount already hit zero is a caller bug. */
	WARN_ON(!atomic_read(&bo->kref.refcount));

	/*
	 * The caller has dropped all other reservations, so an unconditional
	 * blocking "slow" acquire of the ww_mutex cannot deadlock and needs
	 * no backoff handling.
	 */
	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		/* Reserved BOs are kept off the LRU lists (not evictable). */
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		/* TTM's convention is to report interrupted waits as
		 * -ERESTARTSYS rather than the ww_mutex's -EINTR. */
		ret = -ERESTARTSYS;

	return ret;
}
|
/** |
* ttm_bo_unreserve |
* |
* ttm_bo_unreserve_ticket |
* @bo: A pointer to a struct ttm_buffer_object. |
* @ticket: ww_acquire_ctx used for reserving |
* |
* Unreserve a previous reservation of @bo. |
* Unreserve a previous reservation of @bo made with @ticket. |
*/ |
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); |
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *t)
{
	/* NOTE(review): @t is not referenced here — the acquire context is
	 * implicit in the ww_mutex; the parameter exists for API symmetry. */
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		/* Buffer becomes evictable again: put it back on the LRU
		 * lists under the global LRU spinlock. Pinned (NO_EVICT)
		 * buffers stay off the lists. */
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	/* Drop the reservation lock last, after LRU state is consistent. */
	ww_mutex_unlock(&bo->resv->lock);
}
|
/** |
* ttm_bo_unreserve_locked |
* ttm_bo_unreserve |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Unreserve a previous reservation of @bo. |
* Needs to be called with struct ttm_bo_global::lru_lock held. |
*/ |
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); |
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	/* Ticketless unreserve: forward with a NULL acquire context. */
	ttm_bo_unreserve_ticket(bo, NULL);
}
|
/** |
* ttm_bo_wait_unreserved |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Wait for a struct ttm_buffer_object to become unreserved. |
 * This is typically used in the execbuf code to relax cpu-usage while
 * backing off from a potential deadlock condition.
*/ |
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, |
bool interruptible); |
|
/* |
* ttm_bo_util.c |
*/ |
|
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem); |
void ttm_mem_io_free(struct ttm_bo_device *bdev, |
struct ttm_mem_reg *mem); |
/** |
* ttm_bo_move_ttm |
* |