Subversion Repositories Kolibri OS

Rev

Rev 4280 | Rev 4371 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2326 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Eric Anholt 
25
 *
26
 */
27
 
3031 serge 28
#include 
4280 Serge 29
#include 
3031 serge 30
#include 
2326 Serge 31
#include "i915_drv.h"
2351 Serge 32
#include "i915_trace.h"
2326 Serge 33
#include "intel_drv.h"
3260 Serge 34
#include 
2330 Serge 35
#include 
2326 Serge 36
//#include 
3746 Serge 37
#include 
2326 Serge 38
#include 
39
 
2344 Serge 40
extern int x86_clflush_size;
2332 Serge 41
 
3263 Serge 42
#define PROT_READ       0x1             /* page can be read */
43
#define PROT_WRITE      0x2             /* page can be written */
44
#define MAP_SHARED      0x01            /* Share changes */
45
 
2344 Serge 46
#undef mb
47
#undef rmb
48
#undef wmb
49
#define mb() asm volatile("mfence")
50
#define rmb() asm volatile ("lfence")
51
#define wmb() asm volatile ("sfence")
52
 
3266 Serge 53
struct drm_i915_gem_object *get_fb_obj();
54
 
3263 Serge 55
unsigned long vm_mmap(struct file *file, unsigned long addr,
56
         unsigned long len, unsigned long prot,
57
         unsigned long flag, unsigned long offset);
58
 
2344 Serge 59
/*
 * clflush - flush the cache line containing __p from the CPU caches.
 *
 * Thin wrapper around the x86 CLFLUSH instruction.  The "+m" constraint
 * marks the pointed-to byte as both read and written, so the compiler
 * will not reorder other accesses to that memory across the flush.
 */
static inline void clflush(volatile void *__p)
{
    asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
63
 
2332 Serge 64
#define MAX_ERRNO       4095
65
 
66
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
67
 
68
 
2326 Serge 69
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
70
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
71
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
72
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
73
 
2332 Serge 74
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
4104 Serge 75
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
76
						   bool force);
77
static __must_check int
78
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
79
			   struct i915_address_space *vm,
2332 Serge 80
						    unsigned alignment,
3031 serge 81
						    bool map_and_fenceable,
82
						    bool nonblocking);
2332 Serge 83
static int i915_gem_phys_pwrite(struct drm_device *dev,
84
				struct drm_i915_gem_object *obj,
85
				struct drm_i915_gem_pwrite *args,
86
				struct drm_file *file);
2326 Serge 87
 
3031 serge 88
static void i915_gem_write_fence(struct drm_device *dev, int reg,
89
				 struct drm_i915_gem_object *obj);
90
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
91
					 struct drm_i915_fence_reg *fence,
92
					 bool enable);
2332 Serge 93
 
3031 serge 94
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
4104 Serge 95
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3031 serge 96
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
97
 
4104 Serge 98
static bool cpu_cache_is_coherent(struct drm_device *dev,
99
				  enum i915_cache_level level)
100
{
101
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
102
}
103
 
104
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
105
{
106
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
107
		return true;
108
 
109
	return obj->pin_display;
110
}
111
 
3031 serge 112
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
113
{
114
	if (obj->tiling_mode)
115
		i915_gem_release_mmap(obj);
116
 
117
	/* As we do not have an associated fence register, we will force
118
	 * a tiling change if we ever need to acquire one.
119
	 */
120
	obj->fence_dirty = false;
121
	obj->fence_reg = I915_FENCE_REG_NONE;
122
}
123
 
2332 Serge 124
/* some bookkeeping */
125
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
126
				  size_t size)
127
{
4104 Serge 128
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 129
	dev_priv->mm.object_count++;
130
	dev_priv->mm.object_memory += size;
4104 Serge 131
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 132
}
133
 
134
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
135
				     size_t size)
136
{
4104 Serge 137
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 138
	dev_priv->mm.object_count--;
139
	dev_priv->mm.object_memory -= size;
4104 Serge 140
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 141
}
142
 
143
/*
 * Wait for any in-progress GPU reset to finish before the caller takes
 * struct_mutex.  Returns 0 when no reset is pending.
 *
 * NOTE(port): the actual timed wait is compiled out (#if 0) in this
 * KolibriOS port, so if a reset IS in progress this currently falls
 * straight through and still returns 0 — presumably acceptable here,
 * but verify against the upstream behaviour.
 */
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;	/* only consumed by the disabled wait path below */

#define EXIT_COND (!i915_reset_in_progress(error))
	if (EXIT_COND)
		return 0;
#if 0
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

#endif
#undef EXIT_COND

	return 0;
}
172
 
173
int i915_mutex_lock_interruptible(struct drm_device *dev)
174
{
3480 Serge 175
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 176
	int ret;
177
 
3480 Serge 178
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
2332 Serge 179
	if (ret)
180
		return ret;
181
 
3480 Serge 182
	ret = mutex_lock_interruptible(&dev->struct_mutex);
183
	if (ret)
184
		return ret;
2332 Serge 185
 
186
	WARN_ON(i915_verify_lists(dev));
187
	return 0;
188
}
189
 
190
static inline bool
191
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
192
{
4104 Serge 193
	return i915_gem_obj_bound_any(obj) && !obj->active;
2332 Serge 194
}
195
 
196
 
197
#if 0
198
 
199
int
200
i915_gem_init_ioctl(struct drm_device *dev, void *data,
201
		    struct drm_file *file)
202
{
3480 Serge 203
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 204
	struct drm_i915_gem_init *args = data;
205
 
3031 serge 206
	if (drm_core_check_feature(dev, DRIVER_MODESET))
207
		return -ENODEV;
208
 
2332 Serge 209
	if (args->gtt_start >= args->gtt_end ||
210
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
211