Subversion Repositories Kolibri OS


Rev 6283 vs Rev 6296

Line 171 (Rev 6283) ... Line 171 (Rev 6296)

 	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
+}
+
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+{
+	char *vaddr = obj->phys_handle->vaddr;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int i;
+
+	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+		return -EINVAL;
+
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = obj->base.size;
+
+	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_len(sg) = obj->base.size;
+
+	obj->pages = st;
+	return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	if (obj->dirty) {
+		obj->dirty = 0;
+	}
+
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+	drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+	.get_pages = i915_gem_object_get_pages_phys,
+	.put_pages = i915_gem_object_put_pages_phys,
+	.release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	drm_gem_object_reference(&obj->base);
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+		if (i915_vma_unbind(vma))
+			break;
+
+	ret = i915_gem_object_put_pages(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	return ret;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	int ret;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	ret = drop_pages(obj);
+	if (ret)
+		return ret;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	obj->phys_handle = phys;
+	obj->ops = &i915_gem_phys_ops;
+
+	return i915_gem_object_get_pages(obj);
 }
-
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
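
This hunk imports the upstream physically contiguous backing for GEM objects: i915_gem_object_get_pages_phys() wraps a drm_pci_alloc'd buffer in a single-entry sg_table, i915_gem_phys_ops bundles the get_pages/put_pages/release callbacks, and i915_gem_object_attach_phys() unbinds every VMA, drops the shmem pages via drop_pages(), then swaps obj->ops before repopulating through the generic i915_gem_object_get_pages(). The load-bearing idea is the ops-table swap; below is a minimal userspace sketch of that shape, with simplified stand-in types and helpers rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the ops-table pattern; struct and function
 * names are simplified stand-ins, not the kernel's. */
struct gem_object;

struct gem_object_ops {
	int  (*get_pages)(struct gem_object *obj);
	void (*put_pages)(struct gem_object *obj);
	void (*release)(struct gem_object *obj);
};

struct gem_object {
	const struct gem_object_ops *ops;
	void *backing;   /* stand-in for obj->pages / obj->phys_handle */
	size_t size;
};

static int phys_get_pages(struct gem_object *obj)
{
	/* One contiguous allocation stands in for the single-entry
	 * sg_table that i915_gem_object_get_pages_phys() builds. */
	obj->backing = malloc(obj->size);
	return obj->backing ? 0 : -1;
}

static void phys_put_pages(struct gem_object *obj)
{
	free(obj->backing);       /* cf. sg_free_table() + kfree() */
	obj->backing = NULL;
}

static void phys_release(struct gem_object *obj)
{
	(void)obj;                /* cf. drm_pci_free() of phys_handle */
}

static const struct gem_object_ops phys_ops = {
	.get_pages = phys_get_pages,
	.put_pages = phys_put_pages,
	.release   = phys_release,
};

int main(void)
{
	struct gem_object obj = { .size = 4096 };

	/* attach_phys: retarget the object at the phys backend, then
	 * repopulate through the generic entry point. */
	obj.ops = &phys_ops;
	if (obj.ops->get_pages(&obj) == 0)
		printf("phys backing attached\n");

	obj.ops->put_pages(&obj);
	obj.ops->release(&obj);
	return 0;
}

Because every later caller goes through obj->ops, no get/put/release call site needs to know that a phys-backed object is special.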

Line 631 (Rev 6283) ... Line 753 (Rev 6296)

 				       page_length);
 	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
 }
-#if 0
 
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */

Line 666 (Rev 6283) ... Line 787 (Rev 6296)

 					     page_do_bit17_swizzling);
 	kunmap(page);
 
 	return ret ? -EFAULT : 0;
 }
-#endif
-
 
 static int
 i915_gem_shmem_pwrite(struct drm_device *dev,
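
This hunk and the previous one delete the #if 0 / #endif pair with which the KolibriOS port had compiled out the slow-path write helper (shmem_pwrite_slow, per the surviving comment: it handles bit17 and uses non-atomic copy and kmap functions). The matching call-site change follows in the next hunk.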

Line 755 (Rev 6283) ... Line 874 (Rev 6296)

 		if (ret == 0)
 			goto next_page;
 
 		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
-		dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
-
-//		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
-//					user_data, page_do_bit17_swizzling,
-//					partial_cacheline_write,
-//					needs_clflush_after);
+		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
 
 		mutex_lock(&dev->struct_mutex);
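
With shmem_pwrite_slow() compiled back in, the call site drops the port's dbgprintf() placeholder and restores the upstream sequence: flag hit_slowpath, release struct_mutex, run the sleeping non-atomic copy, retake the lock. A standalone sketch of that fast/slow shape, using a pthread mutex as a stand-in for dev->struct_mutex and deliberately simplified helpers (the names mirror the diff, the bodies do not):

#include <pthread.h>
#include <stddef.h>
#include <string.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: must not fault or sleep while the lock is held. Here it
 * just reports failure, the way a faulting atomic user copy would,
 * to force the fallback. */
static int pwrite_fast(char *dst, const char *src, size_t len)
{
	(void)dst; (void)src; (void)len;
	return -1;
}

/* Slow path: may sleep (kmap, prefault), so it runs unlocked. */
static int pwrite_slow(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int shmem_pwrite(char *dst, const char *src, size_t len)
{
	int ret;

	pthread_mutex_lock(&struct_mutex);
	ret = pwrite_fast(dst, src, len);
	if (ret != 0) {
		/* hit_slowpath: drop the lock for the sleeping copy,
		 * then retake it, the order restored by this hunk. */
		pthread_mutex_unlock(&struct_mutex);
		ret = pwrite_slow(dst, src, len);
		pthread_mutex_lock(&struct_mutex);
	}
	pthread_mutex_unlock(&struct_mutex);
	return ret;
}

int main(void)
{
	char dst[8] = { 0 };

	return shmem_pwrite(dst, "hello", 6);
}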

Line 860 (Rev 6283) ... Line 977 (Rev 6296)

 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT || ret == -ENOSPC)
+	if (ret == -EFAULT || ret == -ENOSPC) {
 		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
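
The only change in this hunk is bracing the single-statement shmem fallback; behavior is unchanged.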

Line 1745 (Rev 6283) ... Line 1863 (Rev 6296)

 		i915_gem_clflush_object(obj, true);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	i915_gem_gtt_finish_object(obj);
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj);
+
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 

Line 1857 (Rev 6283) ... Line 1979 (Rev 6296)

 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret)
 		goto err_pages;
 
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
 	if (obj->tiling_mode != I915_TILING_NONE &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
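
The last two hunks restore the bit-17 swizzle bookkeeping as a matched pair: i915_gem_object_save_bit_17_swizzle() records the swizzle state when pages are put, and i915_gem_object_do_bit_17_swizzle() fixes pages up when they are reacquired. On machines where the memory controller folds physical address bit 17 into the tiling swizzle, a page whose contents were saved while bit 17 had one value must be re-swizzled if it comes back at a physical address where that bit differs; upstream performs the fixup by swapping the two 64-byte halves of each 128-byte block. A standalone sketch of that mechanism, with simplified bookkeeping in place of the kernel's page and bitmask structures:

#include <string.h>

#define PAGE_SIZE 4096

/* Swap the two 64-byte halves of every 128-byte block, the effect of
 * flipping bit 17 in the swizzled address (mirrors the upstream
 * i915_gem_swizzle_page() helper). */
static void swizzle_page(char *vaddr)
{
	char temp[64];
	int i;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, vaddr + i, 64);
		memcpy(vaddr + i, vaddr + i + 64, 64);
		memcpy(vaddr + i + 64, temp, 64);
	}
}

/* saved_bit_17 stands in for the bitmask written by the save half;
 * re-swizzle only if the page's current bit 17 differs from it. */
static void fixup_bit_17(char *vaddr, unsigned long phys_addr,
			 int saved_bit_17)
{
	int current_bit_17 = (phys_addr >> 17) & 1;

	if (current_bit_17 != saved_bit_17)
		swizzle_page(vaddr);
}

int main(void)
{
	static char page[PAGE_SIZE];

	page[64] = 'A';
	/* Data was saved with bit 17 clear; the page came back at a
	 * physical address with bit 17 set, so the halves swap. */
	fixup_bit_17(page, 1UL << 17, 0);
	return page[0] == 'A' ? 0 : 1;
}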