Subversion Repositories Kolibri OS

Compare Revisions

Rev 2344 → Rev 2343

/drivers/video/drm/i915/clip.inc
File deleted
/drivers/video/drm/i915/bitmap.c
8,11 → 8,11
 
void __attribute__((regparm(1))) destroy_bitmap(bitmap_t *bitmap)
{
printf("destroy bitmap %d\n", bitmap->handle);
free_handle(&bm_man, bitmap->handle);
bitmap->handle = 0;
i915_gem_object_unpin(bitmap->obj);
drm_gem_object_unreference(&bitmap->obj->base);
/*
*
*
*
*/
__DestroyObject(bitmap);
};
 
30,47 → 30,29
};
 
 
int create_surface(struct io_call_10 *pbitmap)
int create_bitmap(struct ubitmap *pbitmap)
{
struct drm_i915_gem_object *obj;
 
bitmap_t *bitmap;
u32 handle;
u32 width, max_width;
u32 height, max_height;
u32 size, max_size;
u32 pitch, max_pitch;
u32 width;
u32 height;
u32 size;
u32 pitch;
void *uaddr;
 
int ret;
 
pbitmap->handle = 0;
pbitmap->data = (void*)-1;
pbitmap->data = NULL;
 
width = pbitmap->width;
height = pbitmap->height;
 
/*
if((width==0)||(height==0)||(width>4096)||(height>4096))
goto err1;
 
if( ((pbitmap->max_width !=0 ) &&
(pbitmap->max_width < width)) ||
(pbitmap->max_width > 4096) )
goto err1;
 
if( ((pbitmap->max_height !=0 ) &&
(pbitmap->max_height < width)) ||
(pbitmap->max_height > 4096) )
goto err1;
 
if( pbitmap->format != 0)
goto err1;
*/
 
max_width = (pbitmap->max_width ==0) ? width : pbitmap->max_width;
max_height = (pbitmap->max_height==0) ? height : pbitmap->max_height;
 
handle = alloc_handle(&bm_man);
// printf("%s %d\n",__FUNCTION__, handle);
 
91,7 → 73,6
pitch = ALIGN(width*4,64);
 
size = roundup(pitch*height, PAGE_SIZE);
 
// printf("pitch %d size %d\n", pitch, size);
 
obj = i915_gem_alloc_object(main_device, size);
102,16 → 83,13
if (ret)
goto err3;
 
max_pitch = ALIGN(max_width*4,64);
max_size = roundup(max_pitch*max_height, PAGE_SIZE);
 
uaddr = UserAlloc(max_size);
uaddr = UserAlloc(size);
if( uaddr == NULL)
goto err4;
else
{
u32_t *src, *dst;
u32 count, max_count;
int count;
 
#define page_tabs 0xFDC00000 /* really dirty hack */
 
118,43 → 96,35
src = (u32_t*)obj->pages;
dst = &((u32_t*)page_tabs)[(u32_t)uaddr >> 12];
count = size/4096;
max_count = max_size/4096 - count;
 
while(count--)
{
*dst++ = (0xFFFFF000 & *src++) | 0x207 ; // map as shared page
};
// while(max_count--)
// *dst++ = 0; // cleanup unused space
}
 
bitmap->handle = handle;
bitmap->uaddr = uaddr;
bitmap->width = width;
bitmap->height = height;
bitmap->pitch = pitch;
bitmap->gaddr = obj->gtt_offset;
 
bitmap->width = width;
bitmap->height = height;
bitmap->max_width = max_width;
bitmap->max_height = max_height;
 
bitmap->uaddr = uaddr;
bitmap->obj = obj;
bitmap->header.destroy = destroy_bitmap;
 
pbitmap->pitch = pitch;
pbitmap->handle = handle;
pbitmap->data = uaddr;
pbitmap->pitch = pitch;
 
// printf("%s handle %d pitch %d gpu %x user %x\n",
// __FUNCTION__, handle, pitch, obj->gtt_offset, uaddr);
 
printf("%s handle: %d pitch: %d gpu_addr: %x user_addr: %x\n",
__FUNCTION__, handle, pitch, obj->gtt_offset, uaddr);
 
return 0;
 
err4:
i915_gem_object_unpin(obj);
// drm_gem_object_unpin;
err3:
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
err2:
free_handle(&bm_man, handle);
__DestroyObject(bitmap);
164,8 → 134,6
};
 
 
 
 
int init_hman(struct hman *man, u32 count)
{
u32* data;
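
Editor's note: the create_surface()/create_bitmap() paths above expose the GEM object's backing pages to the caller by writing page-table entries directly through the fixed page_tabs window (0xFDC00000), the "really dirty hack" flagged in the source. Below is a minimal sketch of that mapping loop, assuming a 32-bit KolibriOS kernel context; the map_pages_to_user() name and the local u32_t typedef are illustrative, while the window address and the 0x207 attribute bits (present/writable/user plus the bit the driver comments as "shared") come from the diff itself.

#include <stdint.h>

typedef uint32_t u32_t;                 /* stand-in for the driver's u32_t */

#define PAGE_TABS  0xFDC00000u          /* fixed page-table self-mapping window */
#define PTE_SHARED 0x207u               /* present | writable | user | "shared" marker */

/* Map 'count' pages of a GEM object (obj->pages-style array of page frames)
 * at the user address 'uaddr' by patching the page tables in place. */
static void map_pages_to_user(void *uaddr, const u32_t *pages, u32_t count)
{
    u32_t *dst = &((u32_t *)PAGE_TABS)[(u32_t)(uintptr_t)uaddr >> 12];

    while (count--)
        *dst++ = (*pages++ & 0xFFFFF000u) | PTE_SHARED;
}

The same window is used again in i915_gem_clflush_object() further down, there with plain 0x001 attributes for a temporary kernel-side mapping.
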
/drivers/video/drm/i915/bitmap.h
37,35 → 37,24
kobj_t header;
 
u32 handle;
void *uaddr;
 
u32 width;
u32 height;
u32 pitch;
u32 gaddr;
 
u32 width;
u32 height;
u32 max_width;
u32 max_height;
 
u32 format;
void *uaddr;
struct drm_i915_gem_object *obj;
}bitmap_t;
 
 
struct io_call_10 /* SRV_CREATE_SURFACE */
struct ubitmap
{
u32 handle; // ignored
void *data; // ignored
 
u32 width;
u32 height;
u32 pitch; // ignored
 
u32 max_width;
u32 max_height;
u32 format; // reserved mbz
u32 pitch;
u32 handle;
void *data;
};
 
int create_surface(struct io_call_10 *pbitmap);
int create_bitmap(struct ubitmap *pbitmap);
int init_bitmaps();
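
As a usage note for the rev 2343 interface above: the caller fills in only width and height of struct ubitmap, and create_bitmap() writes back pitch, handle and the user-space mapping in data. A minimal sketch of that calling convention, assuming the driver's own headers; the wrapper name and the printf reporting are illustrative.

static int alloc_test_bitmap(u32 width, u32 height)
{
    struct ubitmap bm = {0};

    bm.width  = width;
    bm.height = height;

    if (create_bitmap(&bm) != 0)
        return -1;                      /* allocation, pinning or mapping failed */

    /* on success the driver has filled pitch, handle and data */
    printf("bitmap %u: %ux%u pitch %u at %p\n",
           bm.handle, bm.width, bm.height, bm.pitch, bm.data);
    return 0;
}
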
 
/drivers/video/drm/i915/i915_drv.h
756,8 → 756,6
struct drm_i915_gem_object {
struct drm_gem_object base;
 
void *mapped;
 
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
/drivers/video/drm/i915/i915_gem.c
36,20 → 36,7
//#include <linux/swap.h>
#include <linux/pci.h>
 
extern int x86_clflush_size;
 
#undef mb
#undef rmb
#undef wmb
#define mb() asm volatile("mfence")
#define rmb() asm volatile ("lfence")
#define wmb() asm volatile ("sfence")
 
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
 
#define MAX_ERRNO 4095
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
69,17 → 56,7
return (long) ptr;
}
 
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
i915_gem_free_object(obj);
}
 
/**
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
90,7 → 67,6
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
obj->dev = dev;
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
 
97,11 → 73,9
return 0;
}
 
void
drm_gem_object_release(struct drm_gem_object *obj)
{ }
 
 
 
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
290,6 → 264,7
 
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference(&obj->base);
// trace_i915_gem_object_create(obj);
 
*handle_p = handle;
return 0;
780,8 → 755,8
return 0;
 
err_pages:
while (i--)
FreePage(obj->pages[i]);
// while (i--)
// page_cache_release(obj->pages[i]);
 
free(obj->pages);
obj->pages = NULL;
794,8 → 769,6
int page_count = obj->base.size / PAGE_SIZE;
int i;
 
ENTER();
 
BUG_ON(obj->madv == __I915_MADV_PURGED);
 
// if (obj->tiling_mode != I915_TILING_NONE)
803,16 → 776,21
 
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
/* It's a swap!!!
for (i = 0; i < page_count; i++) {
if (obj->dirty)
set_page_dirty(obj->pages[i]);
 
for (i = 0; i < page_count; i++) {
FreePage(obj->pages[i]);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj->pages[i]);
 
//page_cache_release(obj->pages[i]);
}
obj->dirty = 0;
*/
 
free(obj->pages);
obj->pages = NULL;
 
LEAVE();
}
 
void
828,7 → 806,7
 
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
drm_gem_object_reference(&obj->base);
// drm_gem_object_reference(&obj->base);
obj->active = 1;
}
 
850,51 → 828,10
}
}
 
static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
list_del_init(&obj->ring_list);
obj->last_rendering_seqno = 0;
}
 
static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
BUG_ON(!obj->active);
list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
 
i915_gem_object_move_off_active(obj);
}
 
 
 
 
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
struct inode *inode;
 
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
 
obj->madv = __I915_MADV_PURGED;
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
}
 
static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
uint32_t flush_domains)
{
911,6 → 848,9
i915_gem_object_move_to_active(obj, ring,
i915_gem_next_request_seqno(ring));
 
// trace_i915_gem_object_change_domain(obj,
// obj->base.read_domains,
// old_write_domain);
}
}
}
953,173 → 893,41
 
 
 
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
int ret;
 
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
if (obj->active) {
// ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
// if (ret)
// return ret;
}
 
return 0;
}
 
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
 
/* Act a barrier for all accesses through the GTT */
mb();
 
/* Force a pagefault for domain tracking on next user access */
// i915_gem_release_mmap(obj);
 
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
 
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
 
}
 
/**
* Unbinds an object from the GTT aperture.
*/
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
int ret = 0;
 
ENTER();
if (obj->gtt_space == NULL)
return 0;
 
if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
 
ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTARTSYS)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
 
i915_gem_object_finish_gtt(obj);
 
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it.
*/
if (ret == 0)
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret == -ERESTARTSYS)
return ret;
if (ret) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret == -ERESTARTSYS)
return ret;
 
 
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
 
list_del_init(&obj->gtt_list);
list_del_init(&obj->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
 
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
obj->gtt_offset = 0;
 
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
 
LEAVE();
return ret;
}
 
int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
 
if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
 
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
 
if (flush_domains & I915_GEM_GPU_DOMAINS)
i915_gem_process_flushing_list(ring, flush_domains);
 
return 0;
}
 
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
 
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
 
if (!list_empty(&ring->gpu_write_list)) {
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
 
return 0; //i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
 
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
 
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
 
return 0;
}
 
 
 
1138,74 → 946,52
 
 
 
 
 
 
 
static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
return i915_seqno_passed(ring->get_seqno(ring), seqno);
}
 
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
int ret;
 
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
}
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
obj->fenced_gpu_access = false;
}
 
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
// ret = i915_wait_request(obj->last_fenced_ring,
// obj->last_fenced_seqno);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
// if (obj->active) {
// ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
// if (ret)
// return ret;
}
// }
 
obj->last_fenced_seqno = 0;
obj->last_fenced_ring = NULL;
return 0;
}
 
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
 
return 0;
}
 
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
 
// if (obj->tiling_mode)
// i915_gem_release_mmap(obj);
if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
ret = i915_gem_object_flush_fence(obj, NULL);
// trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
 
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
 
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
if (flush_domains & I915_GEM_GPU_DOMAINS)
i915_gem_process_flushing_list(ring, flush_domains);
 
obj->fence_reg = I915_FENCE_REG_NONE;
}
 
return 0;
}
 
1228,20 → 1014,6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
* i915_gem_clear_fence_reg - clear out fence register info
* @obj: object to clear
1392,7 → 1164,7
 
ret = i915_gem_gtt_bind_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
// i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
 
1423,6 → 1195,7
 
obj->map_and_fenceable = mappable && fenceable;
 
// trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
 
1447,54 → 1220,14
if (obj->cache_level != I915_CACHE_NONE)
return;
 
if(obj->mapped != NULL)
{
uint8_t *page_virtual;
unsigned int i;
// trace_i915_gem_object_clflush(obj);
 
page_virtual = obj->mapped;
asm volatile("mfence");
for (i = 0; i < obj->base.size; i += x86_clflush_size)
clflush(page_virtual + i);
asm volatile("mfence");
// drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
mb();
__asm__ ("wbinvd"); // this is really ugly
mb();
}
else
{
uint8_t *page_virtual;
unsigned int i;
page_virtual = AllocKernelSpace(obj->base.size);
if(page_virtual != NULL)
{
u32_t *src, *dst;
u32 count;
 
#define page_tabs 0xFDC00000 /* really dirty hack */
 
src = (u32_t*)obj->pages;
dst = &((u32_t*)page_tabs)[(u32_t)page_virtual >> 12];
count = obj->base.size/4096;
 
while(count--)
{
*dst++ = (0xFFFFF000 & *src++) | 0x001 ;
};
 
asm volatile("mfence");
for (i = 0; i < obj->base.size; i += x86_clflush_size)
clflush(page_virtual + i);
asm volatile("mfence");
FreeKernelSpace(page_virtual);
}
else
{
asm volatile (
"mfence \n"
"wbinvd \n" /* this is really ugly */
"mfence");
}
}
}
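
The hunk above contrasts two ways of making an object's contents visible to the GPU: the rev 2344 side maps the object (either via obj->mapped or a temporary page_tabs mapping) and flushes only its own cache lines with clflush, while the rev 2343 side falls back to a full wbinvd, which writes back the entire cache hierarchy. A minimal sketch of the per-line variant, assuming x86 and GCC inline asm; the clflush_range() name is illustrative, and x86_clflush_size is the value detected in main.c below.

#include <stddef.h>

/* Flush only the cache lines covering [vaddr, vaddr + size), as the
 * rev 2344 path does, instead of invalidating the whole cache. */
static void clflush_range(volatile void *vaddr, size_t size, size_t line_size)
{
    volatile char *p = vaddr;
    size_t i;

    asm volatile("mfence");                        /* order earlier stores */
    for (i = 0; i < size; i += line_size)
        asm volatile("clflush %0" : "+m" (p[i]));  /* evict one cache line */
    asm volatile("mfence");                        /* complete the flushes */
}
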
 
/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
1506,29 → 1239,10
return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
 
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
 
if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
 
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
* the GATT itself.
*/
wmb();
 
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
}
 
/** Flushes the CPU write domain for the object if it's dirty. */
static void
1544,6 → 1258,9
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
// trace_i915_gem_object_change_domain(obj,
// obj->base.read_domains,
// old_write_domain);
}
 
/**
1646,6 → 1363,9
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
}
 
obj->cache_level = cache_level;
1713,110 → 1433,33
BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
// trace_i915_gem_object_change_domain(obj,
// old_read_domains,
// old_write_domain);
 
return 0;
}
 
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
int ret;
 
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}
 
/* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 
return i915_gem_object_wait_rendering(obj);
}
 
/**
* Moves a single object to the CPU read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
 
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
 
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
 
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
 
i915_gem_object_flush_gtt_write_domain(obj);
 
/* If we have a partially-valid cache of the object in the CPU,
* finish invalidating it and free the per-page flags.
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
 
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
 
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
 
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
 
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
 
return 0;
}
 
/**
* Moves the object from a partially CPU read to a full one.
*
* Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
if (!obj->page_cpu_valid)
return;
 
/* If we're partially in the CPU read domain, finish moving it in.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
}
 
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
kfree(obj->page_cpu_valid);
obj->page_cpu_valid = NULL;
}
 
 
 
1841,6 → 1484,7
 
 
 
 
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
1851,6 → 1495,7
int ret;
 
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
// WARN_ON(i915_verify_lists(dev));
 
#if 0
if (obj->gtt_space != NULL) {
1884,25 → 1529,12
}
obj->pin_mappable |= map_and_fenceable;
 
// WARN_ON(i915_verify_lists(dev));
return 0;
}
 
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
 
if (--obj->pin_count == 0) {
if (!obj->active)
list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
obj->pin_mappable = false;
}
}
 
 
 
1987,56 → 1619,18
return obj;
}
 
int i915_gem_init_object(struct drm_gem_object *obj)
{
BUG();
 
return 0;
}
 
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
ENTER();
 
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
 
 
// if (obj->base.map_list.map)
// drm_gem_free_mmap_offset(&obj->base);
 
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
LEAVE();
}
 
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
 
ENTER();
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
 
// if (obj->phys_obj)
// i915_gem_detach_phys_object(dev, obj);
 
i915_gem_free_object_tail(obj);
LEAVE();
}
 
 
 
2048,7 → 1642,6
 
 
 
 
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
2191,6 → 1784,9
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
// INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
// i915_gem_retire_work_handler);
// init_completion(&dev_priv->error_completion);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
2215,6 → 1811,7
}
 
i915_gem_detect_bit_6_swizzle(dev);
// init_waitqueue_head(&dev_priv->pending_flip_queue);
 
dev_priv->mm.interruptible = true;
 
/drivers/video/drm/i915/intel_display.c
2006,7 → 2006,7
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
// i915_gem_object_unpin(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
2223,7 → 2223,7
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
// i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
LEAVE();
3310,7 → 3310,7
 
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
// i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
6299,7 → 6299,7
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd mode_cmd;
 
// obj = i915_gem_alloc_object(dev,
// intel_framebuffer_size_for_mode(mode, bpp));
6658,6 → 6658,8
LEAVE();
 
/* Schedule downclock */
// mod_timer(&intel_crtc->idle_timer, jiffies +
// msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
 
 
6890,6 → 6892,8
 
intel_crtc->busy = false;
 
// setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
// (unsigned long)intel_crtc);
}
 
 
/drivers/video/drm/i915/intel_fb.c
144,7 → 144,7
 
obj->gtt_space = &lfb_vm_node;
obj->gtt_offset = 0;
obj->pin_count = 2;
obj->pin_count = 1;
}
/***********************************************************************/
 
200,9 → 200,9
return 0;
 
out_unpin:
i915_gem_object_unpin(obj);
// i915_gem_object_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
/drivers/video/drm/i915/intel_ringbuffer.c
364,9 → 364,9
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
// i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
err:
kfree(pc);
return ret;
383,8 → 383,8
 
obj = pc->obj;
// kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
// i915_gem_object_unpin(obj);
// drm_gem_object_unreference(&obj->base);
 
kfree(pc);
ring->private = NULL;
948,8 → 948,8
return;
 
kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
// i915_gem_object_unpin(obj);
// drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
 
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
992,9 → 992,9
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
// i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
err:
return ret;
}
1065,11 → 1065,12
return 0;
 
err_unmap:
// drm_core_ioremapfree(&ring->map, dev);
FreeKernelSpace(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
// i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
ring->obj = NULL;
err_hws:
// cleanup_status_page(ring);
1095,8 → 1096,8
 
// drm_core_ioremapfree(&ring->map, ring->dev);
 
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
// i915_gem_object_unpin(ring->obj);
// drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
 
if (ring->cleanup)
1155,7 → 1156,7
};
}
 
 
// trace_i915_ring_wait_begin(ring);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
1173,6 → 1174,7
return -EAGAIN;
};
} while (!time_after(jiffies, end));
// trace_i915_ring_wait_end(ring);
LEAVE();
 
return -EBUSY;
1409,26 → 1411,21
 
ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
return ret;
}
 
ptr = MapIoMem(obj->pages[0], 4096, PG_SW);
obj->mapped = ptr;
 
ptr = ioremap(obj->pages[0], 4096);
*ptr++ = MI_BATCH_BUFFER_END;
*ptr++ = MI_NOOP;
// iounmap(obj->pages[0]);
 
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
FreeKernelSpace(ptr);
obj->mapped = NULL;
// i915_gem_object_unpin(obj);
// drm_gem_object_unreference(&obj->base);
return ret;
}
FreeKernelSpace(ptr);
obj->mapped = NULL;
 
ring->private = obj;
}
/drivers/video/drm/i915/intel_sprite.c
501,7 → 501,7
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
i915_gem_object_unpin(old_obj);
// i915_gem_object_unpin(old_obj);
}
 
out_unlock:
528,7 → 528,7
goto out;
 
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin(intel_plane->obj);
// i915_gem_object_unpin(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
/drivers/video/drm/i915/kms_display.c
428,7 → 428,7
 
ret = i915_gem_object_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
// drm_gem_object_unreference(&obj->base);
return ret;
}
 
440,8 → 440,8
 
if (unlikely(bits == NULL))
{
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
// i915_gem_object_unpin(obj);
// drm_gem_object_unreference(&obj->base);
return -ENOMEM;
};
cursor->cobj = obj;
807,7 → 807,7
if( n & 1)
b[n++] = MI_NOOP;
 
i915_gem_object_set_to_gtt_domain(bitmap->obj, false);
// i915_gem_object_set_to_gtt_domain(obj, false);
 
if (HAS_BLT(main_device))
ring = &dev_priv->ring[BCS];
816,12 → 816,10
 
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
 
int ret;
intel_ring_begin(ring, 4);
// if (ret)
// return ret;
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_FLUSH_DW);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
/drivers/video/drm/i915/main.c
14,8 → 14,6
 
#include "bitmap.h"
 
void cpu_detect();
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
int init_agp(void);
26,8 → 24,6
 
static char log[256];
 
int x86_clflush_size;
 
int i915_modeset = 1;
 
u32_t drvEntry(int action, char *cmdline)
75,18 → 71,13
return err;
};
 
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
#define API_VERSION 0x01000100
 
#define API_VERSION (COMPATIBLE_API << 16) | CURRENT_API
#define DISPLAY_VERSION CURRENT_API
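
For reference, the rev 2344 scheme packs the compatible API in the high 16 bits and the current API in the low 16 bits, so API_VERSION evaluates to (0x0100 << 16) | 0x0200 = 0x01000200, while SRV_GETVERSION reports DISPLAY_VERSION (0x0200); rev 2343 instead keeps the single hard-coded word 0x01000100 and returns it directly. A compile-time illustration (the _Static_assert needs C11, an assumption about the build):

#define COMPATIBLE_API 0x0100   /* 1.00 */
#define CURRENT_API    0x0200   /* 2.00 */

_Static_assert(((COMPATIBLE_API << 16) | CURRENT_API) == 0x01000200,
               "compatible API in the high word, current API in the low word");
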
 
 
#define SRV_GETVERSION 0
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
 
#define SRV_CREATE_SURFACE 10
#define SRV_CREATE_BITMAP 10
 
#define SRV_BLIT_VIDEO 20
 
111,7 → 102,7
{
case SRV_GETVERSION:
check_output(4);
*outp = DISPLAY_VERSION;
*outp = API_VERSION;
retval = 0;
break;
 
132,9 → 123,9
retval = set_user_mode((videomode_t*)inp);
break;
 
case SRV_CREATE_SURFACE:
// check_input(8);
retval = create_surface((struct io_call_10*)inp);
case SRV_CREATE_BITMAP:
check_input(5);
retval = create_bitmap((struct ubitmap*)inp);
break;
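
A sketch of how a client of the service reaches this case: the request travels through display_handler() as an ioctl_t whose input buffer is the struct ubitmap declared in bitmap.h. The ioctl_t field names used below (io_code, input, inp_size, output, out_size) are assumptions for illustration, since the structure itself is not shown in this diff.

/* Illustrative only: ioctl_t field names are assumed, not taken from this diff. */
static int request_bitmap(u32 width, u32 height, struct ubitmap *bm)
{
    ioctl_t io;

    bm->width  = width;
    bm->height = height;

    io.io_code  = SRV_CREATE_BITMAP;    /* service code 10 in both revisions */
    io.input    = bm;
    io.inp_size = 5;                    /* to satisfy the check_input(5) guard above */
    io.output   = NULL;
    io.out_size = 0;

    return display_handler(&io);
}
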
 
 
211,37 → 202,3
};
};
 
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile(
"cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "" (*eax), "2" (*ecx));
}
 
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
__cpuid(eax, ebx, ecx, edx);
}
 
void cpu_detect()
{
u32 junk, tfms, cap0, misc;
 
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
if (cap0 & (1<<19))
{
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
}
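
The cpu_detect() path above (present in rev 2344, dropped in 2343) reads the CLFLUSH line size from CPUID leaf 1: EDX bit 19 advertises the instruction, and EBX bits 15:8 report the line size in 8-byte units, so the common value 8 yields x86_clflush_size = 64, the stride used by i915_gem_clflush_object(). The same query with GCC's <cpuid.h> helper, a sketch assuming that header is available to the driver build:

#include <cpuid.h>   /* GCC/Clang builtin-cpuid wrapper; assumed available */

static int detect_clflush_size(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    if (!(edx & (1u << 19)))            /* CPUID.1:EDX.CLFSH */
        return 0;
    return ((ebx >> 8) & 0xff) * 8;     /* reported in 8-byte chunks */
}
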
 
/drivers/video/drm/i915/Gtt/intel-gtt.c
600,7 → 600,6
}
readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
/drivers/video/drm/i915/i915_gem_gtt.c
53,29 → 53,8
}
}
 
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
#if 0
 
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
}
 
return ret;
}
 
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
if (unlikely(dev_priv->mm.gtt->do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
 
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
144,23 → 123,15
agp_type);
}
 
#endif
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
 
interruptible = do_idling(dev_priv);
 
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
 
if (obj->sg_list) {
// intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}
}
 
undo_idling(dev_priv, interruptible);
}
#endif