Subversion Repositories Kolibri OS

Compare Revisions

Rev 3263 → Rev 3266

/drivers/video/Intel-2D/kgem-sna.c
3776,9 → 3776,18
 
assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
 
// if( bo != NULL && bo->handle == -1)
// return 0;
if( bo != NULL && bo->handle == -2)
{
if (bo->exec == NULL)
kgem_add_bo(kgem, bo);
if (read_write_domain & 0x7fff && !bo->dirty) {
assert(!bo->snoop || kgem->can_blt_cpu);
__kgem_bo_mark_dirty(bo);
}
return 0;
};
index = kgem->nreloc++;
assert(index < ARRAY_SIZE(kgem->reloc));
kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
/drivers/video/Intel-2D/sna.c
32,6 → 32,14
return retval;
};
 
static inline void get_proc_info(char *info)
{
__asm__ __volatile__(
"int $0x40"
:
:"a"(9), "b"(info), "c"(-1));
}
 
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);
 
336,6 → 344,14
struct _Pixmap src, dst;
struct kgem_bo *src_bo;
 
char proc_info[1024];
int winx, winy;
 
get_proc_info(proc_info);
 
winx = *(uint32_t*)(proc_info+34);
winy = *(uint32_t*)(proc_info+38);
memset(&src, 0, sizeof(src));
memset(&dst, 0, sizeof(dst));
 
355,7 → 371,7
&src, src_bo,
&dst, sna_fb.fb_bo, &copy) )
{
copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y);
copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
copy.done(sna_device, &copy);
}
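Note: get_proc_info() issues KolibriOS system call 9 (thread information) with ecx = -1, i.e. for the calling thread, filling a 1024-byte buffer. Per the OS documentation, offsets +34 and +38 of that buffer hold the window's x and y origin on screen, which is why the blit above adds them to dst_x/dst_y so the copy lands inside the driver's window. A small sketch of that pattern (get_window_origin is a hypothetical helper, not part of this changeset):

#include <stdint.h>

/* Hedged sketch: read the calling thread's window origin via KolibriOS
 * syscall 9. Offsets +34/+38 follow the documented thread-info layout;
 * this helper itself does not appear in the changeset. */
static inline void get_window_origin(int *x, int *y)
{
    char info[1024];

    __asm__ __volatile__("int $0x40"
                         :
                         : "a"(9), "b"(info), "c"(-1)   /* -1 = current thread */
                         : "memory");

    *x = *(uint32_t *)(info + 34);   /* window x origin on screen */
    *y = *(uint32_t *)(info + 38);   /* window y origin on screen */
}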
 
390,7 → 406,21
err_1:
return -1;
};
 
void sna_lock_bitmap(bitmap_t *bitmap)
{
struct kgem_bo *bo;
bo = (struct kgem_bo *)bitmap->handle;
kgem_bo_sync__cpu(&sna_device->kgem, bo);
 
};
 
 
 
/*
 
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
/drivers/video/drm/i915/i915.lds
33,6 → 33,7
*(.debug$F)
*(.drectve)
*(.edata)
*(.eh_frame)
}
 
.idata ALIGN(__section_alignment__):
/drivers/video/drm/i915/i915_gem.c
48,6 → 48,8
#define rmb() asm volatile ("lfence")
#define wmb() asm volatile ("sfence")
 
struct drm_i915_gem_object *get_fb_obj();
 
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset);
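Note: the new get_fb_obj() declaration pairs with the exec[i].handle == -2 special case in i915_gem_execbuffer.c below: the KolibriOS framebuffer has no GEM handle of its own, so the driver hands out its object directly. A purely hypothetical sketch of such a helper, assuming a driver-global framebuffer object (neither the name main_fb_obj nor the body below appear in this changeset):

/* Hypothetical sketch only: a driver-global pointer to the GEM object that
 * backs the KolibriOS framebuffer, returned for user handle -2. */
static struct drm_i915_gem_object *main_fb_obj;

struct drm_i915_gem_object *get_fb_obj(void)
{
    return main_fb_obj;
}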
1051,11 → 1053,10
wait_forever = false;
}
 
// timeout_jiffies = timespec_to_jiffies(&wait_time);
timeout_jiffies = timespec_to_jiffies(&wait_time);
 
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
#if 0
 
/* Record current time in case interrupted by signal, or wedged * */
getrawmonotonic(&before);
1064,6 → 1065,11
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
EXIT_COND,
timeout_jiffies);
else
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
 
1089,24 → 1095,13
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
// if (timeout)
// set_normalized_timespec(timeout, 0, 0);
if (timeout)
set_normalized_timespec(timeout, 0, 0);
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
}
 
#endif
 
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
wait_event(ring->irq_queue, EXIT_COND);
#undef EXIT_COND
ring->irq_put(ring);
 
return 0;
}
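Note: the switch on end earlier in this hunk follows the standard Linux return convention for wait_event_interruptible_timeout()/wait_event_timeout(). A minimal sketch of that contract, assuming the mainline jiffies-based API:

/* Return-value convention the error handling above relies on (sketch):
 *   end > 0             condition became true; value = remaining jiffies
 *   end == 0            timed out with the condition still false
 *   end == -ERESTARTSYS interrupted by a signal (interruptible variant only) */
long end = wait_event_interruptible_timeout(ring->irq_queue,
                                            EXIT_COND,
                                            timeout_jiffies);
if (end < 0)
    return (int)end;    /* signal */
if (end == 0)
    return -ETIME;      /* timeout */
return 0;               /* seqno passed or GPU wedged */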
 
/**
1917,8 → 1912,6
{
uint32_t seqno;
 
ENTER();
 
if (list_empty(&ring->request_list))
return;
 
1972,7 → 1965,6
}
 
WARN_ON(i915_verify_lists(ring->dev));
LEAVE();
}
 
void
1995,8 → 1987,6
bool idle;
int i;
 
ENTER();
 
dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work);
dev = dev_priv->dev;
2026,8 → 2016,6
intel_mark_idle(dev);
 
mutex_unlock(&dev->struct_mutex);
 
LEAVE();
}
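Note: the ENTER()/LEAVE() calls stripped from these retire paths (and the FAIL() calls added in i915_gem_execbuffer.c below) are the port's function-tracing macros. Their definitions are not part of this changeset; a plausible shape, stated purely as an assumption, is:

/* Assumed definitions, shown only to clarify the tracing calls being
 * removed here and added below (not taken from this changeset): */
#define ENTER()  printf("enter %s\n", __FUNCTION__)
#define LEAVE()  printf("leave %s\n", __FUNCTION__)
#define FAIL()   printf("fail %s\n",  __FUNCTION__)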
 
/**
/drivers/video/drm/i915/i915_gem_execbuffer.c
267,7 → 267,7
struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret;
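Note: shrinking stack_reloc from N_RELOC(512) to N_RELOC(64) only reduces the on-stack scratch buffer; the relocation loop copies user relocations in chunks bounded by this array, so behaviour is unchanged apart from more iterations. A sketch of that chunked-copy pattern, following the mainline i915 structure (details assumed, not shown in this hunk):

/* Sketch (assumed, mirroring mainline i915): process relocations in
 * ARRAY_SIZE(stack_reloc)-sized chunks so the stack buffer can stay small. */
remain = entry->relocation_count;
while (remain) {
    struct drm_i915_gem_relocation_entry *r = stack_reloc;
    int count = remain;

    if (count > ARRAY_SIZE(stack_reloc))
        count = ARRAY_SIZE(stack_reloc);
    remain -= count;

    if (copy_from_user(r, user_relocs, count * sizeof(*r)))
        return -EFAULT;

    /* ... relocate each entry in r[0..count-1] against its target BO ... */

    user_relocs += count;
}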
367,6 → 367,8
bool need_fence, need_mappable;
int ret;
 
// ENTER();
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
375,7 → 377,10
 
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
{
FAIL();
return ret;
};
 
entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
383,7 → 388,10
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
{
FAIL();
return ret;
};
 
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
401,6 → 409,8
}
 
entry->offset = obj->gtt_offset;
// LEAVE();
 
return 0;
}
 
433,6 → 443,8
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
 
// ENTER();
 
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
514,7 → 526,10
i915_gem_execbuffer_unreserve_object(obj);
 
if (ret != -ENOSPC || retry++)
{
// LEAVE();
return ret;
};
 
// ret = i915_gem_evict_everything(ring->dev);
if (ret)
554,8 → 569,8
reloc_offset = malloc(count * sizeof(*reloc_offset));
reloc = malloc(total * sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
free(reloc);
free(reloc_offset);
kfree(reloc);
kfree(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
609,7 → 624,10
for (i = 0; i < count; i++) {
 
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
645,8 → 663,8
*/
 
err:
free(reloc);
free(reloc_offset);
kfree(reloc);
kfree(reloc_offset);
return ret;
}
 
843,12 → 861,16
 
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
FAIL();
return -EINVAL;
}
 
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
{
FAIL();
return ret;
};
 
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
870,6 → 892,7
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
FAIL();
return -EPERM;
}
break;
978,10 → 1001,16
struct drm_i915_gem_object *obj;
 
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
 
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
 
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
1094,11 → 1123,11
goto err;
}
 
// i915_gem_execbuffer_move_to_active(&objects, ring);
// i915_gem_execbuffer_retire_commands(dev, file, ring);
ring->gpu_caches_dirty = true;
intel_ring_flush_all_caches(ring);
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
 
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
1115,10 → 1144,11
 
pre_mutex_err:
kfree(cliprects);
 
 
return ret;
}
 
 
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
1127,18 → 1157,24
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
 
// ENTER();
 
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
FAIL();
return -EINVAL;
}
 
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 0);
if (exec2_list == NULL)
exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count);
 
// if (exec2_list == NULL)
// exec2_list = drm_malloc_ab(sizeof(*exec2_list),
// args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
FAIL();
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
1148,7 → 1184,8
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
free(exec2_list);
kfree(exec2_list);
FAIL();
return -EFAULT;
}
 
1166,6 → 1203,9
}
}
 
free(exec2_list);
kfree(exec2_list);
 
// LEAVE();
 
return ret;
}
/drivers/video/drm/i915/i915_irq.c
357,7 → 357,7
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
printf("%s\n", __FUNCTION__);
// printf("%s\n", __FUNCTION__);
 
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
413,8 → 413,6
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
while (true) {
566,8 → 564,6
irqreturn_t ret = IRQ_NONE;
int i;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
643,8 → 639,6
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
2488,7 → 2482,7
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
printf("i915 irq\n");
// printf("i915 irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
/drivers/video/drm/i915/intel_display.c
7004,8 → 7004,6
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
 
ENTER();
 
if (!i915_powersave)
return;
 
/drivers/video/drm/i915/kms_display.c
1341,8 → 1341,8
{
unsigned long irqflags;
 
dbgprintf("wq: %x head %x, next %x\n",
cwq, &cwq->worklist, cwq->worklist.next);
// dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next);
 
spin_lock_irqsave(&cwq->lock, irqflags);
 
1352,8 → 1352,8
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
dbgprintf("head %x, next %x\n",
&cwq->worklist, cwq->worklist.next);
// dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next);
 
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
1370,8 → 1370,8
{
unsigned long flags;
 
dbgprintf("wq: %x, work: %x\n",
wq, work );
// dbgprintf("wq: %x, work: %x\n",
// wq, work );
 
if(!list_empty(&work->entry))
return 0;
1384,8 → 1384,8
list_add_tail(&work->entry, &wq->worklist);
 
spin_unlock_irqrestore(&wq->lock, flags);
dbgprintf("wq: %x head %x, next %x\n",
wq, &wq->worklist, wq->worklist.next);
// dbgprintf("wq: %x head %x, next %x\n",
// wq, &wq->worklist, wq->worklist.next);
 
return 1;
};
1395,8 → 1395,8
struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data;
 
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
 
__queue_work(wq, &dwork->work);
}
1417,8 → 1417,8
{
u32 flags;
 
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
 
if (delay == 0)
return __queue_work(wq, &dwork->work);