Subversion Repositories Kolibri OS

Compare Revisions

Rev 2352 → Rev 2360

/drivers/video/drm/i915/bitmap.c
325,8 → 325,6
int ret = 0;
ENTER();
 
dbgprintf("caps ptr %x\n", caps);
 
switch(caps->idx)
{
case 0:
/drivers/video/drm/i915/i915_dma.c
508,16 → 508,15
* so there is no point in running more than one instance of the
* workqueue at any time: max_active = 1 and NON_REENTRANT.
*/
dev_priv->wq = alloc_workqueue("i915",
WQ_UNBOUND | WQ_NON_REENTRANT,
1);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
goto out_mtrrfree;
}
 
// dev_priv->wq = alloc_workqueue("i915",
// WQ_UNBOUND | WQ_NON_REENTRANT,
// 1);
// if (dev_priv->wq == NULL) {
// DRM_ERROR("Failed to create our workqueue.\n");
// ret = -ENOMEM;
// goto out_mtrrfree;
// }
 
/* enable GEM by default */
dev_priv->has_gem = 1;
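
The comment in this hunk explains the configuration: a single-threaded, non-reentrant queue (WQ_UNBOUND | WQ_NON_REENTRANT, max_active = 1). Under the stock Linux API the same guarantee is usually spelled with alloc_ordered_workqueue; the sketch below is illustrative only and not part of this revision. The KolibriOS emulation added in kms_display.c further down ignores both the flags and max_active.

/* Sketch, not from this diff: equivalent ordered queue on mainline Linux. */
struct workqueue_struct *wq = alloc_ordered_workqueue("i915", 0);
if (wq == NULL)
    return -ENOMEM;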
 
/drivers/video/drm/i915/i915_drv.h
40,6 → 40,7
 
#include <linux/spinlock.h>
 
 
/* General customization:
*/
 
329,7 → 330,7
u32 pch_irq_mask;
 
u32 hotplug_supported_mask;
// struct work_struct hotplug_work;
struct work_struct hotplug_work;
 
int tex_lru_log_granularity;
int allow_batchbuffer;
398,9 → 399,9
 
spinlock_t error_lock;
// struct drm_i915_error_state *first_error;
// struct work_struct error_work;
struct work_struct error_work;
// struct completion error_completion;
// struct workqueue_struct *wq;
struct workqueue_struct *wq;
 
/* Display functions */
struct drm_i915_display_funcs display;
642,7 → 643,7
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
// struct delayed_work retire_work;
struct delayed_work retire_work;
 
/**
* Are we in a non-interruptible section of code like
699,7 → 700,7
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
// struct work_struct idle_work;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
710,7 → 711,7
 
bool mchbar_need_disable;
 
// struct work_struct rps_work;
struct work_struct rps_work;
spinlock_t rps_lock;
u32 pm_iir;
 
1416,4 → 1417,18
int freq;
}videomode_t;
 
 
static inline int mutex_trylock(struct mutex *lock)
{
    if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
        return 1;
    return 0;
}
 
 
 
 
 
 
 
#endif
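
The mutex_trylock helper added at the end of this header backs the "come back later if the device is busy" pattern used by the retire handler added in i915_gem.c below. A minimal usage sketch, assuming the port's struct mutex with an atomic count field; example_poll is an illustrative name, not from this revision:

/* Sketch: attempt the lock without blocking; on contention, re-arm the
 * delayed work instead of sleeping (mirrors i915_gem_retire_work_handler). */
static void example_poll(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!mutex_trylock(&dev->struct_mutex)) {
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        return;                     /* busy - try again later */
    }
    /* ... work that requires struct_mutex ... */
    mutex_unlock(&dev->struct_mutex);
}
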
/drivers/video/drm/i915/i915_gem.c
960,16 → 960,16
 
ring->outstanding_lazy_request = false;
 
// if (!dev_priv->mm.suspended) {
// if (i915_enable_hangcheck) {
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
// mod_timer(&dev_priv->hangcheck_timer,
// jiffies +
// msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
// }
// if (was_empty)
// queue_delayed_work(dev_priv->wq,
// &dev_priv->mm.retire_work, HZ);
// }
}
if (was_empty)
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
return 0;
}
 
984,6 → 984,16
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
* This function clears the request list as sequence numbers are passed.
*/
1072,15 → 1082,58
i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
 
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
    drm_i915_private_t *dev_priv;
    struct drm_device *dev;
    bool idle;
    int i;

//    ENTER();

    dev_priv = container_of(work, drm_i915_private_t,
                            mm.retire_work.work);
    dev = dev_priv->dev;

    /* Come back later if the device is busy... */
    if (!mutex_trylock(&dev->struct_mutex)) {
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
//        LEAVE();
        return;
    }

    i915_gem_retire_requests(dev);

    /* Send a periodic flush down the ring so we don't hold onto GEM
     * objects indefinitely.
     */
    idle = true;
    for (i = 0; i < I915_NUM_RINGS; i++) {
        struct intel_ring_buffer *ring = &dev_priv->ring[i];

        if (!list_empty(&ring->gpu_write_list)) {
            struct drm_i915_gem_request *request;
            int ret;

            ret = i915_gem_flush_ring(ring,
                                      0, I915_GEM_GPU_DOMAINS);
            request = kzalloc(sizeof(*request), GFP_KERNEL);
            if (ret || request == NULL ||
                i915_add_request(ring, NULL, request))
                kfree(request);
        }

        idle &= list_empty(&ring->request_list);
    }

    if (!dev_priv->mm.suspended && !idle)
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);

    mutex_unlock(&dev->struct_mutex);
//    LEAVE();
}
 
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
1326,7 → 1379,7
return ret;
}
 
return 0; //i915_wait_request(ring, i915_gem_next_request_seqno(ring));
return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
 
int
1923,9 → 1976,9
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
// ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
// if (ret)
// return ret;
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
if (ret)
return ret;
 
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
2123,6 → 2176,7
}
obj->pin_mappable |= map_and_fenceable;
 
WARN_ON(i915_verify_lists(dev));
return 0;
}
 
2132,6 → 2186,7
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
 
2141,6 → 2196,7
&dev_priv->mm.inactive_list);
obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}
 
 
2424,6 → 2480,8
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
2457,4 → 2515,3
}
 
 
 
/drivers/video/drm/i915/i915_gem_gtt.c
143,7 → 143,6
agp_type);
}
 
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
/drivers/video/drm/i915/intel_bios.c
656,7 → 656,7
size_t size;
int i;
 
bios = (void*)pci_map_rom(pdev, &size);
bios = pci_map_rom(pdev, &size);
if (!bios)
return -1;
 
/drivers/video/drm/i915/intel_display.c
1859,7 → 1859,7
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 5)
if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
2171,18 → 2171,12
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ENTER();
 
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret)
{
LEAVE();
return ret;
};
 
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
LEAVE();
 
return 0;
}
2235,34 → 2229,9
LEAVE();
return 0;
 
#if 0
if (!dev->primary->master)
{
LEAVE();
return 0;
};
 
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
{
LEAVE();
return 0;
};
 
if (intel_crtc->pipe) {
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
} else {
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
}
LEAVE();
 
return 0;
#endif
 
}
 
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
2835,8 → 2804,8
 
obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
// wait_event(dev_priv->pending_flip_queue,
// atomic_read(&obj->pending_flip) == 0);
wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj->pending_flip) == 0);
}
 
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
5292,6 → 5261,7
}
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
5302,7 → 5272,7
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
pipeconf |= PIPECONF_PROGRESSIVE;
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
5889,6 → 5859,7
}
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
5899,7 → 5870,7
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
pipeconf |= PIPECONF_PROGRESSIVE;
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
7043,10 → 7014,6
 
 
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
 
 
 
7100,6 → 7067,10
}
 
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
 
 
 
7109,7 → 7080,6
 
 
 
 
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/drivers/video/drm/i915/intel_drv.h
276,7 → 276,7
}
 
struct intel_unpin_work {
// struct work_struct work;
struct work_struct work;
struct drm_device *dev;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
286,7 → 286,7
};
 
struct intel_fbc_work {
// struct delayed_work work;
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
/drivers/video/drm/i915/kms_display.c
1007,3 → 1007,120
 
};
 
 
void __stdcall run_workqueue(struct workqueue_struct *cwq)
{
    unsigned long irqflags;

//    dbgprintf("wq: %x head %x, next %x\n",
//               cwq, &cwq->worklist, cwq->worklist.next);

    spin_lock_irqsave(&cwq->lock, irqflags);

    while (!list_empty(&cwq->worklist))
    {
        struct work_struct *work = list_entry(cwq->worklist.next,
                                              struct work_struct, entry);
        work_func_t f = work->func;
        list_del_init(cwq->worklist.next);
//        dbgprintf("head %x, next %x\n",
//                  &cwq->worklist, cwq->worklist.next);

        spin_unlock_irqrestore(&cwq->lock, irqflags);
        f(work);
        spin_lock_irqsave(&cwq->lock, irqflags);
    }

    spin_unlock_irqrestore(&cwq->lock, irqflags);
}


static inline
int __queue_work(struct workqueue_struct *wq,
                 struct work_struct *work)
{
    unsigned long flags;
//    ENTER();

//    dbgprintf("wq: %x, work: %x\n",
//               wq, work );

    if(!list_empty(&work->entry))
        return 0;

    spin_lock_irqsave(&wq->lock, flags);

    if(list_empty(&wq->worklist))
        TimerHs(0,0, run_workqueue, wq);

    list_add_tail(&work->entry, &wq->worklist);

    spin_unlock_irqrestore(&wq->lock, flags);
//    dbgprintf("wq: %x head %x, next %x\n",
//               wq, &wq->worklist, wq->worklist.next);

//    LEAVE();
    return 1;
};

void __stdcall delayed_work_timer_fn(unsigned long __data)
{
//    ENTER();
    struct delayed_work *dwork = (struct delayed_work *)__data;
    struct workqueue_struct *wq = dwork->work.data;

//    dbgprintf("wq: %x, work: %x\n",
//               wq, &dwork->work );

    __queue_work(wq, &dwork->work);
//    LEAVE();
}
 
 
int queue_delayed_work_on(struct workqueue_struct *wq,
                          struct delayed_work *dwork, unsigned long delay)
{
    struct work_struct *work = &dwork->work;

    work->data = wq;
    TimerHs(0,0, delayed_work_timer_fn, dwork);
    return 1;
}

int queue_delayed_work(struct workqueue_struct *wq,
                       struct delayed_work *dwork, unsigned long delay)
{
    u32 flags;
//    ENTER();

//    dbgprintf("wq: %x, work: %x\n",
//               wq, &dwork->work );

    if (delay == 0)
        return __queue_work(wq, &dwork->work);

    return queue_delayed_work_on(wq, dwork, delay);
}


struct workqueue_struct *alloc_workqueue(const char *fmt,
                                         unsigned int flags,
                                         int max_active)
{
    struct workqueue_struct *wq;

    wq = kzalloc(sizeof(*wq),0);
    if (!wq)
        goto err;

    INIT_LIST_HEAD(&wq->worklist);

    return wq;
err:
    return NULL;
}
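
Taken together with the hunks in i915_dma.c and i915_gem.c above, the control flow through this emulation is: alloc_workqueue creates the queue, INIT_DELAYED_WORK binds the handler, and queue_delayed_work either hands the work straight to __queue_work (delay == 0) or goes through queue_delayed_work_on, which stores the queue pointer in work->data and arms TimerHs with delayed_work_timer_fn; __queue_work in turn schedules run_workqueue through TimerHs when the worklist was empty, and run_workqueue drains the list and calls each work function. A hedged usage sketch follows; my_work, my_handler and example_start are illustrative names, not part of this revision:

/* Illustrative sketch of driving the emulation above. */
static struct delayed_work my_work;

static void my_handler(struct work_struct *work)
{
    struct delayed_work *dwork = container_of(work, struct delayed_work, work);
    /* ... periodic housekeeping; re-queue via queue_delayed_work if needed ... */
}

static void example_start(void)
{
    struct workqueue_struct *wq;

    wq = alloc_workqueue("example", 0, 1);  /* flags/max_active are ignored here */
    if (wq == NULL)
        return;

    INIT_DELAYED_WORK(&my_work, my_handler);
    queue_delayed_work(wq, &my_work, HZ);   /* non-zero delay goes via TimerHs */
}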
 
 
 
 
 
/drivers/video/drm/i915/render/exa_wm_mask_affine.g6b
0,0 → 1,4
{ 0x0060005a, 0x210077be, 0x00000100, 0x008d0040 },
{ 0x0060005a, 0x212077be, 0x00000100, 0x008d0080 },
{ 0x0060005a, 0x214077be, 0x00000110, 0x008d0040 },
{ 0x0060005a, 0x216077be, 0x00000110, 0x008d0080 },
/drivers/video/drm/i915/render/exa_wm_mask_sample_a.g6b
0,0 → 1,3
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
{ 0x02800031, 0x23801cc9, 0x000000e0, 0x0a2a0102 },
/drivers/video/drm/i915/render/exa_wm_noca.g6b
0,0 → 1,4
{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d0380 },
{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0380 },
{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0380 },
{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 },
/drivers/video/drm/i915/sna/gen6_render.c
72,9 → 72,12
#include "exa_wm_write.g6b"
};
 
static const uint32_t ps_kernel_nomask_projective[][4] = {
#include "exa_wm_src_projective.g6b"
static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_mask_affine.g6b"
#include "exa_wm_mask_sample_a.g6b"
#include "exa_wm_noca.g6b"
#include "exa_wm_write.g6b"
};
 
88,8 → 91,7
Bool has_mask;
} wm_kernels[] = {
KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
 
KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
};
#undef KERNEL
 
659,11 → 661,6
 
OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2));
OUT_BATCH(GEN6_PIPE_CONTROL_WRITE_TIME);
// OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
// sna->render_state.gen6.general_bo,
// I915_GEM_DOMAIN_INSTRUCTION << 16 |
// I915_GEM_DOMAIN_INSTRUCTION,
// 64));
 
OUT_BATCH(sna->render_state.gen6.general_bo->gaddr+64);
 
1618,7 → 1615,6
bool dirty;
 
gen6_get_batch(sna);
// dirty = kgem_bo_is_dirty(op->dst.bo);
 
binding_table = gen6_composite_get_binding_table(sna, &offset);
 
1713,9 → 1709,6
op.src.width = src->width;
op.src.height = src->height;
 
// src_scale_x = ((float)src_w / frame->width) / (float)drw_w;
// src_scale_y = ((float)src_h / frame->height) / (float)drw_h;
 
op.src.scale[0] = 1.f/w; //src->width;
op.src.scale[1] = 1.f/h; //src->height;
op.src.filter = SAMPLER_FILTER_BILINEAR;
/drivers/video/drm/i915/sna/sna_render.h
243,7 → 243,7
 
enum {
GEN6_WM_KERNEL_NOMASK = 0,
GEN6_WM_KERNEL_NOMASK_PROJECTIVE,
GEN6_WM_KERNEL_MASK,
 
GEN6_KERNEL_COUNT
};
277,25 → 277,8
Bool needs_invariant;
};
 
enum {
GEN7_WM_KERNEL_NOMASK = 0,
GEN7_WM_KERNEL_NOMASK_PROJECTIVE,
 
GEN7_WM_KERNEL_MASK,
GEN7_WM_KERNEL_MASK_PROJECTIVE,
 
GEN7_WM_KERNEL_MASKCA,
GEN7_WM_KERNEL_MASKCA_PROJECTIVE,
 
GEN7_WM_KERNEL_MASKCA_SRCALPHA,
GEN7_WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
 
GEN7_WM_KERNEL_VIDEO_PLANAR,
GEN7_WM_KERNEL_VIDEO_PACKED,
GEN7_KERNEL_COUNT
};
 
 
struct sna_static_stream {
uint32_t size, used;
uint8_t *data;