Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 4538 → Rev 4539

/drivers/video/drm/drm_edid.c
68,6 → 68,8
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
 
struct detailed_mode_closure {
struct drm_connector *connector;
128,6 → 130,9
 
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
 
/*
3236,6 → 3241,9
 
drm_add_display_info(edid, &connector->display_info);
 
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
 
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
/drivers/video/drm/drm_gem.c
254,9 → 254,6
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&filp->table_lock);
 
/* Check if we currently have a reference on the object */
267,8 → 264,6
}
dev = obj->dev;
 
// printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj);
 
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
286,6 → 281,12
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
/**
* drm_gem_handle_create_tail - internal functions to create a handle
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
* importing an object from either an flink name or a dma-buf.
*/
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
436,9 → 437,6
{
struct drm_gem_object *obj;
 
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&filp->table_lock);
 
/* Check if we currently have a reference on the object */
539,9 → 537,6
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj) {
/drivers/video/drm/i915/i915_dma.c
1360,6 → 1360,11
 
aperture_size = dev_priv->gtt.mappable_end;
 
dev_priv->gtt.mappable = AllocKernelSpace(8192);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
}
 
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
/drivers/video/drm/i915/i915_drv.h
534,7 → 534,7
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
void *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
 
/** "Graphics Stolen Memory" holds the global PTEs */
/drivers/video/drm/i915/i915_gem.c
633,7 → 633,6
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
char *vaddr;
 
ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
if (ret)
647,14 → 646,7
if (ret)
goto out_unpin;
 
vaddr = AllocKernelSpace(4096);
if(vaddr == NULL)
{
ret = -ENOSPC;
goto out_unpin;
};
 
user_data = (char __user *) (uintptr_t) args->data_ptr;
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
 
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
672,9 → 664,9
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
 
MapPage(vaddr, dev_priv->gtt.mappable_base+page_base, PG_SW|PG_NOCACHE);
MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
 
memcpy(vaddr+page_offset, user_data, page_length);
memcpy(dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
remain -= page_length;
user_data += page_length;
681,8 → 673,6
offset += page_length;
}
 
FreeKernelSpace(vaddr);
 
out_unpin:
i915_gem_object_unpin(obj);
out:
706,7 → 696,7
if (unlikely(page_do_bit17_swizzling))
return -EINVAL;
 
vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW|PG_NOCACHE);
vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
2082,15 → 2072,24
kfree(request);
}
 
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
u32 completed_seqno;
u32 acthd;
u32 completed_seqno = ring->get_seqno(ring, false);
u32 acthd = intel_ring_get_active_head(ring);
struct drm_i915_gem_request *request;
 
acthd = intel_ring_get_active_head(ring);
completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
continue;
 
i915_set_reset_status(ring, request, acthd);
}
}
 
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
2098,9 → 2097,6
struct drm_i915_gem_request,
list);
 
if (request->seqno > completed_seqno)
i915_set_reset_status(ring, request, acthd);
 
i915_gem_free_request(request);
}
 
2142,9 → 2138,17
struct intel_ring_buffer *ring;
int i;
 
/*
* Before we free the objects from the requests, we need to inspect
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
i915_gem_reset_ring_status(dev_priv, ring);
 
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
 
i915_gem_restore_fences(dev);
}
 
/drivers/video/drm/i915/i915_gem_context.c
328,10 → 328,8
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_unlock(&dev->struct_mutex);
}
 
static struct i915_hw_context *
404,11 → 402,21
if (ret)
return ret;
 
/* Clear this page out of any CPU caches for coherent swap-in/out. Note
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
from = ring->last_context;
 
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
* XXX: We need a real interface to do this instead of trickery. */
*
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
/drivers/video/drm/i915/i915_gem_execbuffer.c
190,6 → 190,8
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
198,10 → 200,9
if (ret)
return ret;
 
vaddr = (char *)MapIoMem((addr_t)i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT), 4096, 3);
vaddr = dev_priv->gtt.mappable+4096;
MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,reloc->offset >> PAGE_SHIFT), PG_SW);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
FreeKernelSpace(vaddr);
 
return 0;
}
226,12 → 227,12
 
/* Map the page containing the relocation we're going to perform. */
reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = (void*)MapIoMem(dev_priv->gtt.mappable_base +
(reloc->offset & PAGE_MASK), 4096, 0x18|3);
MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
(reloc->offset & PAGE_MASK), PG_SW);
reloc_page = dev_priv->gtt.mappable;
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
FreeKernelSpace(reloc_page);
 
return 0;
}
343,7 → 344,7
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int remain, ret;
 
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
user_relocs = to_user_ptr(entry->relocs_ptr);
 
remain = entry->relocation_count;
while (remain) {
667,7 → 668,7
u64 invalid_offset = (u64)-1;
int j;
 
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
user_relocs = to_user_ptr(exec[i].relocs_ptr);
 
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
1260,8 → 1261,7
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
1275,7 → 1275,7
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
/drivers/video/drm/i915/intel_ddi.c
961,12 → 961,18
enum pipe pipe;
struct intel_crtc *intel_crtc;
 
dev_priv->ddi_plls.spll_refcount = 0;
dev_priv->ddi_plls.wrpll1_refcount = 0;
dev_priv->ddi_plls.wrpll2_refcount = 0;
 
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (!intel_crtc->active)
if (!intel_crtc->active) {
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
}
 
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
/drivers/video/drm/i915/intel_display.c
1432,6 → 1432,20
POSTING_READ(DPLL(pipe));
}
 
/*
 * vlv_disable_pll - shut down the DPLL for a pipe on ValleyView.
 * @dev_priv: i915 device private
 * @pipe: the pipe whose PLL is being disabled
 *
 * Writes the pipe's DPLL register with everything cleared, except that on
 * pipe B the integrated CRI clock source bit is kept set so the integrated
 * clock stays enabled. The pipe itself must already be disabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 dpll_val;

	/* The pipe must no longer be relying on this PLL */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep the integrated clock source running on pipe B; clear the rest */
	dpll_val = (pipe == PIPE_B) ? DPLL_INTEGRATED_CRI_CLK_VLV : 0;

	I915_WRITE(DPLL(pipe), dpll_val);
	POSTING_READ(DPLL(pipe));
}
 
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
u32 port_mask;
5999,7 → 6013,7
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
 
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
10515,7 → 10529,9
 
// intel_setup_overlay(dev);
 
mutex_lock(&dev->mode_config.mutex);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
}
 
void intel_modeset_cleanup(struct drm_device *dev)
10589,14 → 10605,15
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
 
pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
 
/drivers/video/drm/i915/intel_pm.c
4629,8 → 4629,6
rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
 
ENTER();
 
mutex_lock(&dev_priv->rps.hw_lock);
 
if (IS_VALLEYVIEW(dev)) {
4640,8 → 4638,6
gen6_update_ring_freq(dev);
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
LEAVE();
}
 
void intel_enable_gt_powersave(struct drm_device *dev)
/drivers/video/drm/i915/intel_ringbuffer.c
483,7 → 483,7
goto err_unref;
 
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW);
ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW|0x100);
if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
1182,7 → 1182,7
}
 
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
/drivers/video/drm/i915/kms_display.c
913,12 → 913,215
safe_sti(ifl);
 
ret = i915_gem_object_set_to_gtt_domain(to_intel_bo(obj), false);
if(ret != 0 )
}
 
err2:
mutex_unlock(&dev->struct_mutex);
err1:
drm_gem_object_unreference(obj);
 
return ret;
}
 
int i915_mask_update_ex(struct drm_device *dev, void *data,
struct drm_file *file)
{
dbgprintf("%s: i915_gem_object_set_to_gtt_domain failed\n", __FUNCTION__);
struct drm_i915_mask_update *mask = data;
struct drm_gem_object *obj;
static unsigned int mask_seqno[256];
static warn_count;
 
rect_t win;
u32 winw,winh;
u32 ml,mt,mr,mb;
u32 slot;
int ret = 0;
slot = *((u8*)CURRENT_TASK);
 
if( mask_seqno[slot] == os_display->mask_seqno)
return 0;
 
GetWindowRect(&win);
win.right+= 1;
win.bottom+= 1;
 
winw = win.right - win.left;
winh = win.bottom - win.top;
 
if(mask->dx >= winw ||
mask->dy >= winh)
return 1;
 
ml = win.left + mask->dx;
mt = win.top + mask->dy;
mr = ml + mask->width;
mb = mt + mask->height;
 
if( ml >= win.right || mt >= win.bottom ||
mr < win.left || mb < win.top )
return 1;
 
if( mr > win.right )
mr = win.right;
 
if( mb > win.bottom )
mb = win.bottom;
 
mask->width = mr - ml;
mask->height = mb - mt;
 
if( mask->width == 0 ||
mask->height== 0 )
return 1;
 
obj = drm_gem_object_lookup(dev, file, mask->handle);
if (obj == NULL)
return -ENOENT;
 
if (!obj->filp) {
drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
 
#if 1
if(warn_count < 1000)
{
printf("left %d top %d right %d bottom %d\n",
ml, mt, mr, mb);
warn_count++;
};
#endif
 
 
#if 1
 
{
u8* src_offset;
u8* dst_offset;
u32 ifl;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err1;
 
i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true);
 
src_offset = (u8*)( mt*os_display->width + ml);
src_offset+= get_display_map();
dst_offset = (u8*)mask->bo_map;
 
u32_t tmp_h = mask->height;
 
ifl = safe_cli();
{
mask_seqno[slot] = os_display->mask_seqno;
 
slot|= (slot<<8)|(slot<<16)|(slot<<24);
 
__asm__ __volatile__ (
"movd %[slot], %%xmm6 \n"
"punpckldq %%xmm6, %%xmm6 \n"
"punpcklqdq %%xmm6, %%xmm6 \n"
:: [slot] "m" (slot)
:"xmm6");
 
while( tmp_h--)
{
int tmp_w = mask->width;
 
u8* tmp_src = src_offset;
u8* tmp_dst = dst_offset;
 
src_offset+= os_display->width;
dst_offset+= mask->bo_pitch;
 
while(tmp_w >= 64)
{
__asm__ __volatile__ (
"movdqu (%0), %%xmm0 \n"
"movdqu 16(%0), %%xmm1 \n"
"movdqu 32(%0), %%xmm2 \n"
"movdqu 48(%0), %%xmm3 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm1 \n"
"pcmpeqb %%xmm6, %%xmm2 \n"
"pcmpeqb %%xmm6, %%xmm3 \n"
"movdqa %%xmm0, (%%edi) \n"
"movdqa %%xmm1, 16(%%edi) \n"
"movdqa %%xmm2, 32(%%edi) \n"
"movdqa %%xmm3, 48(%%edi) \n"
 
:: "r" (tmp_src), "D" (tmp_dst)
:"xmm0","xmm1","xmm2","xmm3");
tmp_w -= 64;
tmp_src += 64;
tmp_dst += 64;
}
 
if( tmp_w >= 32 )
{
__asm__ __volatile__ (
"movdqu (%0), %%xmm0 \n"
"movdqu 16(%0), %%xmm1 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm1 \n"
"movdqa %%xmm0, (%%edi) \n"
"movdqa %%xmm1, 16(%%edi) \n"
 
:: "r" (tmp_src), "D" (tmp_dst)
:"xmm0","xmm1");
tmp_w -= 32;
tmp_src += 32;
tmp_dst += 32;
}
 
if( tmp_w >= 16 )
{
__asm__ __volatile__ (
"movdqu (%0), %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"movdqa %%xmm0, (%%edi) \n"
:: "r" (tmp_src), "D" (tmp_dst)
:"xmm0");
tmp_w -= 16;
tmp_src += 16;
tmp_dst += 16;
}
 
if( tmp_w >= 8 )
{
__asm__ __volatile__ (
"movq (%0), %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"movq %%xmm0, (%%edi) \n"
:: "r" (tmp_src), "D" (tmp_dst)
:"xmm0");
tmp_w -= 8;
tmp_src += 8;
tmp_dst += 8;
}
if( tmp_w >= 4 )
{
__asm__ __volatile__ (
"movd (%0), %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"movd %%xmm0, (%%edi) \n"
:: "r" (tmp_src), "D" (tmp_dst)
:"xmm0");
tmp_w -= 4;
tmp_src += 4;
tmp_dst += 4;
}
while(tmp_w--)
*tmp_dst++ = (*tmp_src++ == (u8)slot) ? 0xFF:0x00;
};
};
safe_sti(ifl);
 
i915_gem_object_set_to_gtt_domain(to_intel_bo(obj), false);
}
#endif
 
err2:
mutex_unlock(&dev->struct_mutex);
err1:
937,7 → 1140,6
 
 
 
 
#define NSEC_PER_SEC 1000000000L
 
void getrawmonotonic(struct timespec *ts)
/drivers/video/drm/i915/main.c
266,8 → 266,8
 
#define SRV_FBINFO 43
#define SRV_MASK_UPDATE 44
#define SRV_MASK_UPDATE_EX 45
 
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
443,6 → 443,10
case SRV_MASK_UPDATE:
retval = i915_mask_update(main_device, inp, file);
break;
 
case SRV_MASK_UPDATE_EX:
retval = i915_mask_update_ex(main_device, inp, file);
break;
};
 
return retval;