Subversion Repositories Kolibri OS

Compare Revisions

Regard whitespace Rev 6320 → Rev 6319

/drivers/video/drm/drm_stub.c
560,45 → 560,3
DRM_DEBUG("generating hotplug event\n");
}
 
/*
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:  64bit dividend
 * @divisor:   64bit divisor
 * @remainder: out parameter, receives dividend % divisor
 *
 * Returns dividend / divisor. When the divisor does not fit in 32 bits,
 * both operands are scaled down until it does, an estimated quotient is
 * computed, and the estimate is corrected by at most one step.
 */
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 hi = divisor >> 32;
	u64 q;
	int shift;

	if (hi == 0) {
		/* Divisor fits in 32 bits: use the cheap 64/32 helper. */
		u32 r32;

		q = div_u64_rem(dividend, divisor, &r32);
		*remainder = r32;
		return q;
	}

	/*
	 * Shift both operands right so the divisor becomes a 32-bit value.
	 * The resulting quotient can overshoot by at most one, so back it
	 * off first and then add the step back if the remainder proves it
	 * was not needed.
	 */
	shift = fls(hi) + 1;
	q = div_u64(dividend >> shift, divisor >> shift);
	if (q)
		q--;

	*remainder = dividend - q * divisor;
	if (*remainder >= divisor) {
		q++;
		*remainder -= divisor;
	}

	return q;
}
 
/*
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor:  64bit divisor
 *
 * Returns dividend / divisor.
 *
 * NOTE(review): when the divisor exceeds 32 bits both operands are
 * shifted right before dividing, which discards low divisor bits —
 * the result is then an approximation, matching the original code.
 */
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 hi = divisor >> 32;
	u32 d32;

	if (!hi) {
		d32 = divisor;
	} else {
		unsigned int s = fls(hi);

		d32 = divisor >> s;
		dividend >>= s;
	}

	return div_u64(dividend, d32);
}
 
/drivers/video/drm/i915/kos_fb.c
File deleted
/drivers/video/drm/i915/intel_hotplug.c
File deleted
/drivers/video/drm/i915/Makefile
113,7 → 113,6
intel_uncore.c \
kms_display.c \
kos_cursor.c \
kos_fb.c \
utils.c \
fwblob.asm \
../hdmi.c \
/drivers/video/drm/i915/i915_dma.c
840,8 → 840,6
goto put_bridge;
}
 
set_fake_framebuffer();
 
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
 
/drivers/video/drm/i915/i915_drv.h
51,6 → 51,7
#include "intel_guc.h"
 
#include <linux/spinlock.h>
#include <linux/err.h>
 
#define ioread32(addr) readl(addr)
static inline u8 inb(u16 port)
2622,7 → 2623,6
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
 
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
/drivers/video/drm/i915/i915_irq.c
1657,11 → 1657,11
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
/*intel_check_page_flip(dev, pipe)*/;
 
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip(dev, pipe);
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2028,7 → 2028,7
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe) &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
/*intel_check_page_flip(dev, pipe)*/;
 
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2038,8 → 2038,8
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
}
 
2081,12 → 2081,12
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
/*intel_check_page_flip(dev, pipe)*/;
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
}
 
2290,7 → 2290,7
 
if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
/* intel_check_page_flip(dev, pipe)*/;
 
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2297,10 → 2297,6
else
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
 
if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
 
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
2339,14 → 2335,10
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
} else
DRM_ERROR("The master control interrupt lied (SDE)!\n");
 
}
}
 
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
2371,8 → 2363,6
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
 
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
 
/*
* Signal tasks blocked in i915_gem_wait_for_error that the pending
3785,12 → 3775,12
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
 
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
// intel_prepare_page_flip(dev, plane);
// intel_finish_page_flip(dev, pipe);
return true;
 
check_page_flip:
intel_check_page_flip(dev, pipe);
// intel_check_page_flip(dev, pipe);
return false;
}
 
3969,12 → 3959,9
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
 
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
 
check_page_flip:
intel_check_page_flip(dev, pipe);
return false;
}
 
4462,7 → 4449,7
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
// drm_irq_uninstall(dev_priv->dev);
intel_hpd_cancel_work(dev_priv);
// intel_hpd_cancel_work(dev_priv);
dev_priv->pm.irqs_enabled = false;
}
 
/drivers/video/drm/i915/intel_display.c
3945,13 → 3945,13
 
drm_crtc_vblank_put(&intel_crtc->base);
 
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
// wake_up_all(&dev_priv->pending_flip_queue);
// queue_work(dev_priv->wq, &work->work);
 
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
// trace_i915_flip_complete(intel_crtc->plane,
// work->pending_flip_obj);
}
 
#if 0
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3977,6 → 3977,7
mutex_unlock(&dev->struct_mutex);
}
}
#endif
 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
4850,9 → 4851,6
mutex_unlock(&dev->struct_mutex);
}
 
if (atomic->wait_for_flips)
intel_crtc_wait_for_pending_flips(&crtc->base);
 
if (atomic->disable_fbc)
intel_fbc_disable_crtc(crtc);
 
4885,7 → 4883,7
* to compute the mask of flip planes precisely. For the time being
* consider this a flip to a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
// intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
6322,7 → 6320,6
return;
 
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_crtc_wait_for_pending_flips(crtc);
intel_pre_disable_primary(crtc);
 
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
10913,7 → 10910,7
/* and that it is marked active as soon as the irq could fire. */
smp_wmb();
}
 
#if 0
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
11376,6 → 11373,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
 
WARN_ON(!in_interrupt());
 
if (crtc == NULL)
return;
 
11392,7 → 11391,7
intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
 
#endif
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
11442,7 → 11441,7
work->event = event;
work->crtc = crtc;
work->old_fb = old_fb;
INIT_WORK(&work->work, intel_unpin_work_fn);
// INIT_WORK(&work->work, intel_unpin_work_fn);
 
ret = drm_crtc_vblank_get(crtc);
if (ret)
11469,8 → 11468,8
intel_crtc->unpin_work = work;
spin_unlock_irq(&dev->event_lock);
 
// if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
// flush_workqueue(dev_priv->wq);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
 
/* Reference the objects for the scheduled work. */
drm_framebuffer_reference(work->old_fb);
11928,23 → 11927,13
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
 
/* Clamp bpp to default limit on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0) {
int type = connector->base.connector_type;
int clamp_bpp = 24;
 
/* Fall back to 18 bpp when DP sink capability is unknown. */
if (type == DRM_MODE_CONNECTOR_DisplayPort ||
type == DRM_MODE_CONNECTOR_eDP)
clamp_bpp = 18;
 
if (bpp > clamp_bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
bpp, clamp_bpp);
pipe_config->pipe_bpp = clamp_bpp;
/* Clamp bpp to 8 on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
}
}
}
 
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
13328,7 → 13317,7
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_atomic_helper_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
// .page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
};
13545,12 → 13534,11
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
 
if (INTEL_INFO(plane->dev)->gen >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
if (INTEL_INFO(plane->dev)->gen >= 9 &&
state->ckey.flags == I915_SET_COLORKEY_NONE) {
min_scale = 1;
max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
}
can_position = true;
}
 
14640,34 → 14628,11
broxton_modeset_calc_cdclk;
}
 
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
 
case 3:
dev_priv->display.queue_flip = intel_gen3_queue_flip;
break;
 
case 4:
case 5:
dev_priv->display.queue_flip = intel_gen4_queue_flip;
break;
 
case 6:
dev_priv->display.queue_flip = intel_gen6_queue_flip;
break;
case 7:
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
/* Drop through - unsupported since execlist only. */
default:
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
}
 
 
mutex_init(&dev_priv->pps_mutex);
}
 
/drivers/video/drm/i915/intel_dp.c
1894,7 → 1894,7
* operations.
*/
delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
// schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
 
/*
5756,7 → 5756,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5776,13 → 5776,6
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
 
/*
* flush also means no more activity hence schedule downclock, if all
* other fbs are quiescent too
*/
if (!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
msecs_to_jiffies(1000));
mutex_unlock(&dev_priv->drrs.mutex);
}
 
/drivers/video/drm/i915/kms_display.c
10,10 → 10,16
#include <linux/pci.h>
 
#include <syscall.h>
 
//#include "bitmap.h"
#include <display.h>
 
void FASTCALL sysSetFramebuffer(void *fb)__asm__("SetFramebuffer");
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb);
void init_system_cursors(struct drm_device *dev);
 
addr_t dummy_fb_page;
 
display_t *os_display;
 
u32 cmd_buffer;
54,17 → 60,26
struct drm_i915_gem_object *obj = NULL;
int stride, size;
 
ENTER();
 
stride = mode->hdisplay *4;
 
if(IS_GEN3(dev))
tiling = 0;
 
if(tiling)
{
int gen3size;
 
if(IS_GEN3(dev))
for (stride = 512; stride < mode->hdisplay * 4; stride <<= 1);
else
stride = ALIGN(stride, 512);
size = stride * ALIGN(mode->vdisplay, 8);
 
if(IS_GEN3(dev))
{
for (gen3size = 1024*1024; gen3size < size; gen3size <<= 1);
size = gen3size;
}
else
size = ALIGN(size, 4096);
}
else
81,7 → 96,6
int ret;
 
DRM_DEBUG_KMS("remove old framebuffer\n");
set_fake_framebuffer();
drm_framebuffer_remove(fb);
ifbdev->fb = NULL;
fb = NULL;
158,7 → 172,7
 
fb->bits_per_pixel = 32;
fb->depth = 24;
 
LEAVE();
return fb;
 
out_fb:
268,7 → 282,7
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct kos_framebuffer *kfb = intel_fb->private;
kolibri_framebuffer_update(dev, kfb);
kolibri_framebuffer_update(dev_priv, kfb);
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb);
 
os_display->width = mode->hdisplay;
364,7 → 378,7
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct kos_framebuffer *kfb = intel_fb->private;
kolibri_framebuffer_update(dev, kfb);
kolibri_framebuffer_update(dev_priv, kfb);
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb);
 
os_display->width = mode->hdisplay;
561,6 → 575,8
return -1;
};
 
dummy_fb_page = AllocPage();
 
os_display = GetDisplay();
os_display->ddev = dev;
os_display->connector = connector;
799,7 → 815,7
fb->name = obj->base.name;
fb->width = os_display->width;
fb->height = os_display->height;
fb->pitch = os_display->lfb_pitch;
fb->pitch = obj->stride;
fb->tiling = obj->tiling_mode;
fb->crtc = crtc->base.base.id;
fb->pipe = crtc->pipe;
809,7 → 825,112
return 0;
}
 
/*
 * kolibri_framebuffer_init - allocate and zero the per-framebuffer
 * KolibriOS shadow page tables.
 *
 * Allocates a kos_framebuffer, cross-links it with @intel_fb
 * (kfb->private <-> intel_fb->private), and builds 8 page-directory
 * entries, each backed by a freshly allocated, zero-filled 4 KiB page
 * table (8 PDEs * 1024 PTEs * 4 KiB = a 32 MiB window).
 *
 * Always returns 0.
 *
 * NOTE(review): the kzalloc() result is not checked before use —
 * an allocation failure here would dereference NULL.
 */
int kolibri_framebuffer_init(struct intel_framebuffer *intel_fb)
{
	struct kos_framebuffer *kfb;
	addr_t dummy_table;
	addr_t *pt_addr = NULL;
	int pde;

	kfb = kzalloc(sizeof(struct kos_framebuffer),0);
	kfb->private = intel_fb;

	for(pde = 0; pde < 8; pde++)
	{
		/* One physical page per page table, mapped user+writable. */
		dummy_table = AllocPage();
		kfb->pde[pde] = dummy_table|PG_UW;

		/* Zero the new table so every PTE starts not-present. */
		pt_addr = kmap((struct page*)dummy_table);
		__builtin_memset(pt_addr,0,4096);
		kunmap((struct page*)dummy_table);
	};

	intel_fb->private = kfb;

	return 0;
/* Dead code below (#if 0): an earlier variant that walked the GEM
 * object's sg list and filled PTEs with DMA addresses directly.
 * Kept for reference; never compiled. */
#if 0
	struct sg_page_iter sg_iter;
	num_pages = obj->base.size/4096;
	printf("num_pages %d\n",num_pages);

	pte = 0;
	pde = 0;
	pt_addr = NULL;

	__sg_page_iter_start(&sg_iter, obj->pages->sgl, sg_nents(obj->pages->sgl), 0);
	while (__sg_page_iter_next(&sg_iter))
	{
		if (pt_addr == NULL)
		{
			addr_t pt = AllocPage();
			kfb->pde[pde] = pt|PG_UW;
			pde++;
			pt_addr = kmap_atomic((struct page*)pt);
		}
		pt_addr[pte] = sg_page_iter_dma_address(&sg_iter)|PG_UW|PG_WRITEC;
		if( (pte & 15) == 0)
			DRM_DEBUG_KMS("pte %x\n",pt_addr[pte]);
		if (++pte == 1024)
		{
			kunmap_atomic(pt_addr);
			pt_addr = NULL;
			if (pde == 8)
				break;
			pte = 0;
		}
	}

	if(pt_addr)
	{
		for(;pte < 1024; pte++)
			pt_addr[pte] = dummy_page|PG_UW;
		kunmap_atomic(pt_addr);
	}
#endif
};
 
/*
 * kolibri_framebuffer_update - (re)populate a kos_framebuffer's page
 * tables so they map the framebuffer object's GTT aperture range.
 *
 * Walks the object's pages (size / 4096) and writes one PTE per page,
 * starting at mappable_base + the object's GGTT offset, with
 * user/writable/write-combine flags. Page tables were allocated by
 * kolibri_framebuffer_init(); their physical addresses are recovered
 * from kfb->pde[] by masking off the low flag bits.
 *
 * Any PTEs left over in the last partially-filled table are pointed at
 * dummy_fb_page so they never map stale memory.
 */
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb)
{
	struct intel_framebuffer *intel_fb = kfb->private;
	addr_t *pt_addr = NULL;
	int pte = 0;
	int pde = 0;
	int num_pages;
	addr_t pfn;
	ENTER();
	num_pages = intel_fb->obj->base.size/4096;
	/* Physical address of the object as seen through the GTT aperture. */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(intel_fb->obj);

	while(num_pages)
	{
		if (pt_addr == NULL)
		{
			/* Strip flag bits to get the table's page address. */
			addr_t pt = kfb->pde[pde] & 0xFFFFF000;
			pde++;
			pt_addr = kmap_atomic((struct page*)pt);
		}
		pt_addr[pte] = pfn|PG_UW|PG_WRITEC;
		pfn+= 4096;
		num_pages--;
		if (++pte == 1024)
		{
			/* Table full: unmap it and move to the next PDE
			 * (at most 8 tables = 32 MiB are available). */
			kunmap_atomic(pt_addr);
			pt_addr = NULL;
			if (pde == 8)
				break;
			pte = 0;
		}
	}

	/* Fill the tail of the last table with the dummy page. */
	if(pt_addr)
	{
		for(;pte < 1024; pte++)
			pt_addr[pte] = dummy_fb_page|PG_UW;
		kunmap_atomic(pt_addr);
	}
	LEAVE();
};
 
typedef struct
{
int left;
1290,3 → 1411,6
list_del_init(&wait->task_list);
return 1;
}
 
 
 
/drivers/video/drm/i915/main.c
14,7 → 14,7
#include "bitmap.h"
#include "i915_kos32.h"
 
#define DRV_NAME "i915 v4.4.5"
#define DRV_NAME "i915 v4.4.3"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
254,10 → 254,6
 
dmi_scan_machine();
 
err = fake_framebuffer_create();
if( unlikely(err != 0))
return 0;
 
driver_wq_state = I915_DEV_INIT;
CreateKernelThread(i915_driver_thread);
 
/drivers/video/drm/i915/i915_trace.h
44,7 → 44,4
#define trace_i915_va_alloc(vm,start,size,name)
#define trace_i915_gem_request_notify(ring)
#define trace_i915_gem_object_pread(obj, offset, size)
#define trace_i915_flip_complete(plane, pending_flip_obj)
#define trace_i915_flip_request(plane, obj)
 
#endif
/drivers/video/drm/i915/intel_lrc.c
1706,7 → 1706,6
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
 
2360,7 → 2359,6
kunmap_atomic(reg_state);
 
ctx_obj->dirty = 1;
set_page_dirty(page);
i915_gem_object_unpin_pages(ctx_obj);
 
return 0;
/drivers/video/drm/i915/Makefile.lto
2,11 → 2,11
CC = kos32-gcc
FASM = fasm.exe
 
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"
DEFINES = -DDRM_DEBUG_CODE=1 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
16,7 → 16,7
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
 
CFLAGS_OPT = -O2 -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
45,8 → 45,6
 
NAME_SRC= main.c \
pci.c \
getopt.c \
getopt1.c \
dvo_ch7017.c \
dvo_ch7xxx.c \
dvo_ivch.c \
92,7 → 90,6
intel_frontbuffer.c \
intel_guc_loader.c \
intel_hdmi.c \
intel_hotplug.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
112,8 → 109,6
intel_sprite.c \
intel_uncore.c \
kms_display.c \
kos_cursor.c \
kos_fb.c \
utils.c \
fwblob.asm \
../hdmi.c \
128,7 → 123,7
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_dp_mst_topology.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_atomic_helper.c \
$(DRM_TOPDIR)/drm_bridge.c \
154,6 → 149,7
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile.lto
166,10 → 162,7
%.o : %.S $(HFILES) Makefile.lto
as -o $@ $<
 
fwblob.o: fwblob.asm $(FW_BINS) Makefile
$(FASM) $< $@
 
 
clean:
-rm -f ../*/*.o
 
/drivers/video/drm/i915/i915_drv.c
508,10 → 508,7
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
pch->subsystem_device == 0x1100)) {
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
/drivers/video/drm/i915/i915_gem_context.c
340,10 → 340,6
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
 
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
}
}
 
712,7 → 708,7
if (ret)
goto unpin_out;
 
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
/drivers/video/drm/i915/i915_guc_submission.c
24,6 → 24,7
#include <linux/firmware.h>
#include <linux/circ_buf.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_guc.h"
 
/**
/drivers/video/drm/i915/intel_ddi.c
1582,8 → 1582,7
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_DP_MST) {
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
207,13 → 207,8
gpio = *data++;
 
/* pull up/down */
action = *data++ & 1;
action = *data++;
 
if (gpio >= ARRAY_SIZE(gtable)) {
DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
goto out;
}
 
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
 
231,7 → 226,6
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->sb_lock);
 
out:
return data;
}
 
/drivers/video/drm/i915/intel_i2c.c
675,7 → 675,7
return 0;
 
err:
while (pin--) {
while (--pin) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
 
/drivers/video/drm/i915/intel_ringbuffer.c
347,7 → 347,6
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
420,7 → 419,6
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
/drivers/video/drm/drm_irq.c
238,64 → 238,6
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
 
/*
* Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
* interval? If so then vblank irqs keep running and it will likely
* happen that the hardware vblank counter is not trustworthy as it
* might reset at some point in that interval and vblank timestamps
* are not trustworthy either in that interval. Iow. this can result
* in a bogus diff >> 1 which must be avoided as it would cause
* random large forward jumps of the software vblank counter.
*/
if (diff > 1 && (vblank->inmodeset & 0x2)) {
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
" due to pre-modeset.\n", pipe, diff);
diff = 1;
}
 
/*
* FIMXE: Need to replace this hack with proper seqlocks.
*
* Restrict the bump of the software vblank counter to a safe maximum
* value of +1 whenever there is the possibility that concurrent readers
* of vblank timestamps could be active at the moment, as the current
* implementation of the timestamp caching and updating is not safe
* against concurrent readers for calls to store_vblank() with a bump
* of anything but +1. A bump != 1 would very likely return corrupted
* timestamps to userspace, because the same slot in the cache could
* be concurrently written by store_vblank() and read by one of those
* readers without the read-retry logic detecting the collision.
*
* Concurrent readers can exist when we are called from the
* drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
* irq callers. However, all those calls to us are happening with the
* vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
* can't increase while we are executing. Therefore a zero refcount at
* this point is safe for arbitrary counter bumps if we are called
* outside vblank irq, a non-zero count is not 100% safe. Unfortunately
* we must also accept a refcount of 1, as whenever we are called from
* drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
* we must let that one pass through in order to not lose vblank counts
* during vblank irq off - which would completely defeat the whole
* point of this routine.
*
* Whenever we are called from vblank irq, we have to assume concurrent
* readers exist or can show up any time during our execution, even if
* the refcount is currently zero, as vblank irqs are usually only
* enabled due to the presence of readers, and because when we are called
* from vblank irq we can't hold the vbl_lock to protect us from sudden
* bumps in vblank refcount. Therefore also restrict bumps to +1 when
* called from vblank irq.
*/
if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
(flags & DRM_CALLED_FROM_VBLIRQ))) {
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
"refcount %u, vblirq %u\n", pipe, diff,
atomic_read(&vblank->refcount),
(flags & DRM_CALLED_FROM_VBLIRQ) != 0);
diff = 1;
}
 
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
1236,8 → 1178,6
spin_lock_irqsave(&dev->event_lock, irqflags);
 
spin_lock(&dev->vbl_lock);
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
vblank_disable_and_save(dev, pipe);
wake_up(&vblank->queue);
 
1340,9 → 1280,6
return;
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
 
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
1355,7 → 1292,8
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
if (atomic_read(&vblank->refcount) != 0 ||
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
1450,7 → 1388,6
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (vblank->inmodeset & 0x2)
1570,3 → 1507,19
return 0;
}
EXPORT_SYMBOL(drm_vblank_no_hw_counter);
 
/*
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor:  64bit divisor
 *
 * Returns dividend / divisor. If the divisor needs more than 32 bits,
 * both operands are pre-shifted so a 64/32 divide can be used; the low
 * divisor bits lost to the shift make the result approximate in that
 * case, exactly as in the original implementation.
 */
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 top = divisor >> 32;
	u32 small_div;

	if (top == 0) {
		small_div = divisor;
		return div_u64(dividend, small_div);
	}

	{
		unsigned int sh = fls(top);

		small_div = divisor >> sh;
		dividend >>= sh;
	}

	return div_u64(dividend, small_div);
}
/drivers/video/drm/drm_dp_mst_topology.c
806,18 → 806,6
return mstb;
}
 
static void drm_dp_free_mst_port(struct kref *kref);
 
/*
 * drm_dp_free_mst_branch_device - final kref release for an MST branch.
 *
 * Frees the branch device. If the branch still records a parent port
 * whose list linkage is empty, the reference this branch held on that
 * parent port is dropped first (via drm_dp_free_mst_port), so the port
 * can be freed in turn.
 */
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		/* Port already unlinked from any list: release our ref on it. */
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}
 
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
825,15 → 813,6
bool wake_tx = false;
 
/*
* init kref again to be used by ports to remove mst branch when it is
* not needed anymore
*/
kref_init(kref);
 
if (mstb->port_parent && list_empty(&mstb->port_parent->next))
kref_get(&mstb->port_parent->kref);
 
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
* device at this point.
859,7 → 838,7
 
// if (wake_tx)
// wake_up(&mstb->mgr->tx_waitq);
kref_put(kref, drm_dp_free_mst_branch_device);
kfree(mstb);
}
 
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
907,7 → 886,6
* from an EDID retrieval */
 
mutex_lock(&mgr->destroy_connector_lock);
kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
// schedule_work(&mgr->destroy_connector_work);
1003,17 → 981,17
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
int parent_lct = port->parent->lct;
int lct = port->parent->lct;
int shift = 4;
int idx = (parent_lct - 1) / 2;
if (parent_lct > 1) {
memcpy(rad, port->parent->rad, idx + 1);
shift = (parent_lct % 2) ? 4 : 0;
int idx = lct / 2;
if (lct > 1) {
memcpy(rad, port->parent->rad, idx);
shift = (lct % 2) ? 4 : 0;
} else
rad[0] = 0;
 
rad[idx] |= port->port_num << shift;
return parent_lct + 1;
return lct + 1;
}
 
/*
1043,27 → 1021,18
return send_link;
}
 
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
{
int ret;
 
memcpy(mstb->guid, guid, 16);
 
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(
mstb->mgr,
mstb->port_parent,
if (port->dpcd_rev >= 0x12) {
port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
if (!port->guid_valid) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
port,
DP_GUID,
16,
mstb->guid);
} else {
 
ret = drm_dp_dpcd_write(
mstb->mgr->aux,
DP_GUID,
mstb->guid,
16);
16, port->guid);
port->guid_valid = true;
}
}
}
1078,7 → 1047,7
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
int port_num = mstb->rad[i / 2] >> shift;
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
1120,6 → 1089,7
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
memcpy(port->guid, port_msg->peer_guid, 16);
 
/* manage mstb port lists with mgr lock - take a reference
for this list */
1132,9 → 1102,11
 
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
1193,8 → 1165,10
 
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
1224,7 → 1198,7
 
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
int port_num = rad[i / 2] >> shift;
 
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
1244,48 → 1218,6
return mstb;
}
 
/*
 * get_mst_branch_device_by_guid_helper - recursive GUID lookup.
 * @mstb: subtree root to search
 * @guid: 16-byte GUID to match
 *
 * Depth-first search of the MST topology rooted at @mstb for a branch
 * whose guid matches @guid byte-for-byte. Returns the matching branch,
 * or NULL if none exists in this subtree.
 *
 * NOTE(review): no reference is taken here — the caller
 * (drm_dp_get_mst_branch_device_by_guid) holds mgr->lock and krefs the
 * result.
 */
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	/* GUIDs are 16 bytes; match on the branch itself first. */
	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;


	/* Otherwise recurse into every child branch. */
	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}
 
/*
 * drm_dp_get_mst_branch_device_by_guid - find a branch device by GUID.
 * @mgr:  topology manager whose tree is searched
 * @guid: 16-byte GUID to look for
 *
 * Searches the whole topology from mst_primary under mgr->lock.
 * On success the returned branch has an extra kref taken; the caller
 * must drop it (drm_dp_put_mst_branch_device). Returns NULL when no
 * branch carries @guid.
 */
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	/* Pin the result while the lock still guarantees it is alive. */
	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}
 
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
1396,7 → 1328,6
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
 
/* both msg slots are full */
if (txmsg->seqno == -1) {
1413,12 → 1344,6
txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg;
}
 
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
hdr->broadcast = 1;
else
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
1521,18 → 1446,26
}
 
/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_upq)) {
mgr->tx_up_in_progress = false;
return;
}
 
txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, true);
 
if (ret != 1)
if (ret == 1) {
/* up txmsgs aren't put in slots - so free after we send it */
list_del(&txmsg->next);
kfree(txmsg);
} else if (ret)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
 
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
mgr->tx_up_in_progress = true;
}
 
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1582,9 → 1515,6
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
 
drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
 
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
1632,37 → 1562,6
return 0;
}
 
/*
 * Walk up from @mstb toward the topology root and return the nearest
 * ancestor port that is still connected (i.e. whose ->mstb no longer
 * points back at the branch we climbed from).  Returns NULL when the
 * walk reaches the root without finding one.
 *
 * Iterative form of the original tail recursion; the trace is identical.
 */
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	while (mstb->port_parent) {
		/* a parent port that no longer links down to us is "last connected" */
		if (mstb->port_parent->mstb != mstb)
			return mstb->port_parent;

		mstb = mstb->port_parent->parent;
	}

	return NULL;
}
 
/*
 * Find the last-connected ancestor port for @mstb and return its owning
 * branch device with a kref taken for the caller; the port number of that
 * port is stored through @port_num.  Returns NULL if the topology has no
 * primary branch or no such port exists.
 */
static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *found = NULL;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;	/* topology torn down - nothing to return */

	port = drm_dp_get_last_connected_port_to_mstb(mstb);
	if (port) {
		found = port->parent;
		kref_get(&found->kref);	/* reference handed to the caller */
		*port_num = port->port_num;
	}
out:
	mutex_unlock(&mgr->lock);
	return found;
}
 
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
1670,16 → 1569,11
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int len, ret, port_num;
int len, ret;
 
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
if (!mstb)
return -EINVAL;
}
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
1688,7 → 1582,7
}
 
txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port_num,
len = build_allocate_payload(txmsg, port->port_num,
id,
pbn);
 
1958,12 → 1852,11
drm_dp_encode_up_ack_reply(txmsg, req_type);
 
mutex_lock(&mgr->qlock);
 
process_single_up_tx_qlock(mgr, txmsg);
 
list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
if (!mgr->tx_up_in_progress) {
process_single_up_tx_qlock(mgr);
}
mutex_unlock(&mgr->qlock);
 
kfree(txmsg);
return 0;
}
 
2042,6 → 1935,13
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
 
{
struct drm_dp_payload reset_pay;
reset_pay.start_slot = 0;
reset_pay.num_slots = 0x3f;
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
 
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
2048,13 → 1948,20
goto out_unlock;
}
 
{
struct drm_dp_payload reset_pay;
reset_pay.start_slot = 0;
reset_pay.num_slots = 0x3f;
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
 
/* sort out guid */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
if (ret != 16) {
DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
goto out_unlock;
}
 
mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
if (!mgr->guid_valid) {
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
mgr->guid_valid = true;
}
 
// queue_work(system_long_wq, &mgr->work);
 
ret = 0;
2244,10 → 2151,8
 
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_mst_branch *mstb = NULL;
struct drm_dp_mst_branch *mstb;
bool seqno;
 
if (!mgr->up_req_recv.initial_hdr.broadcast) {
mstb = drm_dp_get_mst_branch_device(mgr,
mgr->up_req_recv.initial_hdr.lct,
mgr->up_req_recv.initial_hdr.rad);
2256,39 → 2161,18
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
}
 
seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
 
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
 
if (!mstb)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
 
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
 
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
drm_dp_update_port(mstb, &msg.u.conn_stat);
 
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
 
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
if (!mstb)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
 
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
 
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
 
2468,7 → 2352,6
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
drm_dp_put_port(port);
return true;
}
}
2628,31 → 2511,32
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
u64 kbps;
s64 peak_kbps;
u32 numerator;
u32 denominator;
fixed20_12 pix_bw;
fixed20_12 fbpp;
fixed20_12 result;
fixed20_12 margin, tmp;
u32 res;
 
kbps = clock * bpp;
pix_bw.full = dfixed_const(clock);
fbpp.full = dfixed_const(bpp);
tmp.full = dfixed_const(8);
fbpp.full = dfixed_div(fbpp, tmp);
 
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*/
result.full = dfixed_mul(pix_bw, fbpp);
margin.full = dfixed_const(54);
tmp.full = dfixed_const(64);
margin.full = dfixed_div(margin, tmp);
result.full = dfixed_div(result, margin);
 
numerator = 64 * 1006;
denominator = 54 * 8 * 1000 * 1000;
margin.full = dfixed_const(1006);
tmp.full = dfixed_const(1000);
margin.full = dfixed_div(margin, tmp);
result.full = dfixed_mul(result, margin);
 
kbps *= numerator;
peak_kbps = drm_fixp_from_fraction(kbps, denominator);
 
return drm_fixp2int_ceil(peak_kbps);
result.full = dfixed_div(result, tmp);
result.full = dfixed_ceil(result);
res = dfixed_trunc(result);
return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
 
2660,23 → 2544,11
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
if (ret != 689) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
154000, 30, 689, ret);
if (ret != 689)
return -EINVAL;
}
ret = drm_dp_calc_pbn_mode(234000, 30);
if (ret != 1047) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
234000, 30, 1047, ret);
if (ret != 1047)
return -EINVAL;
}
ret = drm_dp_calc_pbn_mode(297000, 24);
if (ret != 1063) {
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
297000, 24, 1063, ret);
return -EINVAL;
}
return 0;
}
 
2755,12 → 2627,6
mutex_unlock(&mgr->qlock);
}
 
/*
 * kref release callback for an MST port: drop the reference the port held
 * on its parent branch device, then free the port itself.
 */
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port;

	port = container_of(kref, struct drm_dp_mst_port, kref);
	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
2781,6 → 2647,7
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock);
INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);