/drivers/video/drm/i915/Makefile |
---|
113,6 → 113,7 |
intel_uncore.c \ |
kms_display.c \ |
kos_cursor.c \ |
kos_fb.c \ |
utils.c \ |
fwblob.asm \ |
../hdmi.c \ |
/drivers/video/drm/i915/Makefile.lto |
---|
2,11 → 2,11 |
CC = kos32-gcc |
FASM = fasm.exe |
DEFINES = -DDRM_DEBUG_CODE=1 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU |
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\" |
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI |
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI |
DEFINES += -DKBUILD_MODNAME=\"i915.dll\" |
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk |
DRV_INCLUDES = /d/kos/kolibri/drivers/include |
DRM_TOPDIR = $(CURDIR)/.. |
16,7 → 16,7 |
-I$(DRV_INCLUDES)/uapi \ |
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES) |
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe |
CFLAGS_OPT = -O2 -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe |
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto |
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT) |
45,6 → 45,8 |
NAME_SRC= main.c \ |
pci.c \ |
getopt.c \ |
getopt1.c \ |
dvo_ch7017.c \ |
dvo_ch7xxx.c \ |
dvo_ivch.c \ |
90,6 → 92,7 |
intel_frontbuffer.c \ |
intel_guc_loader.c \ |
intel_hdmi.c \ |
intel_hotplug.c \ |
intel_i2c.c \ |
intel_lrc.c \ |
intel_lvds.c \ |
109,6 → 112,8 |
intel_sprite.c \ |
intel_uncore.c \ |
kms_display.c \ |
kos_cursor.c \ |
kos_fb.c \ |
utils.c \ |
fwblob.asm \ |
../hdmi.c \ |
123,7 → 128,7 |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
../drm_dp_mst_topology.c \ |
$(DRM_TOPDIR)/drm_dp_mst_topology.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_atomic_helper.c \ |
$(DRM_TOPDIR)/drm_bridge.c \ |
149,7 → 154,6 |
$(patsubst %.c, %.o, $(NAME_SRC)))) |
all: $(NAME).dll |
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile.lto |
162,7 → 166,10 |
%.o : %.S $(HFILES) Makefile.lto |
as -o $@ $< |
fwblob.o: fwblob.asm $(FW_BINS) Makefile |
$(FASM) $< $@ |
clean: |
-rm -f ../*/*.o |
/drivers/video/drm/i915/i915_dma.c |
---|
840,6 → 840,8 |
goto put_bridge; |
} |
set_fake_framebuffer(); |
/* This must be called before any calls to HAS_PCH_* */ |
intel_detect_pch(dev); |
/drivers/video/drm/i915/i915_drv.c |
---|
508,7 → 508,10 |
dev_priv->pch_type = PCH_SPT; |
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); |
WARN_ON(!IS_SKYLAKE(dev)); |
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { |
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || |
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && |
pch->subsystem_vendor == 0x1af4 && |
pch->subsystem_device == 0x1100)) { |
dev_priv->pch_type = intel_virt_detect_pch(dev); |
} else |
continue; |
/drivers/video/drm/i915/i915_drv.h |
---|
51,7 → 51,6 |
#include "intel_guc.h" |
#include <linux/spinlock.h> |
#include <linux/err.h> |
#define ioread32(addr) readl(addr) |
static inline u8 inb(u16 port) |
2623,6 → 2622,7 |
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 |
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 |
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 |
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ |
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) |
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) |
/drivers/video/drm/i915/i915_gem_context.c |
---|
340,6 → 340,10 |
i915_gem_context_unreference(lctx); |
ring->last_context = NULL; |
} |
/* Force the GPU state to be reinitialised on enabling */ |
if (ring->default_context) |
ring->default_context->legacy_hw_ctx.initialized = false; |
} |
} |
708,7 → 712,7 |
if (ret) |
goto unpin_out; |
if (!to->legacy_hw_ctx.initialized) { |
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) { |
hw_flags |= MI_RESTORE_INHIBIT; |
/* NB: If we inhibit the restore, the context is not allowed to |
* die because future work may end up depending on valid address |
/drivers/video/drm/i915/i915_guc_submission.c |
---|
24,7 → 24,6 |
#include <linux/firmware.h> |
#include <linux/circ_buf.h> |
#include "intel_drv.h" |
#include "i915_drv.h" |
#include "intel_guc.h" |
/** |
/drivers/video/drm/i915/i915_irq.c |
---|
1657,11 → 1657,11 |
for_each_pipe(dev_priv, pipe) { |
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip(dev, pipe); |
} |
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
2028,7 → 2028,7 |
for_each_pipe(dev_priv, pipe) { |
if (de_iir & DE_PIPE_VBLANK(pipe) && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) |
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); |
2038,8 → 2038,8 |
/* plane/pipes map 1:1 on ilk+ */ |
if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip_plane(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
} |
2081,12 → 2081,12 |
for_each_pipe(dev_priv, pipe) { |
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
/* plane/pipes map 1:1 on ilk+ */ |
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip_plane(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
} |
2290,7 → 2290,7 |
if (pipe_iir & GEN8_PIPE_VBLANK && |
intel_pipe_handle_vblank(dev, pipe)) |
/* intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (INTEL_INFO(dev_priv)->gen >= 9) |
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; |
2297,6 → 2297,10 |
else |
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; |
if (flip_done) { |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) |
hsw_pipe_crc_irq_handler(dev, pipe); |
2335,10 → 2339,14 |
spt_irq_handler(dev, pch_iir); |
else |
cpt_irq_handler(dev, pch_iir); |
} else |
DRM_ERROR("The master control interrupt lied (SDE)!\n"); |
} else { |
/* |
* Like on previous PCH there seems to be something |
* fishy going on with forwarding PCH interrupts. |
*/ |
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); |
} |
} |
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
POSTING_READ_FW(GEN8_MASTER_IRQ); |
2363,6 → 2371,8 |
for_each_ring(ring, dev_priv, i) |
wake_up_all(&ring->irq_queue); |
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ |
wake_up_all(&dev_priv->pending_flip_queue); |
/* |
* Signal tasks blocked in i915_gem_wait_for_error that the pending |
3775,12 → 3785,12 |
if (I915_READ16(ISR) & flip_pending) |
goto check_page_flip; |
// intel_prepare_page_flip(dev, plane); |
// intel_finish_page_flip(dev, pipe); |
intel_prepare_page_flip(dev, plane); |
intel_finish_page_flip(dev, pipe); |
return true; |
check_page_flip: |
// intel_check_page_flip(dev, pipe); |
intel_check_page_flip(dev, pipe); |
return false; |
} |
3959,9 → 3969,12 |
if (I915_READ(ISR) & flip_pending) |
goto check_page_flip; |
intel_prepare_page_flip(dev, plane); |
intel_finish_page_flip(dev, pipe); |
return true; |
check_page_flip: |
intel_check_page_flip(dev, pipe); |
return false; |
} |
4449,7 → 4462,7 |
void intel_irq_uninstall(struct drm_i915_private *dev_priv) |
{ |
// drm_irq_uninstall(dev_priv->dev); |
// intel_hpd_cancel_work(dev_priv); |
intel_hpd_cancel_work(dev_priv); |
dev_priv->pm.irqs_enabled = false; |
} |
/drivers/video/drm/i915/i915_trace.h |
---|
44,4 → 44,7 |
#define trace_i915_va_alloc(vm,start,size,name) |
#define trace_i915_gem_request_notify(ring) |
#define trace_i915_gem_object_pread(obj, offset, size) |
#define trace_i915_flip_complete(plane, pending_flip_obj) |
#define trace_i915_flip_request(plane, obj) |
#endif |
/drivers/video/drm/i915/intel_ddi.c |
---|
1582,7 → 1582,8 |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | |
wrpll_params.central_freq; |
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
intel_encoder->type == INTEL_OUTPUT_DP_MST) { |
switch (crtc_state->port_clock / 2) { |
case 81000: |
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); |
/drivers/video/drm/i915/intel_display.c |
---|
3945,13 → 3945,13 |
drm_crtc_vblank_put(&intel_crtc->base); |
// wake_up_all(&dev_priv->pending_flip_queue); |
// queue_work(dev_priv->wq, &work->work); |
wake_up_all(&dev_priv->pending_flip_queue); |
queue_work(dev_priv->wq, &work->work); |
// trace_i915_flip_complete(intel_crtc->plane, |
// work->pending_flip_obj); |
trace_i915_flip_complete(intel_crtc->plane, |
work->pending_flip_obj); |
} |
#if 0 |
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
3977,7 → 3977,6 |
mutex_unlock(&dev->struct_mutex); |
} |
} |
#endif |
/* Program iCLKIP clock to the desired frequency */ |
static void lpt_program_iclkip(struct drm_crtc *crtc) |
4851,6 → 4850,9 |
mutex_unlock(&dev->struct_mutex); |
} |
if (atomic->wait_for_flips) |
intel_crtc_wait_for_pending_flips(&crtc->base); |
if (atomic->disable_fbc) |
intel_fbc_disable_crtc(crtc); |
4883,7 → 4885,7 |
* to compute the mask of flip planes precisely. For the time being |
* consider this a flip to a NULL plane. |
*/ |
// intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); |
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); |
} |
static void ironlake_crtc_enable(struct drm_crtc *crtc) |
6320,6 → 6322,7 |
return; |
if (to_intel_plane_state(crtc->primary->state)->visible) { |
intel_crtc_wait_for_pending_flips(crtc); |
intel_pre_disable_primary(crtc); |
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); |
10910,7 → 10913,7 |
/* and that it is marked active as soon as the irq could fire. */ |
smp_wmb(); |
} |
#if 0 |
static int intel_gen2_queue_flip(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
11373,8 → 11376,6 |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_unpin_work *work; |
WARN_ON(!in_interrupt()); |
if (crtc == NULL) |
return; |
11391,7 → 11392,7 |
intel_queue_rps_boost_for_request(dev, work->flip_queued_req); |
spin_unlock(&dev->event_lock); |
} |
#endif |
static int intel_crtc_page_flip(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_pending_vblank_event *event, |
11441,7 → 11442,7 |
work->event = event; |
work->crtc = crtc; |
work->old_fb = old_fb; |
// INIT_WORK(&work->work, intel_unpin_work_fn); |
INIT_WORK(&work->work, intel_unpin_work_fn); |
ret = drm_crtc_vblank_get(crtc); |
if (ret) |
11468,8 → 11469,8 |
intel_crtc->unpin_work = work; |
spin_unlock_irq(&dev->event_lock); |
if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
flush_workqueue(dev_priv->wq); |
// if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
// flush_workqueue(dev_priv->wq); |
/* Reference the objects for the scheduled work. */ |
drm_framebuffer_reference(work->old_fb); |
11927,13 → 11928,23 |
pipe_config->pipe_bpp = connector->base.display_info.bpc*3; |
} |
/* Clamp bpp to 8 on screens without EDID 1.4 */ |
if (connector->base.display_info.bpc == 0 && bpp > 24) { |
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", |
bpp); |
pipe_config->pipe_bpp = 24; |
/* Clamp bpp to default limit on screens without EDID 1.4 */ |
if (connector->base.display_info.bpc == 0) { |
int type = connector->base.connector_type; |
int clamp_bpp = 24; |
/* Fall back to 18 bpp when DP sink capability is unknown. */ |
if (type == DRM_MODE_CONNECTOR_DisplayPort || |
type == DRM_MODE_CONNECTOR_eDP) |
clamp_bpp = 18; |
if (bpp > clamp_bpp) { |
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", |
bpp, clamp_bpp); |
pipe_config->pipe_bpp = clamp_bpp; |
} |
} |
} |
static int |
compute_baseline_pipe_bpp(struct intel_crtc *crtc, |
13317,7 → 13328,7 |
.gamma_set = intel_crtc_gamma_set, |
.set_config = drm_atomic_helper_set_config, |
.destroy = intel_crtc_destroy, |
// .page_flip = intel_crtc_page_flip, |
.page_flip = intel_crtc_page_flip, |
.atomic_duplicate_state = intel_crtc_duplicate_state, |
.atomic_destroy_state = intel_crtc_destroy_state, |
}; |
13534,11 → 13545,12 |
int max_scale = DRM_PLANE_HELPER_NO_SCALING; |
bool can_position = false; |
if (INTEL_INFO(plane->dev)->gen >= 9) { |
/* use scaler when colorkey is not required */ |
if (INTEL_INFO(plane->dev)->gen >= 9 && |
state->ckey.flags == I915_SET_COLORKEY_NONE) { |
if (state->ckey.flags == I915_SET_COLORKEY_NONE) { |
min_scale = 1; |
max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); |
} |
can_position = true; |
} |
14628,11 → 14640,34 |
broxton_modeset_calc_cdclk; |
} |
switch (INTEL_INFO(dev)->gen) { |
case 2: |
dev_priv->display.queue_flip = intel_gen2_queue_flip; |
break; |
case 3: |
dev_priv->display.queue_flip = intel_gen3_queue_flip; |
break; |
case 4: |
case 5: |
dev_priv->display.queue_flip = intel_gen4_queue_flip; |
break; |
case 6: |
dev_priv->display.queue_flip = intel_gen6_queue_flip; |
break; |
case 7: |
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ |
dev_priv->display.queue_flip = intel_gen7_queue_flip; |
break; |
case 9: |
/* Drop through - unsupported since execlist only. */ |
default: |
/* Default just returns -ENODEV to indicate unsupported */ |
dev_priv->display.queue_flip = intel_default_queue_flip; |
} |
mutex_init(&dev_priv->pps_mutex); |
} |
/drivers/video/drm/i915/intel_dp.c |
---|
1894,7 → 1894,7 |
* operations. |
*/ |
delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); |
// schedule_delayed_work(&intel_dp->panel_vdd_work, delay); |
schedule_delayed_work(&intel_dp->panel_vdd_work, delay); |
} |
/* |
5756,7 → 5756,7 |
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) |
return; |
// cancel_delayed_work(&dev_priv->drrs.work); |
cancel_delayed_work(&dev_priv->drrs.work); |
mutex_lock(&dev_priv->drrs.mutex); |
if (!dev_priv->drrs.dp) { |
5776,6 → 5776,13 |
dev_priv->drrs.dp->attached_connector->panel. |
fixed_mode->vrefresh); |
/* |
* flush also means no more activity hence schedule downclock, if all |
* other fbs are quiescent too |
*/ |
if (!dev_priv->drrs.busy_frontbuffer_bits) |
schedule_delayed_work(&dev_priv->drrs.work, |
msecs_to_jiffies(1000)); |
mutex_unlock(&dev_priv->drrs.mutex); |
} |
/drivers/video/drm/i915/intel_dsi_panel_vbt.c |
---|
207,8 → 207,13 |
gpio = *data++; |
/* pull up/down */ |
action = *data++; |
action = *data++ & 1; |
if (gpio >= ARRAY_SIZE(gtable)) { |
DRM_DEBUG_KMS("unknown gpio %u\n", gpio); |
goto out; |
} |
function = gtable[gpio].function_reg; |
pad = gtable[gpio].pad_reg; |
226,6 → 231,7 |
vlv_gpio_nc_write(dev_priv, pad, val); |
mutex_unlock(&dev_priv->sb_lock); |
out: |
return data; |
} |
/drivers/video/drm/i915/intel_hotplug.c |
---|
0,0 → 1,513 |
/* |
* Copyright © 2015 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#include <linux/kernel.h> |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
#include "intel_drv.h" |
/** |
* DOC: Hotplug |
* |
* Simply put, hotplug occurs when a display is connected to or disconnected |
* from the system. However, there may be adapters and docking stations and |
* Display Port short pulses and MST devices involved, complicating matters. |
* |
* Hotplug in i915 is handled in many different levels of abstraction. |
* |
* The platform dependent interrupt handling code in i915_irq.c enables, |
* disables, and does preliminary handling of the interrupts. The interrupt |
* handlers gather the hotplug detect (HPD) information from relevant registers |
* into a platform independent mask of hotplug pins that have fired. |
* |
* The platform independent interrupt handler intel_hpd_irq_handler() in |
* intel_hotplug.c does hotplug irq storm detection and mitigation, and passes |
* further processing to appropriate bottom halves (Display Port specific and |
* regular hotplug). |
* |
* The Display Port work function i915_digport_work_func() calls into |
* intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long |
* pulses, with failures and non-MST long pulses triggering regular hotplug |
* processing on the connector. |
* |
* The regular hotplug work function i915_hotplug_work_func() calls connector |
* detect hooks, and, if connector status changes, triggers sending of hotplug |
* uevent to userspace via drm_kms_helper_hotplug_event(). |
* |
* Finally, the userspace is responsible for triggering a modeset upon receiving |
* the hotplug uevent, disabling or enabling the crtc as needed. |
* |
* The hotplug interrupt storm detection and mitigation code keeps track of the |
* number of interrupts per hotplug pin per a period of time, and if the number |
* of interrupts exceeds a certain threshold, the interrupt is disabled for a |
* while before being re-enabled. The intention is to mitigate issues raising |
* from broken hardware triggering massive amounts of interrupts and grinding |
* the system to a halt. |
* |
* Current implementation expects that hotplug interrupt storm will not be |
* seen when display port sink is connected, hence on platforms whose DP |
* callback is handled by i915_digport_work_func reenabling of hpd is not |
* performed (it was never expected to be disabled in the first place ;) ) |
* this is specific to DP sinks handled by this routine and any other display |
* such as HDMI or DVI enabled on the same port will have proper logic since |
* it will use i915_hotplug_work_func where this logic is handled. |
*/ |
/** |
 * intel_hpd_pin_to_port - translate an HPD pin into its digital port |
 * @pin: hotplug pin that fired |
 * @port: out-parameter receiving the matching port |
 * |
 * Returns true and stores the port for pins A through E; returns false |
 * (and leaves @port untouched) for any other pin, i.e. "no hpd port". |
 */ |
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port) |
{ |
switch (pin) { |
case HPD_PORT_A: |
*port = PORT_A; |
return true; |
case HPD_PORT_B: |
*port = PORT_B; |
return true; |
case HPD_PORT_C: |
*port = PORT_C; |
return true; |
case HPD_PORT_D: |
*port = PORT_D; |
return true; |
case HPD_PORT_E: |
*port = PORT_E; |
return true; |
default: |
return false; /* no hpd */ |
} |
} |
#define HPD_STORM_DETECT_PERIOD 1000 |
#define HPD_STORM_THRESHOLD 5 |
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) |
/** |
* intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin |
* @dev_priv: private driver data pointer |
* @pin: the pin to gather stats on |
* |
* Gather stats about HPD irqs from the specified @pin, and detect irq |
* storms. Only the pin specific stats and state are changed, the caller is |
* responsible for further action. |
* |
* @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms, |
* otherwise it's considered an irq storm, and the irq state is set to |
* @HPD_MARK_DISABLED. |
* |
* Return true if an irq storm was detected on @pin. |
*/ |
/* See the kernel-doc above: counts irqs per pin inside a |
 * HPD_STORM_DETECT_PERIOD window; past HPD_STORM_THRESHOLD the pin is |
 * marked HPD_MARK_DISABLED and true is returned. */ |
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, |
enum hpd_pin pin) |
{ |
unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; |
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); |
bool storm = false; |
/* Outside the current window: restart the window and reset the count. */ |
if (!time_in_range(jiffies, start, end)) { |
dev_priv->hotplug.stats[pin].last_jiffies = jiffies; |
dev_priv->hotplug.stats[pin].count = 0; |
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); |
} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { |
/* Too many irqs inside one window: flag a storm for the caller. */ |
dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; |
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); |
storm = true; |
} else { |
dev_priv->hotplug.stats[pin].count++; |
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, |
dev_priv->hotplug.stats[pin].count); |
} |
return storm; |
} |
/* |
 * Switch every connector whose pin was marked HPD_MARK_DISABLED by the |
 * storm detector from hotplug-irq detection over to periodic polling, |
 * then schedule the delayed work that re-enables hpd later. |
 * Caller must hold dev_priv->irq_lock (asserted below). |
 */ |
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_connector *intel_connector; |
struct intel_encoder *intel_encoder; |
struct drm_connector *connector; |
enum hpd_pin pin; |
bool hpd_disabled = false; |
assert_spin_locked(&dev_priv->irq_lock); |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
/* Only connectors currently relying on HPD irqs are affected. */ |
if (connector->polled != DRM_CONNECTOR_POLL_HPD) |
continue; |
intel_connector = to_intel_connector(connector); |
intel_encoder = intel_connector->encoder; |
if (!intel_encoder) |
continue; |
pin = intel_encoder->hpd_pin; |
if (pin == HPD_NONE || |
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) |
continue; |
DRM_INFO("HPD interrupt storm detected on connector %s: " |
"switching from hotplug detection to polling\n", |
connector->name); |
dev_priv->hotplug.stats[pin].state = HPD_DISABLED; |
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
| DRM_CONNECTOR_POLL_DISCONNECT; |
hpd_disabled = true; |
} |
/* Enable polling and queue hotplug re-enabling. */ |
if (hpd_disabled) { |
drm_kms_helper_poll_enable_locked(dev); |
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, |
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); |
} |
} |
/* |
 * Delayed-work handler (scheduled HPD_STORM_REENABLE_DELAY ms after a |
 * storm): flips HPD_DISABLED pins back to HPD_ENABLED, restores each |
 * affected connector's original ->polled mode, and reprograms the hpd |
 * irq registers via display.hpd_irq_setup under irq_lock. |
 */ |
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, typeof(*dev_priv), |
hotplug.reenable_work.work); |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
int i; |
/* Hold a runtime-pm reference while touching the hw irq setup. */ |
intel_runtime_pm_get(dev_priv); |
spin_lock_irq(&dev_priv->irq_lock); |
for_each_hpd_pin(i) { |
struct drm_connector *connector; |
if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) |
continue; |
dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
struct intel_connector *intel_connector = to_intel_connector(connector); |
if (intel_connector->encoder->hpd_pin == i) { |
if (connector->polled != intel_connector->polled) |
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
connector->name); |
connector->polled = intel_connector->polled; |
/* No saved poll mode means the connector is hpd-driven. */ |
if (!connector->polled) |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
} |
} |
} |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock_irq(&dev_priv->irq_lock); |
intel_runtime_pm_put(dev_priv); |
} |
/* |
 * Re-run ->detect() on @connector (mode_config.mutex must be held) and |
 * report whether the connection status actually changed, logging the |
 * old/new status when it did. Return value tells the caller whether a |
 * userspace hotplug uevent is warranted. |
 */ |
static bool intel_hpd_irq_event(struct drm_device *dev, |
struct drm_connector *connector) |
{ |
enum drm_connector_status old_status; |
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
old_status = connector->status; |
connector->status = connector->funcs->detect(connector, false); |
if (old_status == connector->status) |
return false; |
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", |
connector->base.id, |
connector->name, |
drm_get_connector_status_name(old_status), |
drm_get_connector_status_name(connector->status)); |
return true; |
} |
/* |
 * Bottom half for digital-port (DP) hotplug: snapshots and clears the |
 * long/short port masks under irq_lock, then calls each port's |
 * ->hpd_pulse() hook outside the lock. Ports whose hook returns |
 * IRQ_NONE are folded back into event_bits so the regular hotplug work |
 * handles them instead. |
 */ |
static void i915_digport_work_func(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, struct drm_i915_private, hotplug.dig_port_work); |
u32 long_port_mask, short_port_mask; |
struct intel_digital_port *intel_dig_port; |
int i; |
u32 old_bits = 0; |
/* Grab-and-clear the pending masks so new irqs accumulate afresh. */ |
spin_lock_irq(&dev_priv->irq_lock); |
long_port_mask = dev_priv->hotplug.long_port_mask; |
dev_priv->hotplug.long_port_mask = 0; |
short_port_mask = dev_priv->hotplug.short_port_mask; |
dev_priv->hotplug.short_port_mask = 0; |
spin_unlock_irq(&dev_priv->irq_lock); |
for (i = 0; i < I915_MAX_PORTS; i++) { |
bool valid = false; |
bool long_hpd = false; |
intel_dig_port = dev_priv->hotplug.irq_port[i]; |
if (!intel_dig_port || !intel_dig_port->hpd_pulse) |
continue; |
if (long_port_mask & (1 << i)) { |
valid = true; |
long_hpd = true; |
} else if (short_port_mask & (1 << i)) |
valid = true; |
if (valid) { |
enum irqreturn ret; |
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); |
if (ret == IRQ_NONE) { |
/* fall back to old school hpd */ |
old_bits |= (1 << intel_dig_port->base.hpd_pin); |
} |
} |
} |
/* Hand unhandled pins to the regular hotplug work function. */ |
if (old_bits) { |
spin_lock_irq(&dev_priv->irq_lock); |
dev_priv->hotplug.event_bits |= old_bits; |
spin_unlock_irq(&dev_priv->irq_lock); |
schedule_work(&dev_priv->hotplug.hotplug_work); |
} |
} |
/* |
* Handle hotplug events outside the interrupt handler proper. |
*/ |
/* |
 * Regular hotplug bottom half: consumes event_bits, applies storm |
 * mitigation, runs each affected encoder's ->hot_plug() hook plus a |
 * fresh ->detect(), and sends one hotplug uevent to userspace if any |
 * connector status changed. |
 */ |
static void i915_hotplug_work_func(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, struct drm_i915_private, hotplug.hotplug_work); |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_connector *intel_connector; |
struct intel_encoder *intel_encoder; |
struct drm_connector *connector; |
bool changed = false; |
u32 hpd_event_bits; |
mutex_lock(&mode_config->mutex); |
DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
/* Snapshot-and-clear the pending pin events under irq_lock. */ |
spin_lock_irq(&dev_priv->irq_lock); |
hpd_event_bits = dev_priv->hotplug.event_bits; |
dev_priv->hotplug.event_bits = 0; |
/* Disable hotplug on connectors that hit an irq storm. */ |
intel_hpd_irq_storm_disable(dev_priv); |
spin_unlock_irq(&dev_priv->irq_lock); |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
intel_connector = to_intel_connector(connector); |
if (!intel_connector->encoder) |
continue; |
intel_encoder = intel_connector->encoder; |
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { |
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", |
connector->name, intel_encoder->hpd_pin); |
if (intel_encoder->hot_plug) |
intel_encoder->hot_plug(intel_encoder); |
if (intel_hpd_irq_event(dev, connector)) |
changed = true; |
} |
} |
mutex_unlock(&mode_config->mutex); |
/* Only bother userspace when something actually changed state. */ |
if (changed) |
drm_kms_helper_hotplug_event(dev); |
} |
/** |
* intel_hpd_irq_handler - main hotplug irq handler |
* @dev: drm device |
* @pin_mask: a mask of hpd pins that have triggered the irq |
* @long_mask: a mask of hpd pins that may be long hpd pulses |
* |
* This is the main hotplug irq handler for all platforms. The platform specific |
* irq handlers call the platform specific hotplug irq handlers, which read and |
* decode the appropriate registers into bitmasks about hpd pins that have |
* triggered (@pin_mask), and which of those pins may be long pulses |
* (@long_mask). The @long_mask is ignored if the port corresponding to the pin |
* is not a digital port. |
* |
* Here, we do hotplug irq storm detection and mitigation, and pass further |
* processing to appropriate bottom halves. |
*/ |
/* See kernel-doc above: platform-independent hpd dispatch — storm |
 * detection plus routing to the digital-port and regular bottom halves. */ |
void intel_hpd_irq_handler(struct drm_device *dev, |
u32 pin_mask, u32 long_mask) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int i; |
enum port port; |
bool storm_detected = false; |
bool queue_dig = false, queue_hp = false; |
bool is_dig_port; |
if (!pin_mask) |
return; |
spin_lock(&dev_priv->irq_lock); |
for_each_hpd_pin(i) { |
if (!(BIT(i) & pin_mask)) |
continue; |
/* Pin belongs to a DP-capable port with an hpd_pulse hook? */ |
is_dig_port = intel_hpd_pin_to_port(i, &port) && |
dev_priv->hotplug.irq_port[port]; |
if (is_dig_port) { |
bool long_hpd = long_mask & BIT(i); |
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), |
long_hpd ? "long" : "short"); |
/* |
* For long HPD pulses we want to have the digital queue happen, |
* but we still want HPD storm detection to function. |
*/ |
queue_dig = true; |
if (long_hpd) { |
dev_priv->hotplug.long_port_mask |= (1 << port); |
} else { |
/* for short HPD just trigger the digital queue */ |
dev_priv->hotplug.short_port_mask |= (1 << port); |
continue; |
} |
} |
if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { |
/* |
* On GMCH platforms the interrupt mask bits only |
* prevent irq generation, not the setting of the |
* hotplug bits itself. So only WARN about unexpected |
* interrupts on saner platforms. |
*/ |
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), |
"Received HPD interrupt on pin %d although disabled\n", i); |
continue; |
} |
if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) |
continue; |
/* Non-digital pins go through the regular hotplug work. */ |
if (!is_dig_port) { |
dev_priv->hotplug.event_bits |= BIT(i); |
queue_hp = true; |
} |
/* A storming pin is dropped from this batch of events. */ |
if (intel_hpd_irq_storm_detect(dev_priv, i)) { |
dev_priv->hotplug.event_bits &= ~BIT(i); |
storm_detected = true; |
} |
} |
if (storm_detected) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock(&dev_priv->irq_lock); |
/* |
* Our hotplug handler can grab modeset locks (by calling down into the |
* fb helpers). Hence it must not be run on our own dev-priv->wq work |
* queue for otherwise the flush_work in the pageflip code will |
* deadlock. |
*/ |
if (queue_dig) |
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); |
if (queue_hp) |
schedule_work(&dev_priv->hotplug.hotplug_work); |
} |
/** |
* intel_hpd_init - initializes and enables hpd support |
* @dev_priv: i915 device instance |
* |
* This function enables the hotplug support. It requires that interrupts have |
* already been enabled with intel_irq_init_hw(). From this point on hotplug and |
* poll request can run concurrently to other code, so locking rules must be |
* obeyed. |
* |
* This is a separate step from interrupt enabling to simplify the locking rules |
* in the driver load and resume code. |
*/ |
/* See kernel-doc above: resets per-pin storm stats to HPD_ENABLED, |
 * picks the poll mode for each connector, then programs the hpd irq |
 * registers via display.hpd_irq_setup. */ |
void intel_hpd_init(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct drm_connector *connector; |
int i; |
for_each_hpd_pin(i) { |
dev_priv->hotplug.stats[i].count = 0; |
dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
} |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
struct intel_connector *intel_connector = to_intel_connector(connector); |
connector->polled = intel_connector->polled; |
/* MST has a dynamic intel_connector->encoder and it's reprobing |
* is all handled by the MST helpers. */ |
if (intel_connector->mst_port) |
continue; |
/* hpd-capable pins default to irq-driven detection, not polling. */ |
if (!connector->polled && I915_HAS_HOTPLUG(dev) && |
intel_connector->encoder->hpd_pin > HPD_NONE) |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
} |
/* |
* Interrupt setup is already guaranteed to be single-threaded, this is |
* just to make the assert_spin_locked checks happy. |
*/ |
spin_lock_irq(&dev_priv->irq_lock); |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock_irq(&dev_priv->irq_lock); |
} |
/* |
 * One-time initialisation of the three hotplug work items: the regular |
 * hotplug work, the digital-port work, and the delayed storm-reenable |
 * work. Must run before any hpd irq can fire. |
 */ |
void intel_hpd_init_work(struct drm_i915_private *dev_priv) |
{ |
INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); |
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); |
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, |
intel_hpd_irq_storm_reenable_work); |
} |
/*
 * Tear down pending hotplug processing. The pending-event masks are
 * cleared under irq_lock first, and only then are the work items
 * cancelled synchronously, so no stale events survive.
 */
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
/drivers/video/drm/i915/intel_i2c.c |
---|
675,7 → 675,7 |
return 0; |
err: |
while (--pin) { |
while (pin--) { |
if (!intel_gmbus_is_valid_pin(dev_priv, pin)) |
continue; |
/drivers/video/drm/i915/intel_lrc.c |
---|
1706,6 → 1706,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
2359,6 → 2360,7 |
kunmap_atomic(reg_state); |
ctx_obj->dirty = 1; |
set_page_dirty(page); |
i915_gem_object_unpin_pages(ctx_obj); |
return 0; |
/drivers/video/drm/i915/intel_ringbuffer.c |
---|
347,6 → 347,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
if (invalidate_domains) { |
419,6 → 420,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
if (invalidate_domains) { |
/drivers/video/drm/i915/kms_display.c |
---|
10,16 → 10,10 |
#include <linux/pci.h> |
#include <syscall.h> |
//#include "bitmap.h" |
#include <display.h> |
void FASTCALL sysSetFramebuffer(void *fb)__asm__("SetFramebuffer"); |
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb); |
void init_system_cursors(struct drm_device *dev); |
addr_t dummy_fb_page; |
display_t *os_display; |
u32 cmd_buffer; |
60,26 → 54,17 |
struct drm_i915_gem_object *obj = NULL; |
int stride, size; |
ENTER(); |
stride = mode->hdisplay *4; |
if(IS_GEN3(dev)) |
tiling = 0; |
if(tiling) |
{ |
int gen3size; |
if(IS_GEN3(dev)) |
for (stride = 512; stride < mode->hdisplay * 4; stride <<= 1); |
else |
stride = ALIGN(stride, 512); |
size = stride * ALIGN(mode->vdisplay, 8); |
if(IS_GEN3(dev)) |
{ |
for (gen3size = 1024*1024; gen3size < size; gen3size <<= 1); |
size = gen3size; |
} |
else |
size = ALIGN(size, 4096); |
} |
else |
96,6 → 81,7 |
int ret; |
DRM_DEBUG_KMS("remove old framebuffer\n"); |
set_fake_framebuffer(); |
drm_framebuffer_remove(fb); |
ifbdev->fb = NULL; |
fb = NULL; |
172,7 → 158,7 |
fb->bits_per_pixel = 32; |
fb->depth = 24; |
LEAVE(); |
return fb; |
out_fb: |
282,7 → 268,7 |
{ |
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
struct kos_framebuffer *kfb = intel_fb->private; |
kolibri_framebuffer_update(dev_priv, kfb); |
kolibri_framebuffer_update(dev, kfb); |
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb); |
os_display->width = mode->hdisplay; |
378,7 → 364,7 |
{ |
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
struct kos_framebuffer *kfb = intel_fb->private; |
kolibri_framebuffer_update(dev_priv, kfb); |
kolibri_framebuffer_update(dev, kfb); |
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb); |
os_display->width = mode->hdisplay; |
575,8 → 561,6 |
return -1; |
}; |
dummy_fb_page = AllocPage(); |
os_display = GetDisplay(); |
os_display->ddev = dev; |
os_display->connector = connector; |
815,7 → 799,7 |
fb->name = obj->base.name; |
fb->width = os_display->width; |
fb->height = os_display->height; |
fb->pitch = obj->stride; |
fb->pitch = os_display->lfb_pitch; |
fb->tiling = obj->tiling_mode; |
fb->crtc = crtc->base.base.id; |
fb->pipe = crtc->pipe; |
825,112 → 809,7 |
return 0; |
} |
/*
 * (superseded version, replaced by kos_fb.c) Allocate a kos_framebuffer
 * and its 8 page tables — enough to map 8 * 1024 pages = 32 MiB — and
 * zero every table. The tables are filled in later by
 * kolibri_framebuffer_update().
 *
 * NOTE(review): the kzalloc() result is not checked before being
 * dereferenced; a failed allocation would oops here.
 * Always returns 0.
 */
int kolibri_framebuffer_init(struct intel_framebuffer *intel_fb)
{
	struct kos_framebuffer *kfb;
	addr_t dummy_table;
	addr_t *pt_addr = NULL;
	int pde;

	kfb = kzalloc(sizeof(struct kos_framebuffer),0);
	kfb->private = intel_fb;

	/* One page table per PDE slot; each table is cleared so unmapped
	 * entries are invalid until update() fills them. */
	for(pde = 0; pde < 8; pde++)
	{
		dummy_table = AllocPage();
		kfb->pde[pde] = dummy_table|PG_UW;
		pt_addr = kmap((struct page*)dummy_table);
		__builtin_memset(pt_addr,0,4096);
		kunmap((struct page*)dummy_table);
	};

	intel_fb->private = kfb;
	return 0;
/* Dead experimental code kept disabled: walks the object's sg list and
 * maps its backing pages directly instead of deferring to update(). */
#if 0
	struct sg_page_iter sg_iter;
	num_pages = obj->base.size/4096;
	printf("num_pages %d\n",num_pages);
	pte = 0;
	pde = 0;
	pt_addr = NULL;
	__sg_page_iter_start(&sg_iter, obj->pages->sgl, sg_nents(obj->pages->sgl), 0);
	while (__sg_page_iter_next(&sg_iter))
	{
		if (pt_addr == NULL)
		{
			addr_t pt = AllocPage();
			kfb->pde[pde] = pt|PG_UW;
			pde++;
			pt_addr = kmap_atomic((struct page*)pt);
		}
		pt_addr[pte] = sg_page_iter_dma_address(&sg_iter)|PG_UW|PG_WRITEC;
		if( (pte & 15) == 0)
			DRM_DEBUG_KMS("pte %x\n",pt_addr[pte]);
		if (++pte == 1024)
		{
			kunmap_atomic(pt_addr);
			pt_addr = NULL;
			if (pde == 8)
				break;
			pte = 0;
		}
	}
	if(pt_addr)
	{
		for(;pte < 1024; pte++)
			pt_addr[pte] = dummy_page|PG_UW;
		kunmap_atomic(pt_addr);
	}
#endif
};
/*
 * (superseded version, replaced by kos_fb.c) Point the kos_framebuffer
 * page tables at the framebuffer object's pages in the GTT aperture.
 * Entries are written 1024 per table across up to 8 tables; any leftover
 * entries in the last partially-filled table are redirected to
 * dummy_fb_page so they never reference stale memory.
 */
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb)
{
	struct intel_framebuffer *intel_fb = kfb->private;
	addr_t *pt_addr = NULL;
	int pte = 0;
	int pde = 0;
	int num_pages;
	addr_t pfn;
	ENTER();

	num_pages = intel_fb->obj->base.size/4096;
	/* Physical address of the object as seen through the mappable GTT. */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(intel_fb->obj);

	while(num_pages)
	{
		if (pt_addr == NULL)
		{
			/* pde entries store "page | PG_UW"; mask the flag bits
			 * to recover the table's page address. */
			addr_t pt = kfb->pde[pde] & 0xFFFFF000;
			pde++;
			pt_addr = kmap_atomic((struct page*)pt);
		}

		pt_addr[pte] = pfn|PG_UW|PG_WRITEC;
		pfn+= 4096;
		num_pages--;

		if (++pte == 1024)
		{
			kunmap_atomic(pt_addr);
			pt_addr = NULL;
			if (pde == 8)
				break;
			pte = 0;
		}
	}
	/* Fill the tail of a partially used table with the dummy page. */
	if(pt_addr)
	{
		for(;pte < 1024; pte++)
			pt_addr[pte] = dummy_fb_page|PG_UW;
		kunmap_atomic(pt_addr);
	}
	LEAVE();
};
typedef struct |
{ |
int left; |
1411,6 → 1290,3 |
list_del_init(&wait->task_list); |
return 1; |
} |
/drivers/video/drm/i915/kos_fb.c |
---|
0,0 → 1,174 |
/* |
* Copyright © 2008-2012 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
* |
* Authors: |
* Eric Anholt <eric@anholt.net> |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include <display.h> |
#include "intel_drv.h" |
#include "i915_drv.h" |
static addr_t dummy_fb_page; |
static struct kos_framebuffer *fake_fb; |
int fake_framebuffer_create() |
{ |
struct kos_framebuffer *kfb; |
addr_t dummy_table; |
addr_t *pt_addr; |
int pde, pte; |
kfb = kzalloc(sizeof(struct kos_framebuffer),0); |
if(kfb == NULL) |
goto err_0; |
dummy_fb_page = AllocPage(); |
if(dummy_fb_page == 0) |
goto err_1; |
for(pde = 0; pde < 8; pde++) |
{ |
dummy_table = AllocPage(); |
if(dummy_table == 0) |
goto err_2; |
kfb->pde[pde] = dummy_table|PG_UW; |
pt_addr = kmap_atomic((struct page*)dummy_table); |
for(pte = 0; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
}; |
fake_fb = kfb; |
return 0; |
err_2: |
for(pte = 0; pte < pde; pte++) |
FreePage(kfb->pde[pte]); |
FreePage(dummy_fb_page); |
err_1: |
kfree(kfb); |
err_0: |
return -ENOMEM; |
}; |
int kolibri_framebuffer_init(void *param) |
{ |
struct intel_framebuffer *intel_fb = param; |
struct kos_framebuffer *kfb; |
addr_t dummy_table; |
addr_t *pt_addr = NULL; |
int pde, pte; |
kfb = kzalloc(sizeof(struct kos_framebuffer),0); |
if(kfb == NULL) |
goto err_0; |
kfb->private = intel_fb; |
for(pde = 0; pde < 8; pde++) |
{ |
dummy_table = AllocPage(); |
if(dummy_table == 0) |
goto err_1; |
kfb->pde[pde] = dummy_table|PG_UW; |
pt_addr = kmap_atomic((struct page*)dummy_table); |
for(pte = 0; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
}; |
intel_fb->private = kfb; |
return 0; |
err_1: |
for(pte = 0; pte < pde; pte++) |
FreePage(kfb->pde[pte]); |
kfree(kfb); |
err_0: |
return -ENOMEM; |
}; |
/*
 * Point @kfb's page tables at the framebuffer object's pages as seen
 * through the mappable GTT aperture. Entries are written 1024 per table
 * across up to 8 tables; every entry past the object's last page —
 * both the tail of a partially filled table and any wholly unused
 * tables — is redirected to dummy_fb_page so stale mappings never leak.
 */
void kolibri_framebuffer_update(struct drm_device *dev, struct kos_framebuffer *kfb)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb = kfb->private;
	addr_t *pt_addr = NULL;
	int pte = 0;
	int pde = 0;
	int num_pages;
	addr_t pfn;

	num_pages = intel_fb->obj->base.size/4096;
	/* Physical address of the object through the mappable aperture. */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(intel_fb->obj);

	while(num_pages)
	{
		if (pt_addr == NULL)
		{
			/* pde entries store "page | PG_UW"; mask the flag
			 * bits to recover the table's page address. */
			addr_t pt = kfb->pde[pde] & 0xFFFFF000;
			pde++;
			pt_addr = kmap_atomic((struct page*)pt);
		}

		pt_addr[pte] = pfn|PG_UW|PG_WRITEC;
		pfn+= 4096;
		num_pages--;

		if (++pte == 1024)
		{
			kunmap_atomic(pt_addr);
			pt_addr = NULL;
			if (pde == 8)
				break;
			pte = 0;
		}
	}
	/* Pad the tail of a partially used table with the dummy page. */
	if(pt_addr)
	{
		for(; pte < 1024; pte++)
			pt_addr[pte] = dummy_fb_page|PG_UW;
		kunmap_atomic(pt_addr);
	};
	/* And point every remaining untouched table wholly at the dummy page. */
	for(; pde < 8; pde++)
	{
		addr_t pt = kfb->pde[pde] & 0xFFFFF000;
		pt_addr = kmap_atomic((struct page*)pt);
		for(pte = 0; pte < 1024; pte++)
			pt_addr[pte] = dummy_fb_page|PG_UW;
		kunmap_atomic(pt_addr);
	}
};
void set_fake_framebuffer() |
{ |
sysSetFramebuffer(fake_fb); |
} |
/drivers/video/drm/i915/main.c |
---|
14,7 → 14,7 |
#include "bitmap.h" |
#include "i915_kos32.h" |
#define DRV_NAME "i915 v4.4.3" |
#define DRV_NAME "i915 v4.4.5" |
#define I915_DEV_CLOSE 0 |
#define I915_DEV_INIT 1 |
254,6 → 254,10 |
dmi_scan_machine(); |
err = fake_framebuffer_create(); |
if( unlikely(err != 0)) |
return 0; |
driver_wq_state = I915_DEV_INIT; |
CreateKernelThread(i915_driver_thread); |