/drivers/video/drm/drm_dp_mst_topology.c |
---|
806,6 → 806,18 |
return mstb; |
} |
static void drm_dp_free_mst_port(struct kref *kref); |
static void drm_dp_free_mst_branch_device(struct kref *kref) |
{ |
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); |
if (mstb->port_parent) { |
if (list_empty(&mstb->port_parent->next)) |
kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); |
} |
kfree(mstb); |
} |
static void drm_dp_destroy_mst_branch_device(struct kref *kref) |
{ |
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); |
813,6 → 825,15 |
bool wake_tx = false; |
/* |
* init kref again to be used by ports to remove mst branch when it is |
* not needed anymore |
*/ |
kref_init(kref); |
if (mstb->port_parent && list_empty(&mstb->port_parent->next)) |
kref_get(&mstb->port_parent->kref); |
/* |
* destroy all ports - don't need lock |
* as there are no more references to the mst branch |
* device at this point. |
838,7 → 859,7 |
// if (wake_tx) |
// wake_up(&mstb->mgr->tx_waitq); |
kfree(mstb); |
kref_put(kref, drm_dp_free_mst_branch_device); |
} |
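In the rewritten teardown above, the branch kref is deliberately recycled: kref_init() gives drm_dp_destroy_mst_branch_device() a fresh reference to hold while the child ports are torn down, and the parent port is pinned with kref_get() so it cannot vanish first. When the closing kref_put() drops the last reference, drm_dp_free_mst_branch_device() releases the parent-port reference and frees the branch, breaking the port/branch reference cycle together with drm_dp_free_mst_port() near the end of this file.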
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) |
886,6 → 907,7 |
* from an EDID retrieval */ |
mutex_lock(&mgr->destroy_connector_lock); |
kref_get(&port->parent->kref); |
list_add(&port->next, &mgr->destroy_connector_list); |
mutex_unlock(&mgr->destroy_connector_lock); |
// schedule_work(&mgr->destroy_connector_work); |
981,17 → 1003,17 |
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, |
u8 *rad) |
{ |
int lct = port->parent->lct; |
int parent_lct = port->parent->lct; |
int shift = 4; |
int idx = lct / 2; |
if (lct > 1) { |
memcpy(rad, port->parent->rad, idx); |
shift = (lct % 2) ? 4 : 0; |
int idx = (parent_lct - 1) / 2; |
if (parent_lct > 1) { |
memcpy(rad, port->parent->rad, idx + 1); |
shift = (parent_lct % 2) ? 4 : 0; |
} else |
rad[0] = 0; |
rad[idx] |= port->port_num << shift; |
return lct + 1; |
return parent_lct + 1; |
} |
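The rewrite above fixes two subtleties in Relative Address (RAD) packing: the byte index is now derived as (parent_lct - 1) / 2 instead of lct / 2, and one extra byte is copied so the nibble already occupied by the parent's last hop is preserved. A minimal sketch of the nibble layout (illustrative only, not driver code; pack_rad_hop is a hypothetical helper):

#include <stdint.h>

/* Pack the next hop's port number into a RAD buffer that already holds
 * the parent's bytes. Odd hops land in the high nibble and even hops in
 * the low nibble of successive bytes, mirroring drm_dp_calculate_rad(). */
static int pack_rad_hop(uint8_t *rad, int parent_lct, int port_num)
{
	int idx = (parent_lct - 1) / 2;
	int shift = (parent_lct % 2) ? 4 : 0;

	if (parent_lct == 1)
		rad[0] = 0;	/* first hop below the root branch */
	rad[idx] |= (uint8_t)(port_num << shift);
	return parent_lct + 1;	/* lct of the new branch */
}

Starting from the root (lct 1), packing port 2 and then port 3 yields rad[0] == 0x23 and lct == 3; the lookup side recovers hop i as (rad[i / 2] >> ((i % 2) ? 0 : 4)) & 0xf, which is exactly why the & 0xf masks are added to the proppath builder and drm_dp_get_mst_branch_device() below.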
/* |
1021,18 → 1043,27 |
return send_link; |
} |
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, |
struct drm_dp_mst_port *port) |
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) |
{ |
int ret; |
if (port->dpcd_rev >= 0x12) { |
port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); |
if (!port->guid_valid) { |
ret = drm_dp_send_dpcd_write(mstb->mgr, |
port, |
memcpy(mstb->guid, guid, 16); |
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { |
if (mstb->port_parent) { |
ret = drm_dp_send_dpcd_write( |
mstb->mgr, |
mstb->port_parent, |
DP_GUID, |
16, port->guid); |
port->guid_valid = true; |
16, |
mstb->guid); |
} else { |
ret = drm_dp_dpcd_write( |
mstb->mgr->aux, |
DP_GUID, |
mstb->guid, |
16); |
} |
} |
} |
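The reworked helper stores the GUID reported for a branch and, when validation fails (drm_dp_validate_guid() treats an all-zeroes GUID as invalid and substitutes a generated one), writes the fresh GUID back to the device: via a sideband DPCD write through the branch's parent port for downstream branches, or directly over the AUX channel for the primary branch, which has no port_parent.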
1047,7 → 1078,7 |
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); |
for (i = 0; i < (mstb->lct - 1); i++) { |
int shift = (i % 2) ? 0 : 4; |
int port_num = mstb->rad[i / 2] >> shift; |
int port_num = (mstb->rad[i / 2] >> shift) & 0xf; |
snprintf(temp, sizeof(temp), "-%d", port_num); |
strlcat(proppath, temp, proppath_size); |
} |
1089,7 → 1120,6 |
port->dpcd_rev = port_msg->dpcd_revision; |
port->num_sdp_streams = port_msg->num_sdp_streams; |
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; |
memcpy(port->guid, port_msg->peer_guid, 16); |
/* manage mstb port lists with mgr lock - take a reference |
for this list */ |
1102,11 → 1132,9 |
if (old_ddps != port->ddps) { |
if (port->ddps) { |
drm_dp_check_port_guid(mstb, port); |
if (!port->input) |
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); |
} else { |
port->guid_valid = false; |
port->available_pbn = 0; |
} |
} |
1165,10 → 1193,8 |
if (old_ddps != port->ddps) { |
if (port->ddps) { |
drm_dp_check_port_guid(mstb, port); |
dowork = true; |
} else { |
port->guid_valid = false; |
port->available_pbn = 0; |
} |
} |
1198,7 → 1224,7 |
for (i = 0; i < lct - 1; i++) { |
int shift = (i % 2) ? 0 : 4; |
int port_num = rad[i / 2] >> shift; |
int port_num = (rad[i / 2] >> shift) & 0xf; |
list_for_each_entry(port, &mstb->ports, next) { |
if (port->port_num == port_num) { |
1218,6 → 1244,48 |
return mstb; |
} |
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( |
struct drm_dp_mst_branch *mstb, |
uint8_t *guid) |
{ |
struct drm_dp_mst_branch *found_mstb; |
struct drm_dp_mst_port *port; |
if (memcmp(mstb->guid, guid, 16) == 0) |
return mstb; |
list_for_each_entry(port, &mstb->ports, next) { |
if (!port->mstb) |
continue; |
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); |
if (found_mstb) |
return found_mstb; |
} |
return NULL; |
} |
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( |
struct drm_dp_mst_topology_mgr *mgr, |
uint8_t *guid) |
{ |
struct drm_dp_mst_branch *mstb; |
/* find the port by iterating down */ |
mutex_lock(&mgr->lock); |
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); |
if (mstb) |
kref_get(&mstb->kref); |
mutex_unlock(&mgr->lock); |
return mstb; |
} |
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_dp_mst_branch *mstb) |
{ |
1328,6 → 1396,7 |
struct drm_dp_sideband_msg_tx *txmsg) |
{ |
struct drm_dp_mst_branch *mstb = txmsg->dst; |
u8 req_type; |
/* both msg slots are full */ |
if (txmsg->seqno == -1) { |
1344,6 → 1413,12 |
txmsg->seqno = 1; |
mstb->tx_slots[txmsg->seqno] = txmsg; |
} |
req_type = txmsg->msg[0] & 0x7f; |
if (req_type == DP_CONNECTION_STATUS_NOTIFY || |
req_type == DP_RESOURCE_STATUS_NOTIFY) |
hdr->broadcast = 1; |
else |
hdr->broadcast = 0; |
hdr->path_msg = txmsg->path_msg; |
hdr->lct = mstb->lct; |
1446,26 → 1521,18 |
} |
/* called holding qlock */ |
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) |
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_dp_sideband_msg_tx *txmsg) |
{ |
struct drm_dp_sideband_msg_tx *txmsg; |
int ret; |
/* construct a chunk from the first msg in the tx_msg queue */ |
if (list_empty(&mgr->tx_msg_upq)) { |
mgr->tx_up_in_progress = false; |
return; |
} |
ret = process_single_tx_qlock(mgr, txmsg, true); |
txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); |
ret = process_single_tx_qlock(mgr, txmsg, true); |
if (ret == 1) { |
/* up txmsgs aren't put in slots - so free after we send it */ |
list_del(&txmsg->next); |
kfree(txmsg); |
} else if (ret) |
if (ret != 1) |
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); |
mgr->tx_up_in_progress = true; |
txmsg->dst->tx_slots[txmsg->seqno] = NULL; |
} |
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, |
1515,6 → 1582,9 |
txmsg->reply.u.link_addr.ports[i].num_sdp_streams, |
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); |
} |
drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); |
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { |
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); |
} |
1562,6 → 1632,37 |
return 0; |
} |
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) |
{ |
if (!mstb->port_parent) |
return NULL; |
if (mstb->port_parent->mstb != mstb) |
return mstb->port_parent; |
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); |
} |
static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_dp_mst_branch *mstb, |
int *port_num) |
{ |
struct drm_dp_mst_branch *rmstb = NULL; |
struct drm_dp_mst_port *found_port; |
mutex_lock(&mgr->lock); |
if (mgr->mst_primary) { |
found_port = drm_dp_get_last_connected_port_to_mstb(mstb); |
if (found_port) { |
rmstb = found_port->parent; |
kref_get(&rmstb->kref); |
*port_num = found_port->port_num; |
} |
} |
mutex_unlock(&mgr->lock); |
return rmstb; |
} |
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_dp_mst_port *port, |
int id, |
1569,11 → 1670,16 |
{ |
struct drm_dp_sideband_msg_tx *txmsg; |
struct drm_dp_mst_branch *mstb; |
int len, ret; |
int len, ret, port_num; |
port_num = port->port_num; |
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); |
if (!mstb) { |
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); |
if (!mstb) |
return -EINVAL; |
} |
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); |
if (!txmsg) { |
1582,7 → 1688,7 |
} |
txmsg->dst = mstb; |
len = build_allocate_payload(txmsg, port->port_num, |
len = build_allocate_payload(txmsg, port_num, |
id, |
pbn); |
1852,11 → 1958,12 |
drm_dp_encode_up_ack_reply(txmsg, req_type); |
mutex_lock(&mgr->qlock); |
list_add_tail(&txmsg->next, &mgr->tx_msg_upq); |
if (!mgr->tx_up_in_progress) { |
process_single_up_tx_qlock(mgr); |
} |
process_single_up_tx_qlock(mgr, txmsg); |
mutex_unlock(&mgr->qlock); |
kfree(txmsg); |
return 0; |
} |
1935,13 → 2042,6 |
mgr->mst_primary = mstb; |
kref_get(&mgr->mst_primary->kref); |
{ |
struct drm_dp_payload reset_pay; |
reset_pay.start_slot = 0; |
reset_pay.num_slots = 0x3f; |
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); |
} |
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, |
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); |
if (ret < 0) { |
1948,20 → 2048,13 |
goto out_unlock; |
} |
/* sort out guid */ |
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); |
if (ret != 16) { |
DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); |
goto out_unlock; |
{ |
struct drm_dp_payload reset_pay; |
reset_pay.start_slot = 0; |
reset_pay.num_slots = 0x3f; |
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); |
} |
mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); |
if (!mgr->guid_valid) { |
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); |
mgr->guid_valid = true; |
} |
// queue_work(system_long_wq, &mgr->work); |
ret = 0; |
2151,8 → 2244,10 |
if (mgr->up_req_recv.have_eomt) { |
struct drm_dp_sideband_msg_req_body msg; |
struct drm_dp_mst_branch *mstb; |
struct drm_dp_mst_branch *mstb = NULL; |
bool seqno; |
if (!mgr->up_req_recv.initial_hdr.broadcast) { |
mstb = drm_dp_get_mst_branch_device(mgr, |
mgr->up_req_recv.initial_hdr.lct, |
mgr->up_req_recv.initial_hdr.rad); |
2161,18 → 2256,39 |
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); |
return 0; |
} |
} |
seqno = mgr->up_req_recv.initial_hdr.seqno; |
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); |
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { |
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); |
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); |
if (!mstb) |
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); |
if (!mstb) { |
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); |
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); |
return 0; |
} |
drm_dp_update_port(mstb, &msg.u.conn_stat); |
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); |
(*mgr->cbs->hotplug)(mgr); |
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { |
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); |
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); |
if (!mstb) |
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); |
if (!mstb) { |
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); |
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); |
return 0; |
} |
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); |
} |
2352,6 → 2468,7 |
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); |
if (pbn == port->vcpi.pbn) { |
*slots = port->vcpi.num_slots; |
drm_dp_put_port(port); |
return true; |
} |
} |
2511,32 → 2628,31 |
*/ |
int drm_dp_calc_pbn_mode(int clock, int bpp) |
{ |
fixed20_12 pix_bw; |
fixed20_12 fbpp; |
fixed20_12 result; |
fixed20_12 margin, tmp; |
u32 res; |
u64 kbps; |
s64 peak_kbps; |
u32 numerator; |
u32 denominator; |
pix_bw.full = dfixed_const(clock); |
fbpp.full = dfixed_const(bpp); |
tmp.full = dfixed_const(8); |
fbpp.full = dfixed_div(fbpp, tmp); |
kbps = clock * bpp; |
result.full = dfixed_mul(pix_bw, fbpp); |
margin.full = dfixed_const(54); |
tmp.full = dfixed_const(64); |
margin.full = dfixed_div(margin, tmp); |
result.full = dfixed_div(result, margin); |
/* |
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 |
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on |
* common multiplier to render an integer PBN for all link rate/lane |
* counts combinations |
* calculate |
* peak_kbps *= (1006/1000) |
* peak_kbps *= (64/54) |
* peak_kbps *= 8 convert to bytes |
*/ |
margin.full = dfixed_const(1006); |
tmp.full = dfixed_const(1000); |
margin.full = dfixed_div(margin, tmp); |
result.full = dfixed_mul(result, margin); |
numerator = 64 * 1006; |
denominator = 54 * 8 * 1000 * 1000; |
result.full = dfixed_div(result, tmp); |
result.full = dfixed_ceil(result); |
res = dfixed_trunc(result); |
return res; |
kbps *= numerator; |
peak_kbps = drm_fixp_from_fraction(kbps, denominator); |
return drm_fixp2int_ceil(peak_kbps); |
} |
EXPORT_SYMBOL(drm_dp_calc_pbn_mode); |
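To see the fixed-point rewrite in action, take the first self-test case below: clock = 154000 kHz at bpp = 30 gives kbps = 154000 * 30 = 4,620,000; multiplying by the numerator 64 * 1006 = 64,384 and dividing by the denominator 54 * 8 * 1000 * 1000 = 432,000,000 yields roughly 688.55, which drm_fixp2int_ceil() rounds up to the expected PBN of 689.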
2544,11 → 2660,23 |
{ |
int ret; |
ret = drm_dp_calc_pbn_mode(154000, 30); |
if (ret != 689) |
if (ret != 689) { |
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", |
154000, 30, 689, ret); |
return -EINVAL; |
} |
ret = drm_dp_calc_pbn_mode(234000, 30); |
if (ret != 1047) |
if (ret != 1047) { |
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", |
234000, 30, 1047, ret); |
return -EINVAL; |
} |
ret = drm_dp_calc_pbn_mode(297000, 24); |
if (ret != 1063) { |
DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", |
297000, 24, 1063, ret); |
return -EINVAL; |
} |
return 0; |
} |
2627,6 → 2755,12 |
mutex_unlock(&mgr->qlock); |
} |
static void drm_dp_free_mst_port(struct kref *kref) |
{ |
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); |
kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); |
kfree(port); |
} |
/** |
* drm_dp_mst_topology_mgr_init - initialise a topology manager |
* @mgr: manager struct to initialise |
2647,7 → 2781,6 |
mutex_init(&mgr->qlock); |
mutex_init(&mgr->payload_lock); |
mutex_init(&mgr->destroy_connector_lock); |
INIT_LIST_HEAD(&mgr->tx_msg_upq); |
INIT_LIST_HEAD(&mgr->tx_msg_downq); |
INIT_LIST_HEAD(&mgr->destroy_connector_list); |
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); |
/drivers/video/drm/drm_irq.c |
---|
238,6 → 238,64 |
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; |
} |
/* |
* Within a drm_vblank_pre_modeset - drm_vblank_post_modeset |
* interval? If so then vblank irqs keep running and it will likely |
* happen that the hardware vblank counter is not trustworthy as it |
* might reset at some point in that interval and vblank timestamps |
* are not trustworthy either in that interval. Iow. this can result |
* in a bogus diff >> 1 which must be avoided as it would cause |
* random large forward jumps of the software vblank counter. |
*/ |
if (diff > 1 && (vblank->inmodeset & 0x2)) { |
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u" |
" due to pre-modeset.\n", pipe, diff); |
diff = 1; |
} |
/* |
* FIXME: Need to replace this hack with proper seqlocks. |
* |
* Restrict the bump of the software vblank counter to a safe maximum |
* value of +1 whenever there is the possibility that concurrent readers |
* of vblank timestamps could be active at the moment, as the current |
* implementation of the timestamp caching and updating is not safe |
* against concurrent readers for calls to store_vblank() with a bump |
* of anything but +1. A bump != 1 would very likely return corrupted |
* timestamps to userspace, because the same slot in the cache could |
* be concurrently written by store_vblank() and read by one of those |
* readers without the read-retry logic detecting the collision. |
* |
* Concurrent readers can exist when we are called from the |
* drm_vblank_off() or drm_vblank_on() functions and other non-vblank- |
* irq callers. However, all those calls to us are happening with the |
* vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount |
* can't increase while we are executing. Therefore a zero refcount at |
* this point is safe for arbitrary counter bumps if we are called |
* outside vblank irq, a non-zero count is not 100% safe. Unfortunately |
* we must also accept a refcount of 1, as whenever we are called from |
* drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and |
* we must let that one pass through in order to not lose vblank counts |
* during vblank irq off - which would completely defeat the whole |
* point of this routine. |
* |
* Whenever we are called from vblank irq, we have to assume concurrent |
* readers exist or can show up any time during our execution, even if |
* the refcount is currently zero, as vblank irqs are usually only |
* enabled due to the presence of readers, and because when we are called |
* from vblank irq we can't hold the vbl_lock to protect us from sudden |
* bumps in vblank refcount. Therefore also restrict bumps to +1 when |
* called from vblank irq. |
*/ |
if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 || |
(flags & DRM_CALLED_FROM_VBLIRQ))) { |
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u " |
"refcount %u, vblirq %u\n", pipe, diff, |
atomic_read(&vblank->refcount), |
(flags & DRM_CALLED_FROM_VBLIRQ) != 0); |
diff = 1; |
} |
DRM_DEBUG_VBL("updating vblank count on crtc %u:" |
" current=%u, diff=%u, hw=%u hw_last=%u\n", |
pipe, vblank->count, diff, cur_vblank, vblank->last); |
1178,6 → 1236,8 |
spin_lock_irqsave(&dev->event_lock, irqflags); |
spin_lock(&dev->vbl_lock); |
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n", |
pipe, vblank->enabled, vblank->inmodeset); |
vblank_disable_and_save(dev, pipe); |
wake_up(&vblank->queue); |
1280,6 → 1340,9 |
return; |
spin_lock_irqsave(&dev->vbl_lock, irqflags); |
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n", |
pipe, vblank->enabled, vblank->inmodeset); |
/* Drop our private "prevent drm_vblank_get" refcount */ |
if (vblank->inmodeset) { |
atomic_dec(&vblank->refcount); |
1292,8 → 1355,7 |
* re-enable interrupts if there are users left, or the |
* user wishes vblank interrupts to be enabled all the time. |
*/ |
if (atomic_read(&vblank->refcount) != 0 || |
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) |
if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0) |
WARN_ON(drm_vblank_enable(dev, pipe)); |
spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
} |
1388,6 → 1450,7 |
if (vblank->inmodeset) { |
spin_lock_irqsave(&dev->vbl_lock, irqflags); |
dev->vblank_disable_allowed = true; |
drm_reset_vblank_timestamp(dev, pipe); |
spin_unlock_irqrestore(&dev->vbl_lock, irqflags); |
if (vblank->inmodeset & 0x2) |
1507,19 → 1570,3 |
return 0; |
} |
EXPORT_SYMBOL(drm_vblank_no_hw_counter); |
u64 div64_u64(u64 dividend, u64 divisor) |
{ |
u32 high, d; |
high = divisor >> 32; |
if (high) { |
unsigned int shift = fls(high); |
d = divisor >> shift; |
dividend >>= shift; |
} else |
d = divisor; |
return div_u64(dividend, d); |
} |
/drivers/video/drm/drm_stub.c |
---|
560,3 → 560,45 |
DRM_DEBUG("generating hotplug event\n"); |
} |
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) |
{ |
u32 high = divisor >> 32; |
u64 quot; |
if (high == 0) { |
u32 rem32; |
quot = div_u64_rem(dividend, divisor, &rem32); |
*remainder = rem32; |
} else { |
int n = 1 + fls(high); |
quot = div_u64(dividend >> n, divisor >> n); |
if (quot != 0) |
quot--; |
*remainder = dividend - quot * divisor; |
if (*remainder >= divisor) { |
quot++; |
*remainder -= divisor; |
} |
} |
return quot; |
} |
u64 div64_u64(u64 dividend, u64 divisor) |
{ |
u32 high, d; |
high = divisor >> 32; |
if (high) { |
unsigned int shift = fls(high); |
d = divisor >> shift; |
dividend >>= shift; |
} else |
d = divisor; |
return div_u64(dividend, d); |
} |
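Both helpers use the classic approximate-then-correct scheme: when the divisor does not fit in 32 bits, dividend and divisor are shifted right by 1 + fls(high) so that a 64/32 division (div_u64) applies; the truncated estimate can exceed the true quotient by at most one, so div64_u64_rem() decrements it and adds the unit back if the remainder is still >= divisor. A self-contained host-side sketch of the same logic (illustrative only; __builtin_clz stands in for the kernel's fls()):

#include <stdint.h>

static uint64_t div64_u64_rem_sketch(uint64_t dividend, uint64_t divisor,
				     uint64_t *remainder)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;	/* div_u64_rem() in the kernel */
		*remainder = dividend % divisor;
	} else {
		int n = 33 - __builtin_clz(high);	/* 1 + fls(high) */

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;			/* estimate may be high by one */
		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {	/* re-add the dropped unit */
			quot++;
			*remainder -= divisor;
		}
	}
	return quot;
}

For example, dividend = (1ULL << 35) + 5 and divisor = 1ULL << 33 give a shifted estimate of 4; the decrement makes it 3, the remainder (2^33 + 5) is >= divisor, and the correction restores the exact quotient 4 with remainder 5.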
/drivers/video/drm/i915/Makefile |
---|
113,6 → 113,7 |
intel_uncore.c \ |
kms_display.c \ |
kos_cursor.c \ |
kos_fb.c \ |
utils.c \ |
fwblob.asm \ |
../hdmi.c \ |
/drivers/video/drm/i915/Makefile.lto |
---|
2,11 → 2,11 |
CC = kos32-gcc |
FASM = fasm.exe |
DEFINES = -DDRM_DEBUG_CODE=1 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU |
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\" |
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI |
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI |
DEFINES += -DKBUILD_MODNAME=\"i915.dll\" |
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk |
DRV_INCLUDES = /d/kos/kolibri/drivers/include |
DRM_TOPDIR = $(CURDIR)/.. |
16,7 → 16,7 |
-I$(DRV_INCLUDES)/uapi \ |
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES) |
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe |
CFLAGS_OPT = -O2 -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe |
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto |
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT) |
45,6 → 45,8 |
NAME_SRC= main.c \ |
pci.c \ |
getopt.c \ |
getopt1.c \ |
dvo_ch7017.c \ |
dvo_ch7xxx.c \ |
dvo_ivch.c \ |
90,6 → 92,7 |
intel_frontbuffer.c \ |
intel_guc_loader.c \ |
intel_hdmi.c \ |
intel_hotplug.c \ |
intel_i2c.c \ |
intel_lrc.c \ |
intel_lvds.c \ |
109,6 → 112,8 |
intel_sprite.c \ |
intel_uncore.c \ |
kms_display.c \ |
kos_cursor.c \ |
kos_fb.c \ |
utils.c \ |
fwblob.asm \ |
../hdmi.c \ |
123,7 → 128,7 |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
../drm_dp_mst_topology.c \ |
$(DRM_TOPDIR)/drm_dp_mst_topology.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_atomic_helper.c \ |
$(DRM_TOPDIR)/drm_bridge.c \ |
149,7 → 154,6 |
$(patsubst %.c, %.o, $(NAME_SRC)))) |
all: $(NAME).dll |
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile.lto |
162,7 → 166,10 |
%.o : %.S $(HFILES) Makefile.lto |
as -o $@ $< |
fwblob.o: fwblob.asm $(FW_BINS) Makefile |
$(FASM) $< $@ |
clean: |
-rm -f ../*/*.o |
/drivers/video/drm/i915/i915_dma.c |
---|
840,6 → 840,8 |
goto put_bridge; |
} |
set_fake_framebuffer(); |
/* This must be called before any calls to HAS_PCH_* */ |
intel_detect_pch(dev); |
/drivers/video/drm/i915/i915_drv.c |
---|
508,7 → 508,10 |
dev_priv->pch_type = PCH_SPT; |
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); |
WARN_ON(!IS_SKYLAKE(dev)); |
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { |
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || |
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && |
pch->subsystem_vendor == 0x1af4 && |
pch->subsystem_device == 0x1100)) { |
dev_priv->pch_type = intel_virt_detect_pch(dev); |
} else |
continue; |
/drivers/video/drm/i915/i915_drv.h |
---|
51,7 → 51,6 |
#include "intel_guc.h" |
#include <linux/spinlock.h> |
#include <linux/err.h> |
#define ioread32(addr) readl(addr) |
static inline u8 inb(u16 port) |
2623,6 → 2622,7 |
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 |
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 |
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 |
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ |
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) |
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) |
/drivers/video/drm/i915/i915_gem_context.c |
---|
340,6 → 340,10 |
i915_gem_context_unreference(lctx); |
ring->last_context = NULL; |
} |
/* Force the GPU state to be reinitialised on enabling */ |
if (ring->default_context) |
ring->default_context->legacy_hw_ctx.initialized = false; |
} |
} |
708,7 → 712,7 |
if (ret) |
goto unpin_out; |
if (!to->legacy_hw_ctx.initialized) { |
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) { |
hw_flags |= MI_RESTORE_INHIBIT; |
/* NB: If we inhibit the restore, the context is not allowed to |
* die because future work may end up depending on valid address |
/drivers/video/drm/i915/i915_guc_submission.c |
---|
24,7 → 24,6 |
#include <linux/firmware.h> |
#include <linux/circ_buf.h> |
#include "intel_drv.h" |
#include "i915_drv.h" |
#include "intel_guc.h" |
/** |
/drivers/video/drm/i915/i915_irq.c |
---|
1657,11 → 1657,11 |
for_each_pipe(dev_priv, pipe) { |
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip(dev, pipe); |
} |
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
2028,7 → 2028,7 |
for_each_pipe(dev_priv, pipe) { |
if (de_iir & DE_PIPE_VBLANK(pipe) && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) |
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); |
2038,8 → 2038,8 |
/* plane/pipes map 1:1 on ilk+ */ |
if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip_plane(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
} |
2081,12 → 2081,12 |
for_each_pipe(dev_priv, pipe) { |
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && |
intel_pipe_handle_vblank(dev, pipe)) |
/*intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
/* plane/pipes map 1:1 on ilk+ */ |
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { |
// intel_prepare_page_flip(dev, pipe); |
// intel_finish_page_flip_plane(dev, pipe); |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
} |
2290,7 → 2290,7 |
if (pipe_iir & GEN8_PIPE_VBLANK && |
intel_pipe_handle_vblank(dev, pipe)) |
/* intel_check_page_flip(dev, pipe)*/; |
intel_check_page_flip(dev, pipe); |
if (INTEL_INFO(dev_priv)->gen >= 9) |
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; |
2297,6 → 2297,10 |
else |
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; |
if (flip_done) { |
intel_prepare_page_flip(dev, pipe); |
intel_finish_page_flip_plane(dev, pipe); |
} |
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) |
hsw_pipe_crc_irq_handler(dev, pipe); |
2335,10 → 2339,14 |
spt_irq_handler(dev, pch_iir); |
else |
cpt_irq_handler(dev, pch_iir); |
} else |
DRM_ERROR("The master control interrupt lied (SDE)!\n"); |
} else { |
/* |
* Like on previous PCH there seems to be something |
* fishy going on with forwarding PCH interrupts. |
*/ |
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); |
} |
} |
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
POSTING_READ_FW(GEN8_MASTER_IRQ); |
2363,6 → 2371,8 |
for_each_ring(ring, dev_priv, i) |
wake_up_all(&ring->irq_queue); |
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ |
wake_up_all(&dev_priv->pending_flip_queue); |
/* |
* Signal tasks blocked in i915_gem_wait_for_error that the pending |
3775,12 → 3785,12 |
if (I915_READ16(ISR) & flip_pending) |
goto check_page_flip; |
// intel_prepare_page_flip(dev, plane); |
// intel_finish_page_flip(dev, pipe); |
intel_prepare_page_flip(dev, plane); |
intel_finish_page_flip(dev, pipe); |
return true; |
check_page_flip: |
// intel_check_page_flip(dev, pipe); |
intel_check_page_flip(dev, pipe); |
return false; |
} |
3959,9 → 3969,12 |
if (I915_READ(ISR) & flip_pending) |
goto check_page_flip; |
intel_prepare_page_flip(dev, plane); |
intel_finish_page_flip(dev, pipe); |
return true; |
check_page_flip: |
intel_check_page_flip(dev, pipe); |
return false; |
} |
4449,7 → 4462,7 |
void intel_irq_uninstall(struct drm_i915_private *dev_priv) |
{ |
// drm_irq_uninstall(dev_priv->dev); |
// intel_hpd_cancel_work(dev_priv); |
intel_hpd_cancel_work(dev_priv); |
dev_priv->pm.irqs_enabled = false; |
} |
/drivers/video/drm/i915/i915_trace.h |
---|
44,4 → 44,7 |
#define trace_i915_va_alloc(vm,start,size,name) |
#define trace_i915_gem_request_notify(ring) |
#define trace_i915_gem_object_pread(obj, offset, size) |
#define trace_i915_flip_complete(plane, pending_flip_obj) |
#define trace_i915_flip_request(plane, obj) |
#endif |
/drivers/video/drm/i915/intel_ddi.c |
---|
1582,7 → 1582,8 |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | |
wrpll_params.central_freq; |
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
intel_encoder->type == INTEL_OUTPUT_DP_MST) { |
switch (crtc_state->port_clock / 2) { |
case 81000: |
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); |
/drivers/video/drm/i915/intel_display.c |
---|
3945,13 → 3945,13 |
drm_crtc_vblank_put(&intel_crtc->base); |
// wake_up_all(&dev_priv->pending_flip_queue); |
// queue_work(dev_priv->wq, &work->work); |
wake_up_all(&dev_priv->pending_flip_queue); |
queue_work(dev_priv->wq, &work->work); |
// trace_i915_flip_complete(intel_crtc->plane, |
// work->pending_flip_obj); |
trace_i915_flip_complete(intel_crtc->plane, |
work->pending_flip_obj); |
} |
#if 0 |
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
3977,7 → 3977,6 |
mutex_unlock(&dev->struct_mutex); |
} |
} |
#endif |
/* Program iCLKIP clock to the desired frequency */ |
static void lpt_program_iclkip(struct drm_crtc *crtc) |
4851,6 → 4850,9 |
mutex_unlock(&dev->struct_mutex); |
} |
if (atomic->wait_for_flips) |
intel_crtc_wait_for_pending_flips(&crtc->base); |
if (atomic->disable_fbc) |
intel_fbc_disable_crtc(crtc); |
4883,7 → 4885,7 |
* to compute the mask of flip planes precisely. For the time being |
* consider this a flip to a NULL plane. |
*/ |
// intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); |
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); |
} |
static void ironlake_crtc_enable(struct drm_crtc *crtc) |
6320,6 → 6322,7 |
return; |
if (to_intel_plane_state(crtc->primary->state)->visible) { |
intel_crtc_wait_for_pending_flips(crtc); |
intel_pre_disable_primary(crtc); |
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); |
10910,7 → 10913,7 |
/* and that it is marked active as soon as the irq could fire. */ |
smp_wmb(); |
} |
#if 0 |
static int intel_gen2_queue_flip(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
11373,8 → 11376,6 |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_unpin_work *work; |
WARN_ON(!in_interrupt()); |
if (crtc == NULL) |
return; |
11391,7 → 11392,7 |
intel_queue_rps_boost_for_request(dev, work->flip_queued_req); |
spin_unlock(&dev->event_lock); |
} |
#endif |
static int intel_crtc_page_flip(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_pending_vblank_event *event, |
11441,7 → 11442,7 |
work->event = event; |
work->crtc = crtc; |
work->old_fb = old_fb; |
// INIT_WORK(&work->work, intel_unpin_work_fn); |
INIT_WORK(&work->work, intel_unpin_work_fn); |
ret = drm_crtc_vblank_get(crtc); |
if (ret) |
11468,8 → 11469,8 |
intel_crtc->unpin_work = work; |
spin_unlock_irq(&dev->event_lock); |
if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
flush_workqueue(dev_priv->wq); |
// if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
// flush_workqueue(dev_priv->wq); |
/* Reference the objects for the scheduled work. */ |
drm_framebuffer_reference(work->old_fb); |
11927,13 → 11928,23 |
pipe_config->pipe_bpp = connector->base.display_info.bpc*3; |
} |
/* Clamp bpp to 8 on screens without EDID 1.4 */ |
if (connector->base.display_info.bpc == 0 && bpp > 24) { |
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", |
bpp); |
pipe_config->pipe_bpp = 24; |
/* Clamp bpp to default limit on screens without EDID 1.4 */ |
if (connector->base.display_info.bpc == 0) { |
int type = connector->base.connector_type; |
int clamp_bpp = 24; |
/* Fall back to 18 bpp when DP sink capability is unknown. */ |
if (type == DRM_MODE_CONNECTOR_DisplayPort || |
type == DRM_MODE_CONNECTOR_eDP) |
clamp_bpp = 18; |
if (bpp > clamp_bpp) { |
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", |
bpp, clamp_bpp); |
pipe_config->pipe_bpp = clamp_bpp; |
} |
} |
} |
static int |
compute_baseline_pipe_bpp(struct intel_crtc *crtc, |
13317,7 → 13328,7 |
.gamma_set = intel_crtc_gamma_set, |
.set_config = drm_atomic_helper_set_config, |
.destroy = intel_crtc_destroy, |
// .page_flip = intel_crtc_page_flip, |
.page_flip = intel_crtc_page_flip, |
.atomic_duplicate_state = intel_crtc_duplicate_state, |
.atomic_destroy_state = intel_crtc_destroy_state, |
}; |
13534,11 → 13545,12 |
int max_scale = DRM_PLANE_HELPER_NO_SCALING; |
bool can_position = false; |
if (INTEL_INFO(plane->dev)->gen >= 9) { |
/* use scaler when colorkey is not required */ |
if (INTEL_INFO(plane->dev)->gen >= 9 && |
state->ckey.flags == I915_SET_COLORKEY_NONE) { |
if (state->ckey.flags == I915_SET_COLORKEY_NONE) { |
min_scale = 1; |
max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); |
} |
can_position = true; |
} |
14628,11 → 14640,34 |
broxton_modeset_calc_cdclk; |
} |
switch (INTEL_INFO(dev)->gen) { |
case 2: |
dev_priv->display.queue_flip = intel_gen2_queue_flip; |
break; |
case 3: |
dev_priv->display.queue_flip = intel_gen3_queue_flip; |
break; |
case 4: |
case 5: |
dev_priv->display.queue_flip = intel_gen4_queue_flip; |
break; |
case 6: |
dev_priv->display.queue_flip = intel_gen6_queue_flip; |
break; |
case 7: |
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ |
dev_priv->display.queue_flip = intel_gen7_queue_flip; |
break; |
case 9: |
/* Drop through - unsupported since execlist only. */ |
default: |
/* Default just returns -ENODEV to indicate unsupported */ |
dev_priv->display.queue_flip = intel_default_queue_flip; |
} |
mutex_init(&dev_priv->pps_mutex); |
} |
/drivers/video/drm/i915/intel_dp.c |
---|
1894,7 → 1894,7 |
* operations. |
*/ |
delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); |
// schedule_delayed_work(&intel_dp->panel_vdd_work, delay); |
schedule_delayed_work(&intel_dp->panel_vdd_work, delay); |
} |
/* |
5756,7 → 5756,7 |
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) |
return; |
// cancel_delayed_work(&dev_priv->drrs.work); |
cancel_delayed_work(&dev_priv->drrs.work); |
mutex_lock(&dev_priv->drrs.mutex); |
if (!dev_priv->drrs.dp) { |
5776,6 → 5776,13 |
dev_priv->drrs.dp->attached_connector->panel. |
fixed_mode->vrefresh); |
/* |
* flush also means no more activity hence schedule downclock, if all |
* other fbs are quiescent too |
*/ |
if (!dev_priv->drrs.busy_frontbuffer_bits) |
schedule_delayed_work(&dev_priv->drrs.work, |
msecs_to_jiffies(1000)); |
mutex_unlock(&dev_priv->drrs.mutex); |
} |
/drivers/video/drm/i915/intel_dsi_panel_vbt.c |
---|
207,8 → 207,13 |
gpio = *data++; |
/* pull up/down */ |
action = *data++; |
action = *data++ & 1; |
if (gpio >= ARRAY_SIZE(gtable)) { |
DRM_DEBUG_KMS("unknown gpio %u\n", gpio); |
goto out; |
} |
function = gtable[gpio].function_reg; |
pad = gtable[gpio].pad_reg; |
226,6 → 231,7 |
vlv_gpio_nc_write(dev_priv, pad, val); |
mutex_unlock(&dev_priv->sb_lock); |
out: |
return data; |
} |
/drivers/video/drm/i915/intel_hotplug.c |
---|
0,0 → 1,513 |
/* |
* Copyright © 2015 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#include <linux/kernel.h> |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
#include "intel_drv.h" |
/** |
* DOC: Hotplug |
* |
* Simply put, hotplug occurs when a display is connected to or disconnected |
* from the system. However, there may be adapters and docking stations and |
* Display Port short pulses and MST devices involved, complicating matters. |
* |
* Hotplug in i915 is handled in many different levels of abstraction. |
* |
* The platform dependent interrupt handling code in i915_irq.c enables, |
* disables, and does preliminary handling of the interrupts. The interrupt |
* handlers gather the hotplug detect (HPD) information from relevant registers |
* into a platform independent mask of hotplug pins that have fired. |
* |
* The platform independent interrupt handler intel_hpd_irq_handler() in |
* intel_hotplug.c does hotplug irq storm detection and mitigation, and passes |
* further processing to appropriate bottom halves (Display Port specific and |
* regular hotplug). |
* |
* The Display Port work function i915_digport_work_func() calls into |
* intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long |
* pulses, with failures and non-MST long pulses triggering regular hotplug |
* processing on the connector. |
* |
* The regular hotplug work function i915_hotplug_work_func() calls connector |
* detect hooks, and, if connector status changes, triggers sending of hotplug |
* uevent to userspace via drm_kms_helper_hotplug_event(). |
* |
* Finally, the userspace is responsible for triggering a modeset upon receiving |
* the hotplug uevent, disabling or enabling the crtc as needed. |
* |
* The hotplug interrupt storm detection and mitigation code keeps track of the |
* number of interrupts per hotplug pin per a period of time, and if the number |
* of interrupts exceeds a certain threshold, the interrupt is disabled for a |
* while before being re-enabled. The intention is to mitigate issues arising |
* from broken hardware triggering massive amounts of interrupts and grinding |
* the system to a halt. |
* |
* The current implementation expects that a hotplug interrupt storm will not be |
* seen when a display port sink is connected; hence, on platforms whose DP |
* callback is handled by i915_digport_work_func, re-enabling of hpd is not |
* performed (it was never expected to be disabled in the first place ;) ). |
* This is specific to DP sinks handled by this routine; any other display |
* such as HDMI or DVI enabled on the same port will have proper logic, since |
* it will use i915_hotplug_work_func where this logic is handled. |
*/ |
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port) |
{ |
switch (pin) { |
case HPD_PORT_A: |
*port = PORT_A; |
return true; |
case HPD_PORT_B: |
*port = PORT_B; |
return true; |
case HPD_PORT_C: |
*port = PORT_C; |
return true; |
case HPD_PORT_D: |
*port = PORT_D; |
return true; |
case HPD_PORT_E: |
*port = PORT_E; |
return true; |
default: |
return false; /* no hpd */ |
} |
} |
#define HPD_STORM_DETECT_PERIOD 1000 |
#define HPD_STORM_THRESHOLD 5 |
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) |
/** |
* intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin |
* @dev_priv: private driver data pointer |
* @pin: the pin to gather stats on |
* |
* Gather stats about HPD irqs from the specified @pin, and detect irq |
* storms. Only the pin specific stats and state are changed, the caller is |
* responsible for further action. |
* |
* @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms, |
* otherwise it's considered an irq storm, and the irq state is set to |
* @HPD_MARK_DISABLED. |
* |
* Return true if an irq storm was detected on @pin. |
*/ |
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, |
enum hpd_pin pin) |
{ |
unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; |
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); |
bool storm = false; |
if (!time_in_range(jiffies, start, end)) { |
dev_priv->hotplug.stats[pin].last_jiffies = jiffies; |
dev_priv->hotplug.stats[pin].count = 0; |
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); |
} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { |
dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; |
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); |
storm = true; |
} else { |
dev_priv->hotplug.stats[pin].count++; |
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, |
dev_priv->hotplug.stats[pin].count); |
} |
return storm; |
} |
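Concretely, with HPD_STORM_DETECT_PERIOD = 1000 and HPD_STORM_THRESHOLD = 5: an interrupt arriving outside the current one-second window restarts the window and resets the count, interrupts inside the window increment it, and once the per-window count exceeds five the pin is marked HPD_MARK_DISABLED, after which intel_hpd_irq_storm_disable() below moves the connector to polling and schedules re-enabling via the two-minute HPD_STORM_REENABLE_DELAY delayed work.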
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_connector *intel_connector; |
struct intel_encoder *intel_encoder; |
struct drm_connector *connector; |
enum hpd_pin pin; |
bool hpd_disabled = false; |
assert_spin_locked(&dev_priv->irq_lock); |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
if (connector->polled != DRM_CONNECTOR_POLL_HPD) |
continue; |
intel_connector = to_intel_connector(connector); |
intel_encoder = intel_connector->encoder; |
if (!intel_encoder) |
continue; |
pin = intel_encoder->hpd_pin; |
if (pin == HPD_NONE || |
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) |
continue; |
DRM_INFO("HPD interrupt storm detected on connector %s: " |
"switching from hotplug detection to polling\n", |
connector->name); |
dev_priv->hotplug.stats[pin].state = HPD_DISABLED; |
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
| DRM_CONNECTOR_POLL_DISCONNECT; |
hpd_disabled = true; |
} |
/* Enable polling and queue hotplug re-enabling. */ |
if (hpd_disabled) { |
drm_kms_helper_poll_enable_locked(dev); |
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, |
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); |
} |
} |
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, typeof(*dev_priv), |
hotplug.reenable_work.work); |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
int i; |
intel_runtime_pm_get(dev_priv); |
spin_lock_irq(&dev_priv->irq_lock); |
for_each_hpd_pin(i) { |
struct drm_connector *connector; |
if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) |
continue; |
dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
struct intel_connector *intel_connector = to_intel_connector(connector); |
if (intel_connector->encoder->hpd_pin == i) { |
if (connector->polled != intel_connector->polled) |
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
connector->name); |
connector->polled = intel_connector->polled; |
if (!connector->polled) |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
} |
} |
} |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock_irq(&dev_priv->irq_lock); |
intel_runtime_pm_put(dev_priv); |
} |
static bool intel_hpd_irq_event(struct drm_device *dev, |
struct drm_connector *connector) |
{ |
enum drm_connector_status old_status; |
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
old_status = connector->status; |
connector->status = connector->funcs->detect(connector, false); |
if (old_status == connector->status) |
return false; |
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", |
connector->base.id, |
connector->name, |
drm_get_connector_status_name(old_status), |
drm_get_connector_status_name(connector->status)); |
return true; |
} |
static void i915_digport_work_func(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, struct drm_i915_private, hotplug.dig_port_work); |
u32 long_port_mask, short_port_mask; |
struct intel_digital_port *intel_dig_port; |
int i; |
u32 old_bits = 0; |
spin_lock_irq(&dev_priv->irq_lock); |
long_port_mask = dev_priv->hotplug.long_port_mask; |
dev_priv->hotplug.long_port_mask = 0; |
short_port_mask = dev_priv->hotplug.short_port_mask; |
dev_priv->hotplug.short_port_mask = 0; |
spin_unlock_irq(&dev_priv->irq_lock); |
for (i = 0; i < I915_MAX_PORTS; i++) { |
bool valid = false; |
bool long_hpd = false; |
intel_dig_port = dev_priv->hotplug.irq_port[i]; |
if (!intel_dig_port || !intel_dig_port->hpd_pulse) |
continue; |
if (long_port_mask & (1 << i)) { |
valid = true; |
long_hpd = true; |
} else if (short_port_mask & (1 << i)) |
valid = true; |
if (valid) { |
enum irqreturn ret; |
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); |
if (ret == IRQ_NONE) { |
/* fall back to old school hpd */ |
old_bits |= (1 << intel_dig_port->base.hpd_pin); |
} |
} |
} |
if (old_bits) { |
spin_lock_irq(&dev_priv->irq_lock); |
dev_priv->hotplug.event_bits |= old_bits; |
spin_unlock_irq(&dev_priv->irq_lock); |
schedule_work(&dev_priv->hotplug.hotplug_work); |
} |
} |
/* |
* Handle hotplug events outside the interrupt handler proper. |
*/ |
static void i915_hotplug_work_func(struct work_struct *work) |
{ |
struct drm_i915_private *dev_priv = |
container_of(work, struct drm_i915_private, hotplug.hotplug_work); |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_connector *intel_connector; |
struct intel_encoder *intel_encoder; |
struct drm_connector *connector; |
bool changed = false; |
u32 hpd_event_bits; |
mutex_lock(&mode_config->mutex); |
DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
spin_lock_irq(&dev_priv->irq_lock); |
hpd_event_bits = dev_priv->hotplug.event_bits; |
dev_priv->hotplug.event_bits = 0; |
/* Disable hotplug on connectors that hit an irq storm. */ |
intel_hpd_irq_storm_disable(dev_priv); |
spin_unlock_irq(&dev_priv->irq_lock); |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
intel_connector = to_intel_connector(connector); |
if (!intel_connector->encoder) |
continue; |
intel_encoder = intel_connector->encoder; |
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { |
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", |
connector->name, intel_encoder->hpd_pin); |
if (intel_encoder->hot_plug) |
intel_encoder->hot_plug(intel_encoder); |
if (intel_hpd_irq_event(dev, connector)) |
changed = true; |
} |
} |
mutex_unlock(&mode_config->mutex); |
if (changed) |
drm_kms_helper_hotplug_event(dev); |
} |
/** |
* intel_hpd_irq_handler - main hotplug irq handler |
* @dev: drm device |
* @pin_mask: a mask of hpd pins that have triggered the irq |
* @long_mask: a mask of hpd pins that may be long hpd pulses |
* |
* This is the main hotplug irq handler for all platforms. The platform specific |
* irq handlers call the platform specific hotplug irq handlers, which read and |
* decode the appropriate registers into bitmasks about hpd pins that have |
* triggered (@pin_mask), and which of those pins may be long pulses |
* (@long_mask). The @long_mask is ignored if the port corresponding to the pin |
* is not a digital port. |
* |
* Here, we do hotplug irq storm detection and mitigation, and pass further |
* processing to appropriate bottom halves. |
*/ |
void intel_hpd_irq_handler(struct drm_device *dev, |
u32 pin_mask, u32 long_mask) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int i; |
enum port port; |
bool storm_detected = false; |
bool queue_dig = false, queue_hp = false; |
bool is_dig_port; |
if (!pin_mask) |
return; |
spin_lock(&dev_priv->irq_lock); |
for_each_hpd_pin(i) { |
if (!(BIT(i) & pin_mask)) |
continue; |
is_dig_port = intel_hpd_pin_to_port(i, &port) && |
dev_priv->hotplug.irq_port[port]; |
if (is_dig_port) { |
bool long_hpd = long_mask & BIT(i); |
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), |
long_hpd ? "long" : "short"); |
/* |
* For long HPD pulses we want to have the digital queue happen, |
* but we still want HPD storm detection to function. |
*/ |
queue_dig = true; |
if (long_hpd) { |
dev_priv->hotplug.long_port_mask |= (1 << port); |
} else { |
/* for short HPD just trigger the digital queue */ |
dev_priv->hotplug.short_port_mask |= (1 << port); |
continue; |
} |
} |
if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { |
/* |
* On GMCH platforms the interrupt mask bits only |
* prevent irq generation, not the setting of the |
* hotplug bits itself. So only WARN about unexpected |
* interrupts on saner platforms. |
*/ |
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), |
"Received HPD interrupt on pin %d although disabled\n", i); |
continue; |
} |
if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) |
continue; |
if (!is_dig_port) { |
dev_priv->hotplug.event_bits |= BIT(i); |
queue_hp = true; |
} |
if (intel_hpd_irq_storm_detect(dev_priv, i)) { |
dev_priv->hotplug.event_bits &= ~BIT(i); |
storm_detected = true; |
} |
} |
if (storm_detected) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock(&dev_priv->irq_lock); |
/* |
* Our hotplug handler can grab modeset locks (by calling down into the |
* fb helpers). Hence it must not be run on our own dev-priv->wq work |
* queue for otherwise the flush_work in the pageflip code will |
* deadlock. |
*/ |
if (queue_dig) |
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); |
if (queue_hp) |
schedule_work(&dev_priv->hotplug.hotplug_work); |
} |
/** |
* intel_hpd_init - initializes and enables hpd support |
* @dev_priv: i915 device instance |
* |
* This function enables the hotplug support. It requires that interrupts have |
* already been enabled with intel_irq_init_hw(). From this point on hotplug and |
* poll request can run concurrently to other code, so locking rules must be |
* obeyed. |
* |
* This is a separate step from interrupt enabling to simplify the locking rules |
* in the driver load and resume code. |
*/ |
void intel_hpd_init(struct drm_i915_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct drm_connector *connector; |
int i; |
for_each_hpd_pin(i) { |
dev_priv->hotplug.stats[i].count = 0; |
dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
} |
list_for_each_entry(connector, &mode_config->connector_list, head) { |
struct intel_connector *intel_connector = to_intel_connector(connector); |
connector->polled = intel_connector->polled; |
/* MST has a dynamic intel_connector->encoder and its reprobing |
* is all handled by the MST helpers. */ |
if (intel_connector->mst_port) |
continue; |
if (!connector->polled && I915_HAS_HOTPLUG(dev) && |
intel_connector->encoder->hpd_pin > HPD_NONE) |
connector->polled = DRM_CONNECTOR_POLL_HPD; |
} |
/* |
* Interrupt setup is already guaranteed to be single-threaded, this is |
* just to make the assert_spin_locked checks happy. |
*/ |
spin_lock_irq(&dev_priv->irq_lock); |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
spin_unlock_irq(&dev_priv->irq_lock); |
} |
void intel_hpd_init_work(struct drm_i915_private *dev_priv) |
{ |
INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); |
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); |
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, |
intel_hpd_irq_storm_reenable_work); |
} |
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) |
{ |
spin_lock_irq(&dev_priv->irq_lock); |
dev_priv->hotplug.long_port_mask = 0; |
dev_priv->hotplug.short_port_mask = 0; |
dev_priv->hotplug.event_bits = 0; |
spin_unlock_irq(&dev_priv->irq_lock); |
cancel_work_sync(&dev_priv->hotplug.dig_port_work); |
cancel_work_sync(&dev_priv->hotplug.hotplug_work); |
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); |
} |
/drivers/video/drm/i915/intel_i2c.c |
---|
675,7 → 675,7 |
return 0; |
err: |
while (--pin) { |
while (pin--) { |
if (!intel_gmbus_is_valid_pin(dev_priv, pin)) |
continue; |
/drivers/video/drm/i915/intel_lrc.c |
---|
1706,6 → 1706,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
2359,6 → 2360,7 |
kunmap_atomic(reg_state); |
ctx_obj->dirty = 1; |
set_page_dirty(page); |
i915_gem_object_unpin_pages(ctx_obj); |
return 0; |
/drivers/video/drm/i915/intel_ringbuffer.c |
---|
347,6 → 347,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
if (invalidate_domains) { |
419,6 → 420,7 |
if (flush_domains) { |
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; |
flags |= PIPE_CONTROL_FLUSH_ENABLE; |
} |
if (invalidate_domains) { |
/drivers/video/drm/i915/kms_display.c |
---|
10,16 → 10,10 |
#include <linux/pci.h> |
#include <syscall.h> |
//#include "bitmap.h" |
#include <display.h> |
void FASTCALL sysSetFramebuffer(void *fb)__asm__("SetFramebuffer"); |
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb); |
void init_system_cursors(struct drm_device *dev); |
addr_t dummy_fb_page; |
display_t *os_display; |
u32 cmd_buffer; |
60,26 → 54,17 |
struct drm_i915_gem_object *obj = NULL; |
int stride, size; |
ENTER(); |
stride = mode->hdisplay *4; |
if(IS_GEN3(dev)) |
tiling = 0; |
if(tiling) |
{ |
int gen3size; |
if(IS_GEN3(dev)) |
for (stride = 512; stride < mode->hdisplay * 4; stride <<= 1); |
else |
stride = ALIGN(stride, 512); |
size = stride * ALIGN(mode->vdisplay, 8); |
if(IS_GEN3(dev)) |
{ |
for (gen3size = 1024*1024; gen3size < size; gen3size <<= 1); |
size = gen3size; |
} |
else |
size = ALIGN(size, 4096); |
} |
else |
96,6 → 81,7 |
int ret; |
DRM_DEBUG_KMS("remove old framebuffer\n"); |
set_fake_framebuffer(); |
drm_framebuffer_remove(fb); |
ifbdev->fb = NULL; |
fb = NULL; |
172,7 → 158,7 |
fb->bits_per_pixel = 32; |
fb->depth = 24; |
LEAVE(); |
return fb; |
out_fb: |
282,7 → 268,7 |
{ |
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
struct kos_framebuffer *kfb = intel_fb->private; |
kolibri_framebuffer_update(dev_priv, kfb); |
kolibri_framebuffer_update(dev, kfb); |
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb); |
os_display->width = mode->hdisplay; |
378,7 → 364,7 |
{ |
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
struct kos_framebuffer *kfb = intel_fb->private; |
kolibri_framebuffer_update(dev_priv, kfb); |
kolibri_framebuffer_update(dev, kfb); |
DRM_DEBUG_KMS("kolibri framebuffer %p\n", kfb); |
os_display->width = mode->hdisplay; |
575,8 → 561,6 |
return -1; |
}; |
dummy_fb_page = AllocPage(); |
os_display = GetDisplay(); |
os_display->ddev = dev; |
os_display->connector = connector; |
815,7 → 799,7 |
fb->name = obj->base.name; |
fb->width = os_display->width; |
fb->height = os_display->height; |
fb->pitch = obj->stride; |
fb->pitch = os_display->lfb_pitch; |
fb->tiling = obj->tiling_mode; |
fb->crtc = crtc->base.base.id; |
fb->pipe = crtc->pipe; |
825,112 → 809,7 |
return 0; |
} |
int kolibri_framebuffer_init(struct intel_framebuffer *intel_fb) |
{ |
struct kos_framebuffer *kfb; |
addr_t dummy_table; |
addr_t *pt_addr = NULL; |
int pde; |
kfb = kzalloc(sizeof(struct kos_framebuffer),0); |
kfb->private = intel_fb; |
for(pde = 0; pde < 8; pde++) |
{ |
dummy_table = AllocPage(); |
kfb->pde[pde] = dummy_table|PG_UW; |
pt_addr = kmap((struct page*)dummy_table); |
__builtin_memset(pt_addr,0,4096); |
kunmap((struct page*)dummy_table); |
}; |
intel_fb->private = kfb; |
return 0; |
#if 0 |
struct sg_page_iter sg_iter; |
num_pages = obj->base.size/4096; |
printf("num_pages %d\n",num_pages); |
pte = 0; |
pde = 0; |
pt_addr = NULL; |
__sg_page_iter_start(&sg_iter, obj->pages->sgl, sg_nents(obj->pages->sgl), 0); |
while (__sg_page_iter_next(&sg_iter)) |
{ |
if (pt_addr == NULL) |
{ |
addr_t pt = AllocPage(); |
kfb->pde[pde] = pt|PG_UW; |
pde++; |
pt_addr = kmap_atomic((struct page*)pt); |
} |
pt_addr[pte] = sg_page_iter_dma_address(&sg_iter)|PG_UW|PG_WRITEC; |
if( (pte & 15) == 0) |
DRM_DEBUG_KMS("pte %x\n",pt_addr[pte]); |
if (++pte == 1024) |
{ |
kunmap_atomic(pt_addr); |
pt_addr = NULL; |
if (pde == 8) |
break; |
pte = 0; |
} |
} |
if(pt_addr) |
{ |
for(;pte < 1024; pte++) |
pt_addr[pte] = dummy_page|PG_UW; |
kunmap_atomic(pt_addr); |
} |
#endif |
}; |
void kolibri_framebuffer_update(struct drm_i915_private *dev_priv, struct kos_framebuffer *kfb) |
{ |
struct intel_framebuffer *intel_fb = kfb->private; |
addr_t *pt_addr = NULL; |
int pte = 0; |
int pde = 0; |
int num_pages; |
addr_t pfn; |
ENTER(); |
num_pages = intel_fb->obj->base.size/4096; |
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(intel_fb->obj); |
while(num_pages) |
{ |
if (pt_addr == NULL) |
{ |
addr_t pt = kfb->pde[pde] & 0xFFFFF000; |
pde++; |
pt_addr = kmap_atomic((struct page*)pt); |
} |
pt_addr[pte] = pfn|PG_UW|PG_WRITEC; |
pfn+= 4096; |
num_pages--; |
if (++pte == 1024) |
{ |
kunmap_atomic(pt_addr); |
pt_addr = NULL; |
if (pde == 8) |
break; |
pte = 0; |
} |
} |
if(pt_addr) |
{ |
for(;pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
} |
LEAVE(); |
}; |
typedef struct |
{ |
int left; |
1411,6 → 1290,3 |
list_del_init(&wait->task_list); |
return 1; |
} |
/drivers/video/drm/i915/kos_fb.c |
---|
0,0 → 1,174 |
/* |
* Copyright © 2008-2012 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
* |
* Authors: |
* Eric Anholt <eric@anholt.net> |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include <display.h> |
#include "intel_drv.h" |
#include "i915_drv.h" |
static addr_t dummy_fb_page; |
static struct kos_framebuffer *fake_fb; |
int fake_framebuffer_create() |
{ |
struct kos_framebuffer *kfb; |
addr_t dummy_table; |
addr_t *pt_addr; |
int pde, pte; |
kfb = kzalloc(sizeof(struct kos_framebuffer),0); |
if(kfb == NULL) |
goto err_0; |
dummy_fb_page = AllocPage(); |
if(dummy_fb_page == 0) |
goto err_1; |
for(pde = 0; pde < 8; pde++) |
{ |
dummy_table = AllocPage(); |
if(dummy_table == 0) |
goto err_2; |
kfb->pde[pde] = dummy_table|PG_UW; |
pt_addr = kmap_atomic((struct page*)dummy_table); |
for(pte = 0; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
}; |
fake_fb = kfb; |
return 0; |
err_2: |
for(pte = 0; pte < pde; pte++) |
FreePage(kfb->pde[pte]); |
FreePage(dummy_fb_page); |
err_1: |
kfree(kfb); |
err_0: |
return -ENOMEM; |
}; |
int kolibri_framebuffer_init(void *param) |
{ |
struct intel_framebuffer *intel_fb = param; |
struct kos_framebuffer *kfb; |
addr_t dummy_table; |
addr_t *pt_addr = NULL; |
int pde, pte; |
kfb = kzalloc(sizeof(struct kos_framebuffer),0); |
if(kfb == NULL) |
goto err_0; |
kfb->private = intel_fb; |
for(pde = 0; pde < 8; pde++) |
{ |
dummy_table = AllocPage(); |
if(dummy_table == 0) |
goto err_1; |
kfb->pde[pde] = dummy_table|PG_UW; |
pt_addr = kmap_atomic((struct page*)dummy_table); |
for(pte = 0; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
}; |
intel_fb->private = kfb; |
return 0; |
err_1: |
for(pte = 0; pte < pde; pte++) |
FreePage(kfb->pde[pte]); |
kfree(kfb); |
err_0: |
return -ENOMEM; |
}; |
void kolibri_framebuffer_update(struct drm_device *dev, struct kos_framebuffer *kfb) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_framebuffer *intel_fb = kfb->private; |
addr_t *pt_addr = NULL; |
int pte = 0; |
int pde = 0; |
int num_pages; |
addr_t pfn; |
num_pages = intel_fb->obj->base.size/4096; |
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(intel_fb->obj); |
while(num_pages) |
{ |
if (pt_addr == NULL) |
{ |
addr_t pt = kfb->pde[pde] & 0xFFFFF000; |
pde++; |
pt_addr = kmap_atomic((struct page*)pt); |
} |
pt_addr[pte] = pfn|PG_UW|PG_WRITEC; |
pfn+= 4096; |
num_pages--; |
if (++pte == 1024) |
{ |
kunmap_atomic(pt_addr); |
pt_addr = NULL; |
if (pde == 8) |
break; |
pte = 0; |
} |
} |
if(pt_addr) |
{ |
for(; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
}; |
for(; pde < 8; pde++) |
{ |
addr_t pt = kfb->pde[pde] & 0xFFFFF000; |
pt_addr = kmap_atomic((struct page*)pt); |
for(pte = 0; pte < 1024; pte++) |
pt_addr[pte] = dummy_fb_page|PG_UW; |
kunmap_atomic(pt_addr); |
} |
}; |
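kolibri_framebuffer_update() fills at most 8 page tables of 1024 entries each, i.e. a 32 MiB window (8 * 1024 * 4096 bytes), with physical addresses starting at gtt.mappable_base plus the object's GGTT offset; any leftover PTEs, and any wholly unused page tables, are pointed back at dummy_fb_page so that accesses past the end of the framebuffer hit a harmless dummy page instead of stale mappings.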
void set_fake_framebuffer() |
{ |
sysSetFramebuffer(fake_fb); |
} |
/drivers/video/drm/i915/main.c |
---|
14,7 → 14,7 |
#include "bitmap.h" |
#include "i915_kos32.h" |
#define DRV_NAME "i915 v4.4.3" |
#define DRV_NAME "i915 v4.4.5" |
#define I915_DEV_CLOSE 0 |
#define I915_DEV_INIT 1 |
254,6 → 254,10 |
dmi_scan_machine(); |
err = fake_framebuffer_create(); |
if( unlikely(err != 0)) |
return 0; |
driver_wq_state = I915_DEV_INIT; |
CreateKernelThread(i915_driver_thread); |