Subversion Repositories Kolibri OS

Rev

Rev 2351 | Rev 3031 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2351 Serge 1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
29
#include <linux/sysrq.h>   /* NOTE(review): header names were lost in the scrape;
                             * reconstructed from the upstream i915_irq.c include
                             * list — verify against the repository. */
//#include <linux/slab.h>
31
#include "drmP.h"
32
#include "drm.h"
33
#include "i915_drm.h"
34
#include "i915_drv.h"
35
#include "i915_trace.h"
36
#include "intel_drv.h"
37
 
2352 Serge 38
/* Thin compatibility wrappers over the Linux waitqueue API, keeping the
 * classic DRM macro names used elsewhere in this port. */
#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

/* Largest possible request id (all bits set). */
#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

/* Status bits reported for either kind of vblank event on a pipe. */
#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

/* Enable bits corresponding to the status bits above. */
#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

/* Convenience mask covering vblank interrupts on both pipes. */
#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
69
 
70
/* For display hotplug interrupt */
71
static void
72
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
73
{
74
    if ((dev_priv->irq_mask & mask) != 0) {
75
        dev_priv->irq_mask &= ~mask;
76
        I915_WRITE(DEIMR, dev_priv->irq_mask);
77
        POSTING_READ(DEIMR);
78
    }
79
}
80
 
81
static inline void
82
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
83
{
84
    if ((dev_priv->irq_mask & mask) != mask) {
85
        dev_priv->irq_mask |= mask;
86
        I915_WRITE(DEIMR, dev_priv->irq_mask);
87
        POSTING_READ(DEIMR);
88
    }
89
}
2352 Serge 90
static void notify_ring(struct drm_device *dev,
91
			struct intel_ring_buffer *ring)
92
{
93
	struct drm_i915_private *dev_priv = dev->dev_private;
94
	u32 seqno;
2351 Serge 95
 
2352 Serge 96
	if (ring->obj == NULL)
97
		return;
2351 Serge 98
 
2352 Serge 99
	seqno = ring->get_seqno(ring);
100
	trace_i915_gem_request_complete(ring, seqno);
2351 Serge 101
 
2352 Serge 102
	ring->irq_seqno = seqno;
103
	wake_up_all(&ring->irq_queue);
104
//   if (i915_enable_hangcheck) {
105
//       dev_priv->hangcheck_count = 0;
106
//       mod_timer(&dev_priv->hangcheck_timer,
107
//             jiffies +
108
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
109
//   }
110
}
111
 
112
 
113
 
2351 Serge 114
/*
 * Main interrupt service routine for Ironlake/Sandy Bridge class hardware.
 *
 * Snapshot-and-clear protocol: the master interrupt enable in DEIER is
 * dropped first, then all four IIR registers (display, GT, PCH south
 * display, Gen6 PM) are read.  Only after the events have been dispatched
 * are the IIR registers written back (which clears the latched bits), and
 * finally the saved DEIER value is restored.  The ordering of these
 * register accesses is significant and must not be changed.
 *
 * Returns IRQ_HANDLED if any interrupt source was pending, IRQ_NONE
 * otherwise (shared-line etiquette).
 */
static int ironlake_irq_handler(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 hotplug_mask;   /* NOTE: set below but unused while PCH hotplug
                         * dispatch is commented out */
    struct drm_i915_master_private *master_priv;  /* unused in this port */
    u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

    atomic_inc(&dev_priv->irq_received);

    /* Gen6 moved the BSD ring's user-interrupt bit. */
    if (IS_GEN6(dev))
        bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

    /* disable master interrupt before clearing iir  */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    /* Snapshot all interrupt identity registers. */
    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    /* Nothing pending anywhere: not our interrupt.  PMIIR only exists
     * (meaningfully) on Gen6. */
    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    /* CougarPoint PCH uses a different hotplug bit layout. */
    if (HAS_PCH_CPT(dev))
        hotplug_mask = SDE_HOTPLUG_MASK_CPT;
    else
        hotplug_mask = SDE_HOTPLUG_MASK;

    ret = IRQ_HANDLED;

    /* Wake waiters on whichever rings signalled completion. */
    if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
        notify_ring(dev, &dev_priv->ring[RCS]);
    if (gt_iir & bsd_usr_interrupt)
        notify_ring(dev, &dev_priv->ring[VCS]);
    if (gt_iir & GT_BLT_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->ring[BCS]);

    /* The following display-event handlers (opregion, page flip, vblank,
     * PCH hotplug, PCU/RPS events) are disabled in this port: */
//    if (de_iir & DE_GSE)
//        intel_opregion_gse_intr(dev);

//    if (de_iir & DE_PLANEA_FLIP_DONE) {
//        intel_prepare_page_flip(dev, 0);
//        intel_finish_page_flip_plane(dev, 0);
//    }

//    if (de_iir & DE_PLANEB_FLIP_DONE) {
//        intel_prepare_page_flip(dev, 1);
//        intel_finish_page_flip_plane(dev, 1);
//    }

//    if (de_iir & DE_PIPEA_VBLANK)
//        drm_handle_vblank(dev, 0);

//    if (de_iir & DE_PIPEB_VBLANK)
//        drm_handle_vblank(dev, 1);

    /* check event from PCH */
//    if (de_iir & DE_PCH_EVENT) {
//        if (pch_iir & hotplug_mask)
//            queue_work(dev_priv->wq, &dev_priv->hotplug_work);
//        pch_irq_handler(dev);
//    }

//    if (de_iir & DE_PCU_EVENT) {
//        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
//        i915_handle_rps_change(dev);
//    }

    if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * displays a case where we've unsafely cleared
         * dev_priv->pm_iir. Although missing an interrupt of the same
         * type is not a problem, it displays a problem in the logic.
         *
         * The mask bit in IMR is cleared by rps_work.
         */
        unsigned long flags;
        spin_lock_irqsave(&dev_priv->rps_lock, flags);
        WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        /* Mask further PM interrupts until rps_work runs. */
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
//        queue_work(dev_priv->wq, &dev_priv->rps_work);
    }

    /* should clear PCH hotplug event before clear CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    /* Restore the master interrupt enable exactly as it was on entry. */
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}
220
 
221
 
222
 
223
 
224
 
225
 
226
 
227
 
228
 
229
/* drm_dma.h hooks
230
*/
231
/*
 * Quiesce all interrupt sources before the handler is attached: mask and
 * disable the display engine (DEIMR/DEIER), GT (GTIMR/GTIER) and south
 * display / PCH (SDEIMR/SDEIER) interrupt blocks, and mask hardware status
 * page writes via HWSTAM.  Each disable is flushed with a posting read.
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

    /* Deferred-work handlers (hotplug, error, RPS) are disabled in this
     * port: */
//    INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
//    INIT_WORK(&dev_priv->error_work, i915_error_work_func);
//    if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
//        INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

    I915_WRITE(HWSTAM, 0xeffe);

    if (IS_GEN6(dev)) {
        /* Workaround stalls observed on Sandy Bridge GPUs by
         * making the blitter command streamer generate a
         * write to the Hardware Status Page for
         * MI_USER_INTERRUPT.  This appears to serialize the
         * previous seqno write out before the interrupt
         * happens.
         */
        I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
        I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
    }

    /* XXX hotplug from PCH */

    /* Display engine: mask everything, disable everything. */
    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}
272
 
273
/*
274
 * Enable digital hotplug on the PCH, and configure the DP short pulse
275
 * duration to 2ms (which is the minimum in the Display Port spec)
276
 *
277
 * This register is the same on all known PCH chips.
278
 */
279
 
280
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
281
{
282
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
283
	u32	hotplug;
284
 
285
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
286
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
287
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
288
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
289
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
290
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
291
}
292
 
293
/*
 * Enable the interrupt sources this driver actually uses, after the
 * handler has been attached.  For each block (display, GT, PCH) the
 * sequence is: clear stale IIR bits by writing them back, program the
 * mask register, then enable in the IER register and flush with a
 * posting read.  That ordering avoids spurious interrupts and must be
 * preserved.  Always returns 0.
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* enable kind of interrupts always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

    /* Wait queues woken by notify_ring() from the IRQ handler. */
    DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
    if (HAS_BSD(dev))
        DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
    if (HAS_BLT(dev))
        DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

    dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
    dev_priv->irq_mask = ~display_mask;

    /* should always can generate irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));        /* ack stale display events */
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

    /* Mask all GT interrupts in GTIMR; delivery is governed by GTIER. */
	dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));        /* ack stale GT events */
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    /* Per-generation ring user-interrupt bits (Gen6 relocated BSD and
     * added the blitter ring). */
    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
            GT_GEN6_BSD_USER_INTERRUPT |
            GT_BLT_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    /* CougarPoint PCH uses a different hotplug bit layout. */
    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                SDE_PORTB_HOTPLUG_CPT |
                SDE_PORTC_HOTPLUG_CPT |
                SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG |
                SDE_PORTD_HOTPLUG |
                SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));      /* ack stale PCH events */
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}
366
 
367
 
368
/*
 * Upstream driver hook registration, compiled out (#if 0) in this port:
 * here drm_irq_install() below wires the Ironlake handlers directly
 * instead of going through the dev->driver function-pointer table.
 * Kept as a no-op so callers of intel_irq_init() still link.
 */
void intel_irq_init(struct drm_device *dev)
{
#if 0
	if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
		dev->driver->irq_handler = i915_driver_irq_handler;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
#endif
}
396
 
397
 
398
/* The single device serviced by irq_handler_kms(); set in drm_irq_install(). */
static struct drm_device *irq_device;

/*
 * Interrupt entry point registered with AttachIntHandler() in
 * drm_irq_install().  Dispatches to the Ironlake IRQ handler for the one
 * device this port supports.
 *
 * Fix: declared with (void) — an empty parameter list () in C declares a
 * function with an unspecified argument list, not a zero-argument one.
 */
void irq_handler_kms(void)
{
    ironlake_irq_handler(irq_device);
}
405
 
406
int drm_irq_install(struct drm_device *dev)
407
{
408
    int irq_line;
409
    int ret = 0;
410
 
411
    ENTER();
412
 
413
    mutex_lock(&dev->struct_mutex);
414
 
415
    /* Driver must have been initialized */
416
    if (!dev->dev_private) {
417
        mutex_unlock(&dev->struct_mutex);
418
        return -EINVAL;
419
    }
420
 
421
    if (dev->irq_enabled) {
422
        mutex_unlock(&dev->struct_mutex);
423
        return -EBUSY;
424
    }
425
    dev->irq_enabled = 1;
426
    mutex_unlock(&dev->struct_mutex);
427
 
428
    irq_device = dev;
429
    irq_line   = drm_dev_to_irq(dev);
430
 
431
    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
432
 
433
    ironlake_irq_preinstall(dev);
434
 
435
    ret = AttachIntHandler(irq_line, irq_handler_kms, 2);
436
    if (ret == 0) {
437
        mutex_lock(&dev->struct_mutex);
438
        dev->irq_enabled = 0;
439
        mutex_unlock(&dev->struct_mutex);
440
        return ret;
441
    }
442
 
443
    ret = ironlake_irq_postinstall(dev);
444
 
445
//    if (ret < 0) {
446
//        mutex_lock(&dev->struct_mutex);
447
//        dev->irq_enabled = 0;
448
//        mutex_unlock(&dev->struct_mutex);
449
//        free_irq(drm_dev_to_irq(dev), dev);
450
//    }
451
 
452
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
453
 
454
    cmd&= ~(1<<10);
455
 
456
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
457
 
458
    dbgprintf("PCI_CMD: %04x\n", cmd);
459
 
460
    DRM_INFO("i915: irq initialized.\n");
461
    LEAVE();
462
    return ret;
463
}
464
 
465
 
466