/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
//#include "i915_drm.h"
#include "i915_drv.h"
#include 
//#include "i915_trace.h"
//#include "../../../platform/x86/intel_ips.h"
#include 
//#include 
//#include 
//#include 
//#include 
//#include 
//#include 

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

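/* Write the bus address of the hardware status page into the HWS_PGA
 * register; on gen4+ the high address bits (32-35) are folded into
 * bits 7:4 of the register value.
 */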
static void i915_write_hws_pga(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 addr;

    addr = dev_priv->status_page_dmah->busaddr;
    if (INTEL_INFO(dev)->gen >= 4)
        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
    I915_WRITE(HWS_PGA, addr);
}


/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    /* Program Hardware Status Page */
    dev_priv->status_page_dmah =
        drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

    if (!dev_priv->status_page_dmah) {
        DRM_ERROR("Can not allocate hardware status page\n");
        return -ENOMEM;
    }

    i915_write_hws_pga(dev);

    dbgprintf("Enabled hardware status page\n");
    return 0;
}

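/* Decode the FSB and memory clock fields of CLKCFG into dev_priv->fsb_freq
 * and dev_priv->mem_freq, and read CSHRDDR3CTL to detect a DDR3
 * configuration on Pineview.
 */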
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 tmp;

    tmp = I915_READ(CLKCFG);

    switch (tmp & CLKCFG_FSB_MASK) {
    case CLKCFG_FSB_533:
        dev_priv->fsb_freq = 533; /* 133*4 */
        break;
    case CLKCFG_FSB_800:
        dev_priv->fsb_freq = 800; /* 200*4 */
        break;
    case CLKCFG_FSB_667:
        dev_priv->fsb_freq =  667; /* 167*4 */
        break;
    case CLKCFG_FSB_400:
        dev_priv->fsb_freq = 400; /* 100*4 */
        break;
    }

    switch (tmp & CLKCFG_MEM_MASK) {
    case CLKCFG_MEM_533:
        dev_priv->mem_freq = 533;
        break;
    case CLKCFG_MEM_667:
        dev_priv->mem_freq = 667;
        break;
    case CLKCFG_MEM_800:
        dev_priv->mem_freq = 800;
        break;
    }

    /* detect pineview DDR3 setting */
    tmp = I915_READ(CSHRDDR3CTL);
    dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

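/* Read the DDR and CSI PLL configuration (DDRMPLL1, CSIPLL0) and translate
 * the encodings into dev_priv->mem_freq and dev_priv->fsb_freq, along with
 * the derived r_t and c_m values cached next to them.
 */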
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u16 ddrpll, csipll;

    ddrpll = I915_READ16(DDRMPLL1);
    csipll = I915_READ16(CSIPLL0);

    switch (ddrpll & 0xff) {
    case 0xc:
        dev_priv->mem_freq = 800;
        break;
    case 0x10:
        dev_priv->mem_freq = 1066;
        break;
    case 0x14:
        dev_priv->mem_freq = 1333;
        break;
    case 0x18:
        dev_priv->mem_freq = 1600;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                 ddrpll & 0xff);
        dev_priv->mem_freq = 0;
        break;
    }

    dev_priv->r_t = dev_priv->mem_freq;

    switch (csipll & 0x3ff) {
    case 0x00c:
        dev_priv->fsb_freq = 3200;
        break;
    case 0x00e:
        dev_priv->fsb_freq = 3733;
        break;
    case 0x010:
        dev_priv->fsb_freq = 4266;
        break;
    case 0x012:
        dev_priv->fsb_freq = 4800;
        break;
    case 0x014:
        dev_priv->fsb_freq = 5333;
        break;
    case 0x016:
        dev_priv->fsb_freq = 5866;
        break;
    case 0x018:
        dev_priv->fsb_freq = 6400;
        break;
    default:
        DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                 csipll & 0x3ff);
        dev_priv->fsb_freq = 0;
        break;
    }

    if (dev_priv->fsb_freq == 3200) {
        dev_priv->c_m = 0;
    } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
        dev_priv->c_m = 1;
    } else {
        dev_priv->c_m = 2;
    }
}

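/* Look up the host bridge (PCI device 0:0.0) and cache it in
 * dev_priv->bridge_dev.
 */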
static int i915_get_bridge_dev(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!dev_priv->bridge_dev) {
        DRM_ERROR("bridge device not found\n");
        return -1;
    }
    return 0;
}


/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);


/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_i915_private *dev_priv;
    int ret = 0, mmio_bar;
    uint32_t agp_size;

    ENTER();

    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

    dev->dev_private = (void *)dev_priv;
    dev_priv->dev = dev;
    dev_priv->info = (struct intel_device_info *) flags;

    if (i915_get_bridge_dev(dev)) {
        ret = -EIO;
        goto free_priv;
    }

    /* overlay on gen2 is broken and can't address above 1G */
//    if (IS_GEN2(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

    /* 965GM sometimes incorrectly writes to hardware status page (HWS)
     * using 32bit addressing, overwriting memory if HWS is located
     * above 4GB.
     *
     * The documentation also mentions an issue with undefined
     * behaviour if any general state is accessed within a page above 4GB,
     * which also needs to be handled carefully.
     */
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

    mmio_bar = IS_GEN2(dev) ? 1 : 0;
    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
    if (!dev_priv->regs) {
        DRM_ERROR("failed to map registers\n");
        ret = -EIO;
        goto put_bridge;
    }

    dev_priv->mm.gtt = intel_gtt_get();
    if (!dev_priv->mm.gtt) {
        DRM_ERROR("Failed to initialize GTT\n");
        ret = -ENODEV;
        goto out_rmmap;
    }

//    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

/*   agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;   */

//    dev_priv->mm.gtt_mapping =
//        io_mapping_create_wc(dev->agp->base, agp_size);
//    if (dev_priv->mm.gtt_mapping == NULL) {
//        ret = -EIO;
//        goto out_rmmap;
//    }

    /* Set up a WC MTRR for non-PAT systems.  This is more common than
     * one would think, because the kernel disables PAT on first
     * generation Core chips because WC PAT gets overridden by a UC
     * MTRR if present.  Even if a UC MTRR isn't present.
     */
//    dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
//                     agp_size,
//                     MTRR_TYPE_WRCOMB, 1);
//    if (dev_priv->mm.gtt_mtrr < 0) {
//        DRM_INFO("MTRR allocation failed.  Graphics "
//             "performance may suffer.\n");
//    }

    /* The i915 workqueue is primarily used for batched retirement of
     * requests (and thus managing bo) once the task has been completed
     * by the GPU. i915_gem_retire_requests() is called directly when we
     * need high-priority retirement, such as waiting for an explicit
     * bo.
     *
     * It is also used for periodic low-priority events, such as
     * idle-timers and recording error state.
     *
     * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
     */

//    dev_priv->wq = alloc_workqueue("i915",
//                       WQ_UNBOUND | WQ_NON_REENTRANT,
//                       1);
//    if (dev_priv->wq == NULL) {
//        DRM_ERROR("Failed to create our workqueue.\n");
//        ret = -ENOMEM;
//        goto out_mtrrfree;
//    }

    /* enable GEM by default */
    dev_priv->has_gem = 1;


//    intel_irq_init(dev);

    /* Try to make sure MCHBAR is enabled before poking at it */
//    intel_setup_mchbar(dev);
    intel_setup_gmbus(dev);

//    intel_opregion_setup(dev);

    /* Make sure the bios did its job and set up vital registers */
//    intel_setup_bios(dev);

    i915_gem_load(dev);

    /* Init HWS */
    if (!I915_NEED_GFX_HWS(dev)) {
        ret = i915_init_phys_hws(dev);
        if (ret)
            goto out_gem_unload;
    }

    if (IS_PINEVIEW(dev))
        i915_pineview_get_mem_freq(dev);
    else if (IS_GEN5(dev))
        i915_ironlake_get_mem_freq(dev);

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs.  It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     *
     * According to chipset errata, on the 965GM, MSI interrupts may
     * be lost or delayed, but we use them anyways to avoid
     * stuck interrupts on some machines.
     */
//    if (!IS_I945G(dev) && !IS_I945GM(dev))
//        pci_enable_msi(dev->pdev);

    spin_lock_init(&dev_priv->irq_lock);
    spin_lock_init(&dev_priv->error_lock);
    spin_lock_init(&dev_priv->rps_lock);

    if (IS_MOBILE(dev) || !IS_GEN2(dev))
        dev_priv->num_pipe = 2;
    else
        dev_priv->num_pipe = 1;

//    ret = drm_vblank_init(dev, dev_priv->num_pipe);
//    if (ret)
//        goto out_gem_unload;

    /* Start out suspended */
    dev_priv->mm.suspended = 1;

    intel_detect_pch(dev);


//    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
//        ret = i915_load_modeset_init(dev);
//        if (ret < 0) {
//            DRM_ERROR("failed to init modeset\n");
//            goto out_gem_unload;
//        }
//    }

    /* Must be done after probing outputs */
//    intel_opregion_init(dev);
//    acpi_video_register();

//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
//            (unsigned long) dev);

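    /* Publish this device through the global i915_mch_dev pointer (used by
     * the IPS code) under mchdev_lock, which guards that shared state.
     */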
    spin_lock(&mchdev_lock);
    i915_mch_dev = dev_priv;
    dev_priv->mchdev_lock = &mchdev_lock;
    spin_unlock(&mchdev_lock);

//    ips_ping_for_i915_load();

    LEAVE();

    return 0;

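/* Error unwind: most of the teardown calls below are still stubbed out in
 * this port; only the register mapping and the dev_priv allocation are
 * actually released on failure.
 */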
out_gem_unload:
//    if (dev_priv->mm.inactive_shrinker.shrink)
//        unregister_shrinker(&dev_priv->mm.inactive_shrinker);

//    if (dev->pdev->msi_enabled)
//        pci_disable_msi(dev->pdev);

//    intel_teardown_gmbus(dev);
//    intel_teardown_mchbar(dev);
//    destroy_workqueue(dev_priv->wq);
out_mtrrfree:
//    if (dev_priv->mm.gtt_mtrr >= 0) {
//        mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
//             dev->agp->agp_info.aper_size * 1024 * 1024);
//        dev_priv->mm.gtt_mtrr = -1;
//    }
//    io_mapping_free(dev_priv->mm.gtt_mapping);

out_rmmap:
    pci_iounmap(dev->pdev, dev_priv->regs);

put_bridge:
//    pci_dev_put(dev_priv->bridge_dev);
free_priv:
    kfree(dev_priv);
    return ret;
}