Subversion Repositories Kolibri OS

Rev

Rev 2352 | Rev 3031 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2326 Serge 1
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
29
#include "drmP.h"
30
#include "drm.h"
31
#include "drm_crtc_helper.h"
32
#include "drm_fb_helper.h"
33
#include "intel_drv.h"
2330 Serge 34
#include "i915_drm.h"
2326 Serge 35
#include "i915_drv.h"
36
#include 
2351 Serge 37
#include "i915_trace.h"
2326 Serge 38
//#include "../../../platform/x86/intel_ips.h"
39
#include 
40
//#include 
41
//#include 
42
//#include 
43
//#include 
2330 Serge 44
#include 
2326 Serge 45
//#include 
46
 
47
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
48
 
2330 Serge 49
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
50
                    u32 *val)
51
{
52
    *val = PciRead32(dev->busnr, dev->devfn, where);
53
    return 1;
54
}
55
 
56
 
57
 
2326 Serge 58
static void i915_write_hws_pga(struct drm_device *dev)
59
{
60
    drm_i915_private_t *dev_priv = dev->dev_private;
61
    u32 addr;
62
 
63
    addr = dev_priv->status_page_dmah->busaddr;
64
    if (INTEL_INFO(dev)->gen >= 4)
65
        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
66
    I915_WRITE(HWS_PGA, addr);
67
}
68
 
69
/**
70
 * Sets up the hardware status page for devices that need a physical address
71
 * in the register.
72
 */
73
static int i915_init_phys_hws(struct drm_device *dev)
74
{
75
    drm_i915_private_t *dev_priv = dev->dev_private;
76
 
77
    /* Program Hardware Status Page */
78
    dev_priv->status_page_dmah =
2352 Serge 79
        (void*)drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
2326 Serge 80
 
81
    if (!dev_priv->status_page_dmah) {
82
        DRM_ERROR("Can not allocate hardware status page\n");
83
        return -ENOMEM;
84
    }
85
 
86
    i915_write_hws_pga(dev);
87
 
88
    dbgprintf("Enabled hardware status page\n");
89
    return 0;
90
}
91
 
2330 Serge 92
 
93
 
94
 
95
 
96
 
97
 
98
 
99
 
100
 
101
/* PCI bridge config-space offsets of the MCHBAR base-address register:
 * 0x44 on pre-gen4 parts, 0x48 on gen4 and newer. */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

/* DEVEN register: holds the MCHBAR enable bit on i915G/GM. */
#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)




/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	/* On i915G/GM the enable bit lives in DEVEN; on other parts it is
	 * bit 0 of the MCHBAR register itself. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	/* NOTE(review): the resource allocation + enable sequence below is
	 * compiled out (#if 0) in this port, so a disabled MCHBAR is only
	 * reported via this debug print, never actually set up. */
	dbgprintf("Epic fail\n");

#if 0
	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
#endif
}
152
 
153
 
154
 
155
 
156
 
157
 
158
 
159
 
160
 
161
 
162
 
163
 
164
 
165
 
166
 
2332 Serge 167
#define LFB_SIZE 0xC00000
2330 Serge 168
 
169
static int i915_load_gem_init(struct drm_device *dev)
170
{
171
	struct drm_i915_private *dev_priv = dev->dev_private;
172
	unsigned long prealloc_size, gtt_size, mappable_size;
173
	int ret;
174
 
175
	prealloc_size = dev_priv->mm.gtt->stolen_size;
176
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
177
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
178
 
179
    dbgprintf("%s prealloc: %x gtt: %x mappable: %x\n",__FUNCTION__,
180
             prealloc_size, gtt_size, mappable_size);
181
 
182
	/* Basic memrange allocator for stolen space */
183
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
184
 
185
	/* Let GEM Manage all of the aperture.
186
	 *
187
	 * However, leave one page at the end still bound to the scratch page.
188
	 * There are a number of places where the hardware apparently
189
	 * prefetches past the end of the object, and we've seen multiple
190
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
191
	 * at the last page of the aperture.  One page should be enough to
192
	 * keep any prefetching inside of the aperture.
193
	 */
2332 Serge 194
    i915_gem_do_init(dev, LFB_SIZE, mappable_size, gtt_size - PAGE_SIZE - LFB_SIZE);
2330 Serge 195
 
2332 Serge 196
    mutex_lock(&dev->struct_mutex);
197
    ret = i915_gem_init_ringbuffer(dev);
198
    mutex_unlock(&dev->struct_mutex);
199
    if (ret)
200
        return ret;
2330 Serge 201
 
202
	/* Try to set up FBC with a reasonable compressed buffer size */
203
//   if (I915_HAS_FBC(dev) && i915_powersave) {
204
//       int cfb_size;
205
 
206
		/* Leave 1M for line length buffer & misc. */
207
 
208
		/* Try to get a 32M buffer... */
209
//       if (prealloc_size > (36*1024*1024))
210
//           cfb_size = 32*1024*1024;
211
//       else /* fall back to 7/8 of the stolen space */
212
//           cfb_size = prealloc_size * 7 / 8;
213
//       i915_setup_compression(dev, cfb_size);
214
//   }
215
 
216
	/* Allow hardware batchbuffers unless told otherwise. */
217
	dev_priv->allow_batchbuffer = 1;
218
	return 0;
219
}
220
 
2327 Serge 221
/*
 * Kernel-modesetting bring-up: parse the VBIOS, initialise the modeset
 * core, GEM, interrupts and the fbdev emulation, then mark the device
 * as running (mm.suspended = 0).
 *
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    /* VBIOS parse failure is non-fatal: only logged, init continues. */
    ret = intel_parse_bios(dev);
    if (ret)
        DRM_INFO("failed to find VBIOS tables\n");

//    intel_register_dsm_handler();

    /* IIR "flip pending" bit means done if this bit is set */
    if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
        dev_priv->flip_pending_is_done = true;

    intel_modeset_init(dev);

    ret = i915_load_gem_init(dev);
    if (ret)
        goto cleanup_vga_switcheroo;

    intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

    /* Always safe in the mode setting case. */
    /* FIXME: do pre/post-mode set stuff in core KMS code */
    dev->vblank_disable_allowed = 1;

    ret = intel_fbdev_init(dev);
    if (ret)
        goto cleanup_irq;

//    drm_kms_helper_poll_init(dev);

    /* We're off and running w/KMS */
    dev_priv->mm.suspended = 0;

    return 0;

    /* NOTE(review): all cleanup bodies below are stubbed out in this port,
     * so the error paths only propagate 'ret' without undoing prior init. */
cleanup_irq:
//    drm_irq_uninstall(dev);
cleanup_gem:
//    mutex_lock(&dev->struct_mutex);
//    i915_gem_cleanup_ringbuffer(dev);
//    mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
//    vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//    vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
    return ret;
}
276
 
277
 
278
 
2326 Serge 279
static void i915_pineview_get_mem_freq(struct drm_device *dev)
280
{
281
    drm_i915_private_t *dev_priv = dev->dev_private;
282
    u32 tmp;
283
 
284
    tmp = I915_READ(CLKCFG);
285
 
286
    switch (tmp & CLKCFG_FSB_MASK) {
287
    case CLKCFG_FSB_533:
288
        dev_priv->fsb_freq = 533; /* 133*4 */
289
        break;
290
    case CLKCFG_FSB_800:
291
        dev_priv->fsb_freq = 800; /* 200*4 */
292
        break;
293
    case CLKCFG_FSB_667:
294
        dev_priv->fsb_freq =  667; /* 167*4 */
295
        break;
296
    case CLKCFG_FSB_400:
297
        dev_priv->fsb_freq = 400; /* 100*4 */
298
        break;
299
    }
300
 
301
    switch (tmp & CLKCFG_MEM_MASK) {
302
    case CLKCFG_MEM_533:
303
        dev_priv->mem_freq = 533;
304
        break;
305
    case CLKCFG_MEM_667:
306
        dev_priv->mem_freq = 667;
307
        break;
308
    case CLKCFG_MEM_800:
309
        dev_priv->mem_freq = 800;
310
        break;
311
    }
312
 
313
    /* detect pineview DDR3 setting */
314
    tmp = I915_READ(CSHRDDR3CTL);
315
    dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
316
}
317
 
318
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
319
{
320
    drm_i915_private_t *dev_priv = dev->dev_private;
321
    u16 ddrpll, csipll;
322
 
323
    ddrpll = I915_READ16(DDRMPLL1);
324
    csipll = I915_READ16(CSIPLL0);
325
 
326
    switch (ddrpll & 0xff) {
327
    case 0xc:
328
        dev_priv->mem_freq = 800;
329
        break;
330
    case 0x10:
331
        dev_priv->mem_freq = 1066;
332
        break;
333
    case 0x14:
334
        dev_priv->mem_freq = 1333;
335
        break;
336
    case 0x18:
337
        dev_priv->mem_freq = 1600;
338
        break;
339
    default:
340
        DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
341
                 ddrpll & 0xff);
342
        dev_priv->mem_freq = 0;
343
        break;
344
    }
345
 
346
    dev_priv->r_t = dev_priv->mem_freq;
347
 
348
    switch (csipll & 0x3ff) {
349
    case 0x00c:
350
        dev_priv->fsb_freq = 3200;
351
        break;
352
    case 0x00e:
353
        dev_priv->fsb_freq = 3733;
354
        break;
355
    case 0x010:
356
        dev_priv->fsb_freq = 4266;
357
        break;
358
    case 0x012:
359
        dev_priv->fsb_freq = 4800;
360
        break;
361
    case 0x014:
362
        dev_priv->fsb_freq = 5333;
363
        break;
364
    case 0x016:
365
        dev_priv->fsb_freq = 5866;
366
        break;
367
    case 0x018:
368
        dev_priv->fsb_freq = 6400;
369
        break;
370
    default:
371
        DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
372
                 csipll & 0x3ff);
373
        dev_priv->fsb_freq = 0;
374
        break;
375
    }
376
 
377
    if (dev_priv->fsb_freq == 3200) {
378
        dev_priv->c_m = 0;
379
    } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
380
        dev_priv->c_m = 1;
381
    } else {
382
        dev_priv->c_m = 2;
383
    }
384
}
385
 
386
static int i915_get_bridge_dev(struct drm_device *dev)
387
{
388
    struct drm_i915_private *dev_priv = dev->dev_private;
389
 
390
    dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
391
    if (!dev_priv->bridge_dev) {
392
        DRM_ERROR("bridge device not found\n");
393
        return -1;
394
    }
395
    return 0;
396
}
397
 
398
 
399
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);


/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_i915_private *dev_priv;
    int ret = 0, mmio_bar;
    uint32_t agp_size;

    ENTER();

    dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

    dev->dev_private = (void *)dev_priv;
    dev_priv->dev = dev;
    /* DRM core smuggles the matched intel_device_info through 'flags'. */
    dev_priv->info = (struct intel_device_info *) flags;

    if (i915_get_bridge_dev(dev)) {
        ret = -EIO;
        goto free_priv;
    }

    /* overlay on gen2 is broken and can't address above 1G */
//    if (IS_GEN2(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

    /* 965GM sometimes incorrectly writes to hardware status page (HWS)
     * using 32bit addressing, overwriting memory if HWS is located
     * above 4GB.
     *
     * The documentation also mentions an issue with undefined
     * behaviour if any general state is accessed within a page above 4GB,
     * which also needs to be handled carefully.
     */
//    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
//        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

    /* MMIO registers live in BAR 1 on gen2, BAR 0 on everything newer. */
    mmio_bar = IS_GEN2(dev) ? 1 : 0;
    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
    if (!dev_priv->regs) {
        DRM_ERROR("failed to map registers\n");
        ret = -EIO;
        goto put_bridge;
    }

    dev_priv->mm.gtt = intel_gtt_get();
    if (!dev_priv->mm.gtt) {
        DRM_ERROR("Failed to initialize GTT\n");
        ret = -ENODEV;
        goto out_rmmap;
    }

//    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

/*   agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;   */

//    dev_priv->mm.gtt_mapping =
//        io_mapping_create_wc(dev->agp->base, agp_size);
//    if (dev_priv->mm.gtt_mapping == NULL) {
//        ret = -EIO;
//        goto out_rmmap;
//    }

    /* Set up a WC MTRR for non-PAT systems.  This is more common than
     * one would think, because the kernel disables PAT on first
     * generation Core chips because WC PAT gets overridden by a UC
     * MTRR if present.  Even if a UC MTRR isn't present.
     */
//    dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
//                     agp_size,
//                     MTRR_TYPE_WRCOMB, 1);
//    if (dev_priv->mm.gtt_mtrr < 0) {
//        DRM_INFO("MTRR allocation failed.  Graphics "
//             "performance may suffer.\n");
//    }

    /* The i915 workqueue is primarily used for batched retirement of
     * requests (and thus managing bo) once the task has been completed
     * by the GPU. i915_gem_retire_requests() is called directly when we
     * need high-priority retirement, such as waiting for an explicit
     * bo.
     *
     * It is also used for periodic low-priority events, such as
     * idle-timers and recording error state.
     *
     * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
     * workqueue at any time: max_active = 1 and NON_REENTRANT.
     */
      dev_priv->wq = alloc_workqueue("i915",
                         WQ_UNBOUND | WQ_NON_REENTRANT,
                         1);
      if (dev_priv->wq == NULL) {
          DRM_ERROR("Failed to create our workqueue.\n");
          ret = -ENOMEM;
          goto out_mtrrfree;
      }

    /* enable GEM by default */
    dev_priv->has_gem = 1;

	intel_irq_init(dev);

    /* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
    intel_setup_gmbus(dev);
    intel_opregion_setup(dev);

    /* Make sure the bios did its job and set up vital registers */
    intel_setup_bios(dev);

    i915_gem_load(dev);

    /* Init HWS */
    if (!I915_NEED_GFX_HWS(dev)) {
        ret = i915_init_phys_hws(dev);
        if (ret)
            goto out_gem_unload;
    }

    /* Read chipset clocking so watermark/power code has the numbers. */
    if (IS_PINEVIEW(dev))
        i915_pineview_get_mem_freq(dev);
    else if (IS_GEN5(dev))
        i915_ironlake_get_mem_freq(dev);

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs.  It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     *
     * According to chipset errata, on the 965GM, MSI interrupts may
     * be lost or delayed, but we use them anyways to avoid
     * stuck interrupts on some machines.
     */
//    if (!IS_I945G(dev) && !IS_I945GM(dev))
//        pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->gt_lock);
    spin_lock_init(&dev_priv->irq_lock);
    spin_lock_init(&dev_priv->error_lock);
    spin_lock_init(&dev_priv->rps_lock);

    /* Pipe count: 3 on Ivybridge, 2 on mobile or gen3+, else 1. */
	if (IS_IVYBRIDGE(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
        dev_priv->num_pipe = 2;
    else
        dev_priv->num_pipe = 1;

//    ret = drm_vblank_init(dev, dev_priv->num_pipe);
//    if (ret)
//        goto out_gem_unload;

    /* Start out suspended */
    dev_priv->mm.suspended = 1;

    intel_detect_pch(dev);

    ret = i915_load_modeset_init(dev);
    if (ret < 0) {
        DRM_ERROR("failed to init modeset\n");
            goto out_gem_unload;
    }

    /* Must be done after probing outputs */
//    intel_opregion_init(dev);
//    acpi_video_register();

//    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
//            (unsigned long) dev);

    /* Publish this device for the IPS driver under mchdev_lock. */
    spin_lock(&mchdev_lock);
    i915_mch_dev = dev_priv;
    dev_priv->mchdev_lock = &mchdev_lock;
    spin_unlock(&mchdev_lock);

//    ips_ping_for_i915_load();

    LEAVE();

    return 0;

    /* NOTE(review): most unwind steps below are stubbed out in this port,
     * so resources acquired before a late failure are not fully released. */
out_gem_unload:
//    if (dev_priv->mm.inactive_shrinker.shrink)
//        unregister_shrinker(&dev_priv->mm.inactive_shrinker);

//    if (dev->pdev->msi_enabled)
//        pci_disable_msi(dev->pdev);

//    intel_teardown_gmbus(dev);
//    intel_teardown_mchbar(dev);
//    destroy_workqueue(dev_priv->wq);
out_mtrrfree:
//    if (dev_priv->mm.gtt_mtrr >= 0) {
//        mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
//             dev->agp->agp_info.aper_size * 1024 * 1024);
//        dev_priv->mm.gtt_mtrr = -1;
//    }
//    io_mapping_free(dev_priv->mm.gtt_mapping);

out_rmmap:
    pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
//    pci_dev_put(dev_priv->bridge_dev);
free_priv:
    kfree(dev_priv);
    return ret;
}
632