Subversion Repositories Kolibri OS


Diff of the Radeon buffer-object source, Rev 2005 → Rev 2007. The only change between the two revisions is the addition of radeon_bo_user_map(); everything else is identical.

#include 
#include 
#include "radeon_drm.h"
#include "radeon.h"


static struct drm_mm   mm_gtt;
static struct drm_mm   mm_vram;


/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
            struct drm_gem_object *obj, size_t size)
{
    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj->dev = dev;
    obj->filp = NULL;

    atomic_set(&obj->handle_count, 0);
    obj->size = size;

    return 0;
}


int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
                 struct drm_mm_node **node)
{
    struct drm_mm_node *vm_node;
    int    r;

retry_pre_get:

    r = drm_mm_pre_get(mm);

    if (unlikely(r != 0))
        return r;

    vm_node = drm_mm_search_free(mm, num_pages, 0, 0);

    if (unlikely(vm_node == NULL)) {
        r = -ENOMEM;
        return r;
    }

    *node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

    if (unlikely(*node == NULL)) {
        goto retry_pre_get;
    }

    return 0;
};
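
drm_mm_alloc() wraps the DRM range manager's pre-get / search-free / get-block-atomic sequence and retries when the atomic grab races. A minimal usage sketch (illustrative only; example_gtt_range and the 16-page size are assumptions, not code from the repository), pairing it with drm_mm_put_block() the same way radeon_bo_unpin() does further down:

static int example_gtt_range(void)
{
    struct drm_mm_node *node;
    int r;

    /* reserve 16 pages from the file-scope GTT manager declared above */
    r = drm_mm_alloc(&mm_gtt, 16, &node);
    if (r != 0)
        return r;                 /* -ENOMEM if no free range was found */

    /* node->start is a page offset inside the GTT aperture */

    drm_mm_put_block(node);       /* return the range to the manager */
    return 0;
}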


void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
    u32 c = 0;

    rbo->placement.fpfn = 0;
    rbo->placement.lpfn = 0;
    rbo->placement.placement = rbo->placements;
    rbo->placement.busy_placement = rbo->placements;
    if (domain & RADEON_GEM_DOMAIN_VRAM)
        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                    TTM_PL_FLAG_VRAM;
    if (domain & RADEON_GEM_DOMAIN_GTT)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    if (domain & RADEON_GEM_DOMAIN_CPU)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    if (!c)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    rbo->placement.num_placement = c;
    rbo->placement.num_busy_placement = c;
}


int radeon_bo_init(struct radeon_device *rdev)
{
    int r;

    DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
        rdev->mc.mc_vram_size >> 20,
        (unsigned long long)rdev->mc.aper_size >> 20);
    DRM_INFO("RAM width %dbits %cDR\n",
            rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');

    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
               ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    };

    r = drm_mm_init(&mm_gtt, 0, rdev->mc.gtt_size >> PAGE_SHIFT);
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return 0;
}


int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
    int r;

    bo->tbo.reserved.counter = 1;

    return 0;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
    bo->reserved.counter = 1;
}

int radeon_bo_create(struct radeon_device *rdev,
             unsigned long size, int byte_align, bool kernel, u32 domain,
             struct radeon_bo **bo_ptr)
{
    struct radeon_bo *bo;
    enum ttm_bo_type type;

    size_t num_pages;
    struct drm_mm      *mman;
    u32                 bo_domain;
    int r;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    size = num_pages << PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if (domain & RADEON_GEM_DOMAIN_VRAM)
    {
        mman = &mm_vram;
        bo_domain = RADEON_GEM_DOMAIN_VRAM;
    }
    else if (domain & RADEON_GEM_DOMAIN_GTT)
    {
        mman = &mm_gtt;
        bo_domain = RADEON_GEM_DOMAIN_GTT;
    }
    else return -EINVAL;

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
    if (unlikely(r)) {
        kfree(bo);
        return r;
    }
    bo->rdev = rdev;
    bo->gem_base.driver_private = NULL;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    INIT_LIST_HEAD(&bo->list);

//    radeon_ttm_placement_from_domain(bo, domain);
    /* Kernel allocations are uninterruptible */

    r = drm_mm_alloc(mman, num_pages, &bo->tbo.vm_node);
    if (unlikely(r != 0)) {
        kfree(bo);
        return r;
    }

    *bo_ptr = bo;

    return 0;
}

#define page_tabs  0xFDC00000      /* just another hack */

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
    int r = 0, i;

    if (bo->pin_count) {
        bo->pin_count++;
        if (gpu_addr)
            *gpu_addr = radeon_bo_gpu_offset(bo);
        return 0;
    }

    bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    }
    else if (bo->domain & RADEON_GEM_DOMAIN_GTT)
    {
        u32_t *pagelist;
        bo->kptr  = KernelAlloc( bo->tbo.num_pages << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", bo->kptr );

        pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(bo->rdev, bo->tbo.offset,
                         bo->tbo.vm_node->size, pagelist);
        bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
    }
    else
    {
        DRM_ERROR("Unknown placement %x\n", bo->domain);
        bo->tbo.offset = -1;
        r = -1;
    };

    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    if (likely(r == 0)) {
        bo->pin_count = 1;
        if (gpu_addr != NULL)
            *gpu_addr = radeon_bo_gpu_offset(bo);
    }

    if (unlikely(r != 0))
        dev_err(bo->rdev->dev, "%p pin failed\n", bo);
    return r;
};

int radeon_bo_unpin(struct radeon_bo *bo)
{
    int r = 0;

    if (!bo->pin_count) {
        dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;

    if (bo->tbo.vm_node)
    {
        drm_mm_put_block(bo->tbo.vm_node);
        bo->tbo.vm_node = NULL;
    };

    return r;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
    bool is_iomem;

    if (bo->kptr) {
        if (ptr) {
            *ptr = bo->kptr;
        }
        return 0;
    }

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        bo->cpu_addr = bo->rdev->mc.aper_base +
                       (bo->tbo.vm_node->start << PAGE_SHIFT);
        bo->kptr = (void*)MapIoMem(bo->cpu_addr,
                        bo->tbo.vm_node->size << 12, PG_SW);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = bo->kptr;
    }

    return 0;
}
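
Taken together, these helpers form the buffer-object lifecycle the rest of the driver builds on. A minimal sketch of the sequence for a kernel-side GTT buffer (illustrative only; example_bo_lifecycle, the one-page size, and the abbreviated error handling are assumptions, not code from the repository):

static int example_bo_lifecycle(struct radeon_device *rdev)
{
    struct radeon_bo *bo;
    u64   gpu_addr;
    void *cpu_ptr;
    int   r;

    /* one page, kernel-owned, placed in the GTT domain */
    r = radeon_bo_create(rdev, PAGE_SIZE, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_GTT, &bo);
    if (r)
        return r;

    /* binds the pages through the GART and reports the GPU address */
    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
    if (r)
        return r;

    /* for GTT objects kptr was already set during the pin */
    r = radeon_bo_kmap(bo, &cpu_ptr);
    if (r == 0)
        ((char *)cpu_ptr)[0] = 0;      /* CPU access through the mapping */

    radeon_bo_kunmap(bo);
    radeon_bo_unpin(bo);
    radeon_bo_unref(&bo);

    return 0;
}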

/* radeon_bo_user_map() is new in Rev 2007 (not present in Rev 2005) */
int radeon_bo_user_map(struct radeon_bo *bo, void **ptr)
{
    bool is_iomem;

    if (bo->uptr) {
        if (ptr) {
            *ptr = bo->uptr;
        }
        return 0;
    }

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        return -1;
    }
    else
    {
        bo->uptr = UserAlloc(bo->tbo.num_pages << PAGE_SHIFT);
        if (bo->uptr)
        {
            u32_t *src, *dst;
            int count;
            src = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
            dst = &((u32_t*)page_tabs)[(u32_t)bo->uptr >> 12];
            count = bo->tbo.num_pages;

            while (count--)
            {
                *dst++ = (0xFFFFF000 & *src++) | 0x207; // map as shared page
            };
        }
        else
            return -1;
    }

    if (ptr) {
        *ptr = bo->uptr;
    }

    return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
    if (bo->kptr == NULL)
        return;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        FreeKernelSpace(bo->kptr);
    }

    bo->kptr = NULL;

}

void radeon_bo_unref(struct radeon_bo **bo)
{
    struct ttm_buffer_object *tbo;

    if ((*bo) == NULL)
        return;

    *bo = NULL;
}


void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                uint32_t *tiling_flags,
                uint32_t *pitch)
{
//    BUG_ON(!atomic_read(&bo->tbo.reserved));
    if (tiling_flags)
        *tiling_flags = bo->tiling_flags;
    if (pitch)
        *pitch = bo->pitch;
}


/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
    struct drm_gem_object *obj;

    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj == NULL)
        return NULL;

    obj->dev = dev;
    obj->size = size;
    return obj;
}


int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
            unsigned long size, bool kernel, u32 domain,
            struct radeon_bo **bo_ptr)
{
    enum ttm_bo_type    type;

    struct radeon_bo    *bo;
    struct drm_mm       *mman;
    struct drm_mm_node  *vm_node;

    size_t  num_pages;
    u32     bo_domain;
    int     r;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if ((domain & RADEON_GEM_DOMAIN_VRAM) !=
        RADEON_GEM_DOMAIN_VRAM)
    {
        return -EINVAL;
    };

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    bo->rdev = rdev;
//    bo->gobj = gobj;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    INIT_LIST_HEAD(&bo->list);

//    radeon_ttm_placement_from_domain(bo, domain);
    /* Kernel allocations are uninterruptible */

    vm_node = kzalloc(sizeof(*vm_node), 0);
    if (vm_node == NULL) {
        kfree(bo);
        return -ENOMEM;
    }

    vm_node->size = 0xC00000 >> 12;
    vm_node->start = 0;
    vm_node->mm = NULL;

    bo->tbo.vm_node = vm_node;
    bo->tbo.offset  = bo->tbo.vm_node->start << PAGE_SHIFT;
    bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    bo->kptr        = (void*)0xFE000000;
    bo->pin_count   = 1;

    *bo_ptr = bo;

    return 0;
}
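
A hedged sketch of how this framebuffer helper might be called (the call site, the NULL gem object, and the naming are assumptions for illustration; only radeon_fb_bo_create() itself comes from the repository). It relies on radeon_bo_init() above starting the VRAM heap at 0xC00000, which leaves the first 12 MiB of VRAM to this pre-pinned scanout buffer:

static int example_create_framebuffer_bo(struct radeon_device *rdev)
{
    struct radeon_bo *fb_bo;
    int r;

    /* The helper accepts only RADEON_GEM_DOMAIN_VRAM and always maps the
     * first 12 MiB of VRAM, matching the region excluded from mm_vram. */
    r = radeon_fb_bo_create(rdev, NULL, 0xC00000, true,
                            RADEON_GEM_DOMAIN_VRAM, &fb_bo);
    if (r)
        return r;

    /* fb_bo->kptr is already set (0xFE000000) and pin_count is 1,
     * so the buffer can be used for scanout immediately. */
    return 0;
}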