#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"


static struct drm_mm   mm_vram;
static struct drm_mm   mm_gtt;
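/*
 * Buffer memory is managed with two simple drm_mm range allocators, one for
 * VRAM and one for GTT, instead of the full TTM memory manager.  The helper
 * below grabs a block from one of those heaps; the name drm_mm_alloc() used
 * here is an assumption.
 */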
 
int drm_mm_alloc(struct drm_mm *mm, size_t num_pages,
                 struct drm_mm_node **node)
{
    struct drm_mm_node *vm_node;
    int    r;

retry_pre_get:

    r = drm_mm_pre_get(mm);
    if (unlikely(r != 0))
        return r;

    vm_node = drm_mm_search_free(mm, num_pages, 0, 0);
    if (unlikely(vm_node == NULL)) {
        r = -ENOMEM;
        return r;
    }

    *node = drm_mm_get_block_atomic(vm_node, num_pages, 0);
    if (unlikely(*node == NULL)) {
            goto retry_pre_get;
    }

    return 0;
};
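/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement flags stored
 * in the buffer object; falls back to cached system memory when no domain
 * bit is set.
 */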
 
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
    u32 c = 0;

    rbo->placement.fpfn = 0;
    rbo->placement.lpfn = 0;
    rbo->placement.placement = rbo->placements;
    rbo->placement.busy_placement = rbo->placements;
    if (domain & RADEON_GEM_DOMAIN_VRAM)
        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                    TTM_PL_FLAG_VRAM;
    if (domain & RADEON_GEM_DOMAIN_GTT)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    if (domain & RADEON_GEM_DOMAIN_CPU)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    if (!c)
        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    rbo->placement.num_placement = c;
    rbo->placement.num_busy_placement = c;
}
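/*
 * Set up the VRAM and GTT heaps.  The VRAM heap is assumed to start at
 * 0xC00000 so that the first 12 MiB stay reserved for the primary
 * framebuffer, matching the real_vram_size - 0xC00000 heap size below.
 */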
 
int radeon_bo_init(struct radeon_device *rdev)
{
    int r;

    DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
        rdev->mc.mc_vram_size >> 20,
        (unsigned long long)rdev->mc.aper_size >> 20);
    DRM_INFO("RAM width %dbits %cDR\n",
            rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');

    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
               ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    };

    r = drm_mm_init(&mm_gtt, 0, rdev->mc.gtt_size >> PAGE_SHIFT);
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return 0;
}
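/*
 * Teardown and reservation are only stubs in this port; the exact names and
 * signatures of the two helpers below are assumptions.
 */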
 
void radeon_bo_fini(struct radeon_device *rdev)
{
    int r;

}

/* reservation stub (name assumed): simply mark the object as reserved */
void ttm_bo_reserve(struct ttm_buffer_object *bo)
{
    bo->reserved.counter = 1;
}
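/*
 * Create a buffer object: pick the VRAM or GTT heap from the requested
 * domain, allocate the radeon_bo wrapper and reserve a block of address
 * space for it with drm_mm_alloc().
 */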
 
int radeon_bo_create(struct radeon_device *rdev,
                unsigned long size, int byte_align,
                bool kernel, u32 domain,
                struct radeon_bo **bo_ptr)
{
    enum ttm_bo_type type;

    struct radeon_bo   *bo;
    size_t num_pages;
    struct drm_mm      *mman;
    u32                 bo_domain;
    int r;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if (domain & RADEON_GEM_DOMAIN_VRAM)
    {
        mman = &mm_vram;
        bo_domain = RADEON_GEM_DOMAIN_VRAM;
    }
    else if(domain & RADEON_GEM_DOMAIN_GTT)
    {
        mman = &mm_gtt;
        bo_domain = RADEON_GEM_DOMAIN_GTT;
    }
    else return -EINVAL;

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    bo->rdev = rdev;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    /* Kernel allocations are uninterruptible */

    r = drm_mm_alloc(mman, num_pages, &bo->tbo.vm_node);
    if (unlikely(r != 0))
        return r;

    *bo_ptr = bo;

    return 0;
}
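/*
 * Pin a buffer object at its allocated offset.  VRAM objects only get their
 * GPU offset fixed up; GTT objects are backed by freshly allocated kernel
 * pages which are bound into the GART.  The page-table lookup feeding
 * radeon_gart_bind() is an assumption.
 */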
 
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
    int r=0, i;

    if (bo->pin_count) {
        bo->pin_count++;
        if (gpu_addr)
            *gpu_addr = radeon_bo_gpu_offset(bo);
        return 0;
    }

    bo->tbo.offset = bo->tbo.vm_node->start << PAGE_SHIFT;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    }
    else if (bo->domain & RADEON_GEM_DOMAIN_GTT)
    {
        u32_t *pagelist;
        bo->kptr  = KernelAlloc( bo->tbo.num_pages << PAGE_SHIFT );
        dbgprintf("kernel alloc %x\n", bo->kptr );

        /* page table entries backing the new allocation (lookup assumed) */
        pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(bo->rdev, bo->tbo.offset,
                         bo->tbo.vm_node->size,  pagelist);
        bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
    }
    else
    {
        DRM_ERROR("Unknown placement %x\n", bo->domain);
        bo->tbo.offset = -1;
        r = -1;
    };

    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    if (likely(r == 0)) {
        bo->pin_count = 1;
        if (gpu_addr != NULL)
            *gpu_addr = radeon_bo_gpu_offset(bo);
    }

    if (unlikely(r != 0))
        dev_err(bo->rdev->dev, "%p pin failed\n", bo);
    return r;
};
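/*
 * Drop a pin reference; when the last reference goes away the drm_mm block
 * backing the object is returned to its heap.
 */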
 
int radeon_bo_unpin(struct radeon_bo *bo)
{
    int r = 0;

    if (!bo->pin_count) {
        dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;

    if (bo->tbo.vm_node != NULL)
    {
        drm_mm_put_block(bo->tbo.vm_node);
        bo->tbo.vm_node = NULL;
    };

    return r;
}

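/*
 * CPU mapping helpers: GTT objects are already mapped when they are pinned,
 * so only VRAM objects are mapped here, through the PCI aperture with
 * MapIoMem(); radeon_bo_kunmap() releases that mapping again.
 */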
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
    bool is_iomem;

    if (bo->kptr) {
        if (ptr) {
            *ptr = bo->kptr;
        }
        return 0;
    }

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        bo->cpu_addr = bo->rdev->mc.aper_base +
                       (bo->tbo.vm_node->start << PAGE_SHIFT);
        bo->kptr = (void*)MapIoMem(bo->cpu_addr,
                        bo->tbo.vm_node->size << 12, PG_SW);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = bo->kptr;
    }

    return 0;
}
 
void radeon_bo_kunmap(struct radeon_bo *bo)
{
    if (bo->kptr == NULL)
        return;

    if (bo->domain & RADEON_GEM_DOMAIN_VRAM)
    {
        FreeKernelSpace(bo->kptr);
    }

    bo->kptr = NULL;
}
 
void radeon_bo_unref(struct radeon_bo **bo)
{
    struct ttm_buffer_object *tbo;

    if ((*bo) == NULL)
        return;

    *bo = NULL;
}
 
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                uint32_t *tiling_flags,
                uint32_t *pitch)
{
//    BUG_ON(!atomic_read(&bo->tbo.reserved));
    if (tiling_flags)
        *tiling_flags = bo->tiling_flags;
    if (pitch)
        *pitch = bo->pitch;
}
 
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
    struct drm_gem_object *obj;

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj == NULL)
        return NULL;

    obj->dev  = dev;
    obj->size = size;
    return obj;
}
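/*
 * Constructor for the primary framebuffer object: it occupies the start of
 * VRAM below the mm_vram heap, is created already pinned and uses the fixed
 * kernel mapping at 0xFE000000.  The name radeon_fb_bo_create() and the
 * rdev/gobj parameters used below are assumptions.
 */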
 
int radeon_fb_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
            unsigned long size, bool kernel, u32 domain,
            struct radeon_bo **bo_ptr)
{
    enum ttm_bo_type    type;

    struct radeon_bo    *bo;
    struct drm_mm       *mman;
    struct drm_mm_node  *vm_node;

    size_t  num_pages;
    u32     bo_domain;
    int     r;

    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (num_pages == 0) {
        dbgprintf("Illegal buffer object size.\n");
        return -EINVAL;
    }

    if( (domain & RADEON_GEM_DOMAIN_VRAM) !=
        RADEON_GEM_DOMAIN_VRAM )
    {
        return -EINVAL;
    };

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *bo_ptr = NULL;
    bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
    if (bo == NULL)
        return -ENOMEM;

    bo->rdev = rdev;
//    bo->gobj = gobj;
    bo->surface_reg = -1;
    bo->tbo.num_pages = num_pages;
    bo->domain = domain;

    /* Kernel allocations are uninterruptible */

    /* fake drm_mm node describing the framebuffer area (field values partly assumed) */
    vm_node = kzalloc(sizeof(*vm_node), GFP_KERNEL);
    if (vm_node == NULL)
        return -ENOMEM;

    vm_node->size  = num_pages;
    vm_node->start = 0;
    vm_node->mm = NULL;

    bo->tbo.vm_node = vm_node;
    bo->tbo.offset  = bo->tbo.vm_node->start << PAGE_SHIFT;
    bo->tbo.offset += (u64)bo->rdev->mc.vram_start;
    bo->kptr        = (void*)0xFE000000;
    bo->pin_count   = 1;

    *bo_ptr = bo;

    return 0;
}