Subversion Repositories Kolibri OS

Rev 1182 → Rev 1221
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
    /*
     *  Modularization
     */

#include 
#include 

#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "drm_fb_helper.h"

#include 
#include "radeon_object.h"

struct fb_info *framebuffer_alloc(size_t size, void *dev);

struct radeon_fb_device {
    struct drm_fb_helper        helper;
	struct radeon_framebuffer	*rfb;
	struct radeon_device		*rdev;
};

static struct fb_ops radeonfb_ops = {
//   .owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
//	.fb_fillrect = cfb_fillrect,
//	.fb_copyarea = cfb_copyarea,
//	.fb_imageblit = cfb_imageblit,
//	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

/**
 * Currently it is assumed that the old framebuffer is reused.
 *
 * LOCKING
 * caller should hold the mode config lock.
 *
 */
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_display_mode *mode = crtc->desired_mode;

	fb = crtc->fb;
	if (fb == NULL) {
		return 1;
	}
	info = fb->fbdev;
	if (info == NULL) {
		return 1;
	}
	if (mode == NULL) {
		return 1;
	}
	info->var.xres = mode->hdisplay;
	info->var.right_margin = mode->hsync_start - mode->hdisplay;
	info->var.hsync_len = mode->hsync_end - mode->hsync_start;
	info->var.left_margin = mode->htotal - mode->hsync_end;
	info->var.yres = mode->vdisplay;
	info->var.lower_margin = mode->vsync_start - mode->vdisplay;
	info->var.vsync_len = mode->vsync_end - mode->vsync_start;
	info->var.upper_margin = mode->vtotal - mode->vsync_end;
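	/*
	 * In fbdev terms: right_margin/lower_margin are the horizontal and
	 * vertical front porch (sync_start - display), hsync_len/vsync_len
	 * are the sync pulse widths, and left_margin/upper_margin are the
	 * back porch (total - sync_end), so the assignments above are a
	 * direct translation of the DRM mode timings.
	 */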
	info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
	/* avoid overflow */
	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
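	/*
	 * fbdev expects pixclock to be the pixel clock period in picoseconds,
	 * i.e. roughly 10^12 divided by the dot clock in Hz; the calculation
	 * is split into stages above so the intermediate products stay within
	 * 32 bits, per the "avoid overflow" note.
	 */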

	return 0;
}
EXPORT_SYMBOL(radeonfb_resize);

static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (bpp / 8) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned;
}
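/*
 * Rough worked example of the alignment above: for a 32bpp (4 bytes per
 * pixel) surface on an AVIVO part the pitch mask is 63, so a requested
 * width of 1366 pixels becomes (1366 + 63) & ~63 = 1408 pixels; callers
 * then multiply by the bytes per pixel, giving a 5632 byte pitch.
 */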

static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
	.gamma_get = radeon_crtc_fb_gamma_get,
};

int radeonfb_create(struct drm_device *dev,
		    uint32_t fb_width, uint32_t fb_height,
		    uint32_t surface_width, uint32_t surface_height,
		    uint32_t surface_depth, uint32_t surface_bpp,
		    struct drm_framebuffer **fb_p)
{
	struct radeon_device *rdev = dev->dev_private;
	struct fb_info *info;
	struct radeon_fb_device *rfbdev;
	struct drm_framebuffer *fb = NULL;
	struct radeon_framebuffer *rfb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_object *robj = NULL;
    void   *device = NULL; //&rdev->pdev->dev;
	int size, aligned_size, ret;
	u64 fb_gpuaddr;
	void *fbptr = NULL;
	unsigned long tmp;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int crtc_count;

    mode_cmd.width  = surface_width;
	mode_cmd.height = surface_height;

	/* avivo can't scanout real 24bpp */
	if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		surface_bpp = 32;

	mode_cmd.bpp = 32;
	/* need to align pitch with crtc limits */
	mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
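	/* radeon_align_pitch() returns a pitch in pixels; the ((bpp + 1) / 8)
	 * factor converts it to bytes (4 for the 32bpp case forced above). */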
	mode_cmd.depth = surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	aligned_size = ALIGN(size, PAGE_SIZE);

    ret = radeon_gem_fb_object_create(rdev, aligned_size, 0,
			RADEON_GEM_DOMAIN_VRAM,
            false, 0,
			false, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
		       surface_width, surface_height);
		ret = -ENOMEM;
		goto out;
	}
	robj = gobj->driver_private;

	mutex_lock(&rdev->ddev->struct_mutex);
	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
	if (fb == NULL) {
		DRM_ERROR("failed to allocate fb.\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
	if (ret) {
		printk(KERN_ERR "failed to pin framebuffer\n");
		ret = -ENOMEM;
		goto out_unref;
	}

    list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);

	*fb_p = fb;
	rfb = to_radeon_framebuffer(fb);
	rdev->fbdev_rfb = rfb;
	rdev->fbdev_robj = robj;

	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	rdev->fbdev_info = info;
	rfbdev = info->par;
	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
	rfbdev->helper.dev = dev;
	if (rdev->flags & RADEON_SINGLE_CRTC)
		crtc_count = 1;
	else
		crtc_count = 2;
	ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
					    RADEONFB_CONN_LIMIT);
	if (ret)
		goto out_unref;

//   ret = radeon_object_kmap(robj, &fbptr);
//   if (ret) {
//       goto out_unref;
//   }


    fbptr = (void*)0xFE000000; // LFB_BASE

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);

	info->flags = FBINFO_DEFAULT;
	info->fbops = &radeonfb_ops;

	tmp = fb_gpuaddr - rdev->mc.vram_location;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = size;
	info->screen_base = fbptr;
	info->screen_size = size;

	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = rdev->ddev->mode_config.fb_base;
	info->aperture_size = rdev->mc.real_vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
//   info->pixmap.size = 64*1024;
//   info->pixmap.buf_align = 8;
//   info->pixmap.access_align = 32;
//   info->pixmap.flags = FB_PIXMAP_SYSTEM;
//   info->pixmap.scan_align = 1;
	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}
	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)size);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitch);

    dbgprintf("fb = %x\n", fb);

	fb->fbdev = info;
	rfbdev->rfb = rfb;
	rfbdev->rdev = rdev;

	mutex_unlock(&rdev->ddev->struct_mutex);
	return 0;

out_unref:
	if (robj) {
//       radeon_object_kunmap(robj);
	}
	if (fb && ret) {
		list_del(&fb->filp_head);
 //      drm_gem_object_unreference(gobj);
//       drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
//   drm_gem_object_unreference(gobj);
   mutex_unlock(&rdev->ddev->struct_mutex);
out:
	return ret;
}

int radeonfb_probe(struct drm_device *dev)
{
	return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
}

int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
	struct fb_info *info;
	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
	struct radeon_object *robj;

	if (!fb) {
		return -EINVAL;
	}
	info = fb->fbdev;
	if (info) {
		struct radeon_fb_device *rfbdev = info->par;
		robj = rfb->obj->driver_private;
//       unregister_framebuffer(info);
//       radeon_object_kunmap(robj);
//       radeon_object_unpin(robj);
//       framebuffer_release(info);
	}

	printk(KERN_INFO "unregistered panic notifier\n");

	return 0;
}
EXPORT_SYMBOL(radeonfb_remove);


/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
    struct drm_gem_object *obj;

    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    obj->dev = dev;
//    obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
//    if (IS_ERR(obj->filp)) {
//        kfree(obj);
//        return NULL;
//    }

//    kref_init(&obj->refcount);
//    kref_init(&obj->handlecount);
    obj->size = size;

//    if (dev->driver->gem_init_object != NULL &&
//        dev->driver->gem_init_object(obj) != 0) {
//        fput(obj->filp);
//        kfree(obj);
//        return NULL;
//    }
//    atomic_inc(&dev->object_count);
//    atomic_add(obj->size, &dev->object_memory);
    return obj;
}


int radeon_gem_fb_object_create(struct radeon_device *rdev, int size,
                 int alignment, int initial_domain,
                 bool discardable, bool kernel,
                 bool interruptible,
                 struct drm_gem_object **obj)
{
    struct drm_gem_object *gobj;
    struct radeon_object *robj;

    *obj = NULL;
    gobj = drm_gem_object_alloc(rdev->ddev, size);
    if (!gobj) {
        return -ENOMEM;
    }
    /* At least align on page size */
    if (alignment < PAGE_SIZE) {
        alignment = PAGE_SIZE;
    }

    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (!robj) {
        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
              size, initial_domain, alignment);
//       mutex_lock(&rdev->ddev->struct_mutex);
//       drm_gem_object_unreference(gobj);
//       mutex_unlock(&rdev->ddev->struct_mutex);
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    robj->flags = TTM_PL_FLAG_VRAM;

    struct drm_mm_node *vm_node;

    vm_node = kzalloc(sizeof(*vm_node),0);

    vm_node->free = 0;
    vm_node->size = 0x800000 >> 12;
    vm_node->start = 0;
    vm_node->mm = NULL;

    robj->mm_node = vm_node;

    robj->vm_addr = ((uint32_t)robj->mm_node->start);

    gobj->driver_private = robj;
    *obj = gobj;
    return 0;
}
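/*
 * Note on the hand-built node above: rather than going through a real
 * drm_mm allocator, the framebuffer object is given a fixed drm_mm_node
 * starting at VRAM offset 0 and sized 0x800000 >> 12 = 2048 pages,
 * i.e. 8 MiB of VRAM set aside for the console framebuffer.
 */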


struct fb_info *framebuffer_alloc(size_t size, void *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
        int fb_info_size = sizeof(struct fb_info);
        struct fb_info *info;
        char *p;

        if (size)
                fb_info_size += PADDING;

        p = kzalloc(fb_info_size + size, GFP_KERNEL);

        if (!p)
                return NULL;

        info = (struct fb_info *) p;

        if (size)
                info->par = p + fb_info_size;

        return info;
#undef PADDING
#undef BYTES_PER_LONG
}
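/*
 * Layout of the single allocation made above:
 *
 *   |<-- struct fb_info -->|<- pad to long ->|<-- size bytes for par -->|
 *
 * info->par points just past the padded fb_info, which is how the
 * radeon_fb_device private data is carved out in radeonfb_create().
 * The dev argument only mirrors the Linux prototype and is unused here.
 */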

static char *manufacturer_name(unsigned char *x)
{
    static char name[4];

    name[0] = ((x[0] & 0x7C) >> 2) + '@';
    name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
    name[2] = (x[1] & 0x1F) + '@';
    name[3] = 0;

    return name;
}
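/*
 * The two bytes passed in hold the EDID PNP vendor ID as three packed
 * 5-bit fields, each encoding a letter as 1..26 -> 'A'..'Z'.  For example,
 * ID bytes 0x10 0xAC decode to "DEL" (Dell): (0x10 >> 2) = 4 -> 'D',
 * ((0x10 & 3) << 3) | (0xAC >> 5) = 5 -> 'E', (0xAC & 0x1F) = 12 -> 'L'.
 */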


bool set_mode(struct drm_device *dev, int width, int height)
{
    struct drm_connector *connector;

    bool ret = false;

    ENTER();

    list_for_each_entry(connector, &dev->mode_config.connector_list, head)
    {
        struct drm_display_mode *mode;

        struct drm_encoder  *encoder;
        struct drm_crtc     *crtc;

        if( connector->status != connector_status_connected)
            continue;

        encoder = connector->encoder;
        if( encoder == NULL)
            continue;

        crtc = encoder->crtc;

        if(crtc == NULL)
            continue;

/*
        list_for_each_entry(mode, &connector->modes, head)
        {
            if (mode->type & DRM_MODE_TYPE_PREFERRED);
                break;
        };

        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;

        native_mode->panel_xres = mode->hdisplay;
        native_mode->panel_yres = mode->vdisplay;

        native_mode->hblank = mode->htotal - mode->hdisplay;
        native_mode->hoverplus = mode->hsync_start - mode->hdisplay;
        native_mode->hsync_width = mode->hsync_end - mode->hsync_start;
        native_mode->vblank = mode->vtotal - mode->vdisplay;
        native_mode->voverplus = mode->vsync_start - mode->vdisplay;
        native_mode->vsync_width = mode->vsync_end - mode->vsync_start;
        native_mode->dotclock = mode->clock;
        native_mode->flags = mode->flags;
*/
        list_for_each_entry(mode, &connector->modes, head)
        {
            char *con_name, *enc_name;

            struct drm_framebuffer *fb;

            if (drm_mode_width(mode) == width &&
                drm_mode_height(mode) == height)
            {
                char con_edid[128];

                fb = list_first_entry(&dev->mode_config.fb_kernel_list,
                                      struct drm_framebuffer, filp_head);

                memcpy(con_edid, connector->edid_blob_ptr->data, 128);

                dbgprintf("Manufacturer: %s Model %x Serial Number %u\n",
                manufacturer_name(con_edid + 0x08),
                (unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
                (unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
                    + (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));


                con_name = drm_get_connector_name(connector);
                enc_name = drm_get_encoder_name(encoder);

                dbgprintf("set mode %d %d connector %s encoder %s\n",
                           width, height, con_name, enc_name);

                fb->width = width;
                fb->height = height;
                fb->pitch = radeon_align_pitch(dev->dev_private, width, 32, false) * ((32 + 1) / 8);

                crtc->fb = fb;

                ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);

                sysSetScreen(fb->width, fb->height, fb->pitch);

                if (ret == true)
                {
                    dbgprintf("new mode %d %d pitch %d\n",fb->width, fb->height, fb->pitch);
                }
                else
                {
                    DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
                               fb->width, fb->height, crtc);
                };

                LEAVE();

                return ret;
            };
        }
    };
    LEAVE();
    return ret;
};
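/*
 * Hypothetical usage sketch (not part of this file): once the card has been
 * probed and its connectors registered, the Kolibri display init code could
 * switch to a native resolution with something like
 *
 *     if (!set_mode(dev, 1024, 768))
 *         dbgprintf("no connected output reports a 1024x768 mode\n");
 *
 * set_mode() only programs a CRTC when a connected connector exposes a mode
 * whose width and height match the request exactly.
 */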