Rev 1222 | Rev 1233 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1222 | Rev 1230 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2007 David Airlie |
2 | * Copyright © 2007 David Airlie |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
22 | * |
23 | * Authors: |
23 | * Authors: |
24 | * David Airlie |
24 | * David Airlie |
25 | */ |
25 | */ |
26 | /* |
26 | /* |
27 | * Modularization |
27 | * Modularization |
28 | */ |
28 | */ |
29 | 29 | ||
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | 32 | ||
33 | #include "drmP.h" |
33 | #include "drmP.h" |
34 | #include "drm.h" |
34 | #include "drm.h" |
35 | #include "drm_crtc.h" |
35 | #include "drm_crtc.h" |
36 | #include "drm_crtc_helper.h" |
36 | #include "drm_crtc_helper.h" |
37 | #include "radeon_drm.h" |
37 | #include "radeon_drm.h" |
38 | #include "radeon.h" |
38 | #include "radeon.h" |
39 | 39 | ||
40 | #include "drm_fb_helper.h" |
40 | #include "drm_fb_helper.h" |
41 | 41 | ||
42 | #include |
42 | #include |
43 | #include "radeon_object.h" |
43 | #include "radeon_object.h" |
44 | 44 | ||
45 | struct fb_info *framebuffer_alloc(size_t size, void *dev); |
45 | struct fb_info *framebuffer_alloc(size_t size, void *dev); |
46 | 46 | ||
47 | struct radeon_fb_device { |
47 | struct radeon_fb_device { |
48 | struct drm_fb_helper helper; |
48 | struct drm_fb_helper helper; |
49 | struct radeon_framebuffer *rfb; |
49 | struct radeon_framebuffer *rfb; |
50 | struct radeon_device *rdev; |
50 | struct radeon_device *rdev; |
51 | }; |
51 | }; |
52 | 52 | ||
53 | static struct fb_ops radeonfb_ops = { |
53 | static struct fb_ops radeonfb_ops = { |
54 | // .owner = THIS_MODULE, |
54 | // .owner = THIS_MODULE, |
55 | .fb_check_var = drm_fb_helper_check_var, |
55 | .fb_check_var = drm_fb_helper_check_var, |
56 | .fb_set_par = drm_fb_helper_set_par, |
56 | .fb_set_par = drm_fb_helper_set_par, |
57 | .fb_setcolreg = drm_fb_helper_setcolreg, |
57 | .fb_setcolreg = drm_fb_helper_setcolreg, |
58 | // .fb_fillrect = cfb_fillrect, |
58 | // .fb_fillrect = cfb_fillrect, |
59 | // .fb_copyarea = cfb_copyarea, |
59 | // .fb_copyarea = cfb_copyarea, |
60 | // .fb_imageblit = cfb_imageblit, |
60 | // .fb_imageblit = cfb_imageblit, |
61 | // .fb_pan_display = drm_fb_helper_pan_display, |
61 | // .fb_pan_display = drm_fb_helper_pan_display, |
62 | .fb_blank = drm_fb_helper_blank, |
62 | .fb_blank = drm_fb_helper_blank, |
63 | .fb_setcmap = drm_fb_helper_setcmap, |
63 | .fb_setcmap = drm_fb_helper_setcmap, |
64 | }; |
64 | }; |
65 | 65 | ||
66 | /** |
66 | /** |
67 | * Curretly it is assumed that the old framebuffer is reused. |
67 | * Curretly it is assumed that the old framebuffer is reused. |
68 | * |
68 | * |
69 | * LOCKING |
69 | * LOCKING |
70 | * caller should hold the mode config lock. |
70 | * caller should hold the mode config lock. |
71 | * |
71 | * |
72 | */ |
72 | */ |
73 | int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) |
73 | int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) |
74 | { |
74 | { |
75 | struct fb_info *info; |
75 | struct fb_info *info; |
76 | struct drm_framebuffer *fb; |
76 | struct drm_framebuffer *fb; |
77 | struct drm_display_mode *mode = crtc->desired_mode; |
77 | struct drm_display_mode *mode = crtc->desired_mode; |
78 | 78 | ||
79 | fb = crtc->fb; |
79 | fb = crtc->fb; |
80 | if (fb == NULL) { |
80 | if (fb == NULL) { |
81 | return 1; |
81 | return 1; |
82 | } |
82 | } |
83 | info = fb->fbdev; |
83 | info = fb->fbdev; |
84 | if (info == NULL) { |
84 | if (info == NULL) { |
85 | return 1; |
85 | return 1; |
86 | } |
86 | } |
87 | if (mode == NULL) { |
87 | if (mode == NULL) { |
88 | return 1; |
88 | return 1; |
89 | } |
89 | } |
90 | info->var.xres = mode->hdisplay; |
90 | info->var.xres = mode->hdisplay; |
91 | info->var.right_margin = mode->hsync_start - mode->hdisplay; |
91 | info->var.right_margin = mode->hsync_start - mode->hdisplay; |
92 | info->var.hsync_len = mode->hsync_end - mode->hsync_start; |
92 | info->var.hsync_len = mode->hsync_end - mode->hsync_start; |
93 | info->var.left_margin = mode->htotal - mode->hsync_end; |
93 | info->var.left_margin = mode->htotal - mode->hsync_end; |
94 | info->var.yres = mode->vdisplay; |
94 | info->var.yres = mode->vdisplay; |
95 | info->var.lower_margin = mode->vsync_start - mode->vdisplay; |
95 | info->var.lower_margin = mode->vsync_start - mode->vdisplay; |
96 | info->var.vsync_len = mode->vsync_end - mode->vsync_start; |
96 | info->var.vsync_len = mode->vsync_end - mode->vsync_start; |
97 | info->var.upper_margin = mode->vtotal - mode->vsync_end; |
97 | info->var.upper_margin = mode->vtotal - mode->vsync_end; |
98 | info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; |
98 | info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; |
99 | /* avoid overflow */ |
99 | /* avoid overflow */ |
100 | info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; |
100 | info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; |
101 | 101 | ||
102 | return 0; |
102 | return 0; |
103 | } |
103 | } |
104 | EXPORT_SYMBOL(radeonfb_resize); |
104 | EXPORT_SYMBOL(radeonfb_resize); |
105 | 105 | ||
106 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
106 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
107 | { |
107 | { |
108 | int aligned = width; |
108 | int aligned = width; |
109 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
109 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
110 | int pitch_mask = 0; |
110 | int pitch_mask = 0; |
111 | 111 | ||
112 | switch (bpp / 8) { |
112 | switch (bpp / 8) { |
113 | case 1: |
113 | case 1: |
114 | pitch_mask = align_large ? 255 : 127; |
114 | pitch_mask = align_large ? 255 : 127; |
115 | break; |
115 | break; |
116 | case 2: |
116 | case 2: |
117 | pitch_mask = align_large ? 127 : 31; |
117 | pitch_mask = align_large ? 127 : 31; |
118 | break; |
118 | break; |
119 | case 3: |
119 | case 3: |
120 | case 4: |
120 | case 4: |
121 | pitch_mask = align_large ? 63 : 15; |
121 | pitch_mask = align_large ? 63 : 15; |
122 | break; |
122 | break; |
123 | } |
123 | } |
124 | 124 | ||
125 | aligned += pitch_mask; |
125 | aligned += pitch_mask; |
126 | aligned &= ~pitch_mask; |
126 | aligned &= ~pitch_mask; |
127 | return aligned; |
127 | return aligned; |
128 | } |
128 | } |
129 | 129 | ||
130 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { |
130 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { |
131 | .gamma_set = radeon_crtc_fb_gamma_set, |
131 | .gamma_set = radeon_crtc_fb_gamma_set, |
132 | .gamma_get = radeon_crtc_fb_gamma_get, |
132 | .gamma_get = radeon_crtc_fb_gamma_get, |
133 | }; |
133 | }; |
134 | 134 | ||
135 | int radeonfb_create(struct drm_device *dev, |
135 | int radeonfb_create(struct drm_device *dev, |
136 | uint32_t fb_width, uint32_t fb_height, |
136 | uint32_t fb_width, uint32_t fb_height, |
137 | uint32_t surface_width, uint32_t surface_height, |
137 | uint32_t surface_width, uint32_t surface_height, |
138 | uint32_t surface_depth, uint32_t surface_bpp, |
138 | uint32_t surface_depth, uint32_t surface_bpp, |
139 | struct drm_framebuffer **fb_p) |
139 | struct drm_framebuffer **fb_p) |
140 | { |
140 | { |
141 | struct radeon_device *rdev = dev->dev_private; |
141 | struct radeon_device *rdev = dev->dev_private; |
142 | struct fb_info *info; |
142 | struct fb_info *info; |
143 | struct radeon_fb_device *rfbdev; |
143 | struct radeon_fb_device *rfbdev; |
144 | struct drm_framebuffer *fb = NULL; |
144 | struct drm_framebuffer *fb = NULL; |
145 | struct radeon_framebuffer *rfb; |
145 | struct radeon_framebuffer *rfb; |
146 | struct drm_mode_fb_cmd mode_cmd; |
146 | struct drm_mode_fb_cmd mode_cmd; |
147 | struct drm_gem_object *gobj = NULL; |
147 | struct drm_gem_object *gobj = NULL; |
148 | struct radeon_object *robj = NULL; |
148 | struct radeon_object *robj = NULL; |
149 | void *device = NULL; //&rdev->pdev->dev; |
149 | void *device = NULL; //&rdev->pdev->dev; |
150 | int size, aligned_size, ret; |
150 | int size, aligned_size, ret; |
151 | u64 fb_gpuaddr; |
151 | u64 fb_gpuaddr; |
152 | void *fbptr = NULL; |
152 | void *fbptr = NULL; |
153 | unsigned long tmp; |
153 | unsigned long tmp; |
154 | bool fb_tiled = false; /* useful for testing */ |
154 | bool fb_tiled = false; /* useful for testing */ |
155 | u32 tiling_flags = 0; |
155 | u32 tiling_flags = 0; |
156 | int crtc_count; |
156 | int crtc_count; |
157 | 157 | ||
158 | mode_cmd.width = surface_width; |
158 | mode_cmd.width = surface_width; |
159 | mode_cmd.height = surface_height; |
159 | mode_cmd.height = surface_height; |
160 | 160 | ||
161 | /* avivo can't scanout real 24bpp */ |
161 | /* avivo can't scanout real 24bpp */ |
162 | if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) |
162 | if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) |
163 | surface_bpp = 32; |
163 | surface_bpp = 32; |
164 | 164 | ||
165 | mode_cmd.bpp = 32; |
165 | mode_cmd.bpp = 32; |
166 | /* need to align pitch with crtc limits */ |
166 | /* need to align pitch with crtc limits */ |
167 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
167 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
168 | mode_cmd.depth = surface_depth; |
168 | mode_cmd.depth = surface_depth; |
169 | 169 | ||
170 | size = mode_cmd.pitch * mode_cmd.height; |
170 | size = mode_cmd.pitch * mode_cmd.height; |
171 | aligned_size = ALIGN(size, PAGE_SIZE); |
171 | aligned_size = ALIGN(size, PAGE_SIZE); |
172 | 172 | ||
173 | ret = radeon_gem_fb_object_create(rdev, aligned_size, 0, |
173 | ret = radeon_gem_fb_object_create(rdev, aligned_size, 0, |
174 | RADEON_GEM_DOMAIN_VRAM, |
174 | RADEON_GEM_DOMAIN_VRAM, |
175 | false, 0, |
175 | false, 0, |
176 | false, &gobj); |
176 | false, &gobj); |
177 | if (ret) { |
177 | if (ret) { |
178 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
178 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
179 | surface_width, surface_height); |
179 | surface_width, surface_height); |
180 | ret = -ENOMEM; |
180 | ret = -ENOMEM; |
181 | goto out; |
181 | goto out; |
182 | } |
182 | } |
183 | robj = gobj->driver_private; |
183 | robj = gobj->driver_private; |
184 | 184 | ||
185 | mutex_lock(&rdev->ddev->struct_mutex); |
185 | mutex_lock(&rdev->ddev->struct_mutex); |
186 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
186 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
187 | if (fb == NULL) { |
187 | if (fb == NULL) { |
188 | DRM_ERROR("failed to allocate fb.\n"); |
188 | DRM_ERROR("failed to allocate fb.\n"); |
189 | ret = -ENOMEM; |
189 | ret = -ENOMEM; |
190 | goto out_unref; |
190 | goto out_unref; |
191 | } |
191 | } |
192 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); |
192 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); |
193 | if (ret) { |
193 | if (ret) { |
194 | printk(KERN_ERR "failed to pin framebuffer\n"); |
194 | printk(KERN_ERR "failed to pin framebuffer\n"); |
195 | ret = -ENOMEM; |
195 | ret = -ENOMEM; |
196 | goto out_unref; |
196 | goto out_unref; |
197 | } |
197 | } |
198 | 198 | ||
199 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); |
199 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); |
200 | 200 | ||
201 | *fb_p = fb; |
201 | *fb_p = fb; |
202 | rfb = to_radeon_framebuffer(fb); |
202 | rfb = to_radeon_framebuffer(fb); |
203 | rdev->fbdev_rfb = rfb; |
203 | rdev->fbdev_rfb = rfb; |
204 | rdev->fbdev_robj = robj; |
204 | rdev->fbdev_robj = robj; |
205 | 205 | ||
206 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
206 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
207 | if (info == NULL) { |
207 | if (info == NULL) { |
208 | ret = -ENOMEM; |
208 | ret = -ENOMEM; |
209 | goto out_unref; |
209 | goto out_unref; |
210 | } |
210 | } |
211 | 211 | ||
212 | rdev->fbdev_info = info; |
212 | rdev->fbdev_info = info; |
213 | rfbdev = info->par; |
213 | rfbdev = info->par; |
214 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
214 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
215 | rfbdev->helper.dev = dev; |
215 | rfbdev->helper.dev = dev; |
216 | if (rdev->flags & RADEON_SINGLE_CRTC) |
216 | if (rdev->flags & RADEON_SINGLE_CRTC) |
217 | crtc_count = 1; |
217 | crtc_count = 1; |
218 | else |
218 | else |
219 | crtc_count = 2; |
219 | crtc_count = 2; |
220 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, |
220 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, |
221 | RADEONFB_CONN_LIMIT); |
221 | RADEONFB_CONN_LIMIT); |
222 | if (ret) |
222 | if (ret) |
223 | goto out_unref; |
223 | goto out_unref; |
224 | 224 | ||
225 | // ret = radeon_object_kmap(robj, &fbptr); |
225 | // ret = radeon_object_kmap(robj, &fbptr); |
226 | // if (ret) { |
226 | // if (ret) { |
227 | // goto out_unref; |
227 | // goto out_unref; |
228 | // } |
228 | // } |
229 | 229 | ||
230 | 230 | ||
231 | fbptr = (void*)0xFE000000; // LFB_BASE |
231 | fbptr = (void*)0xFE000000; // LFB_BASE |
232 | 232 | ||
233 | strcpy(info->fix.id, "radeondrmfb"); |
233 | strcpy(info->fix.id, "radeondrmfb"); |
234 | 234 | ||
235 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
235 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
236 | 236 | ||
237 | info->flags = FBINFO_DEFAULT; |
237 | info->flags = FBINFO_DEFAULT; |
238 | info->fbops = &radeonfb_ops; |
238 | info->fbops = &radeonfb_ops; |
239 | 239 | ||
240 | tmp = fb_gpuaddr - rdev->mc.vram_location; |
240 | tmp = fb_gpuaddr - rdev->mc.vram_location; |
241 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
241 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
242 | info->fix.smem_len = size; |
242 | info->fix.smem_len = size; |
243 | info->screen_base = fbptr; |
243 | info->screen_base = fbptr; |
244 | info->screen_size = size; |
244 | info->screen_size = size; |
245 | 245 | ||
246 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
246 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
247 | 247 | ||
248 | /* setup aperture base/size for vesafb takeover */ |
248 | /* setup aperture base/size for vesafb takeover */ |
249 | info->aperture_base = rdev->ddev->mode_config.fb_base; |
249 | info->aperture_base = rdev->ddev->mode_config.fb_base; |
250 | info->aperture_size = rdev->mc.real_vram_size; |
250 | info->aperture_size = rdev->mc.real_vram_size; |
251 | 251 | ||
252 | info->fix.mmio_start = 0; |
252 | info->fix.mmio_start = 0; |
253 | info->fix.mmio_len = 0; |
253 | info->fix.mmio_len = 0; |
254 | // info->pixmap.size = 64*1024; |
254 | // info->pixmap.size = 64*1024; |
255 | // info->pixmap.buf_align = 8; |
255 | // info->pixmap.buf_align = 8; |
256 | // info->pixmap.access_align = 32; |
256 | // info->pixmap.access_align = 32; |
257 | // info->pixmap.flags = FB_PIXMAP_SYSTEM; |
257 | // info->pixmap.flags = FB_PIXMAP_SYSTEM; |
258 | // info->pixmap.scan_align = 1; |
258 | // info->pixmap.scan_align = 1; |
259 | if (info->screen_base == NULL) { |
259 | if (info->screen_base == NULL) { |
260 | ret = -ENOSPC; |
260 | ret = -ENOSPC; |
261 | goto out_unref; |
261 | goto out_unref; |
262 | } |
262 | } |
263 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); |
263 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); |
264 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); |
264 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); |
265 | DRM_INFO("size %lu\n", (unsigned long)size); |
265 | DRM_INFO("size %lu\n", (unsigned long)size); |
266 | DRM_INFO("fb depth is %d\n", fb->depth); |
266 | DRM_INFO("fb depth is %d\n", fb->depth); |
267 | DRM_INFO(" pitch is %d\n", fb->pitch); |
267 | DRM_INFO(" pitch is %d\n", fb->pitch); |
268 | 268 | ||
269 | dbgprintf("fb = %x\n", fb); |
269 | dbgprintf("fb = %x\n", fb); |
270 | 270 | ||
271 | fb->fbdev = info; |
271 | fb->fbdev = info; |
272 | rfbdev->rfb = rfb; |
272 | rfbdev->rfb = rfb; |
273 | rfbdev->rdev = rdev; |
273 | rfbdev->rdev = rdev; |
274 | 274 | ||
275 | mutex_unlock(&rdev->ddev->struct_mutex); |
275 | mutex_unlock(&rdev->ddev->struct_mutex); |
276 | return 0; |
276 | return 0; |
277 | 277 | ||
278 | out_unref: |
278 | out_unref: |
279 | if (robj) { |
279 | if (robj) { |
280 | // radeon_object_kunmap(robj); |
280 | // radeon_object_kunmap(robj); |
281 | } |
281 | } |
282 | if (fb && ret) { |
282 | if (fb && ret) { |
283 | list_del(&fb->filp_head); |
283 | list_del(&fb->filp_head); |
284 | // drm_gem_object_unreference(gobj); |
284 | // drm_gem_object_unreference(gobj); |
285 | // drm_framebuffer_cleanup(fb); |
285 | // drm_framebuffer_cleanup(fb); |
286 | kfree(fb); |
286 | kfree(fb); |
287 | } |
287 | } |
288 | // drm_gem_object_unreference(gobj); |
288 | // drm_gem_object_unreference(gobj); |
289 | mutex_unlock(&rdev->ddev->struct_mutex); |
289 | mutex_unlock(&rdev->ddev->struct_mutex); |
290 | out: |
290 | out: |
291 | return ret; |
291 | return ret; |
292 | } |
292 | } |
293 | 293 | ||
294 | int radeonfb_probe(struct drm_device *dev) |
294 | int radeonfb_probe(struct drm_device *dev) |
295 | { |
295 | { |
296 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); |
296 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); |
297 | } |
297 | } |
298 | 298 | ||
299 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
299 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
300 | { |
300 | { |
301 | struct fb_info *info; |
301 | struct fb_info *info; |
302 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
302 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
303 | struct radeon_object *robj; |
303 | struct radeon_object *robj; |
304 | 304 | ||
305 | if (!fb) { |
305 | if (!fb) { |
306 | return -EINVAL; |
306 | return -EINVAL; |
307 | } |
307 | } |
308 | info = fb->fbdev; |
308 | info = fb->fbdev; |
309 | if (info) { |
309 | if (info) { |
310 | struct radeon_fb_device *rfbdev = info->par; |
310 | struct radeon_fb_device *rfbdev = info->par; |
311 | robj = rfb->obj->driver_private; |
311 | robj = rfb->obj->driver_private; |
312 | // unregister_framebuffer(info); |
312 | // unregister_framebuffer(info); |
313 | // radeon_object_kunmap(robj); |
313 | // radeon_object_kunmap(robj); |
314 | // radeon_object_unpin(robj); |
314 | // radeon_object_unpin(robj); |
315 | // framebuffer_release(info); |
315 | // framebuffer_release(info); |
316 | } |
316 | } |
317 | 317 | ||
318 | printk(KERN_INFO "unregistered panic notifier\n"); |
318 | printk(KERN_INFO "unregistered panic notifier\n"); |
319 | 319 | ||
320 | return 0; |
320 | return 0; |
321 | } |
321 | } |
322 | EXPORT_SYMBOL(radeonfb_remove); |
322 | EXPORT_SYMBOL(radeonfb_remove); |
323 | 323 | ||
324 | 324 | ||
325 | /** |
325 | /** |
326 | * Allocate a GEM object of the specified size with shmfs backing store |
326 | * Allocate a GEM object of the specified size with shmfs backing store |
327 | */ |
327 | */ |
328 | struct drm_gem_object * |
328 | struct drm_gem_object * |
329 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
329 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
330 | { |
330 | { |
331 | struct drm_gem_object *obj; |
331 | struct drm_gem_object *obj; |
332 | 332 | ||
333 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
333 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
334 | 334 | ||
335 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
335 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
336 | 336 | ||
337 | obj->dev = dev; |
337 | obj->dev = dev; |
338 | // obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
338 | // obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
339 | // if (IS_ERR(obj->filp)) { |
339 | // if (IS_ERR(obj->filp)) { |
340 | // kfree(obj); |
340 | // kfree(obj); |
341 | // return NULL; |
341 | // return NULL; |
342 | // } |
342 | // } |
343 | 343 | ||
344 | // kref_init(&obj->refcount); |
344 | // kref_init(&obj->refcount); |
345 | // kref_init(&obj->handlecount); |
345 | // kref_init(&obj->handlecount); |
346 | obj->size = size; |
346 | obj->size = size; |
347 | 347 | ||
348 | // if (dev->driver->gem_init_object != NULL && |
348 | // if (dev->driver->gem_init_object != NULL && |
349 | // dev->driver->gem_init_object(obj) != 0) { |
349 | // dev->driver->gem_init_object(obj) != 0) { |
350 | // fput(obj->filp); |
350 | // fput(obj->filp); |
351 | // kfree(obj); |
351 | // kfree(obj); |
352 | // return NULL; |
352 | // return NULL; |
353 | // } |
353 | // } |
354 | // atomic_inc(&dev->object_count); |
354 | // atomic_inc(&dev->object_count); |
355 | // atomic_add(obj->size, &dev->object_memory); |
355 | // atomic_add(obj->size, &dev->object_memory); |
356 | return obj; |
356 | return obj; |
357 | } |
357 | } |
358 | 358 | ||
359 | 359 | ||
360 | int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
360 | int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
361 | int alignment, int initial_domain, |
361 | int alignment, int initial_domain, |
362 | bool discardable, bool kernel, |
362 | bool discardable, bool kernel, |
363 | bool interruptible, |
363 | bool interruptible, |
364 | struct drm_gem_object **obj) |
364 | struct drm_gem_object **obj) |
365 | { |
365 | { |
366 | struct drm_gem_object *gobj; |
366 | struct drm_gem_object *gobj; |
367 | struct radeon_object *robj; |
367 | struct radeon_object *robj; |
368 | 368 | ||
369 | *obj = NULL; |
369 | *obj = NULL; |
370 | gobj = drm_gem_object_alloc(rdev->ddev, size); |
370 | gobj = drm_gem_object_alloc(rdev->ddev, size); |
371 | if (!gobj) { |
371 | if (!gobj) { |
372 | return -ENOMEM; |
372 | return -ENOMEM; |
373 | } |
373 | } |
374 | /* At least align on page size */ |
374 | /* At least align on page size */ |
375 | if (alignment < PAGE_SIZE) { |
375 | if (alignment < PAGE_SIZE) { |
376 | alignment = PAGE_SIZE; |
376 | alignment = PAGE_SIZE; |
377 | } |
377 | } |
378 | 378 | ||
379 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); |
379 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); |
380 | if (!robj) { |
380 | if (!robj) { |
381 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
381 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
382 | size, initial_domain, alignment); |
382 | size, initial_domain, alignment); |
383 | // mutex_lock(&rdev->ddev->struct_mutex); |
383 | // mutex_lock(&rdev->ddev->struct_mutex); |
384 | // drm_gem_object_unreference(gobj); |
384 | // drm_gem_object_unreference(gobj); |
385 | // mutex_unlock(&rdev->ddev->struct_mutex); |
385 | // mutex_unlock(&rdev->ddev->struct_mutex); |
386 | return -ENOMEM;; |
386 | return -ENOMEM;; |
387 | } |
387 | } |
388 | robj->rdev = rdev; |
388 | robj->rdev = rdev; |
389 | robj->gobj = gobj; |
389 | robj->gobj = gobj; |
390 | INIT_LIST_HEAD(&robj->list); |
390 | INIT_LIST_HEAD(&robj->list); |
391 | 391 | ||
392 | robj->flags = TTM_PL_FLAG_VRAM; |
392 | robj->flags = TTM_PL_FLAG_VRAM; |
393 | 393 | ||
394 | struct drm_mm_node *vm_node; |
394 | struct drm_mm_node *vm_node; |
395 | 395 | ||
396 | vm_node = kzalloc(sizeof(*vm_node),0); |
396 | vm_node = kzalloc(sizeof(*vm_node),0); |
397 | 397 | ||
398 | vm_node->free = 0; |
398 | vm_node->free = 0; |
399 | vm_node->size = 0x800000 >> 12; |
399 | vm_node->size = 0x800000 >> 12; |
400 | vm_node->start = 0; |
400 | vm_node->start = 0; |
401 | vm_node->mm = NULL; |
401 | vm_node->mm = NULL; |
402 | 402 | ||
403 | robj->mm_node = vm_node; |
403 | robj->mm_node = vm_node; |
404 | 404 | ||
405 | robj->vm_addr = ((uint32_t)robj->mm_node->start); |
405 | robj->vm_addr = ((uint32_t)robj->mm_node->start); |
406 | 406 | ||
407 | gobj->driver_private = robj; |
407 | gobj->driver_private = robj; |
408 | *obj = gobj; |
408 | *obj = gobj; |
409 | return 0; |
409 | return 0; |
410 | } |
410 | } |
411 | 411 | ||
412 | 412 | ||
413 | struct fb_info *framebuffer_alloc(size_t size, void *dev) |
413 | struct fb_info *framebuffer_alloc(size_t size, void *dev) |
414 | { |
414 | { |
415 | #define BYTES_PER_LONG (BITS_PER_LONG/8) |
415 | #define BYTES_PER_LONG (BITS_PER_LONG/8) |
416 | #define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG)) |
416 | #define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG)) |
417 | int fb_info_size = sizeof(struct fb_info); |
417 | int fb_info_size = sizeof(struct fb_info); |
418 | struct fb_info *info; |
418 | struct fb_info *info; |
419 | char *p; |
419 | char *p; |
420 | 420 | ||
421 | if (size) |
421 | if (size) |
422 | fb_info_size += PADDING; |
422 | fb_info_size += PADDING; |
423 | 423 | ||
424 | p = kzalloc(fb_info_size + size, GFP_KERNEL); |
424 | p = kzalloc(fb_info_size + size, GFP_KERNEL); |
425 | 425 | ||
426 | if (!p) |
426 | if (!p) |
427 | return NULL; |
427 | return NULL; |
428 | 428 | ||
429 | info = (struct fb_info *) p; |
429 | info = (struct fb_info *) p; |
430 | 430 | ||
431 | if (size) |
431 | if (size) |
432 | info->par = p + fb_info_size; |
432 | info->par = p + fb_info_size; |
433 | 433 | ||
434 | return info; |
434 | return info; |
435 | #undef PADDING |
435 | #undef PADDING |
436 | #undef BYTES_PER_LONG |
436 | #undef BYTES_PER_LONG |
437 | } |
437 | } |
438 | 438 | ||
/*
 * Decode the EDID PnP manufacturer ID: three 5-bit letters ('A' == 1)
 * packed big-endian into bytes x[0]..x[1].
 * NOTE: returns a static buffer — not reentrant.
 */
static char *manufacturer_name(unsigned char *x)
{
	static char name[4];
	unsigned code = ((unsigned)x[0] << 8) | x[1];

	name[0] = '@' + ((code >> 10) & 0x1F);
	name[1] = '@' + ((code >> 5) & 0x1F);
	name[2] = '@' + (code & 0x1F);
	name[3] = '\0';

	return name;
}
- | 450 | ||
450 | 451 | void set_crtc(struct drm_crtc *crtc); |
|
451 | 452 | ||
452 | bool set_mode(struct drm_device *dev, int width, int height) |
453 | bool set_mode(struct drm_device *dev, int width, int height) |
453 | { |
454 | { |
454 | struct drm_connector *connector; |
455 | struct drm_connector *connector; |
455 | 456 | ||
456 | bool ret = false; |
457 | bool ret = false; |
457 | 458 | ||
458 | ENTER(); |
459 | ENTER(); |
459 | 460 | ||
460 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) |
461 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) |
461 | { |
462 | { |
462 | struct drm_display_mode *mode; |
463 | struct drm_display_mode *mode; |
463 | 464 | ||
464 | struct drm_encoder *encoder; |
465 | struct drm_encoder *encoder; |
465 | struct drm_crtc *crtc; |
466 | struct drm_crtc *crtc; |
466 | 467 | ||
467 | if( connector->status != connector_status_connected) |
468 | if( connector->status != connector_status_connected) |
468 | continue; |
469 | continue; |
469 | 470 | ||
470 | encoder = connector->encoder; |
471 | encoder = connector->encoder; |
471 | if( encoder == NULL) |
472 | if( encoder == NULL) |
472 | continue; |
473 | continue; |
473 | 474 | ||
474 | crtc = encoder->crtc; |
475 | crtc = encoder->crtc; |
475 | 476 | ||
476 | if(crtc == NULL) |
477 | if(crtc == NULL) |
477 | continue; |
478 | continue; |
478 | - | ||
479 | /* |
- | |
480 | list_for_each_entry(mode, &connector->modes, head) |
- | |
481 | { |
- | |
482 | if (mode->type & DRM_MODE_TYPE_PREFERRED); |
- | |
483 | break; |
- | |
484 | }; |
- | |
485 | - | ||
486 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
- | |
487 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; |
- | |
488 | - | ||
489 | native_mode->panel_xres = mode->hdisplay; |
- | |
490 | native_mode->panel_yres = mode->vdisplay; |
- | |
491 | - | ||
492 | native_mode->hblank = mode->htotal - mode->hdisplay; |
- | |
493 | native_mode->hoverplus = mode->hsync_start - mode->hdisplay; |
- | |
494 | native_mode->hsync_width = mode->hsync_end - mode->hsync_start; |
- | |
495 | native_mode->vblank = mode->vtotal - mode->vdisplay; |
- | |
496 | native_mode->voverplus = mode->vsync_start - mode->vdisplay; |
- | |
497 | native_mode->vsync_width = mode->vsync_end - mode->vsync_start; |
- | |
498 | native_mode->dotclock = mode->clock; |
- | |
499 | native_mode->flags = mode->flags; |
- | |
500 | */ |
479 | |
501 | list_for_each_entry(mode, &connector->modes, head) |
480 | list_for_each_entry(mode, &connector->modes, head) |
502 | { |
481 | { |
503 | char *con_name, *enc_name; |
482 | char *con_name, *enc_name; |
504 | 483 | ||
505 | struct drm_framebuffer *fb; |
484 | struct drm_framebuffer *fb; |
506 | 485 | ||
507 | if (drm_mode_width(mode) == width && |
486 | if (drm_mode_width(mode) == width && |
508 | drm_mode_height(mode) == height) |
487 | drm_mode_height(mode) == height) |
509 | { |
488 | { |
510 | char con_edid[128]; |
489 | char con_edid[128]; |
511 | 490 | ||
512 | fb = list_first_entry(&dev->mode_config.fb_kernel_list, |
491 | fb = list_first_entry(&dev->mode_config.fb_kernel_list, |
513 | struct drm_framebuffer, filp_head); |
492 | struct drm_framebuffer, filp_head); |
514 | 493 | ||
515 | memcpy(con_edid, connector->edid_blob_ptr->data, 128); |
494 | memcpy(con_edid, connector->edid_blob_ptr->data, 128); |
516 | 495 | ||
517 | dbgprintf("Manufacturer: %s Model %x Serial Number %u\n", |
496 | dbgprintf("Manufacturer: %s Model %x Serial Number %u\n", |
518 | manufacturer_name(con_edid + 0x08), |
497 | manufacturer_name(con_edid + 0x08), |
519 | (unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)), |
498 | (unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)), |
520 | (unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8) |
499 | (unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8) |
521 | + (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24))); |
500 | + (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24))); |
522 | 501 | ||
523 | 502 | ||
524 | con_name = drm_get_connector_name(connector); |
503 | con_name = drm_get_connector_name(connector); |
525 | enc_name = drm_get_encoder_name(encoder); |
504 | enc_name = drm_get_encoder_name(encoder); |
526 | 505 | ||
527 | dbgprintf("set mode %d %d connector %s encoder %s\n", |
506 | dbgprintf("set mode %d %d connector %s encoder %s\n", |
528 | width, height, con_name, enc_name); |
507 | width, height, con_name, enc_name); |
529 | 508 | ||
530 | fb->width = width; |
509 | fb->width = width; |
531 | fb->height = height; |
510 | fb->height = height; |
532 | fb->pitch = radeon_align_pitch(dev->dev_private, width, 32, false) * ((32 + 1) / 8); |
511 | fb->pitch = radeon_align_pitch(dev->dev_private, width, 32, false) * ((32 + 1) / 8); |
533 | 512 | ||
534 | crtc->fb = fb; |
513 | crtc->fb = fb; |
535 | crtc->enabled = true; |
514 | crtc->enabled = true; |
- | 515 | set_crtc(crtc); |
|
536 | 516 | ||
537 | ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb); |
517 | ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb); |
538 | 518 | ||
539 | sysSetScreen(fb->width, fb->height, fb->pitch); |
519 | sysSetScreen(fb->width, fb->height, fb->pitch); |
540 | 520 | ||
541 | if (ret == true) |
521 | if (ret == true) |
542 | { |
522 | { |
543 | dbgprintf("new mode %d %d pitch %d\n",fb->width, fb->height, fb->pitch); |
523 | dbgprintf("new mode %d %d pitch %d\n",fb->width, fb->height, fb->pitch); |
544 | } |
524 | } |
545 | else |
525 | else |
546 | { |
526 | { |
547 | DRM_ERROR("failed to set mode %d_%d on crtc %p\n", |
527 | DRM_ERROR("failed to set mode %d_%d on crtc %p\n", |
548 | fb->width, fb->height, crtc); |
528 | fb->width, fb->height, crtc); |
549 | }; |
529 | }; |
550 | 530 | ||
551 | LEAVE(); |
531 | LEAVE(); |
552 | 532 | ||
553 | return ret; |
533 | return ret; |
554 | }; |
534 | }; |
555 | } |
535 | } |
556 | }; |
536 | }; |
557 | LEAVE(); |
537 | LEAVE(); |
558 | return ret; |
538 | return ret; |
559 | };><>><>><>><>><>> |
539 | };><>><>><>><>><>> |