Rev 5078 | Rev 6104 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 5078 | Rev 5271 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2007 David Airlie |
2 | * Copyright © 2007 David Airlie |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
22 | * |
23 | * Authors: |
23 | * Authors: |
24 | * David Airlie |
24 | * David Airlie |
25 | */ |
25 | */ |
26 | #include |
26 | #include |
27 | #include |
27 | #include |
28 | #include |
28 | #include |
29 | 29 | ||
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | #include |
33 | #include |
34 | #include "radeon.h" |
34 | #include "radeon.h" |
35 | 35 | ||
36 | #include |
36 | #include |
37 | 37 | ||
/* Published handles to the console framebuffer and its backing GEM
 * object.  NOTE(review): consumers are outside this file (this port's
 * display bring-up code, presumably) -- confirm before changing. */
struct drm_framebuffer *main_fb;
struct drm_gem_object *main_fb_obj;
40 | 40 | ||
/* object hierarchy -
 * this contains a helper + a radeon fb
 * the helper contains a pointer to radeon framebuffer baseclass.
 */
struct radeon_fbdev {
	struct drm_fb_helper helper;	/* generic fbdev emulation state */
	struct radeon_framebuffer rfb;	/* embedded fb -- never kfree()d on its own */
	struct list_head fbdev_list;	/* NOTE(review): looks unused in this file -- confirm */
	struct radeon_device *rdev;	/* owning device */
};
51 | 51 | ||
/* fbdev ops for the emulated console.  The drawing hooks (fillrect,
 * copyarea, imageblit, pan) are disabled in this port; only mode
 * validation, blanking and colormap handling are wired through the
 * generic drm_fb_helper implementations. */
static struct fb_ops radeonfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
//    .fb_fillrect = cfb_fillrect,
//    .fb_copyarea = cfb_copyarea,
//    .fb_imageblit = cfb_imageblit,
//    .fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
63 | 63 | ||
64 | 64 | ||
65 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
65 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
66 | { |
66 | { |
67 | int aligned = width; |
67 | int aligned = width; |
68 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
68 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
69 | int pitch_mask = 0; |
69 | int pitch_mask = 0; |
70 | 70 | ||
71 | switch (bpp / 8) { |
71 | switch (bpp / 8) { |
72 | case 1: |
72 | case 1: |
73 | pitch_mask = align_large ? 255 : 127; |
73 | pitch_mask = align_large ? 255 : 127; |
74 | break; |
74 | break; |
75 | case 2: |
75 | case 2: |
76 | pitch_mask = align_large ? 127 : 31; |
76 | pitch_mask = align_large ? 127 : 31; |
77 | break; |
77 | break; |
78 | case 3: |
78 | case 3: |
79 | case 4: |
79 | case 4: |
80 | pitch_mask = align_large ? 63 : 15; |
80 | pitch_mask = align_large ? 63 : 15; |
81 | break; |
81 | break; |
82 | } |
82 | } |
83 | 83 | ||
84 | aligned += pitch_mask; |
84 | aligned += pitch_mask; |
85 | aligned &= ~pitch_mask; |
85 | aligned &= ~pitch_mask; |
86 | return aligned; |
86 | return aligned; |
87 | } |
87 | } |
88 | 88 | ||
89 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) |
89 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) |
90 | { |
90 | { |
91 | struct radeon_bo *rbo = gem_to_radeon_bo(gobj); |
91 | struct radeon_bo *rbo = gem_to_radeon_bo(gobj); |
92 | int ret; |
92 | int ret; |
93 | 93 | ||
94 | ret = radeon_bo_reserve(rbo, false); |
94 | ret = radeon_bo_reserve(rbo, false); |
95 | if (likely(ret == 0)) { |
95 | if (likely(ret == 0)) { |
96 | radeon_bo_kunmap(rbo); |
96 | radeon_bo_kunmap(rbo); |
97 | radeon_bo_unpin(rbo); |
97 | radeon_bo_unpin(rbo); |
98 | radeon_bo_unreserve(rbo); |
98 | radeon_bo_unreserve(rbo); |
99 | } |
99 | } |
100 | drm_gem_object_unreference_unlocked(gobj); |
100 | drm_gem_object_unreference_unlocked(gobj); |
101 | } |
101 | } |
102 | 102 | ||
103 | static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, |
103 | static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, |
104 | struct drm_mode_fb_cmd2 *mode_cmd, |
104 | struct drm_mode_fb_cmd2 *mode_cmd, |
105 | struct drm_gem_object **gobj_p) |
105 | struct drm_gem_object **gobj_p) |
106 | { |
106 | { |
107 | struct radeon_device *rdev = rfbdev->rdev; |
107 | struct radeon_device *rdev = rfbdev->rdev; |
108 | struct drm_gem_object *gobj = NULL; |
108 | struct drm_gem_object *gobj = NULL; |
109 | struct radeon_bo *rbo = NULL; |
109 | struct radeon_bo *rbo = NULL; |
110 | bool fb_tiled = false; /* useful for testing */ |
110 | bool fb_tiled = false; /* useful for testing */ |
111 | u32 tiling_flags = 0; |
111 | u32 tiling_flags = 0; |
112 | int ret; |
112 | int ret; |
113 | int aligned_size, size; |
113 | int aligned_size, size; |
114 | int height = mode_cmd->height; |
114 | int height = mode_cmd->height; |
115 | u32 bpp, depth; |
115 | u32 bpp, depth; |
116 | 116 | ||
117 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); |
117 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); |
118 | 118 | ||
119 | /* need to align pitch with crtc limits */ |
119 | /* need to align pitch with crtc limits */ |
120 | mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp, |
120 | mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp, |
121 | fb_tiled) * ((bpp + 1) / 8); |
121 | fb_tiled) * ((bpp + 1) / 8); |
122 | 122 | ||
123 | if (rdev->family >= CHIP_R600) |
123 | if (rdev->family >= CHIP_R600) |
124 | height = ALIGN(mode_cmd->height, 8); |
124 | height = ALIGN(mode_cmd->height, 8); |
125 | size = mode_cmd->pitches[0] * height; |
125 | size = mode_cmd->pitches[0] * height; |
126 | aligned_size = ALIGN(size, PAGE_SIZE); |
126 | aligned_size = ALIGN(size, PAGE_SIZE); |
127 | 127 | ||
128 | rbo = rdev->stollen_vga_memory; |
128 | rbo = rdev->stollen_vga_memory; |
129 | gobj = &rbo->gem_base; |
129 | gobj = &rbo->gem_base; |
130 | mutex_lock(&rdev->gem.mutex); |
130 | mutex_lock(&rdev->gem.mutex); |
131 | list_add_tail(&rbo->list, &rdev->gem.objects); |
131 | list_add_tail(&rbo->list, &rdev->gem.objects); |
132 | mutex_unlock(&rdev->gem.mutex); |
132 | mutex_unlock(&rdev->gem.mutex); |
133 | 133 | ||
134 | rbo = gem_to_radeon_bo(gobj); |
134 | rbo = gem_to_radeon_bo(gobj); |
135 | 135 | ||
136 | if (fb_tiled) |
136 | if (fb_tiled) |
137 | tiling_flags = RADEON_TILING_MACRO; |
137 | tiling_flags = RADEON_TILING_MACRO; |
138 | 138 | ||
139 | #ifdef __BIG_ENDIAN |
139 | #ifdef __BIG_ENDIAN |
140 | switch (bpp) { |
140 | switch (bpp) { |
141 | case 32: |
141 | case 32: |
142 | tiling_flags |= RADEON_TILING_SWAP_32BIT; |
142 | tiling_flags |= RADEON_TILING_SWAP_32BIT; |
143 | break; |
143 | break; |
144 | case 16: |
144 | case 16: |
145 | tiling_flags |= RADEON_TILING_SWAP_16BIT; |
145 | tiling_flags |= RADEON_TILING_SWAP_16BIT; |
146 | default: |
146 | default: |
147 | break; |
147 | break; |
148 | } |
148 | } |
149 | #endif |
149 | #endif |
150 | 150 | ||
151 | // if (tiling_flags) { |
151 | // if (tiling_flags) { |
152 | // ret = radeon_bo_set_tiling_flags(rbo, |
152 | // ret = radeon_bo_set_tiling_flags(rbo, |
153 | // tiling_flags | RADEON_TILING_SURFACE, |
153 | // tiling_flags | RADEON_TILING_SURFACE, |
154 | // mode_cmd->pitches[0]); |
154 | // mode_cmd->pitches[0]); |
155 | // if (ret) |
155 | // if (ret) |
156 | // dev_err(rdev->dev, "FB failed to set tiling flags\n"); |
156 | // dev_err(rdev->dev, "FB failed to set tiling flags\n"); |
157 | // } |
157 | // } |
158 | 158 | ||
159 | 159 | ||
160 | ret = radeon_bo_reserve(rbo, false); |
160 | ret = radeon_bo_reserve(rbo, false); |
161 | if (unlikely(ret != 0)) |
161 | if (unlikely(ret != 0)) |
162 | goto out_unref; |
162 | goto out_unref; |
163 | /* Only 27 bit offset for legacy CRTC */ |
163 | /* Only 27 bit offset for legacy CRTC */ |
164 | ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, |
164 | ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, |
165 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, |
165 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, |
166 | NULL); |
166 | NULL); |
167 | if (ret) { |
167 | if (ret) { |
168 | radeon_bo_unreserve(rbo); |
168 | radeon_bo_unreserve(rbo); |
169 | goto out_unref; |
169 | goto out_unref; |
170 | } |
170 | } |
171 | radeon_bo_unreserve(rbo); |
171 | radeon_bo_unreserve(rbo); |
172 | if (ret) { |
172 | if (ret) { |
173 | goto out_unref; |
173 | goto out_unref; |
174 | } |
174 | } |
175 | 175 | ||
176 | *gobj_p = gobj; |
176 | *gobj_p = gobj; |
177 | return 0; |
177 | return 0; |
178 | out_unref: |
178 | out_unref: |
179 | radeonfb_destroy_pinned_object(gobj); |
179 | radeonfb_destroy_pinned_object(gobj); |
180 | *gobj_p = NULL; |
180 | *gobj_p = NULL; |
181 | return ret; |
181 | return ret; |
182 | } |
182 | } |
183 | 183 | ||
184 | static int radeonfb_create(struct drm_fb_helper *helper, |
184 | static int radeonfb_create(struct drm_fb_helper *helper, |
185 | struct drm_fb_helper_surface_size *sizes) |
185 | struct drm_fb_helper_surface_size *sizes) |
186 | { |
186 | { |
- | 187 | struct radeon_fbdev *rfbdev = |
|
187 | struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; |
188 | container_of(helper, struct radeon_fbdev, helper); |
188 | struct radeon_device *rdev = rfbdev->rdev; |
189 | struct radeon_device *rdev = rfbdev->rdev; |
189 | struct fb_info *info; |
190 | struct fb_info *info; |
190 | struct drm_framebuffer *fb = NULL; |
191 | struct drm_framebuffer *fb = NULL; |
191 | struct drm_mode_fb_cmd2 mode_cmd; |
192 | struct drm_mode_fb_cmd2 mode_cmd; |
192 | struct drm_gem_object *gobj = NULL; |
193 | struct drm_gem_object *gobj = NULL; |
193 | struct radeon_bo *rbo = NULL; |
194 | struct radeon_bo *rbo = NULL; |
194 | struct device *device = &rdev->pdev->dev; |
195 | struct device *device = &rdev->pdev->dev; |
195 | int ret; |
196 | int ret; |
196 | unsigned long tmp; |
197 | unsigned long tmp; |
197 | 198 | ||
198 | mode_cmd.width = sizes->surface_width; |
199 | mode_cmd.width = sizes->surface_width; |
199 | mode_cmd.height = sizes->surface_height; |
200 | mode_cmd.height = sizes->surface_height; |
200 | 201 | ||
201 | /* avivo can't scanout real 24bpp */ |
202 | /* avivo can't scanout real 24bpp */ |
202 | if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) |
203 | if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) |
203 | sizes->surface_bpp = 32; |
204 | sizes->surface_bpp = 32; |
204 | 205 | ||
205 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
206 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
206 | sizes->surface_depth); |
207 | sizes->surface_depth); |
207 | 208 | ||
208 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
209 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
209 | if (ret) { |
210 | if (ret) { |
210 | DRM_ERROR("failed to create fbcon object %d\n", ret); |
211 | DRM_ERROR("failed to create fbcon object %d\n", ret); |
211 | return ret; |
212 | return ret; |
212 | } |
213 | } |
213 | 214 | ||
214 | rbo = gem_to_radeon_bo(gobj); |
215 | rbo = gem_to_radeon_bo(gobj); |
215 | 216 | ||
216 | /* okay we have an object now allocate the framebuffer */ |
217 | /* okay we have an object now allocate the framebuffer */ |
217 | info = framebuffer_alloc(0, device); |
218 | info = framebuffer_alloc(0, device); |
218 | if (info == NULL) { |
219 | if (info == NULL) { |
219 | ret = -ENOMEM; |
220 | ret = -ENOMEM; |
220 | goto out_unref; |
221 | goto out_unref; |
221 | } |
222 | } |
222 | 223 | ||
223 | info->par = rfbdev; |
224 | info->par = rfbdev; |
224 | 225 | ||
225 | ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); |
226 | ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); |
226 | if (ret) { |
227 | if (ret) { |
227 | DRM_ERROR("failed to initialize framebuffer %d\n", ret); |
228 | DRM_ERROR("failed to initialize framebuffer %d\n", ret); |
228 | goto out_unref; |
229 | goto out_unref; |
229 | } |
230 | } |
230 | 231 | ||
231 | fb = &rfbdev->rfb.base; |
232 | fb = &rfbdev->rfb.base; |
232 | 233 | ||
233 | /* setup helper */ |
234 | /* setup helper */ |
234 | rfbdev->helper.fb = fb; |
235 | rfbdev->helper.fb = fb; |
235 | rfbdev->helper.fbdev = info; |
236 | rfbdev->helper.fbdev = info; |
236 | 237 | ||
237 | // memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); |
238 | // memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); |
238 | 239 | ||
239 | strcpy(info->fix.id, "radeondrmfb"); |
240 | strcpy(info->fix.id, "radeondrmfb"); |
240 | 241 | ||
241 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
242 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
242 | 243 | ||
243 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
244 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
244 | info->fbops = &radeonfb_ops; |
245 | info->fbops = &radeonfb_ops; |
245 | 246 | ||
246 | tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; |
247 | tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; |
247 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
248 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
248 | info->fix.smem_len = radeon_bo_size(rbo); |
249 | info->fix.smem_len = radeon_bo_size(rbo); |
249 | info->screen_base = rbo->kptr; |
250 | info->screen_base = rbo->kptr; |
250 | info->screen_size = radeon_bo_size(rbo); |
251 | info->screen_size = radeon_bo_size(rbo); |
251 | 252 | ||
252 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
253 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
253 | 254 | ||
254 | /* setup aperture base/size for vesafb takeover */ |
255 | /* setup aperture base/size for vesafb takeover */ |
255 | info->apertures = alloc_apertures(1); |
256 | info->apertures = alloc_apertures(1); |
256 | if (!info->apertures) { |
257 | if (!info->apertures) { |
257 | ret = -ENOMEM; |
258 | ret = -ENOMEM; |
258 | goto out_unref; |
259 | goto out_unref; |
259 | } |
260 | } |
260 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
261 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
261 | info->apertures->ranges[0].size = rdev->mc.aper_size; |
262 | info->apertures->ranges[0].size = rdev->mc.aper_size; |
262 | 263 | ||
263 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
264 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
264 | 265 | ||
265 | 266 | ||
266 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); |
267 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); |
267 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); |
268 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); |
268 | DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); |
269 | DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); |
269 | DRM_INFO("fb depth is %d\n", fb->depth); |
270 | DRM_INFO("fb depth is %d\n", fb->depth); |
270 | DRM_INFO(" pitch is %d\n", fb->pitches[0]); |
271 | DRM_INFO(" pitch is %d\n", fb->pitches[0]); |
271 | 272 | ||
272 | main_fb = fb; |
273 | main_fb = fb; |
273 | main_fb_obj = gobj; |
274 | main_fb_obj = gobj; |
274 | 275 | ||
275 | return 0; |
276 | return 0; |
276 | 277 | ||
277 | out_unref: |
278 | out_unref: |
278 | if (rbo) { |
279 | if (rbo) { |
279 | 280 | ||
280 | } |
281 | } |
281 | if (fb && ret) { |
282 | if (fb && ret) { |
282 | kfree(fb); |
283 | kfree(fb); |
283 | } |
284 | } |
284 | return ret; |
285 | return ret; |
285 | } |
286 | } |
286 | 287 | ||
287 | 288 | ||
/* Tear down the fbdev emulation state built by radeonfb_create().
 *
 * In this port the fb_info unregistration/release calls are disabled
 * (commented out), so only the framebuffer object pointer is cleared
 * and the DRM framebuffer state is cleaned up.  Always returns 0.
 */
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
	struct fb_info *info;
	struct radeon_framebuffer *rfb = &rfbdev->rfb;

	if (rfbdev->helper.fbdev) {
		/* NOTE(review): info is only consumed by the disabled
		 * calls below, so this branch is currently a no-op. */
		info = rfbdev->helper.fbdev;

//        unregister_framebuffer(info);
//        framebuffer_release(info);
	}

	if (rfb->obj) {
		/* drop our bookkeeping pointer; the BO itself (stolen
		 * VGA memory) is not unpinned or freed here */
		rfb->obj = NULL;
	}
//    drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}
308 | 309 | ||
/* drm_fb_helper callbacks: gamma passthrough to the CRTC code plus the
 * fb_probe hook that builds the console framebuffer. */
static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
	.gamma_get = radeon_crtc_fb_gamma_get,
	.fb_probe = radeonfb_create,
};
314 | 315 | ||
/* Create and register the fbdev emulation for @rdev.
 *
 * Allocates the radeon_fbdev wrapper, wires up the helper callbacks,
 * registers all connectors with the helper, disables unused outputs and
 * kicks off the initial framebuffer probe (which lands in
 * radeonfb_create()).  Returns 0 on success or a negative errno.
 */
int radeon_fbdev_init(struct radeon_device *rdev)
{
	struct radeon_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;
	ENTER();	/* trace macro from this port -- logs function entry */

	/* select 8 bpp console on RN50 or small-VRAM (<= 32MB) cards
	 * (original comment said 16MB, but the code checks 32MB) */
	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->rdev = rdev;
	rdev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
			      &radeon_fb_helper_funcs);

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
				 rdev->num_crtc,
				 RADEONFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(rdev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	LEAVE();	/* trace macro -- logs function exit */

	return 0;
}
354 | 355 | ||
355 | void radeon_fbdev_fini(struct radeon_device *rdev) |
356 | void radeon_fbdev_fini(struct radeon_device *rdev) |
356 | { |
357 | { |
357 | if (!rdev->mode_info.rfbdev) |
358 | if (!rdev->mode_info.rfbdev) |
358 | return; |
359 | return; |
359 | 360 | ||
360 | radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); |
361 | radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); |
361 | kfree(rdev->mode_info.rfbdev); |
362 | kfree(rdev->mode_info.rfbdev); |
362 | rdev->mode_info.rfbdev = NULL; |
363 | rdev->mode_info.rfbdev = NULL; |
363 | } |
364 | } |
364 | 365 | ||
365 | 366 | ||
366 | int radeon_fbdev_total_size(struct radeon_device *rdev) |
367 | int radeon_fbdev_total_size(struct radeon_device *rdev) |
367 | { |
368 | { |
368 | struct radeon_bo *robj; |
369 | struct radeon_bo *robj; |
369 | int size = 0; |
370 | int size = 0; |
370 | 371 | ||
371 | robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); |
372 | robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); |
372 | size += radeon_bo_size(robj); |
373 | size += radeon_bo_size(robj); |
373 | return size; |
374 | return size; |
374 | } |
375 | } |
375 | 376 | ||
376 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) |
377 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) |
377 | { |
378 | { |
378 | if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) |
379 | if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) |
379 | return true; |
380 | return true; |
380 | return false; |
381 | return false; |
381 | }=>><> |
382 | }=>><> |