Rev 1313 | Rev 1404 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1313 | Rev 1403 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2007 David Airlie |
2 | * Copyright © 2007 David Airlie |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
22 | * |
23 | * Authors: |
23 | * Authors: |
24 | * David Airlie |
24 | * David Airlie |
25 | */ |
25 | */ |
26 | /* |
26 | /* |
27 | * Modularization |
27 | * Modularization |
28 | */ |
28 | */ |
29 | 29 | ||
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | 32 | ||
33 | #include "drmP.h" |
33 | #include "drmP.h" |
34 | #include "drm.h" |
34 | #include "drm.h" |
35 | #include "drm_crtc.h" |
35 | #include "drm_crtc.h" |
36 | #include "drm_crtc_helper.h" |
36 | #include "drm_crtc_helper.h" |
37 | #include "radeon_drm.h" |
37 | #include "radeon_drm.h" |
38 | #include "radeon.h" |
38 | #include "radeon.h" |
39 | 39 | ||
40 | #include "drm_fb_helper.h" |
40 | #include "drm_fb_helper.h" |
41 | 41 | ||
42 | #include |
42 | #include |
43 | #include "radeon_object.h" |
43 | #include "radeon_object.h" |
44 | 44 | ||
45 | 45 | ||
46 | struct fb_info *framebuffer_alloc(size_t size, void *dev); |
46 | struct fb_info *framebuffer_alloc(size_t size, void *dev); |
47 | 47 | ||
/*
 * Per-fbdev private state, stored in fb_info->par (allocated by
 * framebuffer_alloc() in radeonfb_create()).
 */
struct radeon_fb_device {
	struct drm_fb_helper helper;    /* generic DRM fbdev helper state */
	struct radeon_framebuffer *rfb; /* the framebuffer this fbdev scans out */
	struct radeon_device *rdev;     /* owning device */
};
53 | 53 | ||
/*
 * fbdev entry points. Everything is delegated to the generic DRM fb
 * helpers; acceleration and panning hooks are disabled in this port.
 */
static struct fb_ops radeonfb_ops = {
//	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_setcolreg = drm_fb_helper_setcolreg,
//	.fb_fillrect = cfb_fillrect,
//	.fb_copyarea = cfb_copyarea,
//	.fb_imageblit = cfb_imageblit,
//	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
66 | 66 | ||
/**
 * Refresh the fb_info timing fields from the crtc's desired mode.
 *
 * Currently it is assumed that the old framebuffer is reused.
 *
 * LOCKING
 * caller should hold the mode config lock.
 *
 * Returns 0 on success, 1 when there is no fb, no fbdev info, or no
 * desired mode to apply (note: positive, not a -errno).
 */
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_display_mode *mode = crtc->desired_mode;

	fb = crtc->fb;
	if (fb == NULL) {
		return 1;
	}
	info = fb->fbdev;
	if (info == NULL) {
		return 1;
	}
	if (mode == NULL) {
		return 1;
	}
	/* Horizontal timings: margins are derived from the sync positions. */
	info->var.xres = mode->hdisplay;
	info->var.right_margin = mode->hsync_start - mode->hdisplay;
	info->var.hsync_len = mode->hsync_end - mode->hsync_start;
	info->var.left_margin = mode->htotal - mode->hsync_end;
	/* Vertical timings. */
	info->var.yres = mode->vdisplay;
	info->var.lower_margin = mode->vsync_start - mode->vdisplay;
	info->var.vsync_len = mode->vsync_end - mode->vsync_start;
	info->var.upper_margin = mode->vtotal - mode->vsync_end;
	/* Pixclock (picoseconds) computed with staged integer divides so each
	 * intermediate stays within 32 bits; do not reassociate this expression.
	 * Assumes htotal/vtotal/vrefresh are non-zero — TODO confirm callers
	 * guarantee a validated mode here. */
	info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
	/* avoid overflow */
	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;

	return 0;
}
EXPORT_SYMBOL(radeonfb_resize);
106 | 106 | ||
107 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
107 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
108 | { |
108 | { |
109 | int aligned = width; |
109 | int aligned = width; |
110 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
110 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
111 | int pitch_mask = 0; |
111 | int pitch_mask = 0; |
112 | 112 | ||
113 | switch (bpp / 8) { |
113 | switch (bpp / 8) { |
114 | case 1: |
114 | case 1: |
115 | pitch_mask = align_large ? 255 : 127; |
115 | pitch_mask = align_large ? 255 : 127; |
116 | break; |
116 | break; |
117 | case 2: |
117 | case 2: |
118 | pitch_mask = align_large ? 127 : 31; |
118 | pitch_mask = align_large ? 127 : 31; |
119 | break; |
119 | break; |
120 | case 3: |
120 | case 3: |
121 | case 4: |
121 | case 4: |
122 | pitch_mask = align_large ? 63 : 15; |
122 | pitch_mask = align_large ? 63 : 15; |
123 | break; |
123 | break; |
124 | } |
124 | } |
125 | 125 | ||
126 | aligned += pitch_mask; |
126 | aligned += pitch_mask; |
127 | aligned &= ~pitch_mask; |
127 | aligned &= ~pitch_mask; |
128 | return aligned; |
128 | return aligned; |
129 | } |
129 | } |
130 | 130 | ||
/* Gamma hooks the generic fb helper calls back into the driver with. */
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
	.gamma_get = radeon_crtc_fb_gamma_get,
};
135 | 135 | ||
/*
 * Allocate and set up the kernel framebuffer console surface.
 *
 * Creates a VRAM-backed GEM object sized for surface_width x
 * surface_height, wraps it in a drm_framebuffer, pins it, and fills in
 * an fb_info describing it.  On success *fb_p points at the new fb and
 * 0 is returned; on failure a -errno is returned and partially built
 * state is torn down (several teardown calls are disabled in this port).
 *
 * Caller context: called from drm_fb_helper_single_fb_probe() via
 * radeonfb_probe(); takes and releases ddev->struct_mutex itself.
 */
int radeonfb_create(struct drm_device *dev,
		    uint32_t fb_width, uint32_t fb_height,
		    uint32_t surface_width, uint32_t surface_height,
		    uint32_t surface_depth, uint32_t surface_bpp,
		    struct drm_framebuffer **fb_p)
{
	struct radeon_device *rdev = dev->dev_private;
	struct fb_info *info;
	struct radeon_fb_device *rfbdev;
	struct drm_framebuffer *fb = NULL;
	struct radeon_framebuffer *rfb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_object *robj = NULL;
	void *device = NULL; //&rdev->pdev->dev;
	int size, aligned_size, ret;
	u64 fb_gpuaddr;
	void *fbptr = NULL;
	unsigned long tmp;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;  /* NOTE(review): set but never consumed below */
	int crtc_count;

	mode_cmd.width = surface_width;
	mode_cmd.height = surface_height;

	/* avivo can't scanout real 24bpp */
	if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		surface_bpp = 32;

	/* The console surface is always allocated as 32bpp here, regardless
	 * of the requested surface_bpp. */
	mode_cmd.bpp = 32;
	/* need to align pitch with crtc limits */
	mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
	mode_cmd.depth = surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	aligned_size = ALIGN(size, PAGE_SIZE);

	/* Back the framebuffer with a VRAM GEM object. */
	ret = radeon_gem_fb_object_create(rdev, aligned_size, 0,
			RADEON_GEM_DOMAIN_VRAM,
			false, 0,
			false, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
		       surface_width, surface_height);
		ret = -ENOMEM;
		goto out;
	}
	robj = gobj->driver_private;

	mutex_lock(&rdev->ddev->struct_mutex);
	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
	if (fb == NULL) {
		DRM_ERROR("failed to allocate fb.\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	/* Pin so the scanout address stays fixed. */
	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
	if (ret) {
		printk(KERN_ERR "failed to pin framebuffer\n");
		ret = -ENOMEM;
		goto out_unref;
	}

	list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);

	*fb_p = fb;
	rfb = to_radeon_framebuffer(fb);
	rdev->fbdev_rfb = rfb;
	rdev->fbdev_robj = robj;

	/* fb_info with a radeon_fb_device hanging off info->par. */
	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	rdev->fbdev_info = info;
	rfbdev = info->par;
	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
	rfbdev->helper.dev = dev;
	if (rdev->flags & RADEON_SINGLE_CRTC)
		crtc_count = 1;
	else
		crtc_count = 2;
	ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
					    RADEONFB_CONN_LIMIT);
	if (ret)
		goto out_unref;

//	ret = radeon_object_kmap(robj, &fbptr);
//	if (ret) {
//		goto out_unref;
//	}

	/* Port-specific: CPU mapping of VRAM is a fixed linear-framebuffer
	 * window instead of a kmap of the object — TODO confirm this matches
	 * the platform's aperture mapping. */
	fbptr = (void*)0xFE000000; // LFB_BASE

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);

	info->flags = FBINFO_DEFAULT;
	info->fbops = &radeonfb_ops;

	/* Physical scanout address = aperture base + offset within VRAM. */
	tmp = fb_gpuaddr - rdev->mc.vram_location;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = size;
	info->screen_base = fbptr;
	info->screen_size = size;

	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = rdev->ddev->mode_config.fb_base;
	info->aperture_size = rdev->mc.real_vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
//	info->pixmap.size = 64*1024;
//	info->pixmap.buf_align = 8;
//	info->pixmap.access_align = 32;
//	info->pixmap.flags = FB_PIXMAP_SYSTEM;
//	info->pixmap.scan_align = 1;
	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}
	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)size);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitch);

	/* NOTE(review): %x with a pointer argument is only correct on 32-bit
	 * targets; %p would be portable. */
	dbgprintf("fb = %x\n", fb);

	fb->fbdev = info;
	rfbdev->rfb = rfb;
	rfbdev->rdev = rdev;

	mutex_unlock(&rdev->ddev->struct_mutex);
	return 0;

out_unref:
	if (robj) {
//		radeon_object_kunmap(robj);
	}
	if (fb && ret) {
		list_del(&fb->filp_head);
//		drm_gem_object_unreference(gobj);
//		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
//	drm_gem_object_unreference(gobj);
	mutex_unlock(&rdev->ddev->struct_mutex);
out:
	return ret;
}
294 | 294 | ||
/*
 * fbdev probe entry: hand off to the generic single-fb helper, which
 * will call back into radeonfb_create() with a 32bpp preference.
 */
int radeonfb_probe(struct drm_device *dev)
{
	return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
}
299 | 299 | ||
300 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
300 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
301 | { |
301 | { |
302 | struct fb_info *info; |
302 | struct fb_info *info; |
303 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
303 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
304 | struct radeon_object *robj; |
304 | struct radeon_object *robj; |
305 | 305 | ||
306 | if (!fb) { |
306 | if (!fb) { |
307 | return -EINVAL; |
307 | return -EINVAL; |
308 | } |
308 | } |
309 | info = fb->fbdev; |
309 | info = fb->fbdev; |
310 | if (info) { |
310 | if (info) { |
311 | struct radeon_fb_device *rfbdev = info->par; |
311 | struct radeon_fb_device *rfbdev = info->par; |
312 | robj = rfb->obj->driver_private; |
312 | robj = rfb->obj->driver_private; |
313 | // unregister_framebuffer(info); |
313 | // unregister_framebuffer(info); |
314 | // radeon_object_kunmap(robj); |
314 | // radeon_object_kunmap(robj); |
315 | // radeon_object_unpin(robj); |
315 | // radeon_object_unpin(robj); |
316 | // framebuffer_release(info); |
316 | // framebuffer_release(info); |
317 | } |
317 | } |
318 | 318 | ||
319 | printk(KERN_INFO "unregistered panic notifier\n"); |
319 | printk(KERN_INFO "unregistered panic notifier\n"); |
320 | 320 | ||
321 | return 0; |
321 | return 0; |
322 | } |
322 | } |
323 | EXPORT_SYMBOL(radeonfb_remove); |
323 | EXPORT_SYMBOL(radeonfb_remove); |
324 | 324 | ||
325 | 325 | ||
326 | /** |
326 | /** |
327 | * Allocate a GEM object of the specified size with shmfs backing store |
327 | * Allocate a GEM object of the specified size with shmfs backing store |
328 | */ |
328 | */ |
329 | struct drm_gem_object * |
329 | struct drm_gem_object * |
330 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
330 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
331 | { |
331 | { |
332 | struct drm_gem_object *obj; |
332 | struct drm_gem_object *obj; |
333 | 333 | ||
334 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
334 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
335 | 335 | ||
336 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
336 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
337 | 337 | ||
338 | obj->dev = dev; |
338 | obj->dev = dev; |
339 | // obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
339 | // obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
340 | // if (IS_ERR(obj->filp)) { |
340 | // if (IS_ERR(obj->filp)) { |
341 | // kfree(obj); |
341 | // kfree(obj); |
342 | // return NULL; |
342 | // return NULL; |
343 | // } |
343 | // } |
344 | 344 | ||
345 | // kref_init(&obj->refcount); |
345 | // kref_init(&obj->refcount); |
346 | // kref_init(&obj->handlecount); |
346 | // kref_init(&obj->handlecount); |
347 | obj->size = size; |
347 | obj->size = size; |
348 | 348 | ||
349 | // if (dev->driver->gem_init_object != NULL && |
349 | // if (dev->driver->gem_init_object != NULL && |
350 | // dev->driver->gem_init_object(obj) != 0) { |
350 | // dev->driver->gem_init_object(obj) != 0) { |
351 | // fput(obj->filp); |
351 | // fput(obj->filp); |
352 | // kfree(obj); |
352 | // kfree(obj); |
353 | // return NULL; |
353 | // return NULL; |
354 | // } |
354 | // } |
355 | // atomic_inc(&dev->object_count); |
355 | // atomic_inc(&dev->object_count); |
356 | // atomic_add(obj->size, &dev->object_memory); |
356 | // atomic_add(obj->size, &dev->object_memory); |
357 | return obj; |
357 | return obj; |
358 | } |
358 | } |
359 | 359 | ||
360 | 360 | ||
361 | int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
361 | int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
362 | int alignment, int initial_domain, |
362 | int alignment, int initial_domain, |
363 | bool discardable, bool kernel, |
363 | bool discardable, bool kernel, |
364 | bool interruptible, |
364 | bool interruptible, |
365 | struct drm_gem_object **obj) |
365 | struct drm_gem_object **obj) |
366 | { |
366 | { |
367 | struct drm_gem_object *gobj; |
367 | struct drm_gem_object *gobj; |
368 | struct radeon_object *robj; |
368 | struct radeon_object *robj; |
369 | 369 | ||
370 | *obj = NULL; |
370 | *obj = NULL; |
371 | gobj = drm_gem_object_alloc(rdev->ddev, size); |
371 | gobj = drm_gem_object_alloc(rdev->ddev, size); |
372 | if (!gobj) { |
372 | if (!gobj) { |
373 | return -ENOMEM; |
373 | return -ENOMEM; |
374 | } |
374 | } |
375 | /* At least align on page size */ |
375 | /* At least align on page size */ |
376 | if (alignment < PAGE_SIZE) { |
376 | if (alignment < PAGE_SIZE) { |
377 | alignment = PAGE_SIZE; |
377 | alignment = PAGE_SIZE; |
378 | } |
378 | } |
379 | 379 | ||
380 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); |
380 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); |
381 | if (!robj) { |
381 | if (!robj) { |
382 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
382 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
383 | size, initial_domain, alignment); |
383 | size, initial_domain, alignment); |
384 | // mutex_lock(&rdev->ddev->struct_mutex); |
384 | // mutex_lock(&rdev->ddev->struct_mutex); |
385 | // drm_gem_object_unreference(gobj); |
385 | // drm_gem_object_unreference(gobj); |
386 | // mutex_unlock(&rdev->ddev->struct_mutex); |
386 | // mutex_unlock(&rdev->ddev->struct_mutex); |
387 | return -ENOMEM;; |
387 | return -ENOMEM;; |
388 | } |
388 | } |
389 | robj->rdev = rdev; |
389 | robj->rdev = rdev; |
390 | robj->gobj = gobj; |
390 | robj->gobj = gobj; |
391 | INIT_LIST_HEAD(&robj->list); |
391 | INIT_LIST_HEAD(&robj->list); |
392 | 392 | ||
393 | robj->flags = TTM_PL_FLAG_VRAM; |
393 | robj->flags = TTM_PL_FLAG_VRAM; |
394 | 394 | ||
395 | struct drm_mm_node *vm_node; |
395 | struct drm_mm_node *vm_node; |
396 | 396 | ||
397 | vm_node = kzalloc(sizeof(*vm_node),0); |
397 | vm_node = kzalloc(sizeof(*vm_node),0); |
398 | 398 | ||
399 | vm_node->free = 0; |
399 | vm_node->free = 0; |
400 | vm_node->size = 0xC00000 >> 12; |
400 | vm_node->size = 0xC00000 >> 12; |
401 | vm_node->start = 0; |
401 | vm_node->start = 0; |
402 | vm_node->mm = NULL; |
402 | vm_node->mm = NULL; |
403 | 403 | ||
404 | robj->mm_node = vm_node; |
404 | robj->mm_node = vm_node; |
405 | 405 | ||
406 | robj->vm_addr = ((uint32_t)robj->mm_node->start); |
406 | robj->vm_addr = ((uint32_t)robj->mm_node->start); |
407 | 407 | ||
408 | gobj->driver_private = robj; |
408 | gobj->driver_private = robj; |
409 | *obj = gobj; |
409 | *obj = gobj; |
410 | return 0; |
410 | return 0; |
411 | } |
411 | } |
412 | 412 | ||
413 | 413 | ||
414 | struct fb_info *framebuffer_alloc(size_t size, void *dev) |
414 | struct fb_info *framebuffer_alloc(size_t size, void *dev) |
415 | { |
415 | { |
416 | #define BYTES_PER_LONG (BITS_PER_LONG/8) |
416 | #define BYTES_PER_LONG (BITS_PER_LONG/8) |
417 | #define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG)) |
417 | #define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG)) |
418 | int fb_info_size = sizeof(struct fb_info); |
418 | int fb_info_size = sizeof(struct fb_info); |
419 | struct fb_info *info; |
419 | struct fb_info *info; |
420 | char *p; |
420 | char *p; |
421 | 421 | ||
422 | if (size) |
422 | if (size) |
423 | fb_info_size += PADDING; |
423 | fb_info_size += PADDING; |
424 | 424 | ||
425 | p = kzalloc(fb_info_size + size, GFP_KERNEL); |
425 | p = kzalloc(fb_info_size + size, GFP_KERNEL); |
426 | 426 | ||
427 | if (!p) |
427 | if (!p) |
428 | return NULL; |
428 | return NULL; |
429 | 429 | ||
430 | info = (struct fb_info *) p; |
430 | info = (struct fb_info *) p; |
431 | 431 | ||
432 | if (size) |
432 | if (size) |
433 | info->par = p + fb_info_size; |
433 | info->par = p + fb_info_size; |
434 | 434 | ||
435 | return info; |
435 | return info; |
436 | #undef PADDING |
436 | #undef PADDING |
437 | #undef BYTES_PER_LONG |
437 | #undef BYTES_PER_LONG |
438 | }> |
438 | }> |