Subversion Repositories Kolibri OS

Rev

Rev 4359 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4359 Rev 4367
1
#include 
1
#include 
2
#include 
2
#include 
3
#include 
3
#include 
4
 
4
 
5
#include 
5
#include 
6
//#include "xf86.h"
6
//#include "xf86.h"
7
#include "uxa/intel.h"
7
#include "uxa/intel.h"
8
#include "i830_reg.h"
8
#include "i830_reg.h"
9
#include "i965_reg.h"
9
#include "i965_reg.h"
10
 
10
 
11
/* bring in brw structs */
11
/* bring in brw structs */
12
#include "brw_defines.h"
12
#include "brw_defines.h"
13
#include "brw_structs.h"
13
#include "brw_structs.h"
14
 
14
 
15
#include "i915_pciids.h"
15
#include "i915_pciids.h"
16
#include 
16
#include 
17
#include 
17
#include 
18
 
18
 
19
#define PictOpClear             0
19
#define PictOpClear             0
20
#define PictOpSrc               1
20
#define PictOpSrc               1
21
#define PictOpDst               2
21
#define PictOpDst               2
22
#define PictOpOver              3
22
#define PictOpOver              3
23
#define PictOpOverReverse       4
23
#define PictOpOverReverse       4
24
#define PictOpIn                5
24
#define PictOpIn                5
25
#define PictOpInReverse         6
25
#define PictOpInReverse         6
26
#define PictOpOut               7
26
#define PictOpOut               7
27
#define PictOpOutReverse        8
27
#define PictOpOutReverse        8
28
#define PictOpAtop              9
28
#define PictOpAtop              9
29
#define PictOpAtopReverse       10
29
#define PictOpAtopReverse       10
30
#define PictOpXor               11
30
#define PictOpXor               11
31
#define PictOpAdd               12
31
#define PictOpAdd               12
32
#define PictOpSaturate          13
32
#define PictOpSaturate          13
33
#define PictOpMaximum           13
33
#define PictOpMaximum           13
34
 
34
 
35
static int    tls_mask;
35
static int    tls_mask;
36
 
36
 
37
intel_screen_private *driverPrivate;
37
intel_screen_private *driverPrivate;
38
__LOCK_INIT_RECURSIVE(, __uxa_lock);
38
__LOCK_INIT_RECURSIVE(, __uxa_lock);
39
 
39
 
40
#define DBG printf
40
#define DBG printf
41
 
41
 
42
typedef struct
42
typedef struct
43
{
43
{
44
    struct list     entry;
44
    struct list     entry;
45
    uint32_t        width;
45
    uint32_t        width;
46
    uint32_t        height;
46
    uint32_t        height;
47
    void           *data;
47
    void           *data;
48
    uint32_t        pitch;
48
    uint32_t        pitch;
49
    drm_intel_bo   *bo;
49
    drm_intel_bo   *bo;
50
    uint32_t        bo_size;
50
    uint32_t        bo_size;
51
    uint32_t        flags;
51
    uint32_t        flags;
52
}surface_t;
52
}surface_t;
53
 
53
 
54
#define to_surface(x) (surface_t*)((x)->handle)
54
#define to_surface(x) (surface_t*)((x)->handle)
55
 
55
 
56
struct _Pixmap fb_pixmap;
56
struct _Pixmap fb_pixmap;
57
 
57
 
58
struct list sf_list;
58
struct list sf_list;
59
 
59
 
60
int uxa_update_fb(struct intel_screen_private *intel);
60
int uxa_update_fb(struct intel_screen_private *intel);
61
 
61
 
62
int sna_create_mask()
-
 
63
{
-
 
64
    return 0;
-
 
65
};
-
 
66
 
-
 
67
/* Finish a composite operation: push any buffered vertex data into the
 * current batch. 'dest' is unused; the signature matches the UXA
 * done_composite hook. */
static void i830_done_composite(PixmapPtr dest)
{
	intel_screen_private *intel = intel_get_screen_private();

	if (intel->vertex_flush != NULL)
		intel->vertex_flush(intel);
}
76
 
71
 
77
/*
 * Attach an existing GEM buffer object (by kernel handle) to 'bitmap'.
 *
 * If a surface_t wrapping the same handle already exists in sf_list it is
 * reused; otherwise a new wrapper is allocated and bound.  On success
 * bitmap->handle holds the surface_t pointer and 0 is returned; on failure
 * bitmap->handle is 0 and -1 is returned.
 *
 * NOTE(review): newly created surfaces are never inserted into sf_list
 * here, so the reuse scan only finds surfaces registered elsewhere —
 * confirm that is intended.
 */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
	struct intel_screen_private *intel = intel_get_screen_private();
	drm_intel_bo *bo;
    surface_t    *sf;
    unsigned int size;

    bitmap->handle = 0;

    /* Fast path: reuse an existing wrapper for this GEM handle. */
    __lock_acquire_recursive(__uxa_lock);
    list_for_each_entry(sf, &sf_list, entry)
    {
        if (sf->bo->handle == handle)
        {
            bitmap->handle = (uint32_t)sf;
            break;
        }
    }
    __lock_release_recursive(__uxa_lock);

    if(bitmap->handle)
        return 0;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    size = bitmap->pitch * bitmap->height;

    bo = bo_create_from_gem_handle(intel->bufmgr, size, handle);
    if(bo == NULL)          /* was unchecked: a NULL bo would crash later */
        goto err_2;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bitmap->pitch;
    sf->bo      = bo;
    sf->bo_size = size;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    free(sf);
err_1:
    return -1;
};
124
 
119
 
125
/* Compatibility wrapper around sna_bitmap_from_handle(); the result code
 * is deliberately discarded because this entry point returns void. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    (void)sna_bitmap_from_handle(bitmap, handle);
};
129
 
124
 
130
 
125
 
131
/*
 * Composite a bitmap's buffer object onto the screen framebuffer with the
 * i965 render engine.
 *
 * 'scale' is currently unused.  (dst_x,dst_y) are window-relative; the
 * current window origin is read from the process-info block and added so
 * the blit lands at the right screen position.  Always returns 0.
 */
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct _Pixmap pixSrc, pixMask;
    struct intel_pixmap privSrc;
    struct _Picture pictSrc, pictDst;
	struct intel_screen_private *intel = intel_get_screen_private();

    surface_t *sf = to_surface(bitmap);

    int winx, winy;

    /* Window origin: offsets 34/38 of the process info block appear to
     * hold the window x/y position — TODO confirm against the KolibriOS
     * sysfn 9 layout. */
    char proc_info[1024];
    get_proc_info(proc_info);
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&pixSrc,  0, sizeof(pixSrc));
    memset(&pixMask, 0, sizeof(pixMask));
    /* BUG FIX: previously zeroed sizeof(pixSrc) bytes of privSrc; use the
     * correct object size so privSrc is fully (and only) initialized. */
    memset(&privSrc, 0, sizeof(privSrc));

    memset(&pictSrc, 0, sizeof(pictSrc));
    memset(&pictDst, 0, sizeof(pictDst));

    /* Describe the source surface for the render engine. */
    pixSrc.drawable.bitsPerPixel = 32;
    pixSrc.drawable.width        = sf->width;
    pixSrc.drawable.height       = sf->height;
    pixSrc.devKind               = sf->pitch;
    pixSrc.private               = &privSrc;

    list_init(&privSrc.batch);
    privSrc.bo = sf->bo;
    privSrc.stride = sf->pitch;
    privSrc.tiling = I915_TILING_X;

    pictSrc.format     = PICT_x8r8g8b8;
    pictSrc.filter     = PictFilterNearest;
    pictSrc.repeatType = RepeatNone;

    pictDst.format     = PICT_a8r8g8b8;
    pictDst.filter     = PictFilterNearest;
    pictDst.repeatType = RepeatNone;

    /* Refresh fb_pixmap in case the mode changed since the last blit. */
    uxa_update_fb(intel);

    i965_prepare_composite(PictOpSrc, &pictSrc, NULL, &pictDst,
                           &pixSrc, NULL, &fb_pixmap);

    i965_composite(&fb_pixmap, src_x, src_y, 0, 0,
                    dst_x+winx, dst_y+winy, w, h);

    i830_done_composite(&fb_pixmap);

	intel_batch_submit();

    return 0;
};
192
 
187
 
193
 
188
 
194
int uxa_init_fb(struct intel_screen_private *intel)
189
int uxa_init_fb(struct intel_screen_private *intel)
195
{
190
{
196
    struct drm_i915_fb_info fb;
191
    struct drm_i915_fb_info fb;
197
    static struct intel_pixmap ipix;
192
    static struct intel_pixmap ipix;
198
    int ret;
193
    int ret;
199
 
194
 
200
    memset(&fb, 0, sizeof(fb));
195
    memset(&fb, 0, sizeof(fb));
201
 
196
 
202
    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
197
    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
203
	if( ret != 0 )
198
	if( ret != 0 )
204
	    return ret;
199
	    return ret;
205
 
200
 
206
    intel->front_buffer = intel_bo_gem_create_from_name(intel->bufmgr,"frontbuffer", fb.name);
201
    intel->front_buffer = intel_bo_gem_create_from_name(intel->bufmgr,"frontbuffer", fb.name);
207
    if(intel->front_buffer == NULL)
202
    if(intel->front_buffer == NULL)
208
        return -1;
203
        return -1;
209
 
204
 
210
    ipix.bo = intel->front_buffer;
205
    ipix.bo = intel->front_buffer;
211
    list_init(&ipix.batch);
206
    list_init(&ipix.batch);
212
    ipix.stride = fb.pitch;
207
    ipix.stride = fb.pitch;
213
    ipix.tiling = fb.tiling;
208
    ipix.tiling = fb.tiling;
214
    ipix.pinned = PIN_SCANOUT;
209
    ipix.pinned = PIN_SCANOUT;
215
 
210
 
216
    printf("create frontbuffer name %d bo %x\n", fb.name, ipix.bo);
211
    printf("create frontbuffer name %d bo %x\n", fb.name, ipix.bo);
217
    printf("size %d, offset %d handle %d\n",ipix.bo->size, ipix.bo->offset, ipix.bo->handle);
212
    printf("size %d, offset %d handle %d\n",ipix.bo->size, ipix.bo->offset, ipix.bo->handle);
218
 
213
 
219
    fb_pixmap.drawable.bitsPerPixel = 32;
214
    fb_pixmap.drawable.bitsPerPixel = 32;
220
    fb_pixmap.drawable.width  = fb.width;
215
    fb_pixmap.drawable.width  = fb.width;
221
    fb_pixmap.drawable.height = fb.height;
216
    fb_pixmap.drawable.height = fb.height;
222
    fb_pixmap.devKind = fb.pitch;
217
    fb_pixmap.devKind = fb.pitch;
223
    fb_pixmap.private = &ipix;
218
    fb_pixmap.private = &ipix;
224
 
219
 
225
    return 0;
220
    return 0;
226
}
221
}
227
 
222
 
228
int uxa_update_fb(struct intel_screen_private *intel)
223
int uxa_update_fb(struct intel_screen_private *intel)
229
{
224
{
230
    struct drm_i915_fb_info fb;
225
    struct drm_i915_fb_info fb;
231
    struct intel_pixmap *ipix;
226
    struct intel_pixmap *ipix;
232
    size_t size;
227
    size_t size;
233
    int ret;
228
    int ret;
234
 
229
 
235
//    DBG("%s\n", __FUNCTION__);
230
//    DBG("%s\n", __FUNCTION__);
236
 
231
 
237
    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
232
    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
238
	if( ret != 0 )
233
	if( ret != 0 )
239
	    return ret;
234
	    return ret;
240
 
235
 
241
    ipix = (struct intel_pixmap*)fb_pixmap.private;
236
    ipix = (struct intel_pixmap*)fb_pixmap.private;
242
 
237
 
243
    list_init(&ipix->batch);
238
    list_init(&ipix->batch);
244
    ipix->stride = fb.pitch;
239
    ipix->stride = fb.pitch;
245
    ipix->tiling = fb.tiling;
240
    ipix->tiling = fb.tiling;
246
 
241
 
247
    fb_pixmap.drawable.width  = fb.width;
242
    fb_pixmap.drawable.width  = fb.width;
248
    fb_pixmap.drawable.height = fb.height;
243
    fb_pixmap.drawable.height = fb.height;
249
    fb_pixmap.devKind = fb.pitch;
244
    fb_pixmap.devKind = fb.pitch;
250
 
245
 
251
    return 0;
246
    return 0;
252
};
247
};
253
 
248
 
254
int uxa_init(uint32_t service)
249
int uxa_init(uint32_t service)
255
{
250
{
256
    static struct pci_device device;
251
    static struct pci_device device;
257
	struct intel_screen_private *intel = intel_get_screen_private();
252
	struct intel_screen_private *intel = intel_get_screen_private();
258
 
253
 
259
    ioctl_t   io;
254
    ioctl_t   io;
260
    int caps = 0;
255
    int caps = 0;
261
 
256
 
262
    DBG("%s\n", __FUNCTION__);
257
    DBG("%s\n", __FUNCTION__);
263
 
258
 
264
    __lock_acquire_recursive(__uxa_lock);
259
    __lock_acquire_recursive(__uxa_lock);
265
 
260
 
266
    if(intel)
261
    if(intel)
267
        goto done;
262
        goto done;
268
 
263
 
269
    io.handle   = service;
264
    io.handle   = service;
270
    io.io_code  = SRV_GET_PCI_INFO;
265
    io.io_code  = SRV_GET_PCI_INFO;
271
    io.input    = &device;
266
    io.input    = &device;
272
    io.inp_size = sizeof(device);
267
    io.inp_size = sizeof(device);
273
    io.output   = NULL;
268
    io.output   = NULL;
274
    io.out_size = 0;
269
    io.out_size = 0;
275
 
270
 
276
    if (call_service(&io)!=0)
271
    if (call_service(&io)!=0)
277
        goto err1;
272
        goto err1;
278
 
273
 
279
    intel = (intel_screen_private*)malloc(sizeof(*intel));
274
    intel = (intel_screen_private*)malloc(sizeof(*intel));
280
    if (intel == NULL)
275
    if (intel == NULL)
281
        goto err1;
276
        goto err1;
282
 
277
 
283
    list_init(&sf_list);
278
    list_init(&sf_list);
284
 
279
 
285
    driverPrivate = intel;
280
    driverPrivate = intel;
286
    memset(intel, 0, sizeof(*intel));
281
    memset(intel, 0, sizeof(*intel));
287
 
282
 
288
//    sna->cpu_features = sna_cpu_detect();
283
//    sna->cpu_features = sna_cpu_detect();
289
 
284
 
290
    intel->PciInfo = &device;
285
    intel->PciInfo = &device;
291
  	intel->info = intel_detect_chipset(intel->PciInfo);
286
  	intel->info = intel_detect_chipset(intel->PciInfo);
292
    intel->scrn = service;
287
    intel->scrn = service;
293
 
288
 
294
    intel->bufmgr = intel_bufmgr_gem_init(service, 8192);
289
    intel->bufmgr = intel_bufmgr_gem_init(service, 8192);
295
    if(intel->bufmgr == NULL)
290
    if(intel->bufmgr == NULL)
296
    {
291
    {
297
		printf("Memory manager initialization failed\n");
292
		printf("Memory manager initialization failed\n");
298
		goto err1;
293
		goto err1;
299
    };
294
    };
300
 
295
 
301
	list_init(&intel->batch_pixmaps);
296
	list_init(&intel->batch_pixmaps);
302
 
297
 
303
	if ((INTEL_INFO(intel)->gen == 060)) {
298
	if ((INTEL_INFO(intel)->gen == 060)) {
304
		intel->wa_scratch_bo =
299
		intel->wa_scratch_bo =
305
			drm_intel_bo_alloc(intel->bufmgr, "wa scratch",
300
			drm_intel_bo_alloc(intel->bufmgr, "wa scratch",
306
					   4096, 4096);
301
					   4096, 4096);
307
	}
302
	}
308
 
303
 
309
    if( uxa_init_fb(intel) != 0)
304
    if( uxa_init_fb(intel) != 0)
310
        goto err1;
305
        goto err1;
311
 
306
 
312
	intel_batch_init();
307
	intel_batch_init();
313
 
308
 
314
	if (INTEL_INFO(intel)->gen >= 040)
309
	if (INTEL_INFO(intel)->gen >= 040)
315
		gen4_render_state_init();
310
		gen4_render_state_init();
316
 
311
 
317
	if (!intel_uxa_init()) {
312
	if (!intel_uxa_init()) {
318
		printf("Hardware acceleration initialization failed\n");
313
		printf("Hardware acceleration initialization failed\n");
319
		goto err1;
314
		goto err1;
320
	}
315
	}
321
 
316
 
322
    tls_mask = tls_alloc();
317
    tls_mask = tls_alloc();
323
 
318
 
324
//    printf("tls mask %x\n", tls_mask);
319
//    printf("tls mask %x\n", tls_mask);
325
 
320
 
326
done:
321
done:
327
//    caps = sna_device->render.caps;
322
//    caps = sna_device->render.caps;
328
 
323
 
329
err1:
324
err1:
330
    __lock_release_recursive(__uxa_lock);
325
    __lock_release_recursive(__uxa_lock);
331
 
326
 
332
    LEAVE();
327
    LEAVE();
333
    return caps;
328
    return caps;
334
}
329
}
335
 
330
 
336
 
331
 
337
 
332
 
338
static void
333
static void
339
gen6_context_switch(intel_screen_private *intel,
334
gen6_context_switch(intel_screen_private *intel,
340
		    int new_mode)
335
		    int new_mode)
341
{
336
{
342
	intel_batch_submit(intel->scrn);
337
	intel_batch_submit(intel->scrn);
343
}
338
}
344
 
339
 
345
static void
340
static void
346
gen5_context_switch(intel_screen_private *intel,
341
gen5_context_switch(intel_screen_private *intel,
347
		    int new_mode)
342
		    int new_mode)
348
{
343
{
349
	/* Ironlake has a limitation that a 3D or Media command can't
344
	/* Ironlake has a limitation that a 3D or Media command can't
350
	 * be the first command after a BLT, unless it's
345
	 * be the first command after a BLT, unless it's
351
	 * non-pipelined.  Instead of trying to track it and emit a
346
	 * non-pipelined.  Instead of trying to track it and emit a
352
	 * command at the right time, we just emit a dummy
347
	 * command at the right time, we just emit a dummy
353
	 * non-pipelined 3D instruction after each blit.
348
	 * non-pipelined 3D instruction after each blit.
354
	 */
349
	 */
355
 
350
 
356
	if (new_mode == I915_EXEC_BLT) {
351
	if (new_mode == I915_EXEC_BLT) {
357
		OUT_BATCH(MI_FLUSH |
352
		OUT_BATCH(MI_FLUSH |
358
			  MI_STATE_INSTRUCTION_CACHE_FLUSH |
353
			  MI_STATE_INSTRUCTION_CACHE_FLUSH |
359
			  MI_INHIBIT_RENDER_CACHE_FLUSH);
354
			  MI_INHIBIT_RENDER_CACHE_FLUSH);
360
	} else {
355
	} else {
361
		OUT_BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
356
		OUT_BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
362
		OUT_BATCH(0);
357
		OUT_BATCH(0);
363
	}
358
	}
364
}
359
}
365
 
360
 
366
static void
361
static void
367
gen4_context_switch(intel_screen_private *intel,
362
gen4_context_switch(intel_screen_private *intel,
368
		    int new_mode)
363
		    int new_mode)
369
{
364
{
370
	if (new_mode == I915_EXEC_BLT) {
365
	if (new_mode == I915_EXEC_BLT) {
371
		OUT_BATCH(MI_FLUSH |
366
		OUT_BATCH(MI_FLUSH |
372
			  MI_STATE_INSTRUCTION_CACHE_FLUSH |
367
			  MI_STATE_INSTRUCTION_CACHE_FLUSH |
373
			  MI_INHIBIT_RENDER_CACHE_FLUSH);
368
			  MI_INHIBIT_RENDER_CACHE_FLUSH);
374
	}
369
	}
375
}
370
}
376
 
371
 
377
static void
372
static void
378
intel_limits_init(intel_screen_private *intel)
373
intel_limits_init(intel_screen_private *intel)
379
{
374
{
380
	/* Limits are described in the BLT engine chapter under Graphics Data Size
375
	/* Limits are described in the BLT engine chapter under Graphics Data Size
381
	 * Limitations, and the descriptions of SURFACE_STATE, 3DSTATE_BUFFER_INFO,
376
	 * Limitations, and the descriptions of SURFACE_STATE, 3DSTATE_BUFFER_INFO,
382
	 * 3DSTATE_DRAWING_RECTANGLE, 3DSTATE_MAP_INFO, and 3DSTATE_MAP_INFO.
377
	 * 3DSTATE_DRAWING_RECTANGLE, 3DSTATE_MAP_INFO, and 3DSTATE_MAP_INFO.
383
	 *
378
	 *
384
	 * i845 through i965 limits 2D rendering to 65536 lines and pitch of 32768.
379
	 * i845 through i965 limits 2D rendering to 65536 lines and pitch of 32768.
385
	 *
380
	 *
386
	 * i965 limits 3D surface to (2*element size)-aligned offset if un-tiled.
381
	 * i965 limits 3D surface to (2*element size)-aligned offset if un-tiled.
387
	 * i965 limits 3D surface to 4kB-aligned offset if tiled.
382
	 * i965 limits 3D surface to 4kB-aligned offset if tiled.
388
	 * i965 limits 3D surfaces to w,h of ?,8192.
383
	 * i965 limits 3D surfaces to w,h of ?,8192.
389
	 * i965 limits 3D surface to pitch of 1B - 128kB.
384
	 * i965 limits 3D surface to pitch of 1B - 128kB.
390
	 * i965 limits 3D surface pitch alignment to 1 or 2 times the element size.
385
	 * i965 limits 3D surface pitch alignment to 1 or 2 times the element size.
391
	 * i965 limits 3D surface pitch alignment to 512B if tiled.
386
	 * i965 limits 3D surface pitch alignment to 512B if tiled.
392
	 * i965 limits 3D destination drawing rect to w,h of 8192,8192.
387
	 * i965 limits 3D destination drawing rect to w,h of 8192,8192.
393
	 *
388
	 *
394
	 * i915 limits 3D textures to 4B-aligned offset if un-tiled.
389
	 * i915 limits 3D textures to 4B-aligned offset if un-tiled.
395
	 * i915 limits 3D textures to ~4kB-aligned offset if tiled.
390
	 * i915 limits 3D textures to ~4kB-aligned offset if tiled.
396
	 * i915 limits 3D textures to width,height of 2048,2048.
391
	 * i915 limits 3D textures to width,height of 2048,2048.
397
	 * i915 limits 3D textures to pitch of 16B - 8kB, in dwords.
392
	 * i915 limits 3D textures to pitch of 16B - 8kB, in dwords.
398
	 * i915 limits 3D destination to ~4kB-aligned offset if tiled.
393
	 * i915 limits 3D destination to ~4kB-aligned offset if tiled.
399
	 * i915 limits 3D destination to pitch of 16B - 8kB, in dwords, if un-tiled.
394
	 * i915 limits 3D destination to pitch of 16B - 8kB, in dwords, if un-tiled.
400
	 * i915 limits 3D destination to pitch 64B-aligned if used with depth.
395
	 * i915 limits 3D destination to pitch 64B-aligned if used with depth.
401
	 * i915 limits 3D destination to pitch of 512B - 8kB, in tiles, if tiled.
396
	 * i915 limits 3D destination to pitch of 512B - 8kB, in tiles, if tiled.
402
	 * i915 limits 3D destination to POT aligned pitch if tiled.
397
	 * i915 limits 3D destination to POT aligned pitch if tiled.
403
	 * i915 limits 3D destination drawing rect to w,h of 2048,2048.
398
	 * i915 limits 3D destination drawing rect to w,h of 2048,2048.
404
	 *
399
	 *
405
	 * i845 limits 3D textures to 4B-aligned offset if un-tiled.
400
	 * i845 limits 3D textures to 4B-aligned offset if un-tiled.
406
	 * i845 limits 3D textures to ~4kB-aligned offset if tiled.
401
	 * i845 limits 3D textures to ~4kB-aligned offset if tiled.
407
	 * i845 limits 3D textures to width,height of 2048,2048.
402
	 * i845 limits 3D textures to width,height of 2048,2048.
408
	 * i845 limits 3D textures to pitch of 4B - 8kB, in dwords.
403
	 * i845 limits 3D textures to pitch of 4B - 8kB, in dwords.
409
	 * i845 limits 3D destination to 4B-aligned offset if un-tiled.
404
	 * i845 limits 3D destination to 4B-aligned offset if un-tiled.
410
	 * i845 limits 3D destination to ~4kB-aligned offset if tiled.
405
	 * i845 limits 3D destination to ~4kB-aligned offset if tiled.
411
	 * i845 limits 3D destination to pitch of 8B - 8kB, in dwords.
406
	 * i845 limits 3D destination to pitch of 8B - 8kB, in dwords.
412
	 * i845 limits 3D destination drawing rect to w,h of 2048,2048.
407
	 * i845 limits 3D destination drawing rect to w,h of 2048,2048.
413
	 *
408
	 *
414
	 * For the tiled issues, the only tiled buffer we draw to should be
409
	 * For the tiled issues, the only tiled buffer we draw to should be
415
	 * the front, which will have an appropriate pitch/offset already set up,
410
	 * the front, which will have an appropriate pitch/offset already set up,
416
	 * so UXA doesn't need to worry.
411
	 * so UXA doesn't need to worry.
417
	 */
412
	 */
418
	if (INTEL_INFO(intel)->gen >= 040) {
413
	if (INTEL_INFO(intel)->gen >= 040) {
419
		intel->accel_pixmap_offset_alignment = 4 * 2;
414
		intel->accel_pixmap_offset_alignment = 4 * 2;
420
		intel->accel_max_x = 8192;
415
		intel->accel_max_x = 8192;
421
		intel->accel_max_y = 8192;
416
		intel->accel_max_y = 8192;
422
	} else {
417
	} else {
423
		intel->accel_pixmap_offset_alignment = 4;
418
		intel->accel_pixmap_offset_alignment = 4;
424
		intel->accel_max_x = 2048;
419
		intel->accel_max_x = 2048;
425
		intel->accel_max_y = 2048;
420
		intel->accel_max_y = 2048;
426
	}
421
	}
427
}
422
}
428
 
423
 
429
 
424
 
430
Bool intel_uxa_init()
425
Bool intel_uxa_init()
431
{
426
{
432
	intel_screen_private *intel = intel_get_screen_private();
427
	intel_screen_private *intel = intel_get_screen_private();
433
 
428
 
434
	intel_limits_init(intel);
429
	intel_limits_init(intel);
435
 
430
 
436
	intel->prim_offset = 0;
431
	intel->prim_offset = 0;
437
	intel->vertex_count = 0;
432
	intel->vertex_count = 0;
438
	intel->vertex_offset = 0;
433
	intel->vertex_offset = 0;
439
	intel->vertex_used = 0;
434
	intel->vertex_used = 0;
440
	intel->floats_per_vertex = 0;
435
	intel->floats_per_vertex = 0;
441
	intel->last_floats_per_vertex = 0;
436
	intel->last_floats_per_vertex = 0;
442
	intel->vertex_bo = NULL;
437
	intel->vertex_bo = NULL;
443
	intel->surface_used = 0;
438
	intel->surface_used = 0;
444
	intel->surface_reloc = 0;
439
	intel->surface_reloc = 0;
445
 
440
 
446
/*
441
/*
447
	intel->uxa_driver->check_composite = i965_check_composite;
442
	intel->uxa_driver->check_composite = i965_check_composite;
448
	intel->uxa_driver->check_composite_texture = i965_check_composite_texture;
443
	intel->uxa_driver->check_composite_texture = i965_check_composite_texture;
449
	intel->uxa_driver->prepare_composite = i965_prepare_composite;
444
	intel->uxa_driver->prepare_composite = i965_prepare_composite;
450
	intel->uxa_driver->composite = i965_composite;
445
	intel->uxa_driver->composite = i965_composite;
451
	intel->uxa_driver->done_composite = i830_done_composite;
446
	intel->uxa_driver->done_composite = i830_done_composite;
452
*/
447
*/
453
	intel->vertex_flush = i965_vertex_flush;
448
	intel->vertex_flush = i965_vertex_flush;
454
	intel->batch_flush = i965_batch_flush;
449
	intel->batch_flush = i965_batch_flush;
455
	intel->batch_commit_notify = i965_batch_commit_notify;
450
	intel->batch_commit_notify = i965_batch_commit_notify;
456
 
451
 
457
	if (IS_GEN4(intel)) {
452
	if (IS_GEN4(intel)) {
458
		intel->context_switch = gen4_context_switch;
453
		intel->context_switch = gen4_context_switch;
459
	} else if (IS_GEN5(intel)) {
454
	} else if (IS_GEN5(intel)) {
460
		intel->context_switch = gen5_context_switch;
455
		intel->context_switch = gen5_context_switch;
461
	} else {
456
	} else {
462
		intel->context_switch = gen6_context_switch;
457
		intel->context_switch = gen6_context_switch;
463
	}
458
	}
464
 
459
 
465
	return TRUE;
460
	return TRUE;
466
}
461
}
467
 
462
 
468
 
463
 
469
static const struct intel_device_info intel_generic_info = {
464
static const struct intel_device_info intel_generic_info = {
470
	.gen = -1,
465
	.gen = -1,
471
};
466
};
472
 
467
 
473
static const struct intel_device_info intel_i915_info = {
468
static const struct intel_device_info intel_i915_info = {
474
	.gen = 030,
469
	.gen = 030,
475
};
470
};
476
static const struct intel_device_info intel_i945_info = {
471
static const struct intel_device_info intel_i945_info = {
477
	.gen = 031,
472
	.gen = 031,
478
};
473
};
479
 
474
 
480
static const struct intel_device_info intel_g33_info = {
475
static const struct intel_device_info intel_g33_info = {
481
	.gen = 033,
476
	.gen = 033,
482
};
477
};
483
 
478
 
484
static const struct intel_device_info intel_i965_info = {
479
static const struct intel_device_info intel_i965_info = {
485
	.gen = 040,
480
	.gen = 040,
486
};
481
};
487
 
482
 
488
static const struct intel_device_info intel_g4x_info = {
483
static const struct intel_device_info intel_g4x_info = {
489
	.gen = 045,
484
	.gen = 045,
490
};
485
};
491
 
486
 
492
static const struct intel_device_info intel_ironlake_info = {
487
static const struct intel_device_info intel_ironlake_info = {
493
	.gen = 050,
488
	.gen = 050,
494
};
489
};
495
 
490
 
496
static const struct intel_device_info intel_sandybridge_info = {
491
static const struct intel_device_info intel_sandybridge_info = {
497
	.gen = 060,
492
	.gen = 060,
498
};
493
};
499
 
494
 
500
static const struct intel_device_info intel_ivybridge_info = {
495
static const struct intel_device_info intel_ivybridge_info = {
501
	.gen = 070,
496
	.gen = 070,
502
};
497
};
503
 
498
 
504
static const struct intel_device_info intel_valleyview_info = {
499
static const struct intel_device_info intel_valleyview_info = {
505
	.gen = 071,
500
	.gen = 071,
506
};
501
};
507
 
502
 
508
static const struct intel_device_info intel_haswell_info = {
503
static const struct intel_device_info intel_haswell_info = {
509
	.gen = 075,
504
	.gen = 075,
510
};
505
};
511
 
506
 
512
#define INTEL_DEVICE_MATCH(d,i) \
507
#define INTEL_DEVICE_MATCH(d,i) \
513
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
508
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
514
 
509
 
515
 
510
 
516
static const struct pci_id_match intel_device_match[] = {
511
static const struct pci_id_match intel_device_match[] = {
517
 
512
 
518
	INTEL_I915G_IDS(&intel_i915_info),
513
	INTEL_I915G_IDS(&intel_i915_info),
519
	INTEL_I915GM_IDS(&intel_i915_info),
514
	INTEL_I915GM_IDS(&intel_i915_info),
520
	INTEL_I945G_IDS(&intel_i945_info),
515
	INTEL_I945G_IDS(&intel_i945_info),
521
	INTEL_I945GM_IDS(&intel_i945_info),
516
	INTEL_I945GM_IDS(&intel_i945_info),
522
 
517
 
523
	INTEL_G33_IDS(&intel_g33_info),
518
	INTEL_G33_IDS(&intel_g33_info),
524
	INTEL_PINEVIEW_IDS(&intel_g33_info),
519
	INTEL_PINEVIEW_IDS(&intel_g33_info),
525
 
520
 
526
	INTEL_I965G_IDS(&intel_i965_info),
521
	INTEL_I965G_IDS(&intel_i965_info),
527
	INTEL_I965GM_IDS(&intel_i965_info),
522
	INTEL_I965GM_IDS(&intel_i965_info),
528
 
523
 
529
	INTEL_G45_IDS(&intel_g4x_info),
524
	INTEL_G45_IDS(&intel_g4x_info),
530
	INTEL_GM45_IDS(&intel_g4x_info),
525
	INTEL_GM45_IDS(&intel_g4x_info),
531
 
526
 
532
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
527
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
533
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
528
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
534
 
529
 
535
	INTEL_SNB_D_IDS(&intel_sandybridge_info),
530
	INTEL_SNB_D_IDS(&intel_sandybridge_info),
536
	INTEL_SNB_M_IDS(&intel_sandybridge_info),
531
	INTEL_SNB_M_IDS(&intel_sandybridge_info),
537
 
532
 
538
	INTEL_IVB_D_IDS(&intel_ivybridge_info),
533
	INTEL_IVB_D_IDS(&intel_ivybridge_info),
539
	INTEL_IVB_M_IDS(&intel_ivybridge_info),
534
	INTEL_IVB_M_IDS(&intel_ivybridge_info),
540
 
535
 
541
	INTEL_HSW_D_IDS(&intel_haswell_info),
536
	INTEL_HSW_D_IDS(&intel_haswell_info),
542
	INTEL_HSW_M_IDS(&intel_haswell_info),
537
	INTEL_HSW_M_IDS(&intel_haswell_info),
543
 
538
 
544
	INTEL_VLV_D_IDS(&intel_valleyview_info),
539
	INTEL_VLV_D_IDS(&intel_valleyview_info),
545
	INTEL_VLV_M_IDS(&intel_valleyview_info),
540
	INTEL_VLV_M_IDS(&intel_valleyview_info),
546
 
541
 
547
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
542
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
548
 
543
 
549
	{ 0, 0, 0 },
544
	{ 0, 0, 0 },
550
};
545
};
551
 
546
 
552
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
547
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
553
{
548
{
554
    while(list->device_id)
549
    while(list->device_id)
555
    {
550
    {
556
        if(dev==list->device_id)
551
        if(dev==list->device_id)
557
            return list;
552
            return list;
558
        list++;
553
        list++;
559
    }
554
    }
560
    return NULL;
555
    return NULL;
561
}
556
}
562
 
557
 
563
 
558
 
564
const struct intel_device_info *
559
const struct intel_device_info *
565
intel_detect_chipset(struct pci_device *pci)
560
intel_detect_chipset(struct pci_device *pci)
566
{
561
{
567
    const struct pci_id_match *ent = NULL;
562
    const struct pci_id_match *ent = NULL;
568
 
563
 
569
    ent = PciDevMatch(pci->device_id, intel_device_match);
564
    ent = PciDevMatch(pci->device_id, intel_device_match);
570
 
565
 
571
    if(ent != NULL)
566
    if(ent != NULL)
572
        return (const struct intel_device_info*)ent->match_data;
567
        return (const struct intel_device_info*)ent->match_data;
573
    else
568
    else
574
        return &intel_generic_info;
569
        return &intel_generic_info;
575
}
570
}