Subversion Repositories: Kolibri OS

Compare Revisions

Rev 4371 → Rev 4372

/contrib/sdk/sources/Intel-2D/sna/sna.c
56,6 → 56,20
int b;
} rect_t;
 
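/* Table of entry points this driver exports to the KolibriOS display stack;
   DrvInit() at the end of this file fills it with the sna_* implementations below. */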
struct pix_driver
{
char *name;
 
int (*create_bitmap)(bitmap_t * bitmap);
int (*destroy_bitmap)(bitmap_t * bitmap);
int (*lock_bitmap)(bitmap_t * bitmap);
int (*blit)(bitmap_t * bitmap, bool scale, int dst_x, int dst_y,
int w, int h, int src_x, int src_y);
int (*resize_bitmap)(bitmap_t * bitmap);
void (*fini)(void);
};
 
 
static struct sna_fb sna_fb;
static int tls_mask;
 
210,93 → 224,7
return kgem_init_fb(&sna->kgem, &sna_fb);
}
 
int sna_init(uint32_t service)
{
ioctl_t io;
int caps = 0;
 
static struct pci_device device;
struct sna *sna;
 
DBG(("%s\n", __FUNCTION__));
 
__lock_acquire_recursive(__sna_lock);
 
if(sna_device)
goto done;
 
io.handle = service;
io.io_code = SRV_GET_PCI_INFO;
io.input = &device;
io.inp_size = sizeof(device);
io.output = NULL;
io.out_size = 0;
 
if (call_service(&io)!=0)
goto err1;
 
sna = malloc(sizeof(*sna));
if (sna == NULL)
goto err1;
 
memset(sna, 0, sizeof(*sna));
 
sna->cpu_features = sna_cpu_detect();
 
sna->PciInfo = &device;
sna->info = intel_detect_chipset(sna->PciInfo);
sna->scrn = service;
 
kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
 
 
/* Disable tiling by default */
sna->tiling = 0;
 
/* Default fail-safe value of 75 Hz */
// sna->vblank_interval = 1000 * 1000 * 1000 / 75;
 
sna->flags = 0;
 
sna_accel_init(sna);
 
tls_mask = tls_alloc();
 
// printf("tls mask %x\n", tls_mask);
 
done:
caps = sna_device->render.caps;
 
err1:
__lock_release_recursive(__sna_lock);
 
return caps;
}
 
void sna_fini()
{
ENTER();
 
if( sna_device )
{
struct kgem_bo *mask;
 
__lock_acquire_recursive(__sna_lock);
 
mask = tls_get(tls_mask);
 
sna_device->render.fini(sna_device);
if(mask)
kgem_bo_destroy(&sna_device->kgem, mask);
// kgem_close_batches(&sna_device->kgem);
kgem_cleanup_cache(&sna_device->kgem);
 
sna_device = NULL;
__lock_release_recursive(__sna_lock);
};
LEAVE();
}
 
#if 0
 
static bool sna_solid_cache_init(struct sna *sna)
489,8 → 417,268
 
 
 
int sna_create_bitmap(bitmap_t *bitmap)
 
#define MI_LOAD_REGISTER_IMM (0x22<<23)
#define MI_WAIT_FOR_EVENT (0x03<<23)
 
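/* Build a batch that unmasks the pipe's scan-line window event in DERRMR (0x44050),
   programs the y1..y2 window, stalls with MI_WAIT_FOR_EVENT until that event fires,
   then masks DERRMR again. Needs a secure batch, hence I915_EXEC_SECURE. */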
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
rect_t *crtc,
int pipe, int y1, int y2,
bool full_height)
{
uint32_t *b;
uint32_t event;
 
// if (!sna->kgem.has_secure_batches)
// return false;
 
assert(y1 >= 0);
assert(y2 > y1);
assert(sna->kgem.mode == KGEM_RENDER);
 
/* Always program one less than the desired value */
if (--y1 < 0)
y1 = crtc->b;
y2--;
 
/* The scanline granularity is 3 bits */
y1 &= ~7;
y2 &= ~7;
if (y2 == y1)
return false;
 
event = 1 << (3*full_height + pipe*8);
 
b = kgem_get_batch(&sna->kgem);
sna->kgem.nbatch += 10;
 
b[0] = MI_LOAD_REGISTER_IMM | 1;
b[1] = 0x44050; /* DERRMR */
b[2] = ~event;
b[3] = MI_LOAD_REGISTER_IMM | 1;
b[4] = 0x4f100; /* magic */
b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
b[6] = MI_WAIT_FOR_EVENT | event;
b[7] = MI_LOAD_REGISTER_IMM | 1;
b[8] = 0x44050; /* DERRMR */
b[9] = ~0;
 
sna->kgem.batch_flags |= I915_EXEC_SECURE;
 
return true;
}
 
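/* Clip the requested wait window to the CRTC, give up when the span is only a few
   lines or when no emitter is wired up for this GPU generation, then dispatch to the
   gen-specific emitter (only the gen6 path is active in this build). */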
bool
sna_wait_for_scanline(struct sna *sna,
rect_t *crtc,
rect_t *clip)
{
bool full_height;
int y1, y2, pipe;
bool ret;
 
// if (sna->flags & SNA_NO_VSYNC)
// return false;
 
/*
* Make sure we don't wait for a scanline that will
* never occur
*/
y1 = clip->t - crtc->t;
if (y1 < 0)
y1 = 0;
y2 = clip->b - crtc->t;
if (y2 > crtc->b - crtc->t)
y2 = crtc->b - crtc->t;
// DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
// printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);
 
if (y2 <= y1 + 4)
return false;
 
full_height = y1 == 0 && y2 == crtc->b - crtc->t;
 
pipe = 0;
DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
__FUNCTION__, pipe, y1, y2, full_height));
 
if (sna->kgem.gen >= 0100)
ret = false;
// else if (sna->kgem.gen >= 075)
// ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
// else if (sna->kgem.gen >= 070)
// ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
else if (sna->kgem.gen >= 060)
ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
// else if (sna->kgem.gen >= 040)
// ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
 
return ret;
}
 
 
 
 
 
 
 
 
 
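/* Per-generation chipset descriptors; .gen is written in octal (e.g. 060 = Sandy Bridge,
   070 = Ivy Bridge, 075 = Haswell), matching the octal gen checks used above. */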
static const struct intel_device_info intel_generic_info = {
.gen = -1,
};
 
static const struct intel_device_info intel_i915_info = {
.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
.gen = 031,
};
 
static const struct intel_device_info intel_g33_info = {
.gen = 033,
};
 
static const struct intel_device_info intel_i965_info = {
.gen = 040,
};
 
static const struct intel_device_info intel_g4x_info = {
.gen = 045,
};
 
static const struct intel_device_info intel_ironlake_info = {
.gen = 050,
};
 
static const struct intel_device_info intel_sandybridge_info = {
.gen = 060,
};
 
static const struct intel_device_info intel_ivybridge_info = {
.gen = 070,
};
 
static const struct intel_device_info intel_valleyview_info = {
.gen = 071,
};
 
static const struct intel_device_info intel_haswell_info = {
.gen = 075,
};
 
#define INTEL_DEVICE_MATCH(d,i) \
{ 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
 
 
static const struct pci_id_match intel_device_match[] = {
 
INTEL_I915G_IDS(&intel_i915_info),
INTEL_I915GM_IDS(&intel_i915_info),
INTEL_I945G_IDS(&intel_i945_info),
INTEL_I945GM_IDS(&intel_i945_info),
 
INTEL_G33_IDS(&intel_g33_info),
INTEL_PINEVIEW_IDS(&intel_g33_info),
 
INTEL_I965G_IDS(&intel_i965_info),
INTEL_I965GM_IDS(&intel_i965_info),
 
INTEL_G45_IDS(&intel_g4x_info),
INTEL_GM45_IDS(&intel_g4x_info),
 
INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
 
INTEL_SNB_D_IDS(&intel_sandybridge_info),
INTEL_SNB_M_IDS(&intel_sandybridge_info),
 
INTEL_IVB_D_IDS(&intel_ivybridge_info),
INTEL_IVB_M_IDS(&intel_ivybridge_info),
 
INTEL_HSW_D_IDS(&intel_haswell_info),
INTEL_HSW_M_IDS(&intel_haswell_info),
 
INTEL_VLV_D_IDS(&intel_valleyview_info),
INTEL_VLV_M_IDS(&intel_valleyview_info),
 
INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
 
{ 0, 0, 0 },
};
 
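/* Linear scan of the PCI id table; intel_device_match[] is terminated by its all-zero entry. */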
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
{
while(list->device_id)
{
if(dev==list->device_id)
return list;
list++;
}
return NULL;
}
 
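/* Map the PCI device id to its device-info entry, falling back to intel_generic_info
   (gen = -1) for unrecognised ids. */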
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
const struct pci_id_match *ent = NULL;
 
ent = PciDevMatch(pci->device_id, intel_device_match);
 
if(ent != NULL)
return (const struct intel_device_info*)ent->match_data;
else
return &intel_generic_info;
}
 
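/* Query the chipset id via the I915_PARAM_CHIPSET_ID getparam; returns 0 on failure. */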
int intel_get_device_id(int fd)
{
struct drm_i915_getparam gp;
int devid = 0;
 
memset(&gp, 0, sizeof(gp));
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &devid;
 
if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
return 0;
 
return devid;
}
 
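/* KolibriOS shim: forward a DRM ioctl to the kernel service through call_service();
   inp_size is a fixed 64 bytes here, assumed to cover the i915 getparam payloads used. */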
int drmIoctl(int fd, unsigned long request, void *arg)
{
ioctl_t io;
 
io.handle = fd;
io.io_code = request;
io.input = arg;
io.inp_size = 64;
io.output = NULL;
io.out_size = 0;
 
return call_service(&io);
}
 
 
 
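/* Forward declaration; the definition comes from the gen6 render backend elsewhere in
   the SNA sources, presumably for use by sna_blit_tex() below. */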
bool
gen6_composite(struct sna *sna,
uint8_t op,
PixmapPtr src, struct kgem_bo *src_bo,
PixmapPtr mask,struct kgem_bo *mask_bo,
PixmapPtr dst, struct kgem_bo *dst_bo,
int32_t src_x, int32_t src_y,
int32_t msk_x, int32_t msk_y,
int32_t dst_x, int32_t dst_y,
int32_t width, int32_t height,
struct sna_composite_op *tmp);
 
//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 
 
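/* Wrap an existing GEM handle in a surface_t. No CPU mapping is made here: sf->data
   stays NULL, presumably until sna_lock_bitmap() maps the BO. */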
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
surface_t *sf;
struct kgem_bo *bo;
 
500,19 → 688,13
 
__lock_acquire_recursive(__sna_lock);
 
bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
32,I915_TILING_NONE, CREATE_CPU_MAP);
bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
 
if(bo == NULL)
goto err_2;
__lock_release_recursive(__sna_lock);
 
void *map = kgem_bo_map(&sna_device->kgem, bo);
if(map == NULL)
goto err_3;
 
sf->width = bitmap->width;
sf->height = bitmap->height;
sf->data = map;
sf->data = NULL;
sf->pitch = bo->pitch;
sf->bo = bo;
sf->bo_size = PAGE_SIZE * bo->size.pages.count;
519,12 → 701,9
sf->flags = bitmap->flags;
 
bitmap->handle = (uint32_t)sf;
__lock_release_recursive(__sna_lock);
 
return 0;
 
err_3:
kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
__lock_release_recursive(__sna_lock);
free(sf);
532,8 → 711,15
return -1;
};
 
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
surface_t *sf = to_surface(bitmap);
struct kgem_bo *bo = sf->bo;
bo->handle = handle;
}
 
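/* Allocate a fresh 32bpp BO with a CPU mapping and publish the map through sf->data. */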
static int sna_create_bitmap(bitmap_t *bitmap)
{
surface_t *sf;
struct kgem_bo *bo;
 
543,13 → 729,19
 
__lock_acquire_recursive(__sna_lock);
 
bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
32,I915_TILING_NONE, CREATE_CPU_MAP);
 
__lock_release_recursive(__sna_lock);
if(bo == NULL)
goto err_2;
 
void *map = kgem_bo_map(&sna_device->kgem, bo);
if(map == NULL)
goto err_3;
 
sf->width = bitmap->width;
sf->height = bitmap->height;
sf->data = NULL;
sf->data = map;
sf->pitch = bo->pitch;
sf->bo = bo;
sf->bo_size = PAGE_SIZE * bo->size.pages.count;
556,9 → 748,12
sf->flags = bitmap->flags;
 
bitmap->handle = (uint32_t)sf;
__lock_release_recursive(__sna_lock);
 
return 0;
 
err_3:
kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
__lock_release_recursive(__sna_lock);
free(sf);
566,17 → 761,10
return -1;
};
 
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
static int sna_destroy_bitmap(bitmap_t *bitmap)
{
surface_t *sf = to_surface(bitmap);
struct kgem_bo *bo = sf->bo;
bo->handle = handle;
}
 
int sna_destroy_bitmap(bitmap_t *bitmap)
{
surface_t *sf = to_surface(bitmap);
 
__lock_acquire_recursive(__sna_lock);
 
kgem_bo_destroy(&sna_device->kgem, sf->bo);
592,7 → 780,7
return 0;
};
 
int sna_lock_bitmap(bitmap_t *bitmap)
static int sna_lock_bitmap(bitmap_t *bitmap)
{
surface_t *sf = to_surface(bitmap);
 
609,7 → 797,7
return 0;
};
 
int sna_resize_bitmap(bitmap_t *bitmap)
static int sna_resize_bitmap(bitmap_t *bitmap)
{
surface_t *sf = to_surface(bitmap);
struct kgem *kgem = &sna_device->kgem;
707,120 → 895,8
return -1;
};
 
#define MI_LOAD_REGISTER_IMM (0x22<<23)
#define MI_WAIT_FOR_EVENT (0x03<<23)
 
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
rect_t *crtc,
int pipe, int y1, int y2,
bool full_height)
{
uint32_t *b;
uint32_t event;
 
// if (!sna->kgem.has_secure_batches)
// return false;
 
assert(y1 >= 0);
assert(y2 > y1);
assert(sna->kgem.mode == KGEM_RENDER);
 
/* Always program one less than the desired value */
if (--y1 < 0)
y1 = crtc->b;
y2--;
 
/* The scanline granularity is 3 bits */
y1 &= ~7;
y2 &= ~7;
if (y2 == y1)
return false;
 
event = 1 << (3*full_height + pipe*8);
 
b = kgem_get_batch(&sna->kgem);
sna->kgem.nbatch += 10;
 
b[0] = MI_LOAD_REGISTER_IMM | 1;
b[1] = 0x44050; /* DERRMR */
b[2] = ~event;
b[3] = MI_LOAD_REGISTER_IMM | 1;
b[4] = 0x4f100; /* magic */
b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
b[6] = MI_WAIT_FOR_EVENT | event;
b[7] = MI_LOAD_REGISTER_IMM | 1;
b[8] = 0x44050; /* DERRMR */
b[9] = ~0;
 
sna->kgem.batch_flags |= I915_EXEC_SECURE;
 
return true;
}
 
bool
sna_wait_for_scanline(struct sna *sna,
rect_t *crtc,
rect_t *clip)
{
bool full_height;
int y1, y2, pipe;
bool ret;
 
// if (sna->flags & SNA_NO_VSYNC)
// return false;
 
/*
* Make sure we don't wait for a scanline that will
* never occur
*/
y1 = clip->t - crtc->t;
if (y1 < 0)
y1 = 0;
y2 = clip->b - crtc->t;
if (y2 > crtc->b - crtc->t)
y2 = crtc->b - crtc->t;
// DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
// printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);
 
if (y2 <= y1 + 4)
return false;
 
full_height = y1 == 0 && y2 == crtc->b - crtc->t;
 
pipe = 0;
DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
__FUNCTION__, pipe, y1, y2, full_height));
 
if (sna->kgem.gen >= 0100)
ret = false;
// else if (sna->kgem.gen >= 075)
// ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
// else if (sna->kgem.gen >= 070)
// ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
else if (sna->kgem.gen >= 060)
ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
// else if (sna->kgem.gen >= 040)
// ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
 
return ret;
}
 
 
bool
gen6_composite(struct sna *sna,
uint8_t op,
PixmapPtr src, struct kgem_bo *src_bo,
PixmapPtr mask,struct kgem_bo *mask_bo,
PixmapPtr dst, struct kgem_bo *dst_bo,
int32_t src_x, int32_t src_y,
int32_t msk_x, int32_t msk_y,
int32_t dst_x, int32_t dst_y,
int32_t width, int32_t height,
struct sna_composite_op *tmp);
 
 
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
int w, int h, int src_x, int src_y)
 
948,146 → 1024,97
}
 
 
static void sna_fini()
{
ENTER();
 
if( sna_device )
{
struct kgem_bo *mask;
 
__lock_acquire_recursive(__sna_lock);
 
mask = tls_get(tls_mask);
 
sna_device->render.fini(sna_device);
if(mask)
kgem_bo_destroy(&sna_device->kgem, mask);
// kgem_close_batches(&sna_device->kgem);
kgem_cleanup_cache(&sna_device->kgem);
 
sna_device = NULL;
__lock_release_recursive(__sna_lock);
};
LEAVE();
}
 
uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
{
ioctl_t io;
int caps = 0;
 
static struct pci_device device;
struct sna *sna;
 
DBG(("%s\n", __FUNCTION__));
 
__lock_acquire_recursive(__sna_lock);
 
if(sna_device)
goto done;
 
io.handle = service;
io.io_code = SRV_GET_PCI_INFO;
io.input = &device;
io.inp_size = sizeof(device);
io.output = NULL;
io.out_size = 0;
 
if (call_service(&io)!=0)
goto err1;
 
sna = malloc(sizeof(*sna));
if (sna == NULL)
goto err1;
 
memset(sna, 0, sizeof(*sna));
 
sna->cpu_features = sna_cpu_detect();
 
sna->PciInfo = &device;
sna->info = intel_detect_chipset(sna->PciInfo);
sna->scrn = service;
 
kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
 
/* Disable tiling by default */
sna->tiling = 0;
 
/* Default fail-safe value of 75 Hz */
// sna->vblank_interval = 1000 * 1000 * 1000 / 75;
 
sna->flags = 0;
 
sna_accel_init(sna);
 
tls_mask = tls_alloc();
 
// printf("tls mask %x\n", tls_mask);
 
driver->create_bitmap = sna_create_bitmap;
driver->destroy_bitmap = sna_destroy_bitmap;
driver->lock_bitmap = sna_lock_bitmap;
driver->blit = sna_blit_tex;
driver->resize_bitmap = sna_resize_bitmap;
driver->fini = sna_fini;
done:
caps = sna_device->render.caps;
 
err1:
__lock_release_recursive(__sna_lock);
 
return caps;
}
 
(Also removed by this hunk, from the old tail of the file: the intel_*_info chipset descriptors, intel_device_match[], PciDevMatch(), intel_detect_chipset(), intel_get_device_id() and drmIoctl(); revision 4372 moves them earlier in the file, as shown above.)