KolibriOS Subversion Repository: Compare Revisions

Rev 3254 → Rev 3255

/drivers/include/drm/drmP.h
427,16 → 427,11
struct list_head head;
struct mutex lock;
};
#endif
 
/** File private data */
struct drm_file {
int authenticated;
struct pid *pid;
kuid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead;
struct drm_minor *minor;
unsigned long lock_count;
 
/** Mapping of mm object handles to object pointers. */
444,21 → 439,16
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
 
struct file *filp;
void *driver_priv;
 
int is_master; /* this file private is a master for a minor */
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
struct list_head fbs;
 
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
 
struct drm_prime_file_private prime;
};
 
#if 0
/** Wait queue */
struct drm_queue {
atomic_t use_count; /**< Outstanding uses (+1) */
972,6 → 962,8
irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
};
 
 
1601,7 → 1593,6
kref_put(&obj->refcount, drm_gem_object_free);
}
 
#if 0
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
1682,6 → 1673,8
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
 
#if 0
 
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
/drivers/include/drm/i915_drm.h
312,6 → 312,8
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
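/* the two parameters above are reported by i915_getparam() in
 * i915_dma.c, reachable through the SRV_GET_PARAM service call */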
 
typedef struct drm_i915_getparam {
int param;
/drivers/video/Intel-2D/kgem-sna.c
51,6 → 51,16
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0
 
/* Worst case seems to be 965gm where we cannot write within a cacheline that
* is being simultaneously read by the GPU, or within the sampler
* prefetch. In general, the chipsets seem to have a requirement that sampler
* offsets be aligned to a cacheline (64 bytes).
*/
#define UPLOAD_ALIGNMENT 128
 
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
 
#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10
72,8 → 82,124
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26
 
static struct kgem_bo *__kgem_freed_bo;
 
#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count
 
#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
kgem->debug_memory.bo_allocs++;
kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif
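
/* There is no ioctl() path to the driver here: requests are packed into
 * an ioctl_t and dispatched through call_service() with an SRV_* code,
 * which display_handler() in i915/main.c routes to the matching handler.
 * gem_create() and gem_close() below follow this pattern. */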
 
static uint32_t gem_create(int fd, int num_pages)
{
struct drm_i915_gem_create create;
ioctl_t io;
 
VG_CLEAR(create);
create.handle = 0;
create.size = PAGE_SIZE * num_pages;
io.handle = fd;
io.io_code = SRV_I915_GEM_CREATE;
io.input = &create;
io.inp_size = sizeof(create);
io.output = NULL;
io.out_size = 0;
 
if (call_service(&io)!=0)
return 0;
 
return create.handle;
}
 
static void gem_close(int fd, uint32_t handle)
{
struct drm_gem_close close;
ioctl_t io;
 
VG_CLEAR(close);
close.handle = handle;
 
io.handle = fd;
io.io_code = SRV_DRM_GEM_CLOSE;
io.input = &close;
io.inp_size = sizeof(close);
io.output = NULL;
io.out_size = 0;
 
call_service(&io);
}
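
/* integer log2: index of the most significant set bit, via the x86 BSR
 * instruction when available, otherwise a portable shift loop;
 * cache_bucket() uses it to bucket bos by log2 of their page count */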
 
constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
#else
unsigned int v = 0;
 
while (word >>= 1)
v++;
 
return v;
#endif
}
 
constant inline static int cache_bucket(int num_pages)
{
return __fls(num_pages);
}
 
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
int handle, int num_pages)
{
assert(num_pages);
memset(bo, 0, sizeof(*bo));
 
bo->refcnt = 1;
bo->handle = handle;
bo->target_handle = -1;
num_pages(bo) = num_pages;
bucket(bo) = cache_bucket(num_pages);
bo->reusable = true;
bo->domain = DOMAIN_CPU;
list_init(&bo->request);
list_init(&bo->list);
list_init(&bo->vma);
 
return bo;
}
 
static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
struct kgem_bo *bo;
 
if (__kgem_freed_bo) {
bo = __kgem_freed_bo;
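/* freed bos form an intrusive free list: the first word of each
 * entry stores the pointer to the next one */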
__kgem_freed_bo = *(struct kgem_bo **)bo;
} else {
bo = malloc(sizeof(*bo));
if (bo == NULL)
return NULL;
}
 
return __kgem_bo_init(bo, handle, num_pages);
}
 
static int gem_param(struct kgem *kgem, int name)
{
ioctl_t io;
99,6 → 225,11
return v;
}
 
static bool test_has_execbuffer2(struct kgem *kgem)
{
return 1;
}
 
static bool test_has_no_reloc(struct kgem *kgem)
{
if (DBG_NO_FAST_RELOC)
131,7 → 262,42
return detected;
}
 
static bool __kgem_throttle(struct kgem *kgem)
{
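/* throttling is stubbed out in this port: the DRM_IOCTL_I915_GEM_THROTTLE
 * call is commented out, so a hung GPU is never reported from here */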
// if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
return false;
 
// return errno == EIO;
}
 
static bool is_hw_supported(struct kgem *kgem,
struct pci_device *dev)
{
if (DBG_NO_HW)
return false;
 
if (!test_has_execbuffer2(kgem))
return false;
 
if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
return kgem->has_blt;
 
/* Although pre-855gm the GMCH is fubar, it works mostly. So
* let the user decide through "NoAccel" whether or not to risk
* hw acceleration.
*/
 
if (kgem->gen == 060 && dev->revision < 8) {
/* pre-production SNB with dysfunctional BLT */
return false;
}
 
if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
return kgem->has_blt;
 
return true;
}
 
static bool test_has_relaxed_fencing(struct kgem *kgem)
{
if (kgem->gen < 040) {
223,7 → 389,90
}
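
/* Preallocate a pool of pinned batch buffers (four 1-page and two 4-page
 * bos, per count[]/size[] below) so command submission can avoid the CS
 * workaround; if pinning fails, fall back to ordinary unpinned bos. */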
 
 
static bool kgem_init_pinned_batches(struct kgem *kgem)
{
ioctl_t io;
 
int count[2] = { 4, 2 };
int size[2] = { 1, 4 };
int n, i;
 
if (kgem->wedged)
return true;
 
for (n = 0; n < ARRAY_SIZE(count); n++) {
for (i = 0; i < count[n]; i++) {
struct drm_i915_gem_pin pin;
struct kgem_bo *bo;
 
VG_CLEAR(pin);
 
pin.handle = gem_create(kgem->fd, size[n]);
if (pin.handle == 0)
goto err;
 
DBG(("%s: new handle=%d, num_pages=%d\n",
__FUNCTION__, pin.handle, size[n]));
 
bo = __kgem_bo_alloc(pin.handle, size[n]);
if (bo == NULL) {
gem_close(kgem->fd, pin.handle);
goto err;
}
 
pin.alignment = 0;
io.handle = kgem->fd;
io.io_code = SRV_I915_GEM_PIN;
io.input = &pin;
io.inp_size = sizeof(pin);
io.output = NULL;
io.out_size = 0;
 
if (call_service(&io)!=0){
gem_close(kgem->fd, pin.handle);
goto err;
}
bo->presumed_offset = pin.offset;
debug_alloc__bo(kgem, bo);
list_add(&bo->list, &kgem->pinned_batches[n]);
}
}
 
return true;
 
err:
for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
while (!list_is_empty(&kgem->pinned_batches[n])) {
kgem_bo_destroy(kgem,
list_first_entry(&kgem->pinned_batches[n],
struct kgem_bo, list));
}
}
 
/* For simplicity populate the lists with a single unpinned bo */
for (n = 0; n < ARRAY_SIZE(count); n++) {
struct kgem_bo *bo;
uint32_t handle;
 
handle = gem_create(kgem->fd, size[n]);
if (handle == 0)
break;
 
bo = __kgem_bo_alloc(handle, size[n]);
if (bo == NULL) {
gem_close(kgem->fd, handle);
break;
}
 
debug_alloc__bo(kgem, bo);
list_add(&bo->list, &kgem->pinned_batches[n]);
}
return false;
}
 
 
 
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
struct drm_i915_gem_get_aperture aperture;
259,7 → 508,6
for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
list_init(&kgem->vma[i].inactive[j]);
}
 
kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;
 
272,7 → 520,6
DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
kgem->has_relaxed_delta));
 
 
kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
kgem->has_relaxed_fencing));
315,15 → 562,11
DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
kgem->has_pinned_batches));
 
#if 0
 
if (!is_hw_supported(kgem, dev)) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
"Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
kgem->wedged = 1;
} else if (__kgem_throttle(kgem)) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
"Detected a hung GPU, disabling acceleration.\n");
printf("Detected a hung GPU, disabling acceleration.\n");
kgem->wedged = 1;
}
 
340,8 → 583,7
kgem->batch_size = 4*1024;
 
if (!kgem_init_pinned_batches(kgem) && gen == 020) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
"Unable to reserve memory for GPU, disabling acceleration.\n");
printf("Unable to reserve memory for GPU, disabling acceleration.\n");
kgem->wedged = 1;
}
 
352,6 → 594,8
if (gen < 040)
kgem->min_alignment = 64;
 
#if 0
 
kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
kgem->half_cpu_cache_pages));
/drivers/video/Intel-2D/sna.h
69,6 → 69,9
 
#define SRV_GET_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
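/* must stay in sync with the SRV_* dispatch table in i915/main.c */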
 
static int call_service(ioctl_t *io)
{
/drivers/video/Intel-2D/sna_stream.c
85,7 → 85,7
 
bo = kgem_create_linear(&sna->kgem, stream->used, 0);
if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
// kgem_bo_destroy(&sna->kgem, bo);
kgem_bo_destroy(&sna->kgem, bo);
return NULL;
}
 
/drivers/video/drm/i915/i915_dma.c
912,6 → 912,7
 
return ret;
}
#endif
 
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
991,7 → 992,7
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
value = 1;
break;
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
1002,14 → 1003,17
return -EINVAL;
}
 
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return -EFAULT;
}
// if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
// DRM_ERROR("DRM_COPY_TO_USER failed\n");
// return -EFAULT;
// }
 
*param->value = value;
 
return 0;
}
 
#if 0
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
1672,3 → 1676,9
return 1;
}
#endif
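
/* gem_getparam() exposes i915_getparam() to the SRV_GET_PARAM service
 * call in main.c; there is no real file private, hence the NULL */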
 
 
int gem_getparam(struct drm_device *dev, void *data)
{
return i915_getparam(dev, data, NULL);
}
/drivers/video/drm/i915/i915_drv.c
49,6 → 49,8
 
struct drm_device *main_device;
 
struct drm_file *drm_file_handlers[256];
 
static int i915_modeset __read_mostly = 1;
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
481,15 → 483,20
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_device *dev;
static struct drm_driver driver;
static struct drm_device drm_dev;
static struct drm_file drm_file;
 
struct drm_device *dev;
struct drm_file *priv;
 
int ret;
 
dev = kzalloc(sizeof(*dev), 0);
if (!dev)
return -ENOMEM;
dev = &drm_dev;
priv = &drm_file;
 
drm_file_handlers[0] = priv;
 
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
514,6 → 521,15
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
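
/* hand-initialise the static drm_file: on Linux the DRM core does this
 * bookkeeping when userspace opens the device node */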
 
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
 
idr_init(&priv->object_idr);
spin_lock_init(&priv->table_lock);
 
dev->driver = &driver;
 
ret = i915_driver_load(dev, ent->driver_data );
529,7 → 545,6
return 0;
 
err_g4:
// drm_put_minor(&dev->primary);
//err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
536,7 → 551,6
//err_g2:
// pci_disable_device(pdev);
//err_g1:
free(dev);
 
return ret;
}
/drivers/video/drm/i915/i915_gem.c
138,7 → 138,6
dev_priv->mm.object_memory -= size;
}
 
#if 0
 
static int
i915_gem_wait_for_error(struct drm_device *dev)
150,7 → 149,7
 
if (!atomic_read(&dev_priv->mm.wedged))
return 0;
 
#if 0
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
174,6 → 173,8
x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags);
}
#endif
 
return 0;
}
 
185,14 → 186,11
if (ret)
return ret;
 
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
 
WARN_ON(i915_verify_lists(dev));
return 0;
}
#endif
 
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
251,7 → 249,6
return 0;
}
 
#if 0
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
284,6 → 281,7
trace_i915_gem_object_create(obj);
 
*handle_p = handle;
 
return 0;
}
 
319,6 → 317,8
args->size, &args->handle);
}
 
#if 0
 
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1473,7 → 1473,7
* Fail silently without starting the shrinker
*/
for_each_sg(st->sgl, sg, page_count, i) {
page = AllocPage(); // oh-oh
page = (struct page *)AllocPage(); // oh-oh
if ( page == 0 )
goto err_pages;
 
3054,7 → 3054,6
obj->pin_mappable = false;
}
 
#if 0
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
3107,6 → 3106,8
return ret;
}
 
#if 0
 
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
/drivers/video/drm/i915/main.c
13,7 → 13,21
 
#include "bitmap.h"
 
struct pci_device {
uint16_t domain;
uint8_t bus;
uint8_t dev;
uint8_t func;
uint16_t vendor_id;
uint16_t device_id;
uint16_t subvendor_id;
uint16_t subdevice_id;
uint32_t device_class;
uint8_t revision;
};
 
extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];
 
void cpu_detect();
 
30,6 → 44,10
int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
 
 
static char log[256];
 
int x86_clflush_size;
38,7 → 56,6
 
u32_t drvEntry(int action, char *cmdline)
{
struct pci_device_id *ent;
 
int err = 0;
 
105,6 → 122,12
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
 
#define SRV_PCI_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
115,6 → 138,8
 
int _stdcall display_handler(ioctl_t *io)
{
struct drm_file *file;
 
int retval = -1;
u32_t *inp;
u32_t *outp;
122,6 → 147,8
inp = io->input;
outp = io->output;
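
/* a single static drm_file (slot 0, registered in drm_get_dev()) stands
 * in for the per-client file private of a Linux DRM driver */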
 
file = drm_file_handlers[0];
 
switch(io->io_code)
{
case SRV_GETVERSION:
164,7 → 191,7
// retval = resize_surface((struct io_call_14*)inp);
break;
 
// case SRV_BLIT_BITMAP:
case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
171,10 → 198,28
// blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
break;
 
case SRV_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0;
break;
 
case SRV_GET_PARAM:
retval = gem_getparam(main_device, inp);
break;
 
case SRV_I915_GEM_CREATE:
retval = i915_gem_create_ioctl(main_device, inp, file);
break;
 
case SRV_DRM_GEM_CLOSE:
retval = drm_gem_close_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PIN:
retval = i915_gem_pin_ioctl(main_device, inp, file);
break;
};
 
return retval;
302,3 → 347,18
return ret;
}
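
/* fill the libpciaccess-style struct pci_device declared at the top of
 * this file from the kernel pci_dev, for use by the SNA userspace half */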
 
 
void get_pci_info(struct pci_device *dev)
{
struct pci_dev *pdev = main_device->pdev;
 
memset(dev, 0, sizeof(*dev));
 
dev->domain = 0;
dev->bus = pdev->busnr;
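/* devfn packs slot and function: slot = devfn >> 3, func = devfn & 7 */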
dev->dev = pdev->devfn >> 3;
dev->func = pdev->devfn & 7;
dev->vendor_id = pdev->vendor;
dev->device_id = pdev->device;
dev->revision = pdev->revision;
}