Subversion Repositories: Kolibri OS

Compare Revisions: Rev 6130 → Rev 6131

/drivers/video/drm/drm_cache.c
33,8 → 33,13
 
extern int x86_clflush_size;
 
#if defined(CONFIG_X86)
 
#if 0
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
* sfence to avoid ordering issues. For drm_clflush_page this fencing happens
* in the caller.
*/
static void
drm_clflush_page(struct page *page)
{
66,75 → 71,71
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
-    uint8_t *pva;
-    unsigned int i, j;
-
-    pva = AllocKernelSpace(4096);
-
-    if(pva != NULL)
-    {
-        dma_addr_t *src, *dst;
-        u32 count;
-
-        for (i = 0; i < num_pages; i++)
-        {
-            mb();
-            MapPage(pva, page_to_phys(pages[i]), 0x001);
-            for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
-                clflush(pva + j);
-        }
-        FreeKernelSpace(pva);
-    }
-    mb();
+#if defined(CONFIG_X86)
+    drm_cache_flush_clflush(pages, num_pages);
+    return;
+
+#elif defined(__powerpc__)
+    unsigned long i;
+    for (i = 0; i < num_pages; i++) {
+        struct page *page = pages[i];
+        void *page_virtual;
+
+        if (unlikely(page == NULL))
+            continue;
+
+        page_virtual = kmap_atomic(page);
+        flush_dcache_range((unsigned long)page_virtual,
+                           (unsigned long)page_virtual + PAGE_SIZE);
+        kunmap_atomic(page_virtual);
+    }
+#else
+    printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+    WARN_ON_ONCE(1);
+#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
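For reference, the removed KolibriOS path flushed by hand: map each page into a scratch kernel window, clflush every cache line, and fence with mb() on both sides, the same ordering requirement the clflushopt comment near the top of this file describes. A minimal sketch of that pattern, assuming only the helpers the removed code itself used (AllocKernelSpace, MapPage, FreeKernelSpace, clflush, x86_clflush_size) and its 0x001 mapping flag; it is an illustration, not the committed code:

/* illustrative sketch only, not part of the commit */
static void flush_one_page_sketch(struct page *page)
{
    uint8_t *pva = AllocKernelSpace(4096);      /* scratch 4 KiB window         */
    unsigned int i;

    if (pva == NULL)
        return;

    MapPage(pva, page_to_phys(page), 0x001);    /* map the target page          */
    mb();                                       /* order against earlier writes */
    for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
        clflush(pva + i);                       /* flush each cache line        */
    mb();                                       /* make the flush visible       */
    FreeKernelSpace(pva);
}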
 
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
-    struct sg_page_iter sg_iter;
-    struct page *page;
-    uint8_t *pva;
-    unsigned int i;
-
-    pva = AllocKernelSpace(4096);
-    if( pva != NULL)
-    {
-        mb();
-        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-        {
-            page = sg_page_iter_page(&sg_iter);
-            MapPage(pva, page_to_phys(page), 0x001);
-            for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
-                clflush(pva + i);
-        };
-        FreeKernelSpace(pva);
-    };
-    mb();
+    if (cpu_has_clflush) {
+        struct sg_page_iter sg_iter;
+
+        mb();
+        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+            drm_clflush_page(sg_page_iter_page(&sg_iter));
+        mb();
+
+        return;
+    }
+
+    if (wbinvd_on_all_cpus())
+        printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
 
-#if 0
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
-    if (cpu_has_clflush) {
-        const int size = boot_cpu_data.x86_clflush_size;
+    if (1) {
+        const int size = x86_clflush_size;
        void *end = addr + length;
        addr = (void *)(((unsigned long)addr) & -size);
        mb();
        for (; addr < end; addr += size)
-            clflushopt(addr);
+            clflush(addr);
        mb();
        return;
    }
 
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
141,5 → 142,3
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
 
-#endif
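The now-enabled drm_clflush_virt_range rounds the start address down to a cache-line boundary before the flush loop; because size is a power of two, masking the address with -size clears the low bits, so the first clflush covers the line that contains the original address instead of skipping it. A small stand-alone illustration with assumed values (64-byte line, arbitrary address), not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uintptr_t size = 64;                  /* assumed cache-line size     */
    uintptr_t addr = 0x12345678;                /* arbitrary example address   */
    uintptr_t aligned = addr & ~(size - 1);     /* same effect as addr & -size */

    printf("%#lx -> %#lx\n", (unsigned long)addr, (unsigned long)aligned);
    /* prints 0x12345678 -> 0x12345640: the loop starts one line earlier
     * rather than leaving the partially covered first line unflushed */
    return 0;
}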
/drivers/video/drm/drm_irq.c
54,6 → 54,10
return mono;
}
 
irqreturn_t device_irq_handler(struct drm_device *dev)
{
return dev->driver->irq_handler(0, dev);
}
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, pipe, count) \
401,16 → 405,7
 
 
 
irqreturn_t device_irq_handler(struct drm_device *dev)
{
 
// printf("video irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
}
 
/**
* drm_irq_install - install IRQ handler
* @dev: DRM device
1424,7 → 1419,7
 
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
// send_vblank_event(dev, e, seq, &now);
}
 
}
/drivers/video/drm/drm_stub.c
547,21 → 547,6
return order;
}
 
extern int x86_clflush_size;
 
 
void drm_clflush_virt_range(void *addr, unsigned long length)
{
char *tmp = addr;
char *end = tmp + length;
mb();
for (; tmp < end; tmp += x86_clflush_size)
clflush(tmp);
clflush(end - 1);
mb();
return;
}
 
int drm_sysfs_connector_add(struct drm_connector *connector)
{
return 0;
/drivers/video/drm/i915/i915_dma.c
181,7 → 181,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
dev_priv->bridge_dev = _pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
return -1;
/drivers/video/drm/i915/i915_gem.c
40,6 → 40,7
#define RQ_BUG_ON(expr)
 
extern int x86_clflush_size;
#define __copy_to_user_inatomic __copy_to_user
 
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
57,8 → 58,8
#define MAX_ERRNO 4095
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 
 
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
238,9 → 239,6
args->size, &args->handle);
}
 
 
#if 0
 
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
293,6 → 291,42
return 0;
}
 
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
* flush the object from the CPU cache.
*/
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush)
{
int ret;
 
*needs_clflush = 0;
 
if (!obj->base.filp)
return -EINVAL;
 
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
i915_gem_object_pin_pages(obj);
 
return ret;
}
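As the comment above states, i915_gem_obj_prepare_shmem_read leaves the object's pages pinned and tells the caller whether a clflush is needed before reading them. A hedged sketch of the expected calling pattern follows; shmem_read_pages is a hypothetical stand-in for the real per-page fast/slow copy loop, and the unpin call mirrors the pin taken above:

/* sketch only; shmem_read_pages is hypothetical, not a driver function */
static int pread_sketch(struct drm_i915_gem_object *obj,
                        char __user *user_data, loff_t offset, loff_t size)
{
    int needs_clflush = 0;
    int ret;

    ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
    if (ret)
        return ret;

    /* copy page by page; flush stale CPU cache lines first when asked to */
    ret = shmem_read_pages(obj, offset, size, user_data, needs_clflush);

    i915_gem_object_unpin_pages(obj);           /* undo the pin from prepare */
    return ret;
}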
 
/* Per-page copy function for the shmem pread fastpath.
* Flushes invalid cachelines before reading the target if
* needs_clflush is set. */
424,16 → 458,6
 
mutex_unlock(&dev->struct_mutex);
 
if (likely(!i915.prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
* data up to the first fault. Hence ignore any errors
* and just continue. */
(void)ret;
prefaulted = 1;
}
 
ret = shmem_pread_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
needs_clflush);
471,11 → 495,6
if (args->size == 0)
return 0;
 
if (!access_ok(VERIFY_WRITE,
to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
516,27 → 535,7
* page faults in the source data
*/
 
static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
 
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force*)vaddr_atomic + page_offset;
unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
#endif
 
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
585,7 → 584,8
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
 
MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
MapPage(dev_priv->gtt.mappable,
dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
 
memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
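The change above maps the aperture page write-combined (PG_WRITEC) rather than merely writable, which matches the "copy directly into the GTT, uncached" comment on this path. A hedged sketch of one loop iteration, using only the names visible in the hunk (MapPage, gtt.mappable, gtt.mappable_base, PG_WRITEC, PG_SW); it is an illustration, not the committed function:

/* sketch of a single fast-pwrite iteration, not the committed code */
static void gtt_pwrite_one_page_sketch(struct drm_i915_private *dev_priv,
                                       loff_t page_base, int page_offset,
                                       const char *user_data, int page_length)
{
    /* map the destination aperture page into the driver window, write-combined */
    MapPage(dev_priv->gtt.mappable,
            dev_priv->gtt.mappable_base + page_base,
            PG_WRITEC | PG_SW);

    /* copy the user bytes straight through; nothing lands in the CPU cache */
    memcpy((char *)dev_priv->gtt.mappable + page_offset, user_data, page_length);
}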
 
/drivers/video/drm/i915/i915_gem_stolen.c
415,6 → 415,16
*/
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
 
{
u32 usable_size = dev_priv->gtt.stolen_usable_size >> 20;
if(i915.fbsize > usable_size)
{
i915.fbsize = usable_size;
DRM_DEBUG_KMS("Adjust framebuffer size to match reserved memory\n"
"new fbsize %dMB\n",i915.fbsize);
}
}
 
return 0;
}
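The added block compares sizes in megabytes: gtt.stolen_usable_size is in bytes, so the shift by 20 converts it to whole MiB before clamping the i915.fbsize module parameter. A worked example with assumed numbers:

/* assumed numbers: 32 MiB of usable stolen memory, fbsize requested as 64 MB */
static u32 clamp_fbsize_example(void)
{
    u32 stolen_usable_size = 32u << 20;          /* 33554432 bytes            */
    u32 usable_size = stolen_usable_size >> 20;  /* 32                        */
    u32 fbsize = 64;                             /* requested size in MB      */

    if (fbsize > usable_size)
        fbsize = usable_size;                    /* clamped to 32 MB          */
    return fbsize;
}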
 
/drivers/video/drm/i915/i915_irq.c
1446,7 → 1446,7
*pin_mask |= BIT(i);
 
// if (!intel_hpd_pin_to_port(i, &port))
// continue;
continue;
 
if (long_pulse_detect(port, dig_hotplug_reg))
*long_mask |= BIT(i);
2009,8 → 2009,8
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
if (hotplug_trigger)
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
// if (hotplug_trigger)
// ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
4474,13 → 4474,3
dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
 
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
// printf("i915 irq\n");
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
}
 
/drivers/video/drm/i915/i915_params.c
31,7 → 31,7
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_rc6 = 0,
.enable_fbc = -1,
.enable_execlists = -1,
.enable_hangcheck = true,
/drivers/video/drm/i915/i915_trace.h
43,5 → 43,5
#define trace_i915_page_table_entry_map(vm, pde, pt, index, count, GEN6_PTES)
#define trace_i915_va_alloc(vm,start,size,name)
#define trace_i915_gem_request_notify(ring)
 
#define trace_i915_gem_object_pread(obj, offset, size)
#endif
/drivers/video/drm/i915/kms_display.c
374,6 → 374,7
connector->name, connector->base.id);
return -EINVAL;
};
connector->encoder = encoder;
}
 
crtc = encoder->crtc;
382,7 → 383,8
 
if(crtc != NULL)
{
encoder->crtc = crtc;
DRM_DEBUG_KMS("%s connector: %p encode: %p crtc: %p\n",__FUNCTION__,
connector, encoder, crtc);
return 0;
}
else
823,8 → 825,6
 
FreeKernelSpace(mapped);
 
// release old cursor
 
KernelFree(cursor->data);
 
cursor->data = bits;
1153,13 → 1153,21
mask->height== 0 )
return 1;
 
+    ret = i915_mutex_lock_interruptible(dev);
+    if (ret)
+        return ret;
+
    obj = drm_gem_object_lookup(dev, file, mask->handle);
    if (obj == NULL)
-        return -ENOENT;
+    {
+        ret = -ENOENT;
+        goto unlock;
+    }

-    if (!obj->filp) {
-        drm_gem_object_unreference_unlocked(obj);
-        return -EINVAL;
+    if (!obj->filp)
+    {
+        ret = -ENOENT;
+        goto out;
    }
 
#if 0
1179,10 → 1187,6
u8* dst_offset;
u32 ifl;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err1;
 
i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true);
 
src_offset = os_display->win_map;
1301,11 → 1305,12
}
#endif
 
-err2:
-    mutex_unlock(&dev->struct_mutex);
-err1:
+out:
    drm_gem_object_unreference(obj);

+unlock:
+    mutex_unlock(&dev->struct_mutex);
 
return ret;
}
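The reworked error paths above follow one ordering: take struct_mutex first, then look up the GEM object, and leave through labels that undo those steps in reverse (out drops the object reference, unlock drops the mutex). A condensed sketch of that shape, assuming the same DRM/i915 helpers shown in the hunk; the actual mask-update work is elided:

/* sketch of the lock/lookup/cleanup ordering, not the full function */
static int mask_update_sketch(struct drm_device *dev, struct drm_file *file,
                              u32 handle)
{
    struct drm_gem_object *obj;
    int ret;

    ret = i915_mutex_lock_interruptible(dev);   /* lock before the lookup    */
    if (ret)
        return ret;

    obj = drm_gem_object_lookup(dev, file, handle);
    if (obj == NULL) {
        ret = -ENOENT;
        goto unlock;                            /* nothing to unreference    */
    }

    if (!obj->filp) {                           /* no shmem backing store    */
        ret = -ENOENT;
        goto out;
    }

    /* ... copy the cursor/mask data while the object is held ... */
    ret = 0;

out:
    drm_gem_object_unreference(obj);            /* pairs with the lookup     */
unlock:
    mutex_unlock(&dev->struct_mutex);           /* released on every path    */
    return ret;
}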
 
/drivers/video/drm/i915/kos_gem_fb.c
126,96 → 126,4
}
 
 
struct drm_i915_gem_object *
kos_gem_fb_object_create(struct drm_device *dev,
u32 gtt_offset,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *fb_node;
struct i915_vma *vma;
int ret;
 
DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
gtt_offset, size);
 
/* KISS and expect everything to be page-aligned */
BUG_ON(size & 4095);
 
if (WARN_ON(size == 0))
return NULL;
 
fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
if (!fb_node)
return NULL;
 
fb_node->start = gtt_offset;
fb_node->size = size;
 
obj = _kos_fb_object_create(dev, fb_node);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
kfree(fb_node);
return NULL;
}
 
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
}
 
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
goto err_vma;
}
}
 
// obj->has_global_gtt_mapping = 1;
 
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
 
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
 
if (!obj->base.name) {
ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
if (ret < 0)
goto err_gem;
 
obj->base.name = ret;
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(&obj->base);
 
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
}
 
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference(&obj->base);
return obj;
 
err_gem:
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
err_vma:
i915_gem_vma_destroy(vma);
err_out:
kfree(fb_node);
drm_gem_object_unreference(&obj->base);
return NULL;
}
 
/drivers/video/drm/i915/main.c
14,7 → 14,7
#include "bitmap.h"
#include "i915_kos32.h"
 
#define DRV_NAME "i915 v4.4"
#define DRV_NAME "i915 v4.4.1"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
374,6 → 374,8
#define SRV_MASK_UPDATE 45
#define SRV_MASK_UPDATE_EX 46
 
#define SRV_I915_GEM_PREAD 47
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
459,6 → 461,10
retval = i915_gem_set_caching_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PREAD:
retval = i915_gem_pread_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PWRITE:
retval = i915_gem_pwrite_ioctl(main_device, inp, file);
break;
498,7 → 504,6
break;
 
case SRV_I915_GEM_EXECBUFFER2:
// printf("SRV_I915_GEM_EXECBUFFER2\n");
retval = i915_gem_execbuffer2(main_device, inp, file);
break;
 
/drivers/video/drm/i915/pci.c
1,10 → 1,12
#include <syscall.h>
 
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/pm.h>
 
#include <linux/pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);
 
372,7 → 374,7
 
 
 
int pci_scan_slot(u32 bus, int devfn)
int _pci_scan_slot(u32 bus, int devfn)
{
int func, nr = 0;
 
493,7 → 495,7
for(;bus <= last_bus; bus++)
{
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
_pci_scan_slot(bus, devfn);
 
 
}
571,7 → 573,7
};
 
 
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
pci_dev_t *dev;
 
664,13 → 666,6
}
 
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
 
 
int pci_enable_rom(struct pci_dev *pdev)
682,7 → 677,7
if (!res->flags)
return -1;
 
pcibios_resource_to_bus(pdev, &region, res);
_pcibios_resource_to_bus(pdev, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
817,28 → 812,6
pci_disable_rom(pdev);
}
 
#if 0
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
 
/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
if (pci_is_pcie(dev))
return;
 
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
#endif
 
 
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
856,6 → 829,13
dev->is_busmaster = enable;
}
 
 
/* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
* Enables bus-mastering on the device and calls pcibios_set_master()
* to do the needed arch specific settings.
*/
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
862,5 → 842,230
// pcibios_set_master(dev);
}
 
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
 
 
static inline int pcie_cap_version(const struct pci_dev *dev)
{
return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}
 
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
return true;
}
 
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_ENDPOINT ||
type == PCI_EXP_TYPE_LEG_END;
}
 
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}
 
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
 
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
if (!pci_is_pcie(dev))
return false;
 
switch (pos) {
case PCI_EXP_FLAGS_TYPE:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
return pcie_cap_has_devctl(dev);
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
return pcie_cap_has_lnkctl(dev);
case PCI_EXP_SLTCAP:
case PCI_EXP_SLTCTL:
case PCI_EXP_SLTSTA:
return pcie_cap_has_sltctl(dev);
case PCI_EXP_RTCTL:
case PCI_EXP_RTCAP:
case PCI_EXP_RTSTA:
return pcie_cap_has_rtctl(dev);
case PCI_EXP_DEVCAP2:
case PCI_EXP_DEVCTL2:
case PCI_EXP_LNKCAP2:
case PCI_EXP_LNKCTL2:
case PCI_EXP_LNKSTA2:
return pcie_cap_version(dev) > 1;
default:
return false;
}
}
 
/*
* Note that these accessor functions are only for the "PCI Express
* Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
* other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
*/
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
int ret;
 
*val = 0;
if (pos & 1)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_word() fails, it may
* have been written as 0xFFFF if hardware error happens
* during pci_read_config_word().
*/
if (ret)
*val = 0;
return ret;
}
 
/*
* For Functions that do not implement the Slot Capabilities,
* Slot Status, and Slot Control registers, these spaces must
* be hardwired to 0b, with the exception of the Presence Detect
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
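Because unimplemented registers simply come back as 0 through *val, callers of this helper do not need their own capability-version checks. For example, a hedged sketch that reads the negotiated link state; it assumes the usual PCI_EXP_LNKSTA_* constants from pci_regs.h are available in this port:

/* usage sketch, not part of the commit */
static void report_pcie_link_sketch(struct pci_dev *dev)
{
    u16 lnksta = 0;

    if (pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta))
        return;                                 /* config-space read failed */

    /* current link speed (gen) and negotiated width */
    printk(KERN_INFO "PCIe link: gen%d x%d\n",
           lnksta & PCI_EXP_LNKSTA_CLS,
           (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}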
 
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
int ret;
 
*val = 0;
if (pos & 3)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_dword() fails, it may
* have been written as 0xFFFFFFFF if hardware error happens
* during pci_read_config_dword().
*/
if (ret)
*val = 0;
return ret;
}
 
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
 
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
if (pos & 1)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);
 
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
if (pos & 3)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
 
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
u16 clear, u16 set)
{
int ret;
u16 val;
 
ret = pcie_capability_read_word(dev, pos, &val);
if (!ret) {
val &= ~clear;
val |= set;
ret = pcie_capability_write_word(dev, pos, val);
}
 
return ret;
}
 
 
 
int pcie_get_readrq(struct pci_dev *dev)
{
u16 ctl;
 
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
 
return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
 
/**
* pcie_set_readrq - set PCI Express maximum memory read request
* @dev: PCI device to query
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
* If possible sets maximum memory read request in bytes
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
 
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
 
v = (ffs(rq) - 8) << 12;
 
return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
}
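pcie_get_readrq and pcie_set_readrq translate between bytes and the 3-bit encoding in PCI_EXP_DEVCTL_READRQ: the field holds log2(bytes) - 7, placed in bits 14:12 of DEVCTL. A worked round-trip with rq = 512, independent of any hardware:

#include <stdio.h>

int main(void)
{
    int rq = 512;                                   /* requested size, bytes */

    /* encode the way pcie_set_readrq() does: ffs(512) == 10, (10 - 8) << 12 */
    unsigned int field = (__builtin_ffs(rq) - 8) << 12;

    /* decode the way pcie_get_readrq() does, using the 0x7000 READRQ mask   */
    int decoded = 128 << ((field & 0x7000) >> 12);

    printf("field=%#x decoded=%d\n", field, decoded); /* field=0x2000 decoded=512 */
    return 0;
}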
 
/drivers/video/drm/i915/utils.c
588,25 → 588,6
MutexUnlock(&kmap_mutex);
}
 
size_t strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
size_t len = strlen(src);
size_t res = dsize + len;
 
/* This would be a bug */
BUG_ON(dsize >= count);
 
dest += dsize;
count -= dsize;
if (len >= count)
len = count-1;
memcpy(dest, src, len);
dest[len] = 0;
return res;
}
EXPORT_SYMBOL(strlcat);
 
void msleep(unsigned int msecs)
{
msecs /= 10;