/* ==== file: drivers/include/asm-generic/barrier.h (lines 1-211) ==== */
/*
 * Generic barrier definitions, originally based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
/* Single no-op instruction; an arch may pre-define its own form. */
#ifndef nop
#define nop() asm volatile ("nop")
#endif
/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 *
 * Every macro below is wrapped in #ifndef so an architecture header that
 * is included before this file can override any subset and inherit the
 * rest of the fallback chain.
 */
#ifndef mb
#define mb() barrier()
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() mb()
#endif
/* DMA barriers: default to the ordinary read/write barriers. */
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
#ifndef dma_wmb
#define dma_wmb() wmb()
#endif
/* No-op unless the arch needs ordering for dependent reads (e.g. Alpha). */
#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif
/*
 * __smp_*() always emit the SMP-strength barrier, regardless of
 * CONFIG_SMP.  They exist so the virt_*() helpers below can talk to an
 * SMP host even from a UP guest kernel.
 */
#ifndef __smp_mb
#define __smp_mb() mb()
#endif
#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif
#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif
#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends() read_barrier_depends()
#endif
/*
 * smp_*() use the real barriers on SMP builds but degrade to plain
 * compiler barriers (or nothing) on UP builds, where CPU-vs-CPU
 * ordering cannot be observed.
 */
#ifdef CONFIG_SMP
#ifndef smp_mb
#define smp_mb() __smp_mb()
#endif
#ifndef smp_rmb
#define smp_rmb() __smp_rmb()
#endif
#ifndef smp_wmb
#define smp_wmb() __smp_wmb()
#endif
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() __smp_read_barrier_depends()
#endif
#else /* !CONFIG_SMP */
#ifndef smp_mb
#define smp_mb() barrier()
#endif
#ifndef smp_rmb
#define smp_rmb() barrier()
#endif
#ifndef smp_wmb
#define smp_wmb() barrier()
#endif
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() do { } while (0)
#endif
#endif /* CONFIG_SMP */
/* Store then full barrier: later accesses cannot pass the store. */
#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif
/* Full-barrier fallbacks for ordering around atomic RMW operations. */
#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif
#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif
/* Release store: full barrier before the store orders all prior accesses. */
#ifndef __smp_store_release
#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif
/* Acquire load: full barrier after the load orders all later accesses. */
#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	___p1; \
})
#endif
/* As above: the smp_ variants pick real or compiler-only versions. */
#ifdef CONFIG_SMP
#ifndef smp_store_mb
#define smp_store_mb(var, value) __smp_store_mb(var, value)
#endif
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() __smp_mb__before_atomic()
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() __smp_mb__after_atomic()
#endif
#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif
#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif
#else /* !CONFIG_SMP */
#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif
#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})
#endif
#endif
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
/* ==== file: drivers/include/asm-generic/bug.h (lines 1-211) ==== */
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
#ifdef CONFIG_GENERIC_BUG
/* Flag bits stored in struct bug_entry::flags. */
#define BUGFLAG_WARNING (1 << 0)
/* Taint value is packed into bits 8 and up, alongside the warning flag. */
#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
/*
 * One record per BUG()/WARN() site, emitted into a dedicated section.
 * With GENERIC_BUG_RELATIVE_POINTERS, addresses are stored as signed
 * displacements instead of absolute pointers to shrink the table.
 */
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	unsigned long bug_addr;
#else
	signed int bug_addr_disp;
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/* Source file/line of the site, only kept for verbose reporting. */
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	const char *file;
#else
	signed int file_disp;
#endif
	unsigned short line;
#endif
	unsigned short flags;
};
#endif /* CONFIG_GENERIC_BUG */
/*
 * Don't use BUG() or BUG_ON() unless there's really no way out; one
 * example might be detecting data structure corruption in the middle
 * of an operation that can't be backed out of. If the (sub)system
 * can somehow continue operating, perhaps with reduced functionality,
 * it's probably not BUG-worthy.
 *
 * If you're tempted to BUG(), think again: is completely giving up
 * really the *only* solution? There are usually better options, where
 * users don't need to reboot ASAP and can mostly shut down cleanly.
 */
#ifndef HAVE_ARCH_BUG
/* Generic fallback: report the location, then halt via panic(). */
#define BUG() do { \
	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
	panic("BUG!"); \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
/*
 * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
 * significant issues that need prompt attention if they should ever
 * appear at runtime. Use the versions with printk format strings
 * to provide better diagnostics.
 */
#ifndef __WARN_TAINT
/* No arch taint hook: route warnings through the slowpath helpers. */
extern __printf(3, 4)
void warn_slowpath_fmt(const char *file, const int line,
		       const char *fmt, ...);
extern __printf(4, 5)
void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
			     const char *fmt, ...);
extern void warn_slowpath_null(const char *file, const int line);
#define WANT_WARN_ON_SLOWPATH
#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
#define __WARN_printf_taint(taint, arg...) \
	warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
/* Arch provides __WARN_TAINT: print first, then trigger the arch trap. */
#define __WARN() __WARN_TAINT(TAINT_WARN)
#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#define __WARN_printf_taint(taint, arg...) \
	do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
/* All WARN* forms evaluate to the (boolean) condition for use in if(). */
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		__WARN(); \
	unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		__WARN_printf(format); \
	unlikely(__ret_warn_on); \
})
#endif
#define WARN_TAINT(condition, taint, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		__WARN_printf_taint(taint, format); \
	unlikely(__ret_warn_on); \
})
/*
 * _ONCE variants latch a static flag so each call site fires at most
 * once; the flag lives in .data.unlikely to keep hot data compact.
 */
#define WARN_ON_ONCE(condition) ({ \
	static bool __section(.data.unlikely) __warned; \
	int __ret_warn_once = !!(condition); \
	\
	if (unlikely(__ret_warn_once)) \
		if (WARN_ON(!__warned)) \
			__warned = true; \
	unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
	static bool __section(.data.unlikely) __warned; \
	int __ret_warn_once = !!(condition); \
	\
	if (unlikely(__ret_warn_once)) \
		if (WARN(!__warned, format)) \
			__warned = true; \
	unlikely(__ret_warn_once); \
})
#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
	static bool __section(.data.unlikely) __warned; \
	int __ret_warn_once = !!(condition); \
	\
	if (unlikely(__ret_warn_once)) \
		if (WARN_TAINT(!__warned, taint, format)) \
			__warned = true; \
	unlikely(__ret_warn_once); \
})
#else /* !CONFIG_BUG */
/* BUG support compiled out: conditions are still evaluated, but nothing
 * is reported.  BUG() spins forever since there is no way to report. */
#ifndef HAVE_ARCH_BUG
#define BUG() do {} while (1)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (condition) ; } while (0)
#endif
#ifndef HAVE_ARCH_WARN_ON
#define WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	no_printk(format); \
	unlikely(__ret_warn_on); \
})
#endif
#define WARN_ON_ONCE(condition) WARN_ON(condition)
#define WARN_ONCE(condition, format...) WARN(condition, format)
#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
#endif
/*
 * WARN_ON_SMP() is for cases that the warning is either
 * meaningless for !SMP or may even cause failures.
 * This is usually used for cases that we have
 * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
 * returns 0 for uniprocessor settings.
 * It can also be used with values that are only defined
 * on SMP:
 *
 * struct foo {
 *  [...]
 * #ifdef CONFIG_SMP
 *  int bar;
 * #endif
 * };
 *
 * void func(struct foo *zoot)
 * {
 *  WARN_ON_SMP(!zoot->bar);
 *
 * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
 * and should be a nop and return false for uniprocessor.
 *
 * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
 * and x is true.
 */
#ifdef CONFIG_SMP
# define WARN_ON_SMP(x) WARN_ON(x)
#else
/*
 * Use of ({0;}) because WARN_ON_SMP(x) may be used either as
 * a stand alone line statement or as a condition in an if ()
 * statement.
 * A simple "0" would cause gcc to give a "statement has no effect"
 * warning.
 */
# define WARN_ON_SMP(x) ({0;})
#endif
#endif /* __ASSEMBLY__ */
#endif
/* ==== file: drivers/include/asm-generic/iomap.h (lines 1-102) ==== */
#ifndef __GENERIC_IO_H
#define __GENERIC_IO_H
#include <linux/linkage.h>
#include <asm/byteorder.h>
/*
 * These are the "generic" interfaces for doing new-style
 * memory-mapped or PIO accesses. Architectures may do
 * their own arch-optimized versions, these just act as
 * wrappers around the old-style IO register access functions:
 * read[bwl]/write[bwl]/in[bwl]/out[bwl]
 *
 * Don't include this directly, include it from <asm/io.h>.
 */
/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access, these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines just encode the PIO/MMIO as part of the
 * cookie, and coldly assume that the MMIO IO mappings are not
 * in the low address range. Architectures for which this is not
 * true can't use this generic implementation.
 */
extern unsigned int ioread8(void __iomem *);
extern unsigned int ioread16(void __iomem *);
extern unsigned int ioread16be(void __iomem *);
/*
 * NOTE(review): this copy diverges from the upstream generic header —
 * the 32-bit accessors are not externs but local inline wrappers
 * around readl()/writel(), i.e. MMIO-only here.
 */
//extern unsigned int ioread32(void __iomem *);
#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif
extern unsigned int ioread32be(void __iomem *);
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
//extern void iowrite32(u32, void __iomem *);
#ifndef iowrite32
/* Inline MMIO wrapper, mirrors ioread32() above. */
//#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif
extern void iowrite32be(u32, void __iomem *);
/*
 * "string" versions of the above. Note that they
 * use native byte ordering for the accesses (on
 * the assumption that IO and memory agree on a
 * byte order, and CPU byteorder is irrelevant).
 *
 * They do _not_ update the port address. If you
 * want MMIO that copies stuff laid out in MMIO
 * memory across multiple ports, use "memcpy_toio()"
 * and friends.
 */
extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
/* Write-combining / write-through remap: fall back to uncached. */
#ifndef ARCH_HAS_IOREMAP_WC
#define ioremap_wc ioremap_nocache
#endif
#ifndef ARCH_HAS_IOREMAP_WT
#define ioremap_wt ioremap_nocache
#endif
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#elif defined(CONFIG_GENERIC_IOMAP)
struct pci_dev;
/* No PCI: unmapping a PCI cookie is a no-op. */
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{ }
#endif
#include <asm-generic/pci_iomap.h>
#endif
/* ==== file: drivers/include/asm-generic/pci_iomap.h (lines 1-59) ==== */
/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_PCI_IOMAP_H
#define __ASM_GENERIC_PCI_IOMAP_H
struct pci_dev;
#ifdef CONFIG_PCI
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
/* _wc variants request write-combining; _range variants map a window
 * of the BAR starting at offset, at most maxlen bytes. */
extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
				     unsigned long offset,
				     unsigned long maxlen);
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
					unsigned long offset,
					unsigned long maxlen);
/* Create a virtual mapping cookie for a port on a given PCI device.
 * Do not call this directly, it exists to make it easier for architectures
 * to override */
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
				      unsigned int nr);
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
#elif defined(CONFIG_GENERIC_PCI_IOMAP)
/* PCI disabled but generic iomap selected: stub everything to NULL so
 * callers compile; they must handle a NULL cookie. */
static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
	return NULL;
}
static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
{
	return NULL;
}
static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
					    unsigned long offset,
					    unsigned long maxlen)
{
	return NULL;
}
static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
					       unsigned long offset,
					       unsigned long maxlen)
{
	return NULL;
}
#endif
#endif /* __ASM_GENERIC_PCI_IOMAP_H */