Subversion Repositories Kolibri OS

Compare Revisions

Rev 6081 → Rev 6082

/drivers/ddk/Makefile
14,7 → 14,7
DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE
 
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -fno-ident
 
NAME:= libddk
 
36,7 → 36,6
linux/firmware.c \
linux/hdmi.c \
linux/kasprintf.c \
linux/kref.c \
linux/list_sort.c \
linux/mutex.c \
linux/rbtree.c \
/drivers/ddk/core.S
27,6 → 27,7
.global _FreeKernelSpace
.global _FreePage
 
.global _GetClockNs
.global _GetCpuFreq
.global _GetDisplay
.global _GetEvent
103,6 → 104,8
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _FreePage; .scl 2; .type 32; .endef
 
.def _GetClockNs; .scl 2; .type 32; .endef
 
.def _GetDisplay; .scl 2; .type 32; .endef
 
181,6 → 184,7
_FreeKernelSpace:
_FreePage:
 
_GetClockNs:
_GetCpuFreq:
_GetDisplay:
_GetEvent:
259,6 → 263,7
.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:FreePage" #
 
.ascii " -export:GetClockNs" #
.ascii " -export:GetCpuFreq" #
.ascii " -export:GetDisplay" # stdcall
.ascii " -export:GetEvent" #
/drivers/ddk/linux/bitmap.c
41,36 → 41,6
* for the best explanations of this ordering.
*/
 
int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
 
if (bits % BITS_PER_LONG)
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
 
return 1;
}
EXPORT_SYMBOL(__bitmap_empty);
 
int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
 
if (bits % BITS_PER_LONG)
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
 
return 1;
}
EXPORT_SYMBOL(__bitmap_full);
 
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
103,18 → 73,18
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
* @nbits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits)
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned shift, unsigned nbits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = (1UL << left) - 1;
unsigned k, lim = BITS_TO_LONGS(nbits);
unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
 
126,17 → 96,15
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1 && left)
if (off + k + 1 == lim - 1)
upper &= mask;
upper <<= (BITS_PER_LONG - rem);
}
lower = src[off + k];
if (left && off + k == lim - 1)
if (off + k == lim - 1)
lower &= mask;
dst[k] = lower >> rem;
if (rem)
dst[k] |= upper << (BITS_PER_LONG - rem);
if (left && k == lim - 1)
dst[k] &= mask;
lower >>= rem;
dst[k] = lower | upper;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
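Review note: with the old left/mask special-casing gone, BITMAP_LAST_WORD_MASK(nbits) now always bounds the last source word, and the semantics are easy to state: bits move toward position 0 and whatever crosses bit 0 is dropped. A minimal caller sketch (hypothetical, assuming the usual kernel bitmap helpers):

DECLARE_BITMAP(src, 64);
DECLARE_BITMAP(dst, 64);

bitmap_zero(src, 64);
__set_bit(10, src);
__bitmap_shift_right(dst, src, 4, 64);
/* bit 10 lands on bit 6: test_bit(6, dst) is now true;
 * anything shifted below bit 0 is lost */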
149,7 → 117,7
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
* @nbits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
156,11 → 124,12
* and those MS bits shifted off the top are lost.
*/
 
void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits)
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
int k;
unsigned int lim = BITS_TO_LONGS(nbits);
unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
 
169,17 → 138,11
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1];
lower = src[k - 1] >> (BITS_PER_LONG - rem);
else
lower = 0;
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
dst[k + off] = upper << rem;
if (rem)
dst[k + off] |= lower >> (BITS_PER_LONG - rem);
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
upper = src[k] << rem;
dst[k + off] = lower | upper;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
382,10 → 345,10
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
* @bits: number of valid bit positions in @buf
* @pos: a bit position in @buf (0 <= @pos < @nbits)
* @nbits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @bits) to the
* Map the bit at position @pos in @buf (of length @nbits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
397,56 → 360,40
*
* The bit positions 0 through @nbits-1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
{
int i, ord;
 
if (pos < 0 || pos >= bits || !test_bit(pos, buf))
if (pos >= nbits || !test_bit(pos, buf))
return -1;
 
i = find_first_bit(buf, bits);
ord = 0;
while (i < pos) {
i = find_next_bit(buf, bits, i + 1);
ord++;
return __bitmap_weight(buf, pos);
}
BUG_ON(i != pos);
 
return ord;
}
 
/**
* bitmap_ord_to_pos - find position of n-th set bit in bitmap
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
* @bits: number of valid bit positions in @buf
* @nbits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
* Value of @ord should be in range 0 <= @ord < weight(buf), else
* results are undefined.
* Value of @ord should be in range 0 <= @ord < weight(buf). If @ord
* >= weight(buf), returns @nbits.
*
* If for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
* and all other @ord values return undefined values. When @ord value 3
* and all other @ord values returns @nbits. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
* The bit positions 0 through @bits are valid positions in @buf.
* The bit positions 0 through @nbits-1 are valid positions in @buf.
*/
int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits)
{
int pos = 0;
unsigned int pos;
 
if (ord >= 0 && ord < bits) {
int i;
 
for (i = find_first_bit(buf, bits);
i < bits && ord > 0;
i = find_next_bit(buf, bits, i + 1))
for (pos = find_first_bit(buf, nbits);
pos < nbits && ord;
pos = find_next_bit(buf, nbits, pos + 1))
ord--;
if (i < bits && ord == 0)
pos = i;
}
 
return pos;
}
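Review note: the rewritten helpers remain inverses of each other on set bits. A hedged example matching the scenario in the comments (only bits 4 through 7 set in a 16-bit @buf):

int ord = bitmap_pos_to_ord(buf, 7, 16);          /* 3: bit 7 is the 3rd set bit (0-based) */
unsigned int pos = bitmap_ord_to_pos(buf, 3, 16); /* 7 */
/* boundary cases: pos_to_ord of a clear bit returns -1,
 * ord_to_pos with ord >= weight(buf) returns @nbits (16 here) */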
457,7 → 404,7
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
* @nbits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
485,22 → 432,22
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
int bits)
unsigned int nbits)
{
int oldbit, w;
unsigned int oldbit, w;
 
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, bits);
bitmap_zero(dst, nbits);
 
w = bitmap_weight(new, bits);
for_each_set_bit(oldbit, src, bits) {
int n = bitmap_pos_to_ord(old, oldbit, bits);
w = bitmap_weight(new, nbits);
for_each_set_bit(oldbit, src, nbits) {
int n = bitmap_pos_to_ord(old, oldbit, nbits);
 
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
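Review note: a hedged illustration of the remap contract with small maps:

/* old = {1, 3}, new = {5, 7}: the n-th set bit of old is sent to the
 * n-th set bit of new; all bitmaps assumed 16 bits wide */
bitmap_remap(dst, src, old, new, 16);
/* a set bit 1 in src becomes bit 5 in dst, a set bit 3 becomes bit 7;
 * bits not covered by old are copied to the same position (identity map) */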
557,7 → 504,7
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
* using the the map { <n, m> | the n-th bit of @relmap is the
* using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
644,9 → 591,9
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits)
const unsigned long *relmap, unsigned int bits)
{
int n, m; /* same meaning as in above comment */
unsigned int n, m; /* same meaning as in above comment */
 
if (dst == orig) /* following doesn't handle inplace mappings */
return;
677,7 → 624,7
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
* @bits: number of bits in each of these bitmaps
* @nbits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
684,15 → 631,15
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits)
unsigned int sz, unsigned int nbits)
{
int oldbit;
unsigned int oldbit;
 
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
bitmap_zero(dst, nbits);
 
for_each_set_bit(oldbit, orig, bits)
for_each_set_bit(oldbit, orig, nbits)
set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
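Review note: bitmap_fold just wraps positions modulo @sz; for example:

/* orig has bits 3 and 13 set, sz = 8, nbits = 16 */
bitmap_fold(dst, orig, 8, 16);
/* dst ends up with bits 3 (3 % 8) and 5 (13 % 8) set */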
845,16 → 792,17
*
* Require nbits % BITS_PER_LONG == 0.
*/
void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
#ifdef __BIG_ENDIAN
void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned long *d = dst;
int i;
unsigned int i;
 
for (i = 0; i < nbits/BITS_PER_LONG; i++) {
if (BITS_PER_LONG == 64)
d[i] = cpu_to_le64(src[i]);
dst[i] = cpu_to_le64(src[i]);
else
d[i] = cpu_to_le32(src[i]);
dst[i] = cpu_to_le32(src[i]);
}
}
EXPORT_SYMBOL(bitmap_copy_le);
#endif
/drivers/ddk/linux/mutex.c
24,6 → 24,27
#include <linux/export.h>
#include <linux/spinlock.h>
#include <syscall.h>
 
struct kos_taskdata
{
u32 event_mask;
u32 pid;
u16 r0;
u8 state;
u8 r1;
u16 r2;
u8 wnd_number;
u8 r3;
u32 mem_start;
u32 counter_sum;
u32 counter_add;
u32 cpu_usage;
}__attribute__((packed));
 
static inline void mutex_set_owner(struct mutex *lock)
{
}
 
/*
* A negative mutex count indicates that waiters are sleeping waiting for the
* mutex.
43,43 → 64,29
 
}
 
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
struct ww_acquire_ctx *ww_ctx)
static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
/*
* If this WARN_ON triggers, you used ww_mutex_lock to acquire,
* but released with a normal mutex_unlock in this call.
*
* This should never happen, always use ww_mutex_unlock.
*/
DEBUG_LOCKS_WARN_ON(ww->ctx);
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
/*
* Not quite done after calling ww_acquire_done() ?
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
if (!hold_ctx)
return 0;
 
if (ww_ctx->contending_lock) {
/*
* After -EDEADLK you tried to
* acquire a different ww_mutex? Bad!
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
if (unlikely(ctx == hold_ctx))
return -EALREADY;
 
/*
* You called ww_mutex_lock after receiving -EDEADLK,
* but 'forgot' to unlock everything else first?
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
ww_ctx->contending_lock = NULL;
if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
(ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
return -EDEADLK;
}
 
/*
* Naughty, using a different class will lead to undefined behavior!
*/
DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
return 0;
}
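Review note: the stamp comparison is the wait/wound ordering test, written to survive counter wraparound: the unsigned subtraction falls in [0, LONG_MAX] exactly when @ctx took its stamp after @hold_ctx. A hedged sketch of the two outcomes:

/* stamps come from an increasing counter (handed out at ww_acquire_init) */
/* hold_ctx->stamp = 5, ctx->stamp = 9: 9 - 5 = 4 <= LONG_MAX
 *     -> we are younger, return -EDEADLK and back off */
/* hold_ctx->stamp = 9, ctx->stamp = 5: 5 - 9 wraps to ULONG_MAX - 3 > LONG_MAX
 *     -> we are older, keep waiting for the lock */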
 
 
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
struct ww_acquire_ctx *ww_ctx)
{
ww_ctx->acquired++;
}
 
97,21 → 104,136
MutexUnlock(&lock->base);
}
 
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
MutexLock(&lock->base);
if (unlikely(atomic_dec_return(count) < 0))
return -1;
else
return 0;
}
 
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
u32 flags;
struct mutex_waiter *cur;
 
ww_mutex_lock_acquired(lock, ctx);
 
lock->ctx = ctx;
 
return 0;
/*
* The lock->ctx update should be visible on all cores before
* the atomic read is done, otherwise contended waiters might be
* missed. The contended waiters will either see ww_ctx == NULL
* and keep spinning, or it will acquire wait_lock, add itself
* to waiter list and sleep.
*/
smp_mb(); /* ^^^ */
 
/*
* Check if lock is contended, if not there is nobody to wake up
*/
if (likely(atomic_read(&lock->base.count) == 0))
return;
 
/*
* Uh oh, we raced in fastpath, wake up everyone in this case,
* so they can see the new lock->ctx.
*/
flags = safe_cli();
list_for_each_entry(cur, &lock->base.wait_list, list) {
((struct kos_taskdata*)cur->task)->state = 0;
}
safe_sti(flags);
}
 
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
struct mutex_waiter *cur;
 
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
MutexLock(&lock->base);
ww_mutex_lock_acquired(lock, ctx);
lock->ctx = ctx;
 
return 0;
/*
* Give any possible sleeping processes the chance to wake up,
* so they can recheck if they have to back off.
*/
list_for_each_entry(cur, &lock->base.wait_list, list) {
((struct kos_taskdata*)cur->task)->state = 0;
}
}
 
int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
struct mutex *lock;
struct mutex_waiter waiter;
struct kos_taskdata* taskdata;
u32 eflags;
int ret = 0;
 
lock = &ww->base;
taskdata = (struct kos_taskdata*)(0x80003010);
waiter.task = (u32*)taskdata;
 
eflags = safe_cli();
 
list_add_tail(&waiter.list, &lock->wait_list);
 
for(;;)
{
if( atomic_xchg(&lock->count, -1) == 1)
break;
 
if (ctx->acquired > 0) {
ret = __ww_mutex_lock_check_stamp(lock, ctx);
if (ret)
goto err;
};
taskdata->state = 1;
change_task();
};
 
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
 
ww_mutex_set_context_slowpath(ww, ctx);
 
err:
list_del(&waiter.list);
safe_sti(eflags);
 
return ret;
}
 
 
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
 
ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
if (likely(!ret)) {
ww_mutex_set_context_fastpath(lock, ctx);
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_slowpath(lock, ctx);
return ret;
}
 
 
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
 
ret = __mutex_fastpath_lock_retval(&lock->base.count);
 
if (likely(!ret)) {
ww_mutex_set_context_fastpath(lock, ctx);
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_slowpath(lock, ctx);
return ret;
}
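Review note: with the fast and slow paths in place this behaves like the Linux wound/wait mutex. A hedged caller sketch, assuming the usual ww_mutex API from the headers this DDK mirrors (my_ww_class, obj_a/obj_b and ww_acquire_init()/ww_mutex_unlock() are not part of this diff):

struct ww_acquire_ctx ctx;
int ret;

ww_acquire_init(&ctx, &my_ww_class);            /* stamps the context */

ret = __ww_mutex_lock(&obj_a->lock, &ctx);
if (ret == 0) {
        ret = __ww_mutex_lock(&obj_b->lock, &ctx);
        if (ret == -EDEADLK) {
                /* an older context holds obj_b->lock: release what we
                 * hold so it can make progress, then retry the whole set */
                ww_mutex_unlock(&obj_a->lock);
        }
}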
/drivers/ddk/linux/scatterlist.c
54,7 → 54,39
}
EXPORT_SYMBOL(sg_nents);
 
/**
* sg_nents_for_len - return total count of entries in scatterlist
* needed to satisfy the supplied length
* @sg: The scatterlist
* @len: The total required length
*
* Description:
* Determines the number of entries in sg that are required to meet
* the supplied length, taking into account chaining as well
*
* Returns:
* the number of sg entries needed, negative error on failure
*
**/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
int nents;
u64 total;
 
if (!len)
return 0;
 
for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
nents++;
total += sg->length;
if (total >= len)
return nents;
}
 
return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
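Review note: the return contract in one hedged example, for a table of three 4096-byte segments:

/* sgl: 3 entries, 4096 bytes each */
sg_nents_for_len(sgl, 4096);    /* -> 1 */
sg_nents_for_len(sgl, 6000);    /* -> 2 (4096 + 4096 >= 6000) */
sg_nents_for_len(sgl, 20000);   /* -> -EINVAL (list holds only 12288 bytes) */
sg_nents_for_len(sgl, 0);       /* -> 0 */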
 
/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
71,9 → 103,6
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
 
80,7 → 109,6
for_each_sg(sgl, sg, nents, i)
ret = sg;
 
#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
360,5 → 388,285
}
EXPORT_SYMBOL(__sg_page_iter_next);
 
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
*
* Description:
* Starts mapping iterator @miter.
*
* Context:
* Don't care.
*/
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags)
{
memset(miter, 0, sizeof(struct sg_mapping_iter));
 
__sg_page_iter_start(&miter->piter, sgl, nents, 0);
WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
 
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
struct scatterlist *sg;
unsigned long pgoffset;
 
if (!__sg_page_iter_next(&miter->piter))
return false;
 
sg = miter->piter.sg;
pgoffset = miter->piter.sg_pgoffset;
 
miter->__offset = pgoffset ? 0 : sg->offset;
miter->__remaining = sg->offset + sg->length -
(pgoffset << PAGE_SHIFT) - miter->__offset;
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
 
return true;
}
 
/**
* sg_miter_skip - reposition mapping iterator
* @miter: sg mapping iter to be skipped
* @offset: number of bytes to skip forward from the current location
*
* Description:
* Sets the offset of @miter to its current location plus @offset bytes.
* If the mapping iterator @miter has already been advanced by
* sg_miter_next(), this stops @miter first.
*
* Context:
* Don't care if @miter is stopped or has not been advanced yet.
* Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
*
* Returns:
* true if @miter contains the valid mapping. false if end of sg
* list is reached.
*/
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
sg_miter_stop(miter);
 
while (offset) {
off_t consumed;
 
if (!sg_miter_get_next_page(miter))
return false;
 
consumed = min_t(off_t, offset, miter->__remaining);
miter->__offset += consumed;
miter->__remaining -= consumed;
offset -= consumed;
}
 
return true;
}
EXPORT_SYMBOL(sg_miter_skip);
 
/**
* sg_miter_next - proceed mapping iterator to the next mapping
* @miter: sg mapping iter to proceed
*
* Description:
* Proceeds @miter to the next mapping. @miter should have been started
* using sg_miter_start(). On successful return, @miter->page,
* @miter->addr and @miter->length point to the current mapping.
*
* Context:
* Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
* till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
* list is reached.
*/
bool sg_miter_next(struct sg_mapping_iter *miter)
{
sg_miter_stop(miter);
 
/*
* Get to the next page if necessary.
* __remaining, __offset is adjusted by sg_miter_stop
*/
if (!sg_miter_get_next_page(miter))
return false;
 
miter->page = sg_page_iter_page(&miter->piter);
miter->consumed = miter->length = miter->__remaining;
 
if (miter->__flags & SG_MITER_ATOMIC)
miter->addr = kmap_atomic(miter->page) + miter->__offset;
else
miter->addr = kmap(miter->page) + miter->__offset;
 
return true;
}
EXPORT_SYMBOL(sg_miter_next);
 
/**
* sg_miter_stop - stop mapping iteration
* @miter: sg mapping iter to be stopped
*
* Description:
* Stops mapping iterator @miter. @miter should have been started
* using sg_miter_start(). A stopped iteration can be
* resumed by calling sg_miter_next() on it. This is useful when
* resources (kmap) need to be released during iteration.
*
* Context:
* Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
* otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
WARN_ON(miter->consumed > miter->length);
 
/* drop resources from the last iteration */
if (miter->addr) {
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
 
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON_ONCE(preemptible());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
 
miter->page = NULL;
miter->addr = NULL;
miter->length = 0;
miter->consumed = 0;
}
}
EXPORT_SYMBOL(sg_miter_stop);
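Review note: the canonical loop over the new iterator, as a hedged sketch (consume() is a hypothetical per-chunk callback):

struct sg_mapping_iter miter;

sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
while (sg_miter_next(&miter)) {
        /* miter.addr maps at most one page; miter.length bytes are valid */
        consume(miter.addr, miter.length);
}
sg_miter_stop(&miter);          /* releases the last mapping */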
 
/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
* @nents: Number of SG entries
* @buf: Where to copy from
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
* @to_buffer: transfer direction (true == from an sg list to a
* buffer, false == from a buffer to an sg list)
*
* Returns the number of copied bytes.
*
**/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer)
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
unsigned long flags;
unsigned int sg_flags = SG_MITER_ATOMIC;
 
if (to_buffer)
sg_flags |= SG_MITER_FROM_SG;
else
sg_flags |= SG_MITER_TO_SG;
 
sg_miter_start(&miter, sgl, nents, sg_flags);
 
if (!sg_miter_skip(&miter, skip))
return false;
 
local_irq_save(flags);
 
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
 
len = min(miter.length, buflen - offset);
 
if (to_buffer)
memcpy(buf + offset, miter.addr, len);
else
memcpy(miter.addr, buf + offset, len);
 
offset += len;
}
 
sg_miter_stop(&miter);
 
local_irq_restore(flags);
return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
 
/**
* sg_copy_from_buffer - Copy from a linear buffer to an SG list
* @sgl: The SG list
* @nents: Number of SG entries
* @buf: Where to copy from
* @buflen: The number of bytes to copy
*
* Returns the number of copied bytes.
*
**/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen)
{
return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
 
/**
* sg_copy_to_buffer - Copy from an SG list to a linear buffer
* @sgl: The SG list
* @nents: Number of SG entries
* @buf: Where to copy to
* @buflen: The number of bytes to copy
*
* Returns the number of copied bytes.
*
**/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen)
{
return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
 
/**
* sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
* @sgl: The SG list
* @nents: Number of SG entries
* @buf: Where to copy from
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
*
* Returns the number of copied bytes.
*
**/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen, off_t skip)
{
return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
 
/**
* sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
* @sgl: The SG list
* @nents: Number of SG entries
* @buf: Where to copy to
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
*
* Returns the number of copied bytes.
*
**/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip)
{
return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
/drivers/ddk/linux/time.c
1,4 → 1,35
/*
* linux/kernel/time.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file contains the interface functions for the various
* time related system calls: time, stime, gettimeofday, settimeofday,
* adjtime
*/
/*
* Modification history kernel/time.c
*
* 1993-09-02 Philip Gladstone
* Created file with time related functions from sched/core.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1589)
* 1999-01-16 Ulrich Windl
* Introduced error checking for many cases in adjtimex().
* Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
* (Even though the technical memorandum forbids it)
* 2004-07-14 Christoph Lameter
* Added getnstimeofday to allow the posix timer functions to return
* with nanosecond accuracy
*/
 
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/math64.h>
 
 
 
45,7 → 76,13
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000LL
 
 
# define USER_HZ 100
/*
* Convert jiffies to milliseconds and back.
*
* Avoid unnecessary multiplications/divisions in the
* two most common HZ cases:
*/
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
60,6 → 97,7
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
 
unsigned int jiffies_to_usecs(const unsigned long j)
{
75,78 → 113,249
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
 
/**
* timespec_trunc - Truncate timespec to a granularity
* @t: Timespec
* @gran: Granularity in ns.
*
* Truncate a timespec to a granularity. Always rounds down. gran must
* not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
*/
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
/* Avoid division in the common cases 1 ns and 1 s. */
if (gran == 1) {
/* nothing */
} else if (gran == NSEC_PER_SEC) {
t.tv_nsec = 0;
} else if (gran > 1 && gran < NSEC_PER_SEC) {
t.tv_nsec -= t.tv_nsec % gran;
} else {
WARN(1, "illegal file time granularity: %u", gran);
}
return t;
}
EXPORT_SYMBOL(timespec_trunc);
 
/*
* When we convert to jiffies then we interpret incoming values
* the following way:
* mktime64 - Converts date to seconds.
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
* => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
* [For the Julian calendar (which was used in Russia before 1917,
* Britain & colonies before 1752, anywhere else before 1582,
* and is still in use by some communities) leave out the
* -year/100+year/400 terms, and add 10.]
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
* This algorithm was first published by Gauss (I think).
*/
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec)
{
unsigned int mon = mon0, year = year0;
 
/* 1..12 -> 11,12,1..10 */
if (0 >= (int) (mon -= 2)) {
mon += 12; /* Puts Feb last since it has leap day */
year -= 1;
}
 
return ((((time64_t)
(year/4 - year/100 + year/400 + 367*mon/12 + day) +
year*365 - 719499
)*24 + hour /* now have hours */
)*60 + min /* now have minutes */
)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
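Review note: a worked check against the comment's own example date. 1980-12-31 23:59:59 UTC is one second before 347155200 (1981-01-01 00:00:00), so:

time64_t t = mktime64(1980, 12, 31, 23, 59, 59);
/* t == 347155199: 4017 days (3652 for 1970..1979 plus 365 within 1980)
 * give 347068800 seconds, plus 23*3600 + 59*60 + 59 = 86399 */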
 
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor
* @ts: pointer to timespec variable to be set
* @sec: seconds to set
* @nsec: nanoseconds to set
*
* We must also be careful about 32-bit overflows.
* Set seconds and nanoseconds field of a timespec variable and
* normalize to the timespec storage format
*
* Note: The tv_nsec part is always in the range of
* 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative !
*/
unsigned long msecs_to_jiffies(const unsigned int m)
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
while (nsec >= NSEC_PER_SEC) {
/*
* Negative value, means infinite timeout:
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
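Review note: the normalization in two hedged examples:

struct timespec ts;

set_normalized_timespec(&ts, 1, 1500000000LL);
/* ts = { .tv_sec = 2, .tv_nsec = 500000000 } */

set_normalized_timespec(&ts, 1, -300000000LL);
/* ts = { .tv_sec = 0, .tv_nsec = 700000000 } */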
 
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/**
* ns_to_timespec - Convert nanoseconds to timespec
* @nsec: the nanoseconds value to be converted
*
* Returns the timespec representation of the nsec parameter.
*/
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
s32 rem;
 
if (!nsec)
return (struct timespec) {0, 0};
 
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
 
return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
 
/**
* ns_to_timeval - Convert nanoseconds to timeval
* @nsec: the nanoseconds value to be converted
*
* Returns the timeval representation of the nsec parameter.
*/
struct timeval ns_to_timeval(const s64 nsec)
{
struct timespec ts = ns_to_timespec(nsec);
struct timeval tv;
 
tv.tv_sec = ts.tv_sec;
tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
 
return tv;
}
EXPORT_SYMBOL(ns_to_timeval);
 
#if BITS_PER_LONG == 32
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
* @ts: pointer to timespec variable to be set
* @sec: seconds to set
* @nsec: nanoseconds to set
*
* Set seconds and nanoseconds field of a timespec variable and
* normalize to the timespec storage format
*
* Note: The tv_nsec part is always in the range of
* 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative !
*/
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
while (nsec >= NSEC_PER_SEC) {
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice
* round multiple of HZ, divide with the factor between them,
* but round upwards:
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of
* 1000 - simply multiply with the factor between them.
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);
 
/**
* ns_to_timespec64 - Convert nanoseconds to timespec64
* @nsec: the nanoseconds value to be converted
*
* But first make sure the multiplication result cannot
* overflow:
* Returns the timespec64 representation of the nsec parameter.
*/
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
struct timespec64 ns_to_timespec64(const s64 nsec)
{
struct timespec64 ts;
s32 rem;
 
return m * (HZ / MSEC_PER_SEC);
#else
if (!nsec)
return (struct timespec64) {0, 0};
 
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
 
return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
#endif
/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
*
* conversion is done as follows:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
* for the details see __msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* the _msecs_to_jiffies helpers are the HZ dependent conversion
* routines found in include/linux/jiffies.h
*/
unsigned long __msecs_to_jiffies(const unsigned int m)
{
/*
* Generic case - multiply, round and divide. But first
* check that if we are doing a net multiplication, that
* we wouldn't overflow:
* Negative value, means infinite timeout:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
>> MSEC_TO_HZ_SHR32;
#endif
return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(msecs_to_jiffies);
EXPORT_SYMBOL(__msecs_to_jiffies);
 
unsigned long usecs_to_jiffies(const unsigned int u)
unsigned long __usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
#endif
return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(usecs_to_jiffies);
EXPORT_SYMBOL(__usecs_to_jiffies);
 
/*
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note
164,7 → 373,7
* value to a scaled second value.
*/
static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
__timespec64_to_jiffies(u64 sec, long nsec)
{
nsec = nsec + TICK_NSEC - 1;
 
172,22 → 381,27
sec = MAX_SEC_IN_JIFFIES;
nsec = 0;
}
return (((u64)sec * SEC_CONVERSION) +
return ((sec * SEC_CONVERSION) +
(((u64)nsec * NSEC_CONVERSION) >>
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
}
 
static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
return __timespec64_to_jiffies((u64)sec, nsec);
}
 
unsigned long
timespec_to_jiffies(const struct timespec *value)
timespec64_to_jiffies(const struct timespec64 *value)
{
return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);
 
EXPORT_SYMBOL(timespec_to_jiffies);
 
void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
198,8 → 412,186
NSEC_PER_SEC, &rem);
value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);
EXPORT_SYMBOL(jiffies_to_timespec64);
 
/*
* We could use a similar algorithm to timespec_to_jiffies (with a
* different multiplier for usec instead of nsec). But this has a
* problem with rounding: we can't exactly add TICK_NSEC - 1 to the
* usec value, since it's not necessarily integral.
*
* We could instead round in the intermediate scaled representation
* (i.e. in units of 1/2^(large scale) jiffies) but that's also
* perilous: the scaling introduces a small positive error, which
* combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
* units to the intermediate before shifting) leads to accidental
* overflow and overestimates.
*
* At the cost of one additional multiplication by a constant, just
* use the timespec implementation.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
return __timespec_to_jiffies(value->tv_sec,
value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
 
void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u32 rem;
 
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);
 
/*
* Convert jiffies/jiffies_64 to clock_t and back.
*/
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
return x * (USER_HZ / HZ);
# else
return x / (HZ / USER_HZ);
# endif
#else
return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
 
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
if (x >= ~0UL / (HZ / USER_HZ))
return ~0UL;
return x * (HZ / USER_HZ);
#else
/* Don't worry about loss of precision here .. */
if (x >= ~0UL / HZ * USER_HZ)
return ~0UL;
 
/* .. but do try to contain it here */
return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
 
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
x = div_u64(x, HZ / USER_HZ);
# else
/* Nothing to do */
# endif
#else
/*
* There are better ways that don't overflow early,
* but even this doesn't overflow in hundreds of years
* in 64 bits, so..
*/
x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);
 
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
 
/**
* nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
*
* @n: nsecs in u64
*
* Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
* And this doesn't return MAX_JIFFY_OFFSET since this function is designed
* for scheduler, not for use in device drivers to calculate timeout value.
*
* note:
* NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
* ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
*/
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
/* overflow after 292 years if HZ = 1024 */
return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* Generic case - optimized for cases where HZ is a multiple of 3.
* overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
*/
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
 
/**
* nsecs_to_jiffies - Convert nsecs in u64 to jiffies
*
* @n: nsecs in u64
*
* Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
* And this doesn't return MAX_JIFFY_OFFSET since this function is designed
* for scheduler, not for use in device drivers to calculate timeout value.
*
* note:
* NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
* ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
*/
unsigned long nsecs_to_jiffies(u64 n)
{
return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
 
/*
* Add two timespec values and do a safety check for overflow.
* It's assumed that both values are valid (>= 0)
*/
struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs)
{
struct timespec res;
 
set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
 
if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
res.tv_sec = TIME_T_MAX;
 
return res;
}
 
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
217,21 → 609,8
return quotient;
}
 
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
s32 rem;
 
if (!nsec)
return (struct timespec) {0, 0};
 
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
 
return ts;
}
 
 
/drivers/ddk/stdio/vsprintf.c
23,6 → 23,7
#include <linux/ctype.h>
#include <linux/kernel.h>
 
#include <linux/math64.h>
#include <linux/ioport.h>
#include <linux/export.h>
 
29,20 → 30,6
#include <asm/div64.h>
#include <asm/page.h> /* for PAGE_SIZE */
 
 
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
 
static inline s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
 
 
#define ZERO_SIZE_PTR ((void *)16)
 
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
55,6 → 42,7
#define KSTRTOX_OVERFLOW (1U << 31)
 
const char hex_asc[] = "0123456789abcdef";
const char hex_asc_upper[] = "0123456789ABCDEF";
 
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
210,148 → 198,152
{
int i = 0;
 
while (isdigit(**s))
do {
i = i*10 + *((*s)++) - '0';
} while (isdigit(**s));
 
return i;
}
 
/* Decimal conversion is by far the most typical, and is used
* for /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed
* using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
* (with permission from the author, Douglas W. Jones).
/*
* Decimal conversion is by far the most typical, and is used for
* /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed by emitting
* two characters at a time, using a 200 byte lookup table. This
* roughly halves the number of multiplications compared to computing
* the digits one at a time. Implementation strongly inspired by the
* previous version, which in turn used ideas described at
* <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
* from the author, Douglas W. Jones).
*
* It turns out there is precisely one 26 bit fixed-point
* approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
* holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
* range happens to be somewhat larger (x <= 1073741898), but that's
* irrelevant for our purpose.
*
* For dividing a number in the range [10^4, 10^6-1] by 100, we still
* need a 32x32->64 bit multiply, so we simply use the same constant.
*
* For dividing a number in the range [100, 10^4-1] by 100, there are
* several options. The simplest is (x * 0x147b) >> 19, which is valid
* for all x <= 43698.
*/
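Review note: both constants are cheap to verify exhaustively in user space; a standalone sketch (not part of the driver build):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t x;

        /* x/100 == (x * 0x28f5c29) >> 32 for all x in [0, 10^8 - 1] */
        for (x = 0; x < 100000000; x++)
                assert(x / 100 == (uint32_t)(((uint64_t)x * 0x28f5c29) >> 32));

        /* (x * 0x147b) >> 19 divides by 100 for all x <= 43698 */
        for (x = 0; x <= 43698; x++)
                assert(x / 100 == (x * 0x147b) >> 19);

        return 0;
}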
 
#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
/* Formats correctly any integer in [0, 999999999] */
static noinline_for_stack
char *put_dec_full9(char *buf, unsigned q)
{
unsigned r;
static const u16 decpair[100] = {
#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
#undef _
};
 
/*
* Possible ways to approx. divide by 10
* (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
* (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul)
* (x * 0x6667) >> 18 x < 43699
* (x * 0x3334) >> 17 x < 16389
* (x * 0x199a) >> 16 x < 16389
* (x * 0x0ccd) >> 15 x < 16389
* (x * 0x0667) >> 14 x < 2739
* (x * 0x0334) >> 13 x < 1029
* (x * 0x019a) >> 12 x < 1029
* (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386)
* (x * 0x0067) >> 10 x < 179
* (x * 0x0034) >> 9 x < 69 same
* (x * 0x001a) >> 8 x < 69 same
* (x * 0x000d) >> 7 x < 69 same, shortest code (on i386)
* (x * 0x0007) >> 6 x < 19
* See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
* This will print a single '0' even if r == 0, since we would
* immediately jump to out_r where two 0s would be written but only
* one of them accounted for in buf. This is needed by ip4_string
* below. All other callers pass a non-zero value of r.
*/
r = (q * (uint64_t)0x1999999a) >> 32;
*buf++ = (q - 10 * r) + '0'; /* 1 */
q = (r * (uint64_t)0x1999999a) >> 32;
*buf++ = (r - 10 * q) + '0'; /* 2 */
r = (q * (uint64_t)0x1999999a) >> 32;
*buf++ = (q - 10 * r) + '0'; /* 3 */
q = (r * (uint64_t)0x1999999a) >> 32;
*buf++ = (r - 10 * q) + '0'; /* 4 */
r = (q * (uint64_t)0x1999999a) >> 32;
*buf++ = (q - 10 * r) + '0'; /* 5 */
/* Now value is under 10000, can avoid 64-bit multiply */
q = (r * 0x199a) >> 16;
*buf++ = (r - 10 * q) + '0'; /* 6 */
r = (q * 0xcd) >> 11;
*buf++ = (q - 10 * r) + '0'; /* 7 */
q = (r * 0xcd) >> 11;
*buf++ = (r - 10 * q) + '0'; /* 8 */
*buf++ = q + '0'; /* 9 */
return buf;
}
#endif
 
/* Similar to above but do not pad with zeros.
* Code can be easily arranged to print 9 digits too, but our callers
* always call put_dec_full9() instead when the number has 9 decimal digits.
*/
static noinline_for_stack
char *put_dec_trunc8(char *buf, unsigned r)
{
unsigned q;
 
/* Copy of previous function's body with added early returns */
while (r >= 10000) {
q = r + '0';
r = (r * (uint64_t)0x1999999a) >> 32;
*buf++ = q - 10*r;
}
/* 1 <= r < 10^8 */
if (r < 100)
goto out_r;
 
q = (r * 0x199a) >> 16; /* r <= 9999 */
*buf++ = (r - 10 * q) + '0';
if (q == 0)
/* 100 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
 
/* 1 <= q < 10^6 */
if (q < 100)
goto out_q;
 
/* 100 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
 
/* 1 <= r < 10^4 */
if (r < 100)
goto out_r;
 
/* 100 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
out_q:
/* 1 <= q < 100 */
r = q;
out_r:
/* 1 <= r < 100 */
*((u16 *)buf) = decpair[r];
buf += r < 10 ? 1 : 2;
return buf;
r = (q * 0xcd) >> 11; /* q <= 999 */
*buf++ = (q - 10 * r) + '0';
if (r == 0)
return buf;
q = (r * 0xcd) >> 11; /* r <= 99 */
*buf++ = (r - 10 * q) + '0';
if (q == 0)
return buf;
*buf++ = q + '0'; /* q <= 9 */
return buf;
}
 
/* There are two algorithms to print larger numbers.
* One is generic: divide by 1000000000 and repeatedly print
* groups of (up to) 9 digits. It's conceptually simple,
* but requires a (unsigned long long) / 1000000000 division.
*
* Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
* manipulates them cleverly and generates groups of 4 decimal digits.
* It so happens that it does NOT require long long division.
*
* If long is > 32 bits, division of 64-bit values is relatively easy,
* and we will use the first algorithm.
* If long long is > 64 bits (strange architecture with VERY large long long),
* second algorithm can't be used, and we again use the first one.
*
* Else (if long is 32 bits and long long is 64 bits) we use second one.
*/
#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
char *put_dec_full8(char *buf, unsigned r)
{
unsigned q;
 
#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
/* 0 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
 
/* First algorithm: generic */
/* 0 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
 
static
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
 
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
buf += 2;
return buf;
}
 
static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
if (n >= 100*1000*1000) {
while (n >= 1000*1000*1000)
buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
if (n >= 100*1000*1000)
return put_dec_full9(buf, n);
}
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n <= 1.6e11 */
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n < 1e8 */
return put_dec_trunc8(buf, n);
}
 
#else
#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
 
/* Second algorithm: valid only for 64-bit long longs */
static void
put_dec_full4(char *buf, unsigned r)
{
unsigned q;
 
/* See comment in put_dec_full9 for choice of constants */
static noinline_for_stack
void put_dec_full4(char *buf, unsigned q)
{
unsigned r;
r = (q * 0xccd) >> 15;
buf[0] = (q - 10 * r) + '0';
q = (r * 0xcd) >> 11;
buf[1] = (r - 10 * q) + '0';
r = (q * 0xcd) >> 11;
buf[2] = (q - 10 * r) + '0';
buf[3] = r + '0';
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
}
 
/*
359,9 → 351,9
* The approximation x/10000 == (x * 0x346DC5D7) >> 43
* holds for all x < 1,128,869,999. The largest value this
* helper will ever be asked to convert is 1,125,520,955.
* (d1 in the put_dec code, assuming n is all-ones).
* (second call in the put_dec code, assuming n is all-ones).
*/
static
static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
388,6 → 380,8
d2 = (h ) & 0xffff;
d3 = (h >> 16); /* implicit "& 0xffff" */
 
/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
= 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
q = put_dec_helper4(buf, q);
 
417,7 → 411,8
*/
int num_to_str(char *buf, int size, unsigned long long num)
{
char tmp[sizeof(num) * 3];
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
int idx, len;
 
/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
435,11 → 430,11
return len;
}
 
#define ZEROPAD 1 /* pad with zero */
#define SIGN 2 /* unsigned/signed long */
#define SIGN 1 /* unsigned/signed, must be 1 */
#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define LEFT 16 /* left justified */
#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
 
478,10 → 473,8
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
{
/* we are called with base 8, 10 or 16, only, thus don't need "G..." */
static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
 
char tmp[66];
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[3 * sizeof(num)] __aligned(2);
char sign;
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
517,12 → 510,7
/* generate full string in tmp[], in reverse order */
i = 0;
if (num < spec.base)
tmp[i++] = digits[num] | locase;
/* Generic code, for any base:
else do {
tmp[i++] = (digits[do_div(num,base)] | locase);
} while (num != 0);
*/
tmp[i++] = hex_asc_upper[num] | locase;
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
530,7 → 518,7
if (spec.base == 16)
shift = 4;
do {
tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
num >>= shift;
} while (num);
} else { /* base 10 */
542,7 → 530,7
spec.precision = i;
/* leading space padding */
spec.field_width -= spec.precision;
if (!(spec.flags & (ZEROPAD+LEFT))) {
if (!(spec.flags & (ZEROPAD | LEFT))) {
while (--spec.field_width >= 0) {
if (buf < end)
*buf = ' ';
570,7 → 558,8
}
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = (spec.flags & ZEROPAD) ? '0' : ' ';
char c = ' ' + (spec.flags & ZEROPAD);
BUILD_BUG_ON(' ' + ZEROPAD != '0');
while (--spec.field_width >= 0) {
if (buf < end)
*buf = c;
712,8 → 701,6
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
#undef max
#define max(a,b) ((a) > (b) ? (a) : (b))
char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
 
742,11 → 729,16
specp = &mem_spec;
decode = 0;
}
if (decode && res->flags & IORESOURCE_UNSET) {
p = string(p, pend, "size ", str_spec);
p = number(p, pend, resource_size(res), *specp);
} else {
p = number(p, pend, res->start, *specp);
if (res->start != res->end) {
*p++ = '-';
p = number(p, pend, res->end, *specp);
}
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
p = string(p, pend, " 64bit", str_spec);
800,17 → 792,106
if (spec.field_width > 0)
len = min_t(int, spec.field_width, 64);
 
for (i = 0; i < len && buf < end - 1; i++) {
buf = hex_byte_pack(buf, addr[i]);
for (i = 0; i < len; ++i) {
if (buf < end)
*buf = hex_asc_hi(addr[i]);
++buf;
if (buf < end)
*buf = hex_asc_lo(addr[i]);
++buf;
 
if (buf < end && separator && i != len - 1)
*buf++ = separator;
if (separator && i != len - 1) {
if (buf < end)
*buf = separator;
++buf;
}
}
 
return buf;
}
 
static noinline_for_stack
char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
int nr_bits = max_t(int, spec.field_width, 0);
int i, chunksz;
bool first = true;
 
/* reused to print numbers */
spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
 
chunksz = nr_bits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
 
i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
u32 chunkmask, val;
int word, bit;
 
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (bitmap[word] >> bit) & chunkmask;
 
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
 
spec.field_width = DIV_ROUND_UP(chunksz, 4);
buf = number(buf, end, val, spec);
 
chunksz = CHUNKSZ;
}
return buf;
}
 
static noinline_for_stack
char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
bool first = true;
 
/* reused to print numbers */
spec = (struct printf_spec){ .base = 10 };
 
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
cur = find_next_bit(bitmap, nr_bits, cur + 1);
if (cur < nr_bits && cur <= rtop + 1)
continue;
 
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
 
buf = number(buf, end, rbot, spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
 
buf = number(buf, end, rtop, spec);
}
 
rbot = cur;
}
return buf;
}
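Review note: a hedged usage sketch of the two new specifiers; the expected output is derived from the code above, not from a test run. With bits 0-3 and bit 8 set in a 16-bit map the word value is 0x010f:

DECLARE_BITMAP(map, 16);

bitmap_zero(map, 16);
bitmap_set(map, 0, 4);          /* bits 0..3 */
__set_bit(8, map);

printf("%16pb\n", map);         /* "010f": zero-padded hex, 4 digits per 16-bit chunk */
printf("%16pbl\n", map);        /* "0-3,8": range-list form */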
 
static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
878,7 → 959,7
break;
}
for (i = 0; i < 4; i++) {
char temp[3]; /* hold each IP quad in reverse order */
char temp[4] __aligned(2); /* hold each IP quad in reverse order */
int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
928,6 → 1009,10
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
* width which must be explicitly specified either as part of the
* format string '%32b[l]' or through '%*b[l]', [l] selects
* range-list format instead of hex format
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
949,6 → 1034,17
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
* http://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by combination
* of the following flags (see string_escape_mem() for the
* details):
* a - ESCAPE_ANY
* c - ESCAPE_SPECIAL
* h - ESCAPE_HEX
* n - ESCAPE_NULL
* o - ESCAPE_OCTAL
* p - ESCAPE_NP
* s - ESCAPE_SPACE
* By default ESCAPE_ANY_NP is used.
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
978,6 → 1074,11
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cr' For a clock, it prints the current rate of the clock
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
1188,8 → 1289,7
 
case 'p':
spec->type = FORMAT_TYPE_PTR;
return fmt - start;
/* skip alnum */
return ++fmt - start;
 
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
1230,29 → 1330,21
if (spec->qualifier == 'L')
spec->type = FORMAT_TYPE_LONG_LONG;
else if (spec->qualifier == 'l') {
if (spec->flags & SIGN)
spec->type = FORMAT_TYPE_LONG;
else
spec->type = FORMAT_TYPE_ULONG;
BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
} else if (_tolower(spec->qualifier) == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
} else if (spec->qualifier == 'H') {
if (spec->flags & SIGN)
spec->type = FORMAT_TYPE_BYTE;
else
spec->type = FORMAT_TYPE_UBYTE;
BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
} else if (spec->qualifier == 'h') {
if (spec->flags & SIGN)
spec->type = FORMAT_TYPE_SHORT;
else
spec->type = FORMAT_TYPE_USHORT;
BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
} else {
if (spec->flags & SIGN)
spec->type = FORMAT_TYPE_INT;
else
spec->type = FORMAT_TYPE_UINT;
BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
}
 
return ++fmt - start;
1273,6 → 1365,8
* %pB output the name of a backtrace symbol with its offset
* %pR output the address range in a struct resource with decoded flags
* %pr output the address range in a struct resource with raw flags
* %pb output the bitmap with field width as the number of bits
* %pbl output the bitmap as range list with field width as the number of bits
* %pM output a 6-byte MAC address with colons
* %pMR output a 6-byte MAC address with colons in reversed order
* %pMF output a 6-byte MAC address with dashes
1290,6 → 1384,11
* %*pE[achnops] print an escaped buffer
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64
* bytes of the input)
* %pC output the name (Common Clock Framework) or address (legacy clock
* framework) of a clock
* %pCn output the name (Common Clock Framework) or address (legacy clock
* framework) of a clock
* %pCr output the current rate of a clock
* %n is ignored
*
* ** Please update Documentation/printk-formats.txt when making changes **
1312,7 → 1411,7
 
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
if ((int) size < 0)
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
 
str = buf;
1378,7 → 1477,7
break;
 
case FORMAT_TYPE_PTR:
str = pointer(fmt+1, str, end, va_arg(args, void *),
str = pointer(fmt, str, end, va_arg(args, void *),
spec);
while (isalnum(*fmt))
fmt++;