/drivers/ddk/linux/bitmap.c |
---|
2,7 → 2,7 |
* lib/bitmap.c |
* Helper functions for bitmap.h. |
* |
* Tlhis source code is licensed under the GNU General Public License, |
* This source code is licensed under the GNU General Public License, |
* Version 2. See the file COPYING for more details. |
*/ |
#include <syscall.h> |
41,9 → 41,9 |
* for the best explanations of this ordering. |
*/ |
int __bitmap_empty(const unsigned long *bitmap, int bits) |
int __bitmap_empty(const unsigned long *bitmap, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
if (bitmap[k]) |
return 0; |
56,9 → 56,9 |
} |
EXPORT_SYMBOL(__bitmap_empty); |
int __bitmap_full(const unsigned long *bitmap, int bits) |
int __bitmap_full(const unsigned long *bitmap, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
if (~bitmap[k]) |
return 0; |
72,9 → 72,9 |
EXPORT_SYMBOL(__bitmap_full); |
int __bitmap_equal(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
if (bitmap1[k] != bitmap2[k]) |
return 0; |
87,14 → 87,14 |
} |
EXPORT_SYMBOL(__bitmap_equal); |
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) |
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
dst[k] = ~src[k]; |
if (bits % BITS_PER_LONG) |
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); |
dst[k] = ~src[k]; |
} |
EXPORT_SYMBOL(__bitmap_complement); |
183,23 → 183,26 |
EXPORT_SYMBOL(__bitmap_shift_left); |
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k; |
int nr = BITS_TO_LONGS(bits); |
unsigned int k; |
unsigned int lim = bits/BITS_PER_LONG; |
unsigned long result = 0; |
for (k = 0; k < nr; k++) |
for (k = 0; k < lim; k++) |
result |= (dst[k] = bitmap1[k] & bitmap2[k]); |
if (bits % BITS_PER_LONG) |
result |= (dst[k] = bitmap1[k] & bitmap2[k] & |
BITMAP_LAST_WORD_MASK(bits)); |
return result != 0; |
} |
EXPORT_SYMBOL(__bitmap_and); |
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k; |
int nr = BITS_TO_LONGS(bits); |
unsigned int k; |
unsigned int nr = BITS_TO_LONGS(bits); |
for (k = 0; k < nr; k++) |
dst[k] = bitmap1[k] | bitmap2[k]; |
207,10 → 210,10 |
EXPORT_SYMBOL(__bitmap_or); |
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k; |
int nr = BITS_TO_LONGS(bits); |
unsigned int k; |
unsigned int nr = BITS_TO_LONGS(bits); |
for (k = 0; k < nr; k++) |
dst[k] = bitmap1[k] ^ bitmap2[k]; |
218,22 → 221,25 |
EXPORT_SYMBOL(__bitmap_xor); |
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k; |
int nr = BITS_TO_LONGS(bits); |
unsigned int k; |
unsigned int lim = bits/BITS_PER_LONG; |
unsigned long result = 0; |
for (k = 0; k < nr; k++) |
for (k = 0; k < lim; k++) |
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); |
if (bits % BITS_PER_LONG) |
result |= (dst[k] = bitmap1[k] & ~bitmap2[k] & |
BITMAP_LAST_WORD_MASK(bits)); |
return result != 0; |
} |
EXPORT_SYMBOL(__bitmap_andnot); |
int __bitmap_intersects(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
if (bitmap1[k] & bitmap2[k]) |
return 1; |
246,9 → 252,9 |
EXPORT_SYMBOL(__bitmap_intersects); |
int __bitmap_subset(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits) |
const unsigned long *bitmap2, unsigned int bits) |
{ |
int k, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
for (k = 0; k < lim; ++k) |
if (bitmap1[k] & ~bitmap2[k]) |
return 0; |
260,9 → 266,10 |
} |
EXPORT_SYMBOL(__bitmap_subset); |
int __bitmap_weight(const unsigned long *bitmap, int bits) |
int __bitmap_weight(const unsigned long *bitmap, unsigned int bits) |
{ |
int k, w = 0, lim = bits/BITS_PER_LONG; |
unsigned int k, lim = bits/BITS_PER_LONG; |
int w = 0; |
for (k = 0; k < lim; k++) |
w += hweight_long(bitmap[k]); |
274,21 → 281,21 |
} |
EXPORT_SYMBOL(__bitmap_weight); |
void bitmap_set(unsigned long *map, int start, int nr) |
void bitmap_set(unsigned long *map, unsigned int start, int len) |
{ |
unsigned long *p = map + BIT_WORD(start); |
const int size = start + nr; |
const unsigned int size = start + len; |
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); |
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); |
while (nr - bits_to_set >= 0) { |
while (len - bits_to_set >= 0) { |
*p |= mask_to_set; |
nr -= bits_to_set; |
len -= bits_to_set; |
bits_to_set = BITS_PER_LONG; |
mask_to_set = ~0UL; |
p++; |
} |
if (nr) { |
if (len) { |
mask_to_set &= BITMAP_LAST_WORD_MASK(size); |
*p |= mask_to_set; |
} |
295,21 → 302,21 |
} |
EXPORT_SYMBOL(bitmap_set); |
void bitmap_clear(unsigned long *map, int start, int nr) |
void bitmap_clear(unsigned long *map, unsigned int start, int len) |
{ |
unsigned long *p = map + BIT_WORD(start); |
const int size = start + nr; |
const unsigned int size = start + len; |
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); |
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); |
while (nr - bits_to_clear >= 0) { |
while (len - bits_to_clear >= 0) { |
*p &= ~mask_to_clear; |
nr -= bits_to_clear; |
len -= bits_to_clear; |
bits_to_clear = BITS_PER_LONG; |
mask_to_clear = ~0UL; |
p++; |
} |
if (nr) { |
if (len) { |
mask_to_clear &= BITMAP_LAST_WORD_MASK(size); |
*p &= ~mask_to_clear; |
} |
378,7 → 385,7 |
* |
* If for example, just bits 4 through 7 are set in @buf, then @pos |
* values 4 through 7 will get mapped to 0 through 3, respectively, |
* and other @pos values will get mapped to 0. When @pos value 7 |
* and other @pos values will get mapped to -1. When @pos value 7 |
* gets mapped to (returns) @ord value 3 in this example, that means |
* that bit 7 is the 3rd (starting with 0th) set bit in @buf. |
* |
708,7 → 715,7 |
REG_OP_RELEASE, /* clear all bits in region */ |
}; |
static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op) |
static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op) |
{ |
int nbits_reg; /* number of bits in region */ |
int index; /* index first long of region in bitmap */ |
774,11 → 781,11 |
* Return the bit offset in bitmap of the allocated region, |
* or -errno on failure. |
*/ |
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) |
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) |
{ |
int pos, end; /* scans bitmap by regions of size order */ |
unsigned int pos, end; /* scans bitmap by regions of size order */ |
for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { |
for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) { |
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) |
continue; |
__reg_op(bitmap, pos, order, REG_OP_ALLOC); |
799,7 → 806,7 |
* |
* No return value. |
*/ |
void bitmap_release_region(unsigned long *bitmap, int pos, int order) |
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) |
{ |
__reg_op(bitmap, pos, order, REG_OP_RELEASE); |
} |
816,12 → 823,11 |
* Return 0 on success, or %-EBUSY if specified region wasn't |
* free (not all bits were zero). |
*/ |
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) |
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) |
{ |
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) |
return -EBUSY; |
__reg_op(bitmap, pos, order, REG_OP_ALLOC); |
return 0; |
return __reg_op(bitmap, pos, order, REG_OP_ALLOC); |
} |
EXPORT_SYMBOL(bitmap_allocate_region); |
/drivers/ddk/linux/hdmi.c |
---|
0,0 → 1,436 |
/* |
* Copyright (C) 2012 Avionic Design GmbH |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sub license, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
*/ |
#include <linux/bitops.h> |
#include <linux/bug.h> |
#include <linux/errno.h> |
#include <linux/export.h> |
#include <linux/hdmi.h> |
#include <linux/string.h> |
/*
 * Compute the InfoFrame checksum over @size bytes of @buffer and store
 * it in byte 3 (the checksum slot of the packed header).  Afterwards
 * the sum of all @size bytes, checksum included, is 0 modulo 256.
 */
static void hdmi_infoframe_checksum(void *buffer, size_t size)
{
	unsigned char *bytes = buffer;
	unsigned char sum = 0;
	size_t idx;

	/* accumulate every byte, including the zeroed checksum slot */
	for (idx = 0; idx < size; idx++)
		sum += bytes[idx];

	bytes[3] = 256 - sum;
}
/** |
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe |
* @frame: HDMI AVI infoframe |
* |
* Returns 0 on success or a negative error code on failure. |
*/ |
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame) |
{ |
memset(frame, 0, sizeof(*frame)); |
frame->type = HDMI_INFOFRAME_TYPE_AVI; |
frame->version = 2; |
frame->length = HDMI_AVI_INFOFRAME_SIZE; |
return 0; |
} |
EXPORT_SYMBOL(hdmi_avi_infoframe_init); |
/**
 * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
 * @frame: HDMI AVI infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure (-ENOSPC if @buffer cannot hold the frame).
 */
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
				size_t size)
{
	u8 *ptr = buffer;
	size_t length;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	/* zero the whole destination so unused payload bytes are 0 */
	memset(buffer, 0, size);

	/* 4-byte header: type, version, payload length, checksum slot */
	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum, filled in by hdmi_infoframe_checksum() below */

	/* start infoframe payload */
	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	/* data byte 1: colorspace (Y1:Y0) and scan mode (S1:S0) */
	ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);

	/*
	 * Data byte 1, bit 4 has to be set if we provide the active format
	 * aspect ratio
	 */
	if (frame->active_aspect & 0xf)
		ptr[0] |= BIT(4);

	/* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
	if (frame->top_bar || frame->bottom_bar)
		ptr[0] |= BIT(3);

	if (frame->left_bar || frame->right_bar)
		ptr[0] |= BIT(2);

	/* data byte 2: colorimetry, picture aspect, active aspect */
	ptr[1] = ((frame->colorimetry & 0x3) << 6) |
		 ((frame->picture_aspect & 0x3) << 4) |
		 (frame->active_aspect & 0xf);

	/* data byte 3: extended colorimetry, quantization range, scaling */
	ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
		 ((frame->quantization_range & 0x3) << 2) |
		 (frame->nups & 0x3);

	/* bit 7 of data byte 3 flags IT content */
	if (frame->itc)
		ptr[2] |= BIT(7);

	/* data byte 4: video identification code (VIC) */
	ptr[3] = frame->video_code & 0x7f;

	/* data byte 5: YCC quantization, content type, pixel repetition */
	ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
		 ((frame->content_type & 0x3) << 4) |
		 (frame->pixel_repeat & 0xf);

	/* bar positions, each 16 bits little-endian (low byte first) */
	ptr[5] = frame->top_bar & 0xff;
	ptr[6] = (frame->top_bar >> 8) & 0xff;
	ptr[7] = frame->bottom_bar & 0xff;
	ptr[8] = (frame->bottom_bar >> 8) & 0xff;
	ptr[9] = frame->left_bar & 0xff;
	ptr[10] = (frame->left_bar >> 8) & 0xff;
	ptr[11] = frame->right_bar & 0xff;
	ptr[12] = (frame->right_bar >> 8) & 0xff;

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack); |
/**
 * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
 * @frame: HDMI SPD infoframe
 * @vendor: vendor string (copied into the frame's 8-byte vendor field)
 * @product: product string (copied into the frame's 16-byte product field)
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
			    const char *vendor, const char *product)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_SPD;
	frame->version = 1;
	frame->length = HDMI_SPD_INFOFRAME_SIZE;

	/*
	 * NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the destination field completely.  hdmi_spd_infoframe_pack()
	 * copies the raw fields with memcpy(), so packing is unaffected,
	 * but reading frame->vendor/product as C strings elsewhere would
	 * be unsafe — confirm no caller does that.
	 */
	strncpy(frame->vendor, vendor, sizeof(frame->vendor));
	strncpy(frame->product, product, sizeof(frame->product));

	return 0;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init); |
/** |
* hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer |
* @frame: HDMI SPD infoframe |
* @buffer: destination buffer |
* @size: size of buffer |
* |
* Packs the information contained in the @frame structure into a binary |
* representation that can be written into the corresponding controller |
* registers. Also computes the checksum as required by section 5.3.5 of |
* the HDMI 1.4 specification. |
* |
* Returns the number of bytes packed into the binary buffer or a negative |
* error code on failure. |
*/ |
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, |
size_t size) |
{ |
u8 *ptr = buffer; |
size_t length; |
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; |
if (size < length) |
return -ENOSPC; |
memset(buffer, 0, size); |
ptr[0] = frame->type; |
ptr[1] = frame->version; |
ptr[2] = frame->length; |
ptr[3] = 0; /* checksum */ |
/* start infoframe payload */ |
ptr += HDMI_INFOFRAME_HEADER_SIZE; |
memcpy(ptr, frame->vendor, sizeof(frame->vendor)); |
memcpy(ptr + 8, frame->product, sizeof(frame->product)); |
ptr[24] = frame->sdi; |
hdmi_infoframe_checksum(buffer, length); |
return length; |
} |
EXPORT_SYMBOL(hdmi_spd_infoframe_pack); |
/** |
* hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe |
* @frame: HDMI audio infoframe |
* |
* Returns 0 on success or a negative error code on failure. |
*/ |
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame) |
{ |
memset(frame, 0, sizeof(*frame)); |
frame->type = HDMI_INFOFRAME_TYPE_AUDIO; |
frame->version = 1; |
frame->length = HDMI_AUDIO_INFOFRAME_SIZE; |
return 0; |
} |
EXPORT_SYMBOL(hdmi_audio_infoframe_init); |
/**
 * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
 * @frame: HDMI audio infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure (-ENOSPC if @buffer cannot hold the frame).
 */
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
				  void *buffer, size_t size)
{
	unsigned char channels;
	u8 *ptr = buffer;
	size_t length;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	/* zero the destination so unused payload bytes stay zero */
	memset(buffer, 0, size);

	/*
	 * Channel count field is encoded as count-minus-one; a raw value
	 * of 0 presumably means "refer to stream header" — confirm
	 * against CEA-861 before relying on it.
	 */
	if (frame->channels >= 2)
		channels = frame->channels - 1;
	else
		channels = 0;

	/* 4-byte header: type, version, payload length, checksum slot */
	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum, filled in by hdmi_infoframe_checksum() */

	/* start infoframe payload */
	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	/* data byte 1: coding type (high nibble), channel count (low 3 bits) */
	ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
	/* data byte 2: sample frequency and sample size */
	ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
		 (frame->sample_size & 0x3);
	/* data byte 3: extended coding type */
	ptr[2] = frame->coding_type_ext & 0x1f;
	/* data byte 4: speaker/channel allocation */
	ptr[3] = frame->channel_allocation;
	/* data byte 5: level shift value (bits 6:3), downmix inhibit (bit 7) */
	ptr[4] = (frame->level_shift_value & 0xf) << 3;

	if (frame->downmix_inhibit)
		ptr[4] |= BIT(7);

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack); |
/** |
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe |
* @frame: HDMI vendor infoframe |
* |
* Returns 0 on success or a negative error code on failure. |
*/ |
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) |
{ |
memset(frame, 0, sizeof(*frame)); |
frame->type = HDMI_INFOFRAME_TYPE_VENDOR; |
frame->version = 1; |
frame->oui = HDMI_IEEE_OUI; |
/* |
* 0 is a valid value for s3d_struct, so we use a special "not set" |
* value |
*/ |
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID; |
return 0; |
} |
EXPORT_SYMBOL(hdmi_vendor_infoframe_init); |
/**
 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
 * @frame: HDMI infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Exactly one of @frame->vic and @frame->s3d_struct must be supplied;
 * otherwise -EINVAL is returned.  Note that @frame->length is updated
 * as a side effect before packing.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure.
 */
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
				   void *buffer, size_t size)
{
	u8 *ptr = buffer;
	size_t length;

	/* empty info frame: neither a VIC nor a 3D structure was set */
	if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
		return -EINVAL;

	/* only one of those can be supplied */
	if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
		return -EINVAL;

	/* for side by side (half) we also need to provide 3D_Ext_Data */
	if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
		frame->length = 6;
	else
		frame->length = 5;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	/* zero the destination so unused payload bytes stay zero */
	memset(buffer, 0, size);

	/* 4-byte header: type, version, payload length, checksum slot */
	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum, filled in by hdmi_infoframe_checksum() */

	/* HDMI OUI 0x000C03, least-significant byte first */
	ptr[4] = 0x03;
	ptr[5] = 0x0c;
	ptr[6] = 0x00;

	if (frame->vic) {
		ptr[7] = 0x1 << 5;	/* video format: extended VIC follows */
		ptr[8] = frame->vic;
	} else {
		ptr[7] = 0x2 << 5;	/* video format: 3D format follows */
		ptr[8] = (frame->s3d_struct & 0xf) << 4;
		if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
			ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
	}

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack); |
/* |
* hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer |
*/ |
static ssize_t |
hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame, |
void *buffer, size_t size) |
{ |
/* we only know about HDMI vendor infoframes */ |
if (frame->any.oui != HDMI_IEEE_OUI) |
return -EINVAL; |
return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size); |
} |
/** |
* hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer |
* @frame: HDMI infoframe |
* @buffer: destination buffer |
* @size: size of buffer |
* |
* Packs the information contained in the @frame structure into a binary |
* representation that can be written into the corresponding controller |
* registers. Also computes the checksum as required by section 5.3.5 of |
* the HDMI 1.4 specification. |
* |
* Returns the number of bytes packed into the binary buffer or a negative |
* error code on failure. |
*/ |
ssize_t |
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size) |
{ |
ssize_t length; |
switch (frame->any.type) { |
case HDMI_INFOFRAME_TYPE_AVI: |
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size); |
break; |
case HDMI_INFOFRAME_TYPE_SPD: |
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size); |
break; |
case HDMI_INFOFRAME_TYPE_AUDIO: |
length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size); |
break; |
case HDMI_INFOFRAME_TYPE_VENDOR: |
length = hdmi_vendor_any_infoframe_pack(&frame->vendor, |
buffer, size); |
break; |
default: |
WARN(1, "Bad infoframe type %d\n", frame->any.type); |
length = -EINVAL; |
} |
return length; |
} |
EXPORT_SYMBOL(hdmi_infoframe_pack); |
/drivers/ddk/linux/idr.c |
---|
18,12 → 18,6 |
* pointer or what ever, we treat it as a (void *). You can pass this |
* id to a user for him to pass back at a later time. You then pass |
* that id to this code and it returns your pointer. |
* You can release ids at any time. When all ids are released, most of |
* the memory is returned (we keep MAX_IDR_FREE) in a local pool so we |
* don't need to go to the memory "store" during an id allocate, just |
* so you don't need to be too concerned about locking and conflicts |
* with the slab allocator. |
*/ |
#include <linux/kernel.h> |
136,7 → 130,7 |
static inline void free_layer(struct idr *idr, struct idr_layer *p) |
{ |
if (idr->hint && idr->hint == p) |
if (idr->hint == p) |
RCU_INIT_POINTER(idr->hint, NULL); |
idr_layer_rcu_free(&p->rcu_head); |
} |
181,7 → 175,7 |
} |
} |
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
while (idp->id_free_cnt < MAX_IDR_FREE) { |
struct idr_layer *new; |
192,7 → 186,6 |
} |
return 1; |
} |
EXPORT_SYMBOL(__idr_pre_get); |
/** |
* sub_alloc - try to allocate an id without growing the tree depth |
235,7 → 228,7 |
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
/* if already at the top layer, we need to grow */ |
if (id >= 1 << (idp->layers * IDR_BITS)) { |
if (id > idr_max(idp->layers)) { |
*starting_id = id; |
return -EAGAIN; |
} |
359,21 → 352,7 |
idr_mark_full(pa, id); |
} |
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
{ |
struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
int rv; |
rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
if (rv < 0) |
return rv == -ENOMEM ? -EAGAIN : rv; |
idr_fill_slot(idp, ptr, rv, pa); |
*id = rv; |
return 0; |
} |
EXPORT_SYMBOL(__idr_get_new_above); |
/** |
* idr_preload - preload for idr_alloc() |
* @gfp_mask: allocation mask to use for preloading |
550,6 → 529,11 |
if (id < 0) |
return; |
if (id > idr_max(idp->layers)) { |
idr_remove_warning(id); |
return; |
} |
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
idp->top->ary[0]) { |
567,20 → 551,10 |
bitmap_clear(to_free->bitmap, 0, IDR_SIZE); |
free_layer(idp, to_free); |
} |
while (idp->id_free_cnt >= MAX_IDR_FREE) { |
p = get_from_free_list(idp); |
/* |
* Note: we don't call the rcu callback here, since the only |
* layers that fall into the freelist are those that have been |
* preallocated. |
*/ |
kfree(p); |
} |
return; |
} |
EXPORT_SYMBOL(idr_remove); |
void __idr_remove_all(struct idr *idp) |
static void __idr_remove_all(struct idr *idp) |
{ |
int n, id, max; |
int bt_mask; |
589,16 → 563,17 |
struct idr_layer **paa = &pa[0]; |
n = idp->layers * IDR_BITS; |
p = idp->top; |
*paa = idp->top; |
rcu_assign_pointer(idp->top, NULL); |
max = idr_max(idp->layers); |
id = 0; |
while (id >= 0 && id <= max) { |
p = *paa; |
while (n > IDR_BITS && p) { |
n -= IDR_BITS; |
*paa++ = p; |
p = p->ary[(id >> n) & IDR_MASK]; |
*++paa = p; |
} |
bt_mask = id; |
605,15 → 580,14 |
id += 1 << n; |
/* Get the highest bit that the above add changed from 0->1. */ |
while (n < fls(id ^ bt_mask)) { |
if (p) |
free_layer(idp, p); |
if (*paa) |
free_layer(idp, *paa); |
n += IDR_BITS; |
p = *--paa; |
--paa; |
} |
} |
idp->layers = 0; |
} |
EXPORT_SYMBOL(__idr_remove_all); |
/** |
* idr_destroy - release all cached layers within an idr tree |
692,15 → 666,16 |
struct idr_layer **paa = &pa[0]; |
n = idp->layers * IDR_BITS; |
p = rcu_dereference_raw(idp->top); |
*paa = rcu_dereference_raw(idp->top); |
max = idr_max(idp->layers); |
id = 0; |
while (id >= 0 && id <= max) { |
p = *paa; |
while (n > 0 && p) { |
n -= IDR_BITS; |
*paa++ = p; |
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
*++paa = p; |
} |
if (p) { |
712,7 → 687,7 |
id += 1 << n; |
while (n < fls(id)) { |
n += IDR_BITS; |
p = *--paa; |
--paa; |
} |
} |
740,7 → 715,7 |
int n, max; |
/* find first ent */ |
p = rcu_dereference_raw(idp->top); |
p = *paa = rcu_dereference_raw(idp->top); |
if (!p) |
return NULL; |
n = (p->layer + 1) * IDR_BITS; |
747,10 → 722,11 |
max = idr_max(p->layer + 1); |
while (id >= 0 && id <= max) { |
p = *paa; |
while (n > 0 && p) { |
n -= IDR_BITS; |
*paa++ = p; |
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
*++paa = p; |
} |
if (p) { |
768,7 → 744,7 |
id = round_up(id + 1, 1 << n); |
while (n < fls(id)) { |
n += IDR_BITS; |
p = *--paa; |
--paa; |
} |
} |
return NULL; |
798,14 → 774,12 |
p = idp->top; |
if (!p) |
return ERR_PTR(-EINVAL); |
return ERR_PTR(-ENOENT); |
n = (p->layer+1) * IDR_BITS; |
if (id > idr_max(p->layer + 1)) |
return ERR_PTR(-ENOENT); |
if (id >= (1 << n)) |
return ERR_PTR(-EINVAL); |
n -= IDR_BITS; |
n = p->layer * IDR_BITS; |
while ((n > 0) && p) { |
p = p->ary[(id >> n) & IDR_MASK]; |
n -= IDR_BITS; |
842,7 → 816,17 |
} |
EXPORT_SYMBOL(idr_init); |
static int idr_has_entry(int id, void *p, void *data) |
{ |
return 1; |
} |
bool idr_is_empty(struct idr *idp) |
{ |
return !idr_for_each(idp, idr_has_entry, NULL); |
} |
EXPORT_SYMBOL(idr_is_empty); |
/** |
* DOC: IDA description |
* IDA - IDR based ID allocator |
1006,6 → 990,9 |
int n; |
struct ida_bitmap *bitmap; |
if (idr_id > idr_max(ida->idr.layers)) |
goto err; |
/* clear full bits while looking up the leaf idr_layer */ |
while ((shift > 0) && p) { |
n = (idr_id >> shift) & IDR_MASK; |
1021,7 → 1008,7 |
__clear_bit(n, p->bitmap); |
bitmap = (void *)p->ary[n]; |
if (!test_bit(offset, bitmap->bitmap)) |
if (!bitmap || !test_bit(offset, bitmap->bitmap)) |
goto err; |
/* update bitmap and remove it if empty */ |
1244,3 → 1231,17 |
return (res + (res >> 16)) & 0x000000FF; |
} |
/*
 * hweight64 - count the set bits ("Hamming weight") of a 64-bit value.
 *
 * On 32-bit builds the result is the sum of the weights of the two
 * 32-bit halves.  On 64-bit builds a branch-free SWAR reduction is
 * used: pairwise bit sums, then nibble sums, then byte folding.
 */
unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
	/* each 2-bit field now holds the popcount of its two bits */
	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
	/* sum adjacent 2-bit fields into 4-bit fields */
	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	/* sum adjacent nibbles into bytes */
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	/* fold all byte counts together; max total is 64, fits in one byte */
	res = res + (res >> 8);
	res = res + (res >> 16);
	return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
}
/drivers/ddk/linux/interval_tree.c |
---|
0,0 → 1,16 |
//#include <linux/init.h> |
#include <linux/interval_tree.h> |
#include <linux/interval_tree_generic.h> |
#include <linux/module.h> |
/* Accessors handed to the template: an interval spans [start, last]. */
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

/*
 * Instantiate the generic interval-tree template for
 * struct interval_tree_node (rb-tree link member "rb", augmented with
 * the unsigned long __subtree_last field), generating the
 * interval_tree_* function family exported below.
 */
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
		     unsigned long, __subtree_last,
		     START, LAST,, interval_tree)

EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
/drivers/ddk/linux/kasprintf.c |
---|
0,0 → 1,45 |
/* |
* linux/lib/kasprintf.c |
* |
* Copyright (C) 1991, 1992 Linus Torvalds |
*/ |
#include <stdarg.h> |
#include <linux/export.h> |
#include <linux/slab.h> |
#include <linux/types.h> |
#include <linux/string.h> |
/* Simplified asprintf. */ |
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) |
{ |
unsigned int len; |
char *p; |
va_list aq; |
va_copy(aq, ap); |
len = vsnprintf(NULL, 0, fmt, aq); |
va_end(aq); |
p = kmalloc(len+1, gfp); |
if (!p) |
return NULL; |
vsnprintf(p, len+1, fmt, ap); |
return p; |
} |
EXPORT_SYMBOL(kvasprintf); |
/*
 * Variadic front-end for kvasprintf(): format into a freshly allocated
 * buffer.  Caller must kfree() the result; NULL on allocation failure.
 */
char *kasprintf(gfp_t gfp, const char *fmt, ...)
{
	char *result;
	va_list args;

	va_start(args, fmt);
	result = kvasprintf(gfp, fmt, args);
	va_end(args);

	return result;
}
EXPORT_SYMBOL(kasprintf); |
/drivers/ddk/linux/mutex.c |
---|
0,0 → 1,117 |
/* |
* kernel/locking/mutex.c |
* |
* Mutexes: blocking mutual exclusion locks |
* |
* Started by Ingo Molnar: |
* |
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* |
* Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and |
* David Howells for suggestions and improvements. |
* |
* - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline |
* from the -rt tree, where it was originally implemented for rtmutexes |
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale |
* and Sven Dietrich. |
* |
* Also see Documentation/mutex-design.txt. |
*/ |
#include <linux/lockdep.h> |
#include <linux/mutex.h> |
#include <linux/ww_mutex.h> |
#include <linux/sched.h> |
#include <linux/export.h> |
#include <linux/spinlock.h> |
#include <syscall.h> |
/* |
* A negative mutex count indicates that waiters are sleeping waiting for the |
* mutex. |
*/ |
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0) |
/*
 * Initialize @lock to the unlocked state (count == 1) with an empty
 * wait list.  @name and @key exist for API compatibility with the
 * kernel's lockdep-enabled variant; they are unused in this port.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);	/* 1 == unlocked */
//	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
//	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/* optimistic-spin queue starts out empty */
	lock->osq = NULL;
#endif
}
/*
 * Bookkeeping shared by all ww_mutex lock paths: sanity-check the
 * acquire context (debug builds only) and count this lock against it.
 */
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	/* one more lock is now held under this acquire context */
	ww_ctx->acquired++;
}
/*
 * Release @lock.  If it was taken under a ww_acquire context, drop this
 * lock's contribution to ctx->acquired and detach the context before
 * unlocking the underlying platform mutex.
 */
void ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	/* this port delegates the actual unlock to the platform mutex */
	MutexUnlock(&lock->base);
}
/*
 * Acquire @lock under wound/wait context @ctx.
 *
 * This port maps the ww_mutex onto a plain platform mutex, so the
 * kernel's deadlock-avoidance protocol (-EDEADLK back-off) is not
 * implemented here: the call blocks until the lock is taken and
 * always returns 0.
 */
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	MutexLock(&lock->base);
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	return 0;
}
/*
 * Interruptible-lock entry point.
 *
 * NOTE(review): this body is identical to __ww_mutex_lock() — the
 * platform MutexLock() used by this port apparently has no
 * interruptible variant, so this never returns -EINTR.  Confirm that
 * callers do not depend on being interruptible.
 */
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	MutexLock(&lock->base);
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	return 0;
}
/drivers/ddk/linux/scatterlist.c |
---|
0,0 → 1,363 |
/* |
* Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com> |
* |
* Scatterlist handling helpers. |
* |
* This source code is licensed under the GNU General Public License, |
* Version 2. See the file COPYING for more details. |
*/ |
#include <linux/export.h> |
#include <linux/scatterlist.h> |
/** |
* sg_next - return the next scatterlist entry in a list |
* @sg: The current sg entry |
* |
* Description: |
* Usually the next entry will be @sg@ + 1, but if this sg element is part |
* of a chained scatterlist, it could jump to the start of a new |
* scatterlist array. |
* |
**/ |
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
#ifdef CONFIG_DEBUG_SG |
BUG_ON(sg->sg_magic != SG_MAGIC); |
#endif |
if (sg_is_last(sg)) |
return NULL; |
sg++; |
if (unlikely(sg_is_chain(sg))) |
sg = sg_chain_ptr(sg); |
return sg; |
} |
EXPORT_SYMBOL(sg_next); |
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Walks the list via sg_next(), so chained scatterlists are counted
 *   correctly (chain link entries are skipped, not counted).
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int count = 0;

	while (sg) {
		count++;
		sg = sg_next(sg);
	}

	return count;
}
EXPORT_SYMBOL(sg_nents);
/** |
* sg_last - return the last scatterlist entry in a list |
* @sgl: First entry in the scatterlist |
* @nents: Number of entries in the scatterlist |
* |
* Description: |
* Should only be used casually, it (currently) scans the entire list |
* to get the last entry. |
* |
* Note that the @sgl@ pointer passed in need not be the first one, |
* the important bit is that @nents@ denotes the number of entries that |
* exist from @sgl@. |
* |
**/ |
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) |
{ |
#ifndef ARCH_HAS_SG_CHAIN |
struct scatterlist *ret = &sgl[nents - 1]; |
#else |
struct scatterlist *sg, *ret = NULL; |
unsigned int i; |
for_each_sg(sgl, sg, nents, i) |
ret = sg; |
#endif |
#ifdef CONFIG_DEBUG_SG |
BUG_ON(sgl[0].sg_magic != SG_MAGIC); |
BUG_ON(!sg_is_last(ret)); |
#endif |
return ret; |
} |
EXPORT_SYMBOL(sg_last); |
/** |
* sg_init_table - Initialize SG table |
* @sgl: The SG table |
* @nents: Number of entries in table |
* |
* Notes: |
* If this is part of a chained sg table, sg_mark_end() should be |
* used only on the last table part. |
* |
**/ |
void sg_init_table(struct scatterlist *sgl, unsigned int nents) |
{ |
memset(sgl, 0, sizeof(*sgl) * nents); |
#ifdef CONFIG_DEBUG_SG |
{ |
unsigned int i; |
for (i = 0; i < nents; i++) |
sgl[i].sg_magic = SG_MAGIC; |
} |
#endif |
sg_mark_end(&sgl[nents - 1]); |
} |
EXPORT_SYMBOL(sg_init_table); |
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 * NOTE(review): disabled in this port (sg_set_buf() is not available).
 * The EXPORT_SYMBOL below is commented out as well: exporting a symbol
 * whose definition is disabled would reference an undefined function.
 **/
//void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
//{
//	sg_init_table(sg, 1);
//	sg_set_buf(sg, buf, buflen);
//}
//EXPORT_SYMBOL(sg_init_one);
/* |
* The default behaviour of sg_alloc_table() is to use these kmalloc/kfree |
* helpers. |
*/ |
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) |
{ |
return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); |
} |
/* Free helper paired with sg_kmalloc(); @nents is part of the sg_free_fn
 * signature but unused here. */
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}
/** |
* __sg_free_table - Free a previously mapped sg table |
* @table: The sg table header to use |
* @max_ents: The maximum number of entries per single scatterlist |
* @skip_first_chunk: don't free the (preallocated) first scatterlist chunk |
* @free_fn: Free function |
* |
* Description: |
* Free an sg table previously allocated and setup with |
* __sg_alloc_table(). The @max_ents value must be identical to |
* that previously used with __sg_alloc_table(). |
* |
**/ |
void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
bool skip_first_chunk, sg_free_fn *free_fn) |
{ |
struct scatterlist *sgl, *next; |
if (unlikely(!table->sgl)) |
return; |
sgl = table->sgl; |
while (table->orig_nents) { |
unsigned int alloc_size = table->orig_nents; |
unsigned int sg_size; |
/* |
* If we have more than max_ents segments left, |
* then assign 'next' to the sg table after the current one. |
* sg_size is then one less than alloc size, since the last |
* element is the chain pointer. |
*/ |
if (alloc_size > max_ents) { |
next = sg_chain_ptr(&sgl[max_ents - 1]); |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else { |
sg_size = alloc_size; |
next = NULL; |
} |
table->orig_nents -= sg_size; |
if (!skip_first_chunk) { |
free_fn(sgl, alloc_size); |
skip_first_chunk = false; |
} |
sgl = next; |
} |
table->sgl = NULL; |
} |
EXPORT_SYMBOL(__sg_free_table); |
/** |
* sg_free_table - Free a previously allocated sg table |
* @table: The mapped sg table header |
* |
**/ |
void sg_free_table(struct sg_table *table) |
{ |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); |
} |
EXPORT_SYMBOL(sg_free_table); |
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: optional preallocated first scatterlist chunk (consumed
 *               before @alloc_fn is used; may be NULL)
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, struct scatterlist *first_chunk,
		gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
	/* Without chaining support everything must fit in one chunk. */
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full-sized chunk sacrifices its last slot to the chain
		 * pointer, so it only holds alloc_size - 1 payload entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		/* The caller-supplied chunk, if any, is used exactly once. */
		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
/** |
* sg_alloc_table - Allocate and initialize an sg table |
* @table: The sg table header to use |
* @nents: Number of entries in sg list |
* @gfp_mask: GFP allocation mask |
* |
* Description: |
* Allocate and initialize an sg table. If @nents@ is larger than |
* SG_MAX_SINGLE_ALLOC a chained sg table will be setup. |
* |
**/ |
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) |
{ |
int ret; |
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, |
NULL, gfp_mask, sg_kmalloc); |
if (unlikely(ret)) |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); |
return ret; |
} |
EXPORT_SYMBOL(sg_alloc_table); |
void __sg_page_iter_start(struct sg_page_iter *piter, |
struct scatterlist *sglist, unsigned int nents, |
unsigned long pgoffset) |
{ |
piter->__pg_advance = 0; |
piter->__nents = nents; |
piter->sg = sglist; |
piter->sg_pgoffset = pgoffset; |
} |
EXPORT_SYMBOL(__sg_page_iter_start); |
static int sg_page_count(struct scatterlist *sg) |
{ |
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; |
} |
/*
 * __sg_page_iter_next - advance the iterator by one page.
 *
 * Returns true while the iterator points at a valid page, false when the
 * list (or the entry budget set in __sg_page_iter_start()) is exhausted.
 */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	/* First call advances by 0 (set in _start); every later call by 1. */
	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	/* Walked past the current entry's pages: move to following entries,
	 * carrying the leftover page offset into each next entry. */
	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);