author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 19:25:06 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 19:25:06 +0300
commit     7832681b365f220151d1c33cc1a8891f10ecdb6f (patch)
tree       eeabbcd88f1a9b87b61469857c63a2fcb571115b /include/linux
parent     516fb7f2e73dcc303fb97fc3593209fcacf2d982 (diff)
parent     47427379ea80f1368ccec6ba20874fc19a9f0cc4 (diff)
Merge tag 'docs-4.15' of git://git.lwn.net/linux
Pull documentation updates from Jonathan Corbet:
 "A relatively calm cycle for the docs tree again.

   - The old driver statement has been added to the kernel docs.

   - We have a couple of new helper scripts. find-unused-docs.sh from
     Sayli Karnic will point out kerneldoc comments that are not
     actually used in the documentation. Jani Nikula's
     documentation-file-ref-check finds references to non-existing
     files.

   - A new ftrace document from Steve Rostedt.

   - Vinod Koul converted the dmaengine docs to RST.

  Beyond that, it's mostly simple fixes.

  This set reaches outside of Documentation/ a bit more than most. In
  all cases, the changes are to comment docs, mostly from Randy, in
  places where there didn't seem to be anybody better to take them"

* tag 'docs-4.15' of git://git.lwn.net/linux: (52 commits)
  documentation: fb: update list of available compiled-in fonts
  MAINTAINERS: update DMAengine documentation location
  dmaengine: doc: ReSTize pxa_dma doc
  dmaengine: doc: ReSTize dmatest doc
  dmaengine: doc: ReSTize client API doc
  dmaengine: doc: ReSTize provider doc
  dmaengine: doc: Add ReST style dmaengine document
  ftrace/docs: Add documentation on how to use ftrace from within the kernel
  bug-hunting.rst: Fix an example and a typo in a Sphinx tag
  scripts: Add a script to find unused documentation
  samples: Convert timers to use timer_setup()
  documentation: kernel-api: add more info on bitmap functions
  Documentation: fix selftests related file refs
  Documentation: fix ref to power basic-pm-debugging
  Documentation: fix ref to trace stm content
  Documentation: fix ref to coccinelle content
  Documentation: fix ref to workqueue content
  Documentation: fix ref to sphinx/kerneldoc.py
  Documentation: fix locking rt-mutex doc refs
  docs: dev-tools: correct Coccinelle version number
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitmap.h | 114
-rw-r--r--  include/linux/log2.h   |  42
-rw-r--r--  include/linux/math64.h |  27
3 files changed, 112 insertions(+), 71 deletions(-)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 19748a5b0e77..3489253e38fc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -22,65 +22,74 @@
* See lib/bitmap.c for more details.
*/
-/*
+/**
+ * DOC: bitmap overview
+ *
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number set bits
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
- * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
- * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
- * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
- * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
- * bitmap_release_region(bitmap, pos, order) Free specified bit region
- * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * ::
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
+ * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ *
*/
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
-/*
+/**
+ * DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
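As a rough usage sketch of how the operations in the table above combine (the function name, sizes and bit positions below are invented for illustration and are not part of this patch; note that nbits stays a compile-time constant, as the comment requires):

    #include <linux/bitmap.h>
    #include <linux/kernel.h>

    #define EX_NBITS 128                    /* compile-time constant nbits */

    static void bitmap_doc_example(void)
    {
            /* Enough unsigned longs to hold bit positions 0..127 */
            DECLARE_BITMAP(a, EX_NBITS);
            DECLARE_BITMAP(b, EX_NBITS);
            DECLARE_BITMAP(res, EX_NBITS);

            bitmap_zero(a, EX_NBITS);            /* all bits clear */
            bitmap_fill(b, EX_NBITS);            /* all bits set */

            bitmap_set(a, 8, 4);                 /* set bits 8..11 of a */
            bitmap_and(res, a, b, EX_NBITS);     /* res = a & b */

            pr_info("weight=%d first=%lu empty=%d\n",
                    bitmap_weight(res, EX_NBITS),   /* -> 4 */
                    find_first_bit(res, EX_NBITS),  /* -> 8 */
                    bitmap_empty(res, EX_NBITS));   /* -> 0 (false) */
    }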
@@ -361,8 +370,9 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
-/*
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
+ * @n: u64 value
*
* Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
* integers in 32-bit environment, and 64-bit integers in 64-bit one.
@@ -393,14 +403,14 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
((unsigned long) ((u64)(n) >> 32))
#endif
-/*
+/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
- * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
- * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
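To make the distinction concrete, a minimal sketch (names and the u64 value are invented, not from this patch): BITMAP_FROM_U64() is intended for static bitmap initializers, while bitmap_from_u64() performs the same word-order fixup at run time.

    #include <linux/bitmap.h>
    #include <linux/kernel.h>

    /*
     * Static initializer: one unsigned long on 64-bit, a low/high pair on
     * 32-bit, so bit 0 of the u64 is bit 0 of the bitmap on either word size.
     */
    static const unsigned long ex_static_mask[] = {
            BITMAP_FROM_U64(0x00000000000000aaULL),   /* bits 1, 3, 5, 7 */
    };

    static void bitmap_u64_example(void)
    {
            DECLARE_BITMAP(ex_runtime_mask, 64);

            /* Run-time equivalent; safe on 32-bit big-endian as described. */
            bitmap_from_u64(ex_runtime_mask, 0x00000000000000aaULL);

            pr_info("static bit 1: %d, runtime bit 1: %d\n",
                    test_bit(1, ex_static_mask),
                    test_bit(1, ex_runtime_mask));    /* both 1 */
    }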
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f359f..41a1ae010993 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -37,19 +37,23 @@ int __ilog2_u64(u64 n)
}
#endif
-/*
- * Determine whether some value is a power of two, where zero is
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
+ * Return: true if @n is a power of 2, otherwise false.
*/
-
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
-/*
- * round up to nearest power of two
+/**
+ * __roundup_pow_of_two() - round up to nearest power of two
+ * @n: value to round up
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
@@ -57,8 +61,9 @@ unsigned long __roundup_pow_of_two(unsigned long n)
return 1UL << fls_long(n - 1);
}
-/*
- * round down to nearest power of two
+/**
+ * __rounddown_pow_of_two() - round down to nearest power of two
+ * @n: value to round down
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
@@ -67,12 +72,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
+ * the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
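A short sketch of the two helpers documented so far (invented names and values, not part of the diff); the second declaration relies on the constant-capable property described above:

    #include <linux/log2.h>
    #include <linux/kernel.h>

    static void ilog2_example(unsigned long n)
    {
            /* For n == 1000: not a power of two, and ilog2() truncates to 9. */
            pr_info("is_power_of_2(%lu) = %d\n", n, is_power_of_2(n));
            pr_info("ilog2(%lu) = %d\n", n, ilog2(n));
    }

    /* Constant-capable, so it can size static data at compile time. */
    static unsigned long ex_table[ilog2(4096) + 1];   /* 13 entries */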
@@ -150,7 +155,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* roundup_pow_of_two - round the given value up to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
@@ -167,7 +172,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
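A quick illustration of the two rounding macros (a sketch with arbitrary values, not part of the patch):

    #include <linux/log2.h>
    #include <linux/bug.h>
    #include <linux/kernel.h>

    static void rounding_example(void)
    {
            /* Both are undefined for 0, so callers must guard that case. */
            pr_info("%lu %lu\n",
                    roundup_pow_of_two(1000),      /* 1024 */
                    rounddown_pow_of_two(1000));   /* 512 */

            /* Constant-capable as well, e.g. for a power-of-two ring size. */
            BUILD_BUG_ON(roundup_pow_of_two(100) != 128);
    }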
@@ -180,6 +185,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
@@ -193,13 +204,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ob2(5) = 3
* ... and so on.
*/
-
-static inline __attribute_const__
-int __order_base_2(unsigned long n)
-{
- return n > 1 ? ilog2(n - 1) + 1 : 0;
-}
-
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
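For order_base_2(), a small sketch (the helper name is invented): it rounds up, matching the examples in the comment above, so ob2(3) == 2, ob2(4) == 2, ob2(5) == 3; a common pattern is deriving an allocation order from an entry count.

    #include <linux/log2.h>

    static inline unsigned int ex_entries_to_order(unsigned long nr_entries)
    {
            /* e.g. 5 entries -> order 3, i.e. 8 slots */
            return order_base_2(nr_entries);
    }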
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 082de345b73c..837f2f2d1d34 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -12,6 +12,11 @@
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ * @remainder: pointer to unsigned 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
@@ -24,6 +29,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ * @remainder: pointer to signed 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -33,6 +43,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ * @remainder: pointer to unsigned 64bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
@@ -42,6 +57,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
@@ -50,6 +69,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
/**
* div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -89,6 +112,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
@@ -104,6 +129,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
/**
* div_s64 - signed 64bit divide with 32bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
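A usage sketch of the divide helpers above (invented function name and values, not part of this patch): on 32-bit architectures a plain 64-bit division would need a libgcc helper that the kernel generally does not provide, so these wrappers are the portable spelling.

    #include <linux/math64.h>
    #include <linux/kernel.h>

    static void div64_example(void)
    {
            u64 ns = 3000000123ULL;         /* arbitrary nanosecond count */
            u32 rem;
            u64 sec;

            /* 64-bit dividend, 32-bit divisor: the common, cheap case. */
            sec = div_u64_rem(ns, 1000000000, &rem);
            pr_info("%llu.%09u s\n", sec, rem);     /* "3.000000123 s" */

            /* Full 64-bit divisor only when the divisor can exceed 32 bits. */
            pr_info("%llu\n", div64_u64(10000000000ULL, 2500000000ULL));  /* 4 */
    }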