This documentation is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
For more details see the file COPYING in the source distribution of Linux.
Table of Contents
current
's plugged list
bdget
by gendisk and partition number
Table of Contents
list_add — add a new entry
void fsfunclist_add ( | new, | |
head) ; |
struct list_head * new
;struct list_head * head
;list_add_tail — add a new entry
void fsfunclist_add_tail ( | new, | |
head) ; |
struct list_head * new
;struct list_head * head
;__list_del_entry — deletes entry from list.
void fsfunc__list_del_entry ( | entry) ; |
struct list_head * entry
;list_replace — replace old entry by new one
void fsfunclist_replace ( | old, | |
new) ; |
struct list_head * old
;struct list_head * new
;list_del_init — deletes entry from list and reinitialize it.
void fsfunclist_del_init ( | entry) ; |
struct list_head * entry
;list_move — delete from one list and add as another's head
void fsfunclist_move ( | list, | |
head) ; |
struct list_head * list
;struct list_head * head
;list_move_tail — delete from one list and add as another's tail
void fsfunclist_move_tail ( | list, | |
head) ; |
struct list_head * list
;struct list_head * head
;list_is_last —
tests whether list
is the last entry in list head
int fsfunclist_is_last ( | list, | |
head) ; |
const struct list_head * list
;const struct list_head * head
;list_empty — tests whether a list is empty
int fsfunclist_empty ( | head) ; |
const struct list_head * head
;list_empty_careful — tests whether a list is empty and not being modified
int fsfunclist_empty_careful ( | head) ; |
const struct list_head * head
;list_rotate_left — rotate the list to the left
void fsfunclist_rotate_left ( | head) ; |
struct list_head * head
;list_is_singular — tests whether a list has just one entry.
int fsfunclist_is_singular ( | head) ; |
const struct list_head * head
;list_cut_position — cut a list into two
void fsfunclist_cut_position ( | list, | |
head, | ||
entry) ; |
struct list_head * list
;struct list_head * head
;struct list_head * entry
;list_splice — join two lists, this is designed for stacks
void fsfunclist_splice ( | list, | |
head) ; |
const struct list_head * list
;struct list_head * head
;list_splice_tail — join two lists, each list being a queue
void fsfunclist_splice_tail ( | list, | |
head) ; |
struct list_head * list
;struct list_head * head
;list_splice_init — join two lists and reinitialise the emptied list.
void fsfunclist_splice_init ( | list, | |
head) ; |
struct list_head * list
;struct list_head * head
;list_splice_tail_init — join two lists and reinitialise the emptied list
void fsfunclist_splice_tail_init ( | list, | |
head) ; |
struct list_head * list
;struct list_head * head
;list_entry — get the struct for this entry
fsfunclist_entry ( | ptr, | |
type, | ||
member) ; |
ptr
; type
; member
;list_first_entry — get the first element from a list
fsfunclist_first_entry ( | ptr, | |
type, | ||
member) ; |
ptr
; type
; member
;list_first_entry_or_null — get the first element from a list
fsfunclist_first_entry_or_null ( | ptr, | |
type, | ||
member) ; |
ptr
; type
; member
;list_for_each_prev — iterate over a list backwards
fsfunclist_for_each_prev ( | pos, | |
head) ; |
pos
; head
;list_for_each_safe — iterate over a list safe against removal of list entry
fsfunclist_for_each_safe ( | pos, | |
n, | ||
head) ; |
pos
; n
; head
;list_for_each_prev_safe — iterate over a list backwards safe against removal of list entry
fsfunclist_for_each_prev_safe ( | pos, | |
n, | ||
head) ; |
pos
; n
; head
;list_for_each_entry — iterate over list of given type
fsfunclist_for_each_entry ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_for_each_entry_reverse — iterate backwards over list of given type.
fsfunclist_for_each_entry_reverse ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_prepare_entry —
prepare a pos entry for use in list_for_each_entry_continue
fsfunclist_prepare_entry ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_for_each_entry_continue — continue iteration over list of given type
fsfunclist_for_each_entry_continue ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_for_each_entry_continue_reverse — iterate backwards from the given point
fsfunclist_for_each_entry_continue_reverse ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_for_each_entry_from — iterate over list of given type from the current point
fsfunclist_for_each_entry_from ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;list_for_each_entry_safe — iterate over list of given type safe against removal of list entry
fsfunclist_for_each_entry_safe ( | pos, | |
n, | ||
head, | ||
member) ; |
pos
; n
; head
; member
;list_for_each_entry_safe_continue — continue list iteration safe against removal
fsfunclist_for_each_entry_safe_continue ( | pos, | |
n, | ||
head, | ||
member) ; |
pos
; n
; head
; member
;list_for_each_entry_safe_from — iterate over list from current point safe against removal
fsfunclist_for_each_entry_safe_from ( | pos, | |
n, | ||
head, | ||
member) ; |
pos
; n
; head
; member
;list_for_each_entry_safe_reverse — iterate backwards over list safe against removal
fsfunclist_for_each_entry_safe_reverse ( | pos, | |
n, | ||
head, | ||
member) ; |
pos
; n
; head
; member
;list_safe_reset_next — reset a stale list_for_each_entry_safe loop
fsfunclist_safe_reset_next ( | pos, | |
n, | ||
member) ; |
pos
; n
; member
;pos
the loop cursor used in the list_for_each_entry_safe loop
n
temporary storage used in list_for_each_entry_safe
member
the name of the list_struct within the struct.
list_safe_reset_next is not safe to use in general if the list may be modified concurrently (eg. the lock is dropped in the loop body). An exception to this is if the cursor element (pos) is pinned in the list, and list_safe_reset_next is called after re-taking the lock and before completing the current iteration of the loop body.
hlist_for_each_entry — iterate over list of given type
fsfunchlist_for_each_entry ( | pos, | |
head, | ||
member) ; |
pos
; head
; member
;hlist_for_each_entry_continue — iterate over a hlist continuing after current point
fsfunchlist_for_each_entry_continue ( | pos, | |
member) ; |
pos
; member
;hlist_for_each_entry_from — iterate over a hlist continuing from current point
fsfunchlist_for_each_entry_from ( | pos, | |
member) ; |
pos
; member
;Table of Contents
When writing drivers, you cannot in general use routines which are from the C Library. Some of the functions have been found generally useful and they are listed below. The behaviour of these functions may vary slightly from those defined by ANSI, and these deviations are noted in the text.
simple_strtoull — convert a string to an unsigned long long
unsigned long long fsfuncsimple_strtoull ( | cp, | |
endp, | ||
base) ; |
const char * cp
;char ** endp
;unsigned int base
;simple_strtoul — convert a string to an unsigned long
unsigned long fsfuncsimple_strtoul ( | cp, | |
endp, | ||
base) ; |
const char * cp
;char ** endp
;unsigned int base
;simple_strtol — convert a string to a signed long
long fsfuncsimple_strtol ( | cp, | |
endp, | ||
base) ; |
const char * cp
;char ** endp
;unsigned int base
;simple_strtoll — convert a string to a signed long long
long long fsfuncsimple_strtoll ( | cp, | |
endp, | ||
base) ; |
const char * cp
;char ** endp
;unsigned int base
;vsnprintf — Format a string and place it in a buffer
int fsfuncvsnprintf ( | buf, | |
size, | ||
fmt, | ||
args) ; |
char * buf
;size_t size
;const char * fmt
;va_list args
;buf
The buffer to place the result into
size
The size of the buffer, including the trailing null space
fmt
The format string to use
args
Arguments for the format string
This function follows C99 vsnprintf, but has some extensions:
pS
output the name of a text symbol with offset
ps
output the name of a text symbol without offset
pF
output the name of a function pointer with its offset
pf
output the name of a function pointer without its offset
pB
output the name of a backtrace symbol with its offset
pR
output the address range in a struct resource with decoded flags
pr
output the address range in a struct resource with raw flags
pM
output a 6-byte MAC address with colons
pMR
output a 6-byte MAC address with colons in reversed order
pMF
output a 6-byte MAC address with dashes
pm
output a 6-byte MAC address without colons
pmR
output a 6-byte MAC address without colons in reversed order
pI4
print an IPv4 address without leading zeros
pi4
print an IPv4 address with leading zeros
pI6
print an IPv6 address with colons
pi6
print an IPv6 address without colons
pI6c
print an IPv6 address as specified by RFC 5952
pIS
depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
piS
depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
pU
[bBlL] print a UUID/GUID in big or little endian using lower or upper
case.
%*ph[CDN] a variable-length hex string with a separator (supports up to 64
bytes of the input)
n
is ignored
** Please update Documentation/printk-formats.txt when making changes **
The return value is the number of characters which would
be generated for the given input, excluding the trailing
'\0', as per ISO C99. If you want to have the exact
number of characters written into buf
as return value
(not including the trailing '\0'), use vscnprintf
. If the
return is greater than or equal to size
, the resulting
string is truncated.
If you're not already dealing with a va_list consider using snprintf
.
vscnprintf — Format a string and place it in a buffer
int fsfuncvscnprintf ( | buf, | |
size, | ||
fmt, | ||
args) ; |
char * buf
;size_t size
;const char * fmt
;va_list args
;buf
The buffer to place the result into
size
The size of the buffer, including the trailing null space
fmt
The format string to use
args
Arguments for the format string
The return value is the number of characters which have been written into
the buf
not including the trailing '\0'. If size
is == 0 the function
returns 0.
If you're not already dealing with a va_list consider using scnprintf
.
See the vsnprintf
documentation for format string extensions over C99.
snprintf — Format a string and place it in a buffer
int fsfuncsnprintf ( | buf, | |
size, | ||
fmt, | ||
...) ; |
char * buf
;size_t size
;const char * fmt
; ...
;buf
The buffer to place the result into
size
The size of the buffer, including the trailing null space
fmt
The format string to use @...: Arguments for the format string
...
variable arguments
The return value is the number of characters which would be
generated for the given input, excluding the trailing null,
as per ISO C99. If the return is greater than or equal to
size
, the resulting string is truncated.
See the vsnprintf
documentation for format string extensions over C99.
scnprintf — Format a string and place it in a buffer
int fsfuncscnprintf ( | buf, | |
size, | ||
fmt, | ||
...) ; |
char * buf
;size_t size
;const char * fmt
; ...
;vsprintf — Format a string and place it in a buffer
int fsfuncvsprintf ( | buf, | |
fmt, | ||
args) ; |
char * buf
;const char * fmt
;va_list args
;sprintf — Format a string and place it in a buffer
int fsfuncsprintf ( | buf, | |
fmt, | ||
...) ; |
char * buf
;const char * fmt
; ...
;vbin_printf — Parse a format string and place args' binary value in a buffer
int fsfuncvbin_printf ( | bin_buf, | |
size, | ||
fmt, | ||
args) ; |
u32 * bin_buf
;size_t size
;const char * fmt
;va_list args
;bin_buf
The buffer to place args' binary value
size
The size of the buffer(by words(32bits), not characters)
fmt
The format string to use
args
Arguments for the format string
bstr_printf — Format a string from binary arguments and place it in a buffer
int fsfuncbstr_printf ( | buf, | |
size, | ||
fmt, | ||
bin_buf) ; |
char * buf
;size_t size
;const char * fmt
;const u32 * bin_buf
;buf
The buffer to place the result into
size
The size of the buffer, including the trailing null space
fmt
The format string to use
bin_buf
Binary arguments for the format string
This function is like C99 vsnprintf, but the difference is that vsnprintf gets
its arguments from the stack, and bstr_printf gets its arguments from bin_buf
which is
a binary buffer that was generated by vbin_printf.
The format follows C99 vsnprintf, but has some extensions: see vsnprintf comment for details.
The return value is the number of characters which would
be generated for the given input, excluding the trailing
'\0', as per ISO C99. If you want to have the exact
number of characters written into buf
as return value
(not including the trailing '\0'), use vscnprintf
. If the
return is greater than or equal to size
, the resulting
string is truncated.
bprintf — Parse a format string and place args' binary value in a buffer
int fsfuncbprintf ( | bin_buf, | |
size, | ||
fmt, | ||
...) ; |
u32 * bin_buf
;size_t size
;const char * fmt
; ...
;vsscanf — Unformat a buffer into a list of arguments
int fsfuncvsscanf ( | buf, | |
fmt, | ||
args) ; |
const char * buf
;const char * fmt
;va_list args
;sscanf — Unformat a buffer into a list of arguments
int fsfuncsscanf ( | buf, | |
fmt, | ||
...) ; |
const char * buf
;const char * fmt
; ...
;kstrtol — convert a string to a long
int fsfunckstrtol ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;long * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign or a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
kstrtoul — convert a string to an unsigned long
int fsfunckstrtoul ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;unsigned long * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign, but not a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
kstrtoull — convert a string to an unsigned long long
int fsfunckstrtoull ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;unsigned long long * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign, but not a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
kstrtoll — convert a string to a long long
int fsfunckstrtoll ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;long long * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign or a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
kstrtouint — convert a string to an unsigned int
int fsfunckstrtouint ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;unsigned int * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign, but not a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
kstrtoint — convert a string to an int
int fsfunckstrtoint ( | s, | |
base, | ||
res) ; |
const char * s
;unsigned int base
;int * res
;s
The start of the string. The string must be null-terminated, and may also include a single newline before its terminating null. The first character may also be a plus sign or a minus sign.
base
The number base to use. The maximum supported base is 16. If base is given as 0, then the base of the string is automatically detected with the conventional semantics - If it begins with 0x the number will be parsed as a hexadecimal (case insensitive), if it otherwise begins with 0, it will be parsed as an octal number. Otherwise it will be parsed as a decimal.
res
Where to write the result of the conversion on success.
strnicmp — Case insensitive, length-limited string comparison
int fsfuncstrnicmp ( | s1, | |
s2, | ||
len) ; |
const char * s1
;const char * s2
;size_t len
;strcpy —
Copy a NUL
terminated string
char * fsfuncstrcpy ( | dest, | |
src) ; |
char * dest
;const char * src
;strncpy —
Copy a length-limited, NUL-terminated
string
char * fsfuncstrncpy ( | dest, | |
src, | ||
count) ; |
char * dest
;const char * src
;size_t count
;strlcpy —
Copy a NUL
terminated string into a sized buffer
size_t fsfuncstrlcpy ( | dest, | |
src, | ||
size) ; |
char * dest
;const char * src
;size_t size
;strcat —
Append one NUL-terminated
string to another
char * fsfuncstrcat ( | dest, | |
src) ; |
char * dest
;const char * src
;strncat —
Append a length-limited, NUL-terminated
string to another
char * fsfuncstrncat ( | dest, | |
src, | ||
count) ; |
char * dest
;const char * src
;size_t count
;strlcat —
Append a length-limited, NUL-terminated
string to another
size_t fsfuncstrlcat ( | dest, | |
src, | ||
count) ; |
char * dest
;const char * src
;size_t count
;strcmp — Compare two strings
int fsfuncstrcmp ( | cs, | |
ct) ; |
const char * cs
;const char * ct
;strncmp — Compare two length-limited strings
int fsfuncstrncmp ( | cs, | |
ct, | ||
count) ; |
const char * cs
;const char * ct
;size_t count
;strchr — Find the first occurrence of a character in a string
char * fsfuncstrchr ( | s, | |
c) ; |
const char * s
;int c
;strrchr — Find the last occurrence of a character in a string
char * fsfuncstrrchr ( | s, | |
c) ; |
const char * s
;int c
;strnchr — Find a character in a length limited string
char * fsfuncstrnchr ( | s, | |
count, | ||
c) ; |
const char * s
;size_t count
;int c
;skip_spaces —
Removes leading whitespace from str
.
char * fsfuncskip_spaces ( | str) ; |
const char * str
;strim —
Removes leading and trailing whitespace from s
.
char * fsfuncstrim ( | s) ; |
char * s
;strnlen — Find the length of a length-limited string
size_t fsfuncstrnlen ( | s, | |
count) ; |
const char * s
;size_t count
;strspn —
Calculate the length of the initial substring of s
which only contain letters in accept
size_t fsfuncstrspn ( | s, | |
accept) ; |
const char * s
;const char * accept
;strcspn —
Calculate the length of the initial substring of s
which does not contain letters in reject
size_t fsfuncstrcspn ( | s, | |
reject) ; |
const char * s
;const char * reject
;strpbrk — Find the first occurrence of a set of characters
char * fsfuncstrpbrk ( | cs, | |
ct) ; |
const char * cs
;const char * ct
;strsep — Split a string into tokens
char * fsfuncstrsep ( | s, | |
ct) ; |
char ** s
;const char * ct
;sysfs_streq — return true if strings are equal, modulo trailing newline
bool fsfuncsysfs_streq ( | s1, | |
s2) ; |
const char * s1
;const char * s2
;strtobool — convert common user inputs into boolean values
int fsfuncstrtobool ( | s, | |
res) ; |
const char * s
;bool * res
;memset — Fill a region of memory with the given value
void * fsfuncmemset ( | s, | |
c, | ||
count) ; |
void * s
;int c
;size_t count
;memcpy — Copy one area of memory to another
void * fsfuncmemcpy ( | dest, | |
src, | ||
count) ; |
void * dest
;const void * src
;size_t count
;memmove — Copy one area of memory to another
void * fsfuncmemmove ( | dest, | |
src, | ||
count) ; |
void * dest
;const void * src
;size_t count
;memcmp — Compare two areas of memory
int fsfuncmemcmp ( | cs, | |
ct, | ||
count) ; |
const void * cs
;const void * ct
;size_t count
;memscan — Find a character in an area of memory.
void * fsfuncmemscan ( | addr, | |
c, | ||
size) ; |
void * addr
;int c
;size_t size
;strstr —
Find the first substring in a NUL
terminated string
char * fsfuncstrstr ( | s1, | |
s2) ; |
const char * s1
;const char * s2
;strnstr — Find the first substring in a length-limited string
char * fsfuncstrnstr ( | s1, | |
s2, | ||
len) ; |
const char * s1
;const char * s2
;size_t len
;memchr — Find a character in an area of memory.
void * fsfuncmemchr ( | s, | |
c, | ||
n) ; |
const void * s
;int c
;size_t n
;set_bit — Atomically set a bit in memory
void fsfuncset_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;__set_bit — Set a bit in memory
void fsfunc__set_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;clear_bit — Clears a bit in memory
void fsfuncclear_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;__change_bit — Toggle a bit in memory
void fsfunc__change_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;change_bit — Toggle a bit in memory
void fsfuncchange_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;test_and_set_bit — Set a bit and return its old value
int fsfunctest_and_set_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;test_and_set_bit_lock — Set a bit and return its old value for lock
int fsfunctest_and_set_bit_lock ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;__test_and_set_bit — Set a bit and return its old value
int fsfunc__test_and_set_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;test_and_clear_bit — Clear a bit and return its old value
int fsfunctest_and_clear_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;__test_and_clear_bit — Clear a bit and return its old value
int fsfunc__test_and_clear_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;This operation is non-atomic and can be reordered. If two examples of this operation race, one can appear to succeed but actually fail. You must protect multiple accesses with a lock.
test_and_change_bit — Change a bit and return its old value
int fsfunctest_and_change_bit ( | nr, | |
addr) ; |
long nr
;volatile unsigned long * addr
;test_bit — Determine whether a bit is set
int fsfunctest_bit ( | nr, | |
addr) ; |
int nr
;const volatile unsigned long * addr
;__ffs — find first set bit in word
unsigned long fsfunc__ffs ( | word) ; |
unsigned long word
;Table of Contents
The Linux kernel provides more basic utility functions.
__bitmap_shift_right — logical right shift of the bits in a bitmap
void fsfunc__bitmap_shift_right ( | dst, | |
src, | ||
shift, | ||
bits) ; |
unsigned long * dst
;const unsigned long * src
;int shift
;int bits
;__bitmap_shift_left — logical left shift of the bits in a bitmap
void fsfunc__bitmap_shift_left ( | dst, | |
src, | ||
shift, | ||
bits) ; |
unsigned long * dst
;const unsigned long * src
;int shift
;int bits
;bitmap_scnprintf — convert bitmap to an ASCII hex string.
int fsfuncbitmap_scnprintf ( | buf, | |
buflen, | ||
maskp, | ||
nmaskbits) ; |
char * buf
;unsigned int buflen
;const unsigned long * maskp
;int nmaskbits
;__bitmap_parse — convert an ASCII hex string into a bitmap.
int fsfunc__bitmap_parse ( | buf, | |
buflen, | ||
is_user, | ||
maskp, | ||
nmaskbits) ; |
const char * buf
;unsigned int buflen
;int is_user
;unsigned long * maskp
;int nmaskbits
;buf
pointer to buffer containing string.
buflen
buffer size in bytes. If string is smaller than this then it must be terminated with a \0.
is_user
location of buffer, 0 indicates kernel space
maskp
pointer to bitmap array that will contain result.
nmaskbits
size of bitmap, in bits.
Commas group hex digits into chunks. Each chunk defines exactly 32
bits of the resultant bitmask. No chunk may specify a value larger
than 32 bits (-EOVERFLOW
), and if a chunk specifies a smaller value
then leading 0-bits are prepended. -EINVAL
is returned for illegal
characters and for grouping errors such as “1,,5”, “,44”, “,” and "".
Leading and trailing whitespace accepted, but not embedded whitespace.
bitmap_parse_user — convert an ASCII hex string in a user buffer into a bitmap
int fsfuncbitmap_parse_user ( | ubuf, | |
ulen, | ||
maskp, | ||
nmaskbits) ; |
const char __user * ubuf
;unsigned int ulen
;unsigned long * maskp
;int nmaskbits
;bitmap_scnlistprintf — convert bitmap to list format ASCII string
int fsfuncbitmap_scnlistprintf ( | buf, | |
buflen, | ||
maskp, | ||
nmaskbits) ; |
char * buf
;unsigned int buflen
;const unsigned long * maskp
;int nmaskbits
;buf
byte buffer into which string is placed
buflen
reserved size of buf
, in bytes
maskp
pointer to bitmap to convert
nmaskbits
size of bitmap, in bits
Output format is a comma-separated list of decimal numbers and
ranges. Consecutively set bits are shown as two hyphen-separated
decimal numbers, the smallest and largest bit numbers set in
the range. Output format is compatible with the format
accepted as input by bitmap_parselist
.
The return value is the number of characters which were written to *buf excluding the trailing '\0', as per ISO C99's scnprintf.
bitmap_parselist_user —
int fsfuncbitmap_parselist_user ( | ubuf, | |
ulen, | ||
maskp, | ||
nmaskbits) ; |
const char __user * ubuf
;unsigned int ulen
;unsigned long * maskp
;int nmaskbits
;bitmap_remap — Apply map defined by a pair of bitmaps to another bitmap
void fsfuncbitmap_remap ( | dst, | |
src, | ||
old, | ||
new, | ||
bits) ; |
unsigned long * dst
;const unsigned long * src
;const unsigned long * old
;const unsigned long * new
;int bits
;dst
remapped result
src
subset to be remapped
old
defines domain of map
new
defines range of map
bits
number of bits in each of these bitmaps
Let old
and new
define a mapping of bit positions, such that
whatever position is held by the n-th set bit in old
is mapped
to the n-th set bit in new
. In the more general case, allowing
for the possibility that the weight 'w' of new
is less than the
weight of old
, map the position of the n-th set bit in old
to
the position of the m-th set bit in new
, where m == n % w.
If either of the old
and new
bitmaps are empty, or if src
and
dst
point to the same location, then this routine copies src
to dst
.
The positions of unset bits in old
are mapped to themselves
(the identity map).
Apply the above specified mapping to src
, placing the result in
dst
, clearing any bits previously set in dst
.
For example, lets say that old
has bits 4 through 7 set, and
new
has bits 12 through 15 set. This defines the mapping of bit
position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
bit positions unchanged. So if say src
comes into this routine
with bits 1, 5 and 7 set, then dst
should leave with bits 1,
13 and 15 set.
bitmap_bitremap — Apply map defined by a pair of bitmaps to a single bit
int fsfuncbitmap_bitremap ( | oldbit, | |
old, | ||
new, | ||
bits) ; |
int oldbit
;const unsigned long * old
;const unsigned long * new
;int bits
;oldbit
bit position to be mapped
old
defines domain of map
new
defines range of map
bits
number of bits in each of these bitmaps
Let old
and new
define a mapping of bit positions, such that
whatever position is held by the n-th set bit in old
is mapped
to the n-th set bit in new
. In the more general case, allowing
for the possibility that the weight 'w' of new
is less than the
weight of old
, map the position of the n-th set bit in old
to
the position of the m-th set bit in new
, where m == n % w.
The positions of unset bits in old
are mapped to themselves
(the identity map).
Apply the above specified mapping to bit position oldbit
, returning
the new bit position.
For example, lets say that old
has bits 4 through 7 set, and
new
has bits 12 through 15 set. This defines the mapping of bit
position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
bit positions unchanged. So if say oldbit
is 5, then this routine
returns 13.
bitmap_onto — translate one bitmap relative to another
void fsfuncbitmap_onto ( | dst, | |
orig, | ||
relmap, | ||
bits) ; |
unsigned long * dst
;const unsigned long * orig
;const unsigned long * relmap
;int bits
;dst
resulting translated bitmap
orig
original untranslated bitmap
relmap
bitmap relative to which translated
bits
number of bits in each of these bitmaps
Set the n-th bit of dst
iff there exists some m such that the
n-th bit of relmap
is set, the m-th bit of orig
is set, and
the n-th bit of relmap
is also the m-th _set_ bit of relmap
.
(If you understood the previous sentence the first time you
read it, you're overqualified for your current job.)
In other words, orig
is mapped onto (surjectively) dst
,
using the map { <n, m> | the n-th bit of relmap
is the
m-th set bit of relmap
}.
Any set bits in orig
above bit number W, where W is the
weight of (number of set bits in) relmap
are mapped nowhere.
In particular, if for all bits m set in orig
, m >= W, then
dst
will end up empty. In situations where the possibility
of such an empty result is not desired, one way to avoid it is
to use the bitmap_fold
operator, below, to first fold the
orig
bitmap over itself so that all its set bits x are in the
range 0 <= x < W. The bitmap_fold
operator does this by
setting the bit (m % W) in dst
, for each bit (m) set in orig
.
Example [1] for bitmap_onto
:
Let's say relmap
has bits 30-39 set, and orig
has bits
1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
dst
will have bits 31, 33, 35, 37 and 39 set.
When bit 0 is set in orig
, it means turn on the bit in
dst
corresponding to whatever is the first bit (if any)
that is turned on in relmap
. Since bit 0 was off in the
above example, we leave off that bit (bit 30) in dst
.
When bit 1 is set in orig
(as in the above example), it
means turn on the bit in dst
corresponding to whatever
is the second bit that is turned on in relmap
. The second
bit in relmap
that was turned on in the above example was
bit 31, so we turned on bit 31 in dst
.
Similarly, we turned on bits 33, 35, 37 and 39 in dst
,
because they were the 4th, 6th, 8th and 10th set bits
set in relmap
, and the 4th, 6th, 8th and 10th bits of
orig
(i.e. bits 3, 5, 7 and 9) were also set.
When bit 11 is set in orig
, it means turn on the bit in
dst
corresponding to whatever is the twelfth bit that is
turned on in relmap
. In the above example, there were
only ten bits turned on in relmap
(30..39), so that bit
11 was set in orig
had no affect on dst
.
Example [2] for bitmap_fold
+ bitmap_onto
:
Let's say relmap
has these ten bits set:
40 41 42 43 45 48 53 61 74 95
(for the curious, that's 40 plus the first ten terms of the
Fibonacci sequence.)
Further lets say we use the following code, invoking
bitmap_fold
then bitmap_onto, as suggested above to
avoid the possibility of an empty dst
result:
unsigned long *tmp; // a temporary bitmap's bits
bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); bitmap_onto(dst, tmp, relmap, bits);
Then this table shows what various values of dst
would be, for
various orig
's. I list the zero-based positions of each set bit.
The tmp column shows the intermediate result, as computed by
using bitmap_fold
to fold the orig
bitmap modulo ten
(the weight of relmap
).
orig
tmp dst
0 0 40
1 1 41
9 9 95
10 0 40 (*)
1 3 5 7 1 3 5 7 41 43 48 61
0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
0 9 18 27 0 9 8 7 40 61 74 95
0 10 20 30 0 40
0 11 22 33 0 1 2 3 40 41 42 43
0 12 24 36 0 2 4 6 40 42 45 53
78 102 211 1 2 8 41 42 74 (*)
(*) For these marked lines, if we hadn't first done bitmap_fold
into tmp, then the dst
result would have been empty.
If either of orig
or relmap
is empty (no set bits), then dst
will be returned empty.
If (as explained above) the only set bits in orig
are in positions
m where m >= W, (where W is the weight of relmap
) then dst
will
once again be returned empty.
All bits in dst
not set by the above rule are cleared.
bitmap_fold — fold larger bitmap into smaller, modulo specified size
void fsfuncbitmap_fold ( | dst, | |
orig, | ||
sz, | ||
bits) ; |
unsigned long * dst
;const unsigned long * orig
;int sz
;int bits
;bitmap_find_free_region — find a contiguous aligned mem region
int fsfuncbitmap_find_free_region ( | bitmap, | |
bits, | ||
order) ; |
unsigned long * bitmap
;int bits
;int order
;bitmap
array of unsigned longs corresponding to the bitmap
bits
number of bits in the bitmap
order
region size (log base 2 of number of bits) to find
Find a region of free (zero) bits in a bitmap
of bits
bits and
allocate them (set them to one). Only consider regions of length
a power (order
) of two, aligned to that power of two, which
makes the search algorithm much faster.
Return the bit offset in bitmap of the allocated region, or -errno on failure.
bitmap_release_region — release allocated bitmap region
void fsfuncbitmap_release_region ( | bitmap, | |
pos, | ||
order) ; |
unsigned long * bitmap
;int pos
;int order
;bitmap_allocate_region — allocate bitmap region
int fsfuncbitmap_allocate_region ( | bitmap, | |
pos, | ||
order) ; |
unsigned long * bitmap
;int pos
;int order
;bitmap_copy_le — copy a bitmap, putting the bits into little-endian order.
void fsfuncbitmap_copy_le ( | dst, | |
src, | ||
nbits) ; |
void * dst
;const unsigned long * src
;int nbits
;__bitmap_parselist — convert list format ASCII string to bitmap
int fsfunc__bitmap_parselist ( | buf, | |
buflen, | ||
is_user, | ||
maskp, | ||
nmaskbits) ; |
const char * buf
;unsigned int buflen
;int is_user
;unsigned long * maskp
;int nmaskbits
;buf
read nul-terminated user string from this buffer
buflen
buffer size in bytes. If string is smaller than this then it must be terminated with a \0.
is_user
location of buffer, 0 indicates kernel space
maskp
write resulting mask here
nmaskbits
number of bits in mask to be written
bitmap_pos_to_ord — find ordinal of set bit at given position in bitmap
int fsfuncbitmap_pos_to_ord ( | buf, | |
pos, | ||
bits) ; |
const unsigned long * buf
;int pos
;int bits
;buf
pointer to a bitmap
pos
a bit position in buf
(0 <= pos
< bits
)
bits
number of valid bit positions in buf
Map the bit at position pos
in buf
(of length bits
) to the
ordinal of which set bit it is. If it is not set or if pos
is not a valid bit position, map to -1.
If for example, just bits 4 through 7 are set in buf
, then pos
values 4 through 7 will get mapped to 0 through 3, respectively,
and other pos
values will get mapped to -1. When pos
value 7
gets mapped to (returns) ord
value 3 in this example, that means
that bit 7 is the 3rd (starting with 0th) set bit in buf
.
The bit positions 0 through bits
are valid positions in buf
.
bitmap_ord_to_pos — find position of n-th set bit in bitmap
int fsfuncbitmap_ord_to_pos ( | buf, | |
ord, | ||
bits) ; |
const unsigned long * buf
;int ord
;int bits
;buf
pointer to bitmap
ord
ordinal bit position (n-th set bit, n >= 0)
bits
number of valid bit positions in buf
Map the ordinal offset of bit ord
in buf
to its position in buf
.
Value of ord
should be in range 0 <= ord
< weight(buf), else
results are undefined.
If for example, just bits 4 through 7 are set in buf
, then ord
values 0 through 3 will get mapped to 4 through 7, respectively,
and all other ord
values return undefined values. When ord
value 3
gets mapped to (returns) pos
value 7 in this example, that means
that the 3rd set bit (starting with 0th) is at position 7 in buf
.
The bit positions 0 through bits
are valid positions in buf
.
get_option — Parse integer from an option string
int fsfuncget_option ( | str, | |
pint) ; |
char ** str
;int * pint
;get_options — Parse a string into a list of integers
char * fsfuncget_options ( | str, | |
nints, | ||
ints) ; |
const char * str
;int nints
;int * ints
;This function parses a string containing a comma-separated list of integers, a hyphen-separated range of _positive_ integers, or a combination of both. The parse halts when the array is full, or when no more numbers can be retrieved from the string.
Return value is the character in the string which caused
the parse to end (typically a null terminator, if str
is
completely parseable).
memparse — parse a string with mem suffixes into a number
unsigned long long fsfuncmemparse ( | ptr, | |
retptr) ; |
const char * ptr
;char ** retptr
;
Parses a string into a number. The number stored at ptr
is
potentially suffixed with K
(for kilobytes, or 1024 bytes),
M
(for megabytes, or 1048576 bytes), or G
(for gigabytes, or
1073741824). If the number is suffixed with K, M, or G, then
the return value is the number multiplied by one kilobyte, one
megabyte, or one gigabyte, respectively.
crc7 — update the CRC7 for the data buffer
u8 fsfunccrc7 ( | crc, | |
buffer, | ||
len) ; |
u8 crc
;const u8 * buffer
;size_t len
;crc16 — compute the CRC-16 for the data buffer
u16 fsfunccrc16 ( | crc, | |
buffer, | ||
len) ; |
u16 crc
;u8 const * buffer
;size_t len
;crc_itu_t — Compute the CRC-ITU-T for the data buffer
u16 fsfunccrc_itu_t ( | crc, | |
buffer, | ||
len) ; |
u16 crc
;const u8 * buffer
;size_t len
;idr synchronization (stolen from radix-tree.h)
idr_find
is able to be called locklessly, using RCU. The caller must
ensure calls to this function are made within rcu_read_lock
regions.
Other readers (lock-free or otherwise) and modifications may be running
concurrently.
It is still required that the caller manage the synchronization and
lifetimes of the items. So if RCU lock-free lookups are used, typically
this would mean that the items have their own locks, or are amenable to
lock-free access; and that the items are freed by RCU (or only freed after
having been deleted from the idr tree *and* a synchronize_rcu
grace
period).
IDA - IDR based ID allocator
This is id allocator without id -> pointer translation. Memory usage is much lower than full blown idr because each id only occupies a bit. ida uses a custom leaf node which contains IDA_BITMAP_BITS slots.
2007-04-25 written by Tejun Heo <htejun@gmail.com>
idr_preload —
preload for idr_alloc
void fsfuncidr_preload ( | gfp_mask) ; |
gfp_t gfp_mask
;
Preload per-cpu layer buffer for idr_alloc
. Can only be used from
process context and each idr_preload
invocation should be matched with
idr_preload_end
. Note that preemption is disabled while preloaded.
The first idr_alloc
in the preloaded section can be treated as if it
were invoked with gfp_mask
used for preloading. This allows using more
permissive allocation masks for idrs protected by spinlocks.
For example, if idr_alloc
below fails, the failure can be treated as
if idr_alloc
were called with GFP_KERNEL rather than GFP_NOWAIT.
idr_preload(GFP_KERNEL); spin_lock(lock);
id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
spin_unlock(lock);
idr_preload_end
;
if (id < 0)
error;
idr_alloc — allocate new idr entry
int fsfuncidr_alloc ( | idr, | |
ptr, | ||
start, | ||
end, | ||
gfp_mask) ; |
struct idr * idr
;void * ptr
;int start
;int end
;gfp_t gfp_mask
;idr
the (initialized) idr
ptr
pointer to be associated with the new id
start
the minimum id (inclusive)
end
the maximum id (exclusive, <= 0 for max)
gfp_mask
memory allocation flags
Allocate an id in [start, end) and associate it with ptr
. If no ID is
available in the specified range, returns -ENOSPC. On memory allocation
failure, returns -ENOMEM.
Note that end
is treated as max when <= 0. This is to always allow
using start
+ N as end
as long as N is inside integer range.
The user is responsible for exclusively synchronizing all operations
which may modify idr
. However, read-only accesses such as idr_find
or iteration can be performed under RCU read lock provided the user
destroys ptr
in RCU-safe way after removal from idr.
idr_alloc_cyclic — allocate new idr entry in a cyclical fashion
int fsfuncidr_alloc_cyclic ( | idr, | |
ptr, | ||
start, | ||
end, | ||
gfp_mask) ; |
struct idr * idr
;void * ptr
;int start
;int end
;gfp_t gfp_mask
;idr_remove — remove the given id and free its slot
void fsfuncidr_remove ( | idp, | |
id) ; |
struct idr * idp
;int id
;idr_destroy — release all cached layers within an idr tree
void fsfuncidr_destroy ( | idp) ; |
struct idr * idp
;
Free all id mappings and all idp_layers. After this function, idp
is
completely unused and can be freed / recycled. The caller is
responsible for ensuring that no one else accesses idp
during or after
idr_destroy
.
A typical clean-up sequence for objects stored in an idr tree will use
idr_for_each
to free all objects, if necessary, then idr_destroy
to
free up the id mappings and cached idr_layers.
idr_for_each — iterate through all stored pointers
int fsfuncidr_for_each ( | idp, | |
fn, | ||
data) ; |
struct idr * idp
;int (*fn)
(
int id, void *p, void *data)
;void * data
;idp
idr handle
fn
function to be called for each pointer
data
data passed back to callback function
Iterate over the pointers registered with the given idr. The callback function will be called for each pointer currently registered, passing the id, the pointer and the data pointer passed to this function. It is not safe to modify the idr tree while in the callback, so functions such as idr_get_new and idr_remove are not allowed.
We check the return of fn
each time. If it returns anything other
than 0
, we break out and return that value.
The caller must serialize idr_for_each
vs idr_get_new
and idr_remove
.
idr_get_next — lookup next object of id to given id.
void * fsfuncidr_get_next ( | idp, | |
nextidp) ; |
struct idr * idp
;int * nextidp
;idr_replace — replace pointer for given id
void * fsfuncidr_replace ( | idp, | |
ptr, | ||
id) ; |
struct idr * idp
;void * ptr
;int id
;ida_pre_get — reserve resources for ida allocation
int fsfuncida_pre_get ( | ida, | |
gfp_mask) ; |
struct ida * ida
;gfp_t gfp_mask
;ida_get_new_above — allocate new ID above or equal to a start id
int fsfuncida_get_new_above ( | ida, | |
starting_id, | ||
p_id) ; |
struct ida * ida
;int starting_id
;int * p_id
;
Allocate new ID above or equal to starting_id
. It should be called
with any required locks.
If memory is required, it will return -EAGAIN
, you should unlock
and go back to the ida_pre_get
call. If the ida is full, it will
return -ENOSPC
.
p_id
returns a value in the range starting_id
... 0x7fffffff
.
ida_remove — remove the given ID
void fsfuncida_remove ( | ida, | |
id) ; |
struct ida * ida
;int id
;ida_destroy — release all cached layers within an ida tree
void fsfuncida_destroy ( | ida) ; |
struct ida * ida
;ida_simple_get — get a new id.
int fsfuncida_simple_get ( | ida, | |
start, | ||
end, | ||
gfp_mask) ; |
struct ida * ida
;unsigned int start
;unsigned int end
;gfp_t gfp_mask
;kmalloc — allocate memory
void * fsfunckmalloc ( | size, | |
flags) ; |
size_t size
;gfp_t flags
;
The flags
argument may be one of:
GFP_USER
- Allocate memory on behalf of user. May sleep.
GFP_KERNEL
- Allocate normal kernel ram. May sleep.
GFP_ATOMIC
- Allocation will not sleep. May use emergency pools.
For example, use this inside interrupt handlers.
GFP_HIGHUSER
- Allocate pages from high memory.
GFP_NOIO
- Do not do any I/O at all while trying to get memory.
GFP_NOFS
- Do not make any fs calls while trying to get memory.
GFP_NOWAIT
- Allocation will not sleep.
GFP_THISNODE
- Allocate node-local memory only.
GFP_DMA
- Allocation suitable for DMA.
Should only be used for kmalloc
caches. Otherwise, use a
slab created with SLAB_DMA.
Also it is possible to set different flags by OR'ing
in one or more of the following additional flags
:
__GFP_COLD
- Request cache-cold pages instead of
trying to return cache-warm pages.
__GFP_HIGH
- This allocation has high priority and may use emergency pools.
__GFP_NOFAIL
- Indicate that this allocation is in no way allowed to fail
(think twice before using).
__GFP_NORETRY
- If memory is not immediately available,
then give up at once.
__GFP_NOWARN
- If allocation fails, don't issue any warnings.
__GFP_REPEAT
- If allocation fails initially, try once more before failing.
There are other flags available as well, but these are not intended for general use, and so are not documented here. For a full list of potential flags, always refer to linux/gfp.h.
kmalloc is the normal method of allocating memory in the kernel.
kmalloc_array — allocate memory for an array.
void * fsfunckmalloc_array ( | n, | |
size, | ||
flags) ; |
size_t n
;size_t size
;gfp_t flags
;kcalloc — allocate memory for an array. The memory is set to zero.
void * fsfunckcalloc ( | n, | |
size, | ||
flags) ; |
size_t n
;size_t size
;gfp_t flags
;kmalloc_node — allocate memory from a specific node
void * fsfunckmalloc_node ( | size, | |
flags, | ||
node) ; |
size_t size
;gfp_t flags
;int node
;kzalloc — allocate memory. The memory is set to zero.
void * fsfunckzalloc ( | size, | |
flags) ; |
size_t size
;gfp_t flags
;kzalloc_node — allocate zeroed memory from a particular memory node.
void * fsfunckzalloc_node ( | size, | |
flags, | ||
node) ; |
size_t size
;gfp_t flags
;int node
;kmem_cache_shrink — Shrink a cache.
int fsfunckmem_cache_shrink ( | cachep) ; |
struct kmem_cache * cachep
;kmem_cache_alloc — Allocate an object
void * fsfunckmem_cache_alloc ( | cachep, | |
flags) ; |
struct kmem_cache * cachep
;gfp_t flags
;kmem_cache_alloc_node — Allocate an object on the specified node
void * fsfunckmem_cache_alloc_node ( | cachep, | |
flags, | ||
nodeid) ; |
struct kmem_cache * cachep
;gfp_t flags
;int nodeid
;kmem_cache_free — Deallocate an object
void fsfunckmem_cache_free ( | cachep, | |
objp) ; |
struct kmem_cache * cachep
;void * objp
;ksize — get the actual amount of memory allocated for a given object
size_t fsfuncksize ( | objp) ; |
const void * objp
;
kmalloc may internally round up allocations and return more memory
than requested. ksize
can be used to determine the actual amount of
memory allocated. The caller may use this additional memory, even though
a smaller amount of memory was initially specified with the kmalloc call.
The caller must guarantee that objp points to a valid object previously
allocated with either kmalloc
or kmem_cache_alloc
. The object
must not be freed during the duration of the call.
__copy_to_user_inatomic — Copy a block of data into user space, with less checking.
unsigned long fsfunc__copy_to_user_inatomic ( | to, | |
from, | ||
n) ; |
void __user * to
;const void * from
;unsigned long n
;to
Destination address, in user space.
from
Source address, in kernel space.
n
Number of bytes to copy.
Copy data from kernel space to user space. Caller must check
the specified block with access_ok
before calling this function.
The caller should also make sure he pins the user space address
so that we don't result in page fault and sleep.
Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault we return the initial request size (1, 2 or 4), as copy_*_user should do. If a store crosses a page boundary and gets a fault, the x86 will not write anything, so this is accurate.
__copy_to_user — Copy a block of data into user space, with less checking.
unsigned long fsfunc__copy_to_user ( | to, | |
from, | ||
n) ; |
void __user * to
;const void * from
;unsigned long n
;__copy_from_user — Copy a block of data from user space, with less checking.
unsigned long fsfunc__copy_from_user ( | to, | |
from, | ||
n) ; |
void * to
;const void __user * from
;unsigned long n
;to
Destination address, in kernel space.
from
Source address, in user space.
n
Number of bytes to copy.
Copy data from user space to kernel space. Caller must check
the specified block with access_ok
before calling this function.
Returns number of bytes that could not be copied. On success, this will be zero.
If some data could not be copied, this function will pad the copied data to the requested size using zero bytes.
An alternate version - __copy_from_user_inatomic
- may be called from
atomic context and will fail rather than sleep. In this case the
uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
for explanation of why this is needed.
clear_user — Zero a block of memory in user space.
unsigned long fsfuncclear_user ( | to, | |
n) ; |
void __user * to
;unsigned long n
;__clear_user — Zero a block of memory in user space, with less checking.
unsigned long fsfunc__clear_user ( | to, | |
n) ; |
void __user * to
;unsigned long n
;copy_to_user — Copy a block of data into user space.
unsigned long fsfunccopy_to_user ( | to, | |
from, | ||
n) ; |
void __user * to
;const void * from
;unsigned long n
;read_cache_pages — populate an address space with some pages & start reads against them
int fsfuncread_cache_pages ( | mapping, | |
pages, | ||
filler, | ||
data) ; |
struct address_space * mapping
;struct list_head * pages
;int (*filler)
(
void *, struct page *)
;void * data
;page_cache_sync_readahead — generic file readahead
void fsfuncpage_cache_sync_readahead ( | mapping, | |
ra, | ||
filp, | ||
offset, | ||
req_size) ; |
struct address_space * mapping
;struct file_ra_state * ra
;struct file * filp
;pgoff_t offset
;unsigned long req_size
;mapping
address_space which holds the pagecache and I/O vectors
ra
file_ra_state which holds the readahead state
filp
passed on to ->readpage
and ->readpages
offset
start offset into mapping
, in pagecache page-sized units
req_size
hint: total size of the read which the caller is performing in pagecache pages
page_cache_async_readahead — file readahead for marked pages
void fsfuncpage_cache_async_readahead ( | mapping, | |
ra, | ||
filp, | ||
page, | ||
offset, | ||
req_size) ; |
struct address_space * mapping
;struct file_ra_state * ra
;struct file * filp
;struct page * page
;pgoff_t offset
;unsigned long req_size
;mapping
address_space which holds the pagecache and I/O vectors
ra
file_ra_state which holds the readahead state
filp
passed on to ->readpage
and ->readpages
page
the page at offset
which has the PG_readahead flag set
offset
start offset into mapping
, in pagecache page-sized units
req_size
hint: total size of the read which the caller is performing in pagecache pages
delete_from_page_cache — delete page from page cache
void fsfuncdelete_from_page_cache ( | page) ; |
struct page * page
;filemap_flush — mostly a non-blocking flush
int fsfuncfilemap_flush ( | mapping) ; |
struct address_space * mapping
;filemap_fdatawait_range — wait for writeback to complete
int fsfuncfilemap_fdatawait_range ( | mapping, | |
start_byte, | ||
end_byte) ; |
struct address_space * mapping
;loff_t start_byte
;loff_t end_byte
;filemap_fdatawait — wait for all under-writeback pages to complete
int fsfuncfilemap_fdatawait ( | mapping) ; |
struct address_space * mapping
;filemap_write_and_wait_range — write out & wait on a file range
int fsfuncfilemap_write_and_wait_range ( | mapping, | |
lstart, | ||
lend) ; |
struct address_space * mapping
;loff_t lstart
;loff_t lend
;replace_page_cache_page — replace a pagecache page with a new one
int fsfuncreplace_page_cache_page ( | old, | |
new, | ||
gfp_mask) ; |
struct page * old
;struct page * new
;gfp_t gfp_mask
;This function replaces a page in the pagecache with a new one. On success it acquires the pagecache reference for the new page and drops it for the old page. Both the old and new pages must be locked. This function does not add the new page to the LRU, the caller must do that.
The remove + add is atomic. The only way this function can fail is memory allocation failure.
add_to_page_cache_locked — add a locked page to the pagecache
int fsfuncadd_to_page_cache_locked ( | page, | |
mapping, | ||
offset, | ||
gfp_mask) ; |
struct page * page
;struct address_space * mapping
;pgoff_t offset
;gfp_t gfp_mask
;add_page_wait_queue — Add an arbitrary waiter to a page's wait queue
void fsfuncadd_page_wait_queue ( | page, | |
waiter) ; |
struct page * page
;wait_queue_t * waiter
;unlock_page — unlock a locked page
void fsfuncunlock_page ( | page) ; |
struct page * page
;
Unlocks the page and wakes up sleepers in ___wait_on_page_locked
.
Also wakes sleepers in wait_on_page_writeback
because the wakeup
mechanism between PageLocked pages and PageWriteback pages is shared.
But that's OK - sleepers in wait_on_page_writeback
just go back to sleep.
The mb is necessary to enforce ordering between the clear_bit and the read
of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked
).
end_page_writeback — end writeback against a page
void fsfuncend_page_writeback ( | page) ; |
struct page * page
;__lock_page — get a lock on the page, assuming we need to sleep to get it
void fsfunc__lock_page ( | page) ; |
struct page * page
;find_get_page — find and get a page reference
struct page * fsfuncfind_get_page ( | mapping, | |
offset) ; |
struct address_space * mapping
;pgoff_t offset
;find_lock_page — locate, pin and lock a pagecache page
struct page * fsfuncfind_lock_page ( | mapping, | |
offset) ; |
struct address_space * mapping
;pgoff_t offset
;find_or_create_page — locate or add a pagecache page
struct page * fsfuncfind_or_create_page ( | mapping, | |
index, | ||
gfp_mask) ; |
struct address_space * mapping
;pgoff_t index
;gfp_t gfp_mask
;mapping
the page's address_space
index
the page's index into the mapping
gfp_mask
page allocation mode
Locates a page in the pagecache. If the page is not present, a new page
is allocated using gfp_mask
and is added to the pagecache and to the VM's
LRU list. The returned page is locked and has its reference count
incremented.
find_or_create_page
may sleep, even if gfp_flags
specifies an atomic
allocation!
find_or_create_page
returns the desired page's address, or zero on
memory exhaustion.
find_get_pages_contig — gang contiguous pagecache lookup
unsigned fsfuncfind_get_pages_contig ( | mapping, | |
index, | ||
nr_pages, | ||
pages) ; |
struct address_space * mapping
;pgoff_t index
;unsigned int nr_pages
;struct page ** pages
;find_get_pages_tag —
find and return pages that match tag
unsigned fsfuncfind_get_pages_tag ( | mapping, | |
index, | ||
tag, | ||
nr_pages, | ||
pages) ; |
struct address_space * mapping
;pgoff_t * index
;int tag
;unsigned int nr_pages
;struct page ** pages
;grab_cache_page_nowait — returns locked page at given index in given cache
struct page * fsfuncgrab_cache_page_nowait ( | mapping, | |
index) ; |
struct address_space * mapping
;pgoff_t index
;
Same as grab_cache_page
, but do not wait if the page is unavailable.
This is intended for speculative data generators, where the data can
be regenerated if the page couldn't be grabbed. This routine should
be safe to call while holding the lock for another page.
Clear __GFP_FS when allocating the page to avoid recursion into the fs and deadlock against the caller's locked page.
generic_file_aio_read — generic filesystem read routine
ssize_t fsfuncgeneric_file_aio_read ( | iocb, | |
iov, | ||
nr_segs, | ||
pos) ; |
struct kiocb * iocb
;const struct iovec * iov
;unsigned long nr_segs
;loff_t pos
;filemap_fault — read in file data for page fault handling
int fsfuncfilemap_fault ( | vma, | |
vmf) ; |
struct vm_area_struct * vma
;struct vm_fault * vmf
;
filemap_fault
is invoked via the vma operations vector for a
mapped memory region to read in file data during a page fault.
The goto's are kind of ugly, but this streamlines the normal case of having it in the page cache, and handles the special cases reasonably without having a lot of duplicated code.
read_cache_page_async — read into page cache, fill it if needed
struct page * fsfuncread_cache_page_async ( | mapping, | |
index, | ||
filler, | ||
data) ; |
struct address_space * mapping
;pgoff_t index
;int (*filler)
(
void *, struct page *)
;void * data
;mapping
the page's address_space
index
the page index
filler
function to perform the read
data
first arg to filler(data, page) function, often left as NULL
Same as read_cache_page, but don't wait for page to become unlocked after submitting it to the filler.
Read into the page cache. If a page already exists, and PageUptodate
is
not set, try to fill the page but don't wait for it to become unlocked.
If the page does not get brought uptodate, return -EIO.
read_cache_page_gfp — read into page cache, using specified page allocation flags.
struct page * fsfuncread_cache_page_gfp ( | mapping, | |
index, | ||
gfp) ; |
struct address_space * mapping
;pgoff_t index
;gfp_t gfp
;read_cache_page — read into page cache, fill it if needed
struct page * fsfuncread_cache_page ( | mapping, | |
index, | ||
filler, | ||
data) ; |
struct address_space * mapping
;pgoff_t index
;int (*filler)
(
void *, struct page *)
;void * data
;__generic_file_aio_write — write data to a file
ssize_t fsfunc__generic_file_aio_write ( | iocb, | |
iov, | ||
nr_segs, | ||
ppos) ; |
struct kiocb * iocb
;const struct iovec * iov
;unsigned long nr_segs
;loff_t * ppos
;iocb
IO state structure (file, offset, etc.)
iov
vector with data to write
nr_segs
number of segments in the vector
ppos
position where to write
This function does all the work needed for actually writing data to a file. It does all basic checks, removes SUID from the file, updates modification times and calls proper subroutines depending on whether we do direct IO or a standard buffered write.
It expects i_mutex to be grabbed unless we work on a block device or similar object which does not need locking at all.
This function does *not* take care of syncing data in case of O_SYNC write. A caller has to handle it. This is mainly due to the fact that we want to avoid syncing under i_mutex.
generic_file_aio_write — write data to a file
ssize_t fsfuncgeneric_file_aio_write ( | iocb, | |
iov, | ||
nr_segs, | ||
pos) ; |
struct kiocb * iocb
;const struct iovec * iov
;unsigned long nr_segs
;loff_t pos
;try_to_release_page — release old fs-specific metadata on a page
int fsfunctry_to_release_page ( | page, | |
gfp_mask) ; |
struct page * page
;gfp_t gfp_mask
;page
the page which the kernel is trying to free
gfp_mask
memory allocation flags (and I/O mode)
The address_space is to try to release any data against the page (presumably at page->private). If the release was successful, return `1'. Otherwise return zero.
This may also be called if PG_fscache is set on a page, indicating that the page is known to the local caching routines.
The gfp_mask
argument specifies whether I/O may be performed to release
this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
zap_vma_ptes — remove ptes mapping the vma
int fsfunczap_vma_ptes ( | vma, | |
address, | ||
size) ; |
struct vm_area_struct * vma
;unsigned long address
;unsigned long size
;__get_user_pages — pin user pages in memory
long fsfunc__get_user_pages ( | tsk, | |
mm, | ||
start, | ||
nr_pages, | ||
gup_flags, | ||
pages, | ||
vmas, | ||
nonblocking) ; |
struct task_struct * tsk
;struct mm_struct * mm
;unsigned long start
;unsigned long nr_pages
;unsigned int gup_flags
;struct page ** pages
;struct vm_area_struct ** vmas
;int * nonblocking
;tsk
task_struct of target task
mm
mm_struct of target mm
start
starting user address
nr_pages
number of pages from start to pin
gup_flags
flags modifying pin behaviour
pages
array that receives pointers to the pages pinned. Should be at least nr_pages long. Or NULL, if caller only intends to ensure the pages are faulted in.
vmas
array of pointers to vmas corresponding to each page. Or NULL if the caller does not require them.
nonblocking
whether waiting for disk IO or mmap_sem contention
Returns number of pages pinned. This may be fewer than the number
requested. If nr_pages is 0 or negative, returns 0. If no pages
were pinned, returns -errno. Each page returned must be released
with a put_page
call when it is finished with. vmas will only
remain valid while mmap_sem is held.
Must be called with mmap_sem held for read or write.
__get_user_pages walks a process's page tables and takes a reference to each struct page that each user address corresponds to at a given instant. That is, it takes the page that would be accessed if a user thread accesses the given user virtual address at that instant.
This does not guarantee that the page exists in the user mappings when __get_user_pages returns, and there may even be a completely different page there in some cases (eg. if mmapped pagecache has been invalidated and subsequently re-faulted). However it does guarantee that the page won't be freed completely. And mostly callers simply care that the page contains data that was valid *at some point in time*. Typically, an IO or similar operation cannot guarantee anything stronger anyway because locks can't be held over the syscall boundary.
If gup_flags
& FOLL_WRITE == 0, the page must not be written to. If
the page is written to, set_page_dirty (or set_page_dirty_lock, as
appropriate) must be called after the page is finished with, and
before put_page is called.
If nonblocking
!= NULL, __get_user_pages will not wait for disk IO
or mmap_sem contention, and if waiting is needed to pin all pages,
*nonblocking
will be set to 0.
In most cases, get_user_pages or get_user_pages_fast should be used
instead of __get_user_pages. __get_user_pages should be used only if
you need some special gup_flags
.
vm_insert_page — insert single page into user vma
int fsfuncvm_insert_page ( | vma, | |
addr, | ||
page) ; |
struct vm_area_struct * vma
;unsigned long addr
;struct page * page
;This allows drivers to insert individual pages they've allocated into a user vma.
The page has to be a nice clean _individual_ kernel allocation.
If you allocate a compound page, you need to have marked it as
such (__GFP_COMP), or manually just split the page up yourself
(see split_page
).
NOTE! Traditionally this was done with “remap_pfn_range
” which
took an arbitrary page protection parameter. This doesn't allow
that. Your vma protection will have to be set up correctly, which
means that if you want a shared writable mapping, you'd better
ask for a shared writable mapping!
The page does not need to be reserved.
Usually this function is called from f_op->mmap
handler
under mm->mmap_sem write-lock, so it can change vma->vm_flags.
Caller must set VM_MIXEDMAP on vma if it wants to call this
function from other places, for example from page-fault handler.
vm_insert_pfn — insert single pfn into user vma
int fsfuncvm_insert_pfn ( | vma, | |
addr, | ||
pfn) ; |
struct vm_area_struct * vma
;unsigned long addr
;unsigned long pfn
;Similar to vm_insert_page, this allows drivers to insert individual pages they've allocated into a user vma. Same comments apply.
This function should only be called from a vm_ops->fault handler, and in that case the handler should return NULL.
vma cannot be a COW mapping.
As this is called only for pages that do not currently exist, we do not need to flush old virtual caches or the TLB.
remap_pfn_range — remap kernel memory to userspace
int fsfuncremap_pfn_range ( | vma, | |
addr, | ||
pfn, | ||
size, | ||
prot) ; |
struct vm_area_struct * vma
;unsigned long addr
;unsigned long pfn
;unsigned long size
;pgprot_t prot
;vm_iomap_memory — remap memory to userspace
int fsfuncvm_iomap_memory ( | vma, | |
start, | ||
len) ; |
struct vm_area_struct * vma
;phys_addr_t start
;unsigned long len
;
This is a simplified io_remap_pfn_range
for common driver use. The
driver just needs to give us the physical memory range to be mapped,
we'll figure out the rest from the vma information.
NOTE! Some drivers might want to tweak vma->vm_page_prot first to get whatever write-combining details or similar.
unmap_mapping_range — unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
void fsfuncunmap_mapping_range ( | mapping, | |
holebegin, | ||
holelen, | ||
even_cows) ; |
struct address_space * mapping
;loff_t const holebegin
;loff_t const holelen
;int even_cows
;mapping
the address space containing mmaps to be unmapped.
holebegin
byte in first page to unmap, relative to the start of
the underlying file. This will be rounded down to a PAGE_SIZE
boundary. Note that this is different from truncate_pagecache
, which
must keep the partial page. In contrast, we must get rid of
partial pages.
holelen
size of prospective hole in bytes. This will be rounded up to a PAGE_SIZE boundary. A holelen of zero truncates to the end of the file.
even_cows
1 when truncating a file, unmap even private COWed pages; but 0 when invalidating pagecache, don't throw away private data.
follow_pfn — look up PFN at a user virtual address
int fsfuncfollow_pfn ( | vma, | |
address, | ||
pfn) ; |
struct vm_area_struct * vma
;unsigned long address
;unsigned long * pfn
;vm_unmap_aliases — unmap outstanding lazy aliases in the vmap layer
void fsfuncvm_unmap_aliases ( | void) ; |
void
;
The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily to amortize TLB flushing overheads. What this means is that any page you have now, may, in a former life, have been mapped into kernel virtual address by the vmap layer and so there might be some CPUs with TLB entries still referencing that page (additional to the regular 1:1 kernel mapping).
vm_unmap_aliases flushes all such lazy mappings. After it returns, we can be sure that none of the pages we have control over will have any aliases from the vmap layer.
vm_unmap_ram — unmap linear kernel address space set up by vm_map_ram
void fsfuncvm_unmap_ram ( | mem, | |
count) ; |
const void * mem
;unsigned int count
;vm_map_ram — map pages linearly into kernel virtual address (vmalloc space)
void * fsfuncvm_map_ram ( | pages, | |
count, | ||
node, | ||
prot) ; |
struct page ** pages
;unsigned int count
;int node
;pgprot_t prot
;unmap_kernel_range_noflush — unmap kernel VM area
void fsfuncunmap_kernel_range_noflush ( | addr, | |
size) ; |
unsigned long addr
;unsigned long size
;vfree —
release memory allocated by vmalloc
void fsfuncvfree ( | addr) ; |
const void * addr
;
Free the virtually continuous memory area starting at addr
, as
obtained from vmalloc
, vmalloc_32
or __vmalloc
. If addr
is
NULL, no operation is performed.
Must not be called in NMI context (strictly speaking, only if we don't
have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
conventions for vfree
arch-dependent would be a really bad idea)
vunmap —
release virtual mapping obtained by vmap
void fsfuncvunmap ( | addr) ; |
const void * addr
;vmap — map an array of pages into virtually contiguous space
void * fsfuncvmap ( | pages, | |
count, | ||
flags, | ||
prot) ; |
struct page ** pages
;unsigned int count
;unsigned long flags
;pgprot_t prot
;vmalloc — allocate virtually contiguous memory
void * fsfuncvmalloc ( | size) ; |
unsigned long size
;vzalloc — allocate virtually contiguous memory with zero fill
void * fsfuncvzalloc ( | size) ; |
unsigned long size
;vmalloc_user — allocate zeroed virtually contiguous memory for userspace
void * fsfuncvmalloc_user ( | size) ; |
unsigned long size
;vmalloc_node — allocate memory on a specific node
void * fsfuncvmalloc_node ( | size, | |
node) ; |
unsigned long size
;int node
;vzalloc_node — allocate memory on a specific node with zero fill
void * fsfuncvzalloc_node ( | size, | |
node) ; |
unsigned long size
;int node
;vmalloc_32 — allocate virtually contiguous memory (32bit addressable)
void * fsfuncvmalloc_32 ( | size) ; |
unsigned long size
;vmalloc_32_user — allocate zeroed virtually contiguous 32bit memory
void * fsfuncvmalloc_32_user ( | size) ; |
unsigned long size
;remap_vmalloc_range_partial — map vmalloc pages to userspace
int fsfuncremap_vmalloc_range_partial ( | vma, | |
uaddr, | ||
kaddr, | ||
size) ; |
struct vm_area_struct * vma
;unsigned long uaddr
;void * kaddr
;unsigned long size
;remap_vmalloc_range — map vmalloc pages to userspace
int fsfuncremap_vmalloc_range ( | vma, | |
addr, | ||
pgoff) ; |
struct vm_area_struct * vma
;void * addr
;unsigned long pgoff
;alloc_vm_area — allocate a range of kernel address space
struct vm_struct * fsfuncalloc_vm_area ( | size, | |
ptes) ; |
size_t size
;pte_t ** ptes
;nr_free_zone_pages — count number of pages beyond high watermark
unsigned long fsfuncnr_free_zone_pages ( | offset) ; |
int offset
;nr_free_pagecache_pages — count number of pages beyond high watermark
unsigned long fsfuncnr_free_pagecache_pages ( | void) ; |
void
;find_next_best_node — find the next node that should appear in a given node's fallback list
int fsfuncfind_next_best_node ( | node, | |
used_node_mask) ; |
int node
;nodemask_t * used_node_mask
;node
node whose fallback list we're appending
used_node_mask
nodemask_t of already used nodes
We use a number of factors to determine which is the next node that should
appear on a given node's fallback list. The node should not have appeared
already in node
's fallback list, and it should be the next closest node
according to the distance array (which contains arbitrary distance values
from each node to each node in the system), and should also prefer nodes
with no CPUs, since presumably they'll have very little allocation pressure
on them otherwise.
It returns -1 if no node is found.
free_bootmem_with_active_regions — Call free_bootmem_node for each active range
void fsfuncfree_bootmem_with_active_regions ( | nid, | |
max_low_pfn) ; |
int nid
;unsigned long max_low_pfn
;sparse_memory_present_with_active_regions — Call memory_present for each active range
void fsfuncsparse_memory_present_with_active_regions ( | nid) ; |
int nid
;get_pfn_range_for_nid — Return the start and end page frames for a node
void __meminit fsfuncget_pfn_range_for_nid ( | nid, | |
start_pfn, | ||
end_pfn) ; |
unsigned int nid
;unsigned long * start_pfn
;unsigned long * end_pfn
;absent_pages_in_range — Return number of page frames in holes within a range
unsigned long fsfuncabsent_pages_in_range ( | start_pfn, | |
end_pfn) ; |
unsigned long start_pfn
;unsigned long end_pfn
;node_map_pfn_alignment — determine the maximum internode alignment
unsigned long fsfuncnode_map_pfn_alignment ( | void) ; |
void
;
This function should be called after node map is populated and sorted. It calculates the maximum power of two alignment which can distinguish all the nodes.
For example, if all nodes are 1GiB and aligned to 1GiB, the return value would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the nodes are shifted by 256MiB, 256MiB. Note that if only the last node is shifted, 1GiB is enough and this function will indicate so.
This is used to test whether pfn -> nid mapping of the chosen memory model has fine enough granularity to avoid incorrect mapping for the populated node map.
Returns the determined alignment in pfn's. 0 if there is no alignment requirement (single node).
find_min_pfn_with_active_regions — Find the minimum PFN registered
unsigned long fsfuncfind_min_pfn_with_active_regions ( | void) ; |
void
;free_area_init_nodes — Initialise all pg_data_t and zone data
void fsfuncfree_area_init_nodes ( | max_zone_pfn) ; |
unsigned long * max_zone_pfn
;
This will call free_area_init_node
for each active node in the system.
Using the page ranges provided by add_active_range
, the size of each
zone in each node and their holes is calculated. If the maximum PFN
between two adjacent zones match, it is assumed that the zone is empty.
For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
that arch_max_dma32_pfn has no pages. It is also assumed that a zone
starts where the previous one ended. For example, ZONE_DMA32 starts
at arch_max_dma_pfn.
set_dma_reserve — set the specified number of pages reserved in the first zone
void fsfuncset_dma_reserve ( | new_dma_reserve) ; |
unsigned long new_dma_reserve
;The per-cpu batchsize and zone watermarks are determined by present_pages. In the DMA zone, a significant percentage may be consumed by kernel image and other unfreeable allocations which can skew the watermarks badly. This function may optionally be used to account for unfreeable pages in the first zone (e.g., ZONE_DMA). The effect will be lower watermarks and smaller per-cpu batchsize.
setup_per_zone_wmarks — called when min_free_kbytes changes or when memory is hot-{added|removed}
void fsfuncsetup_per_zone_wmarks ( | void) ; |
void
;get_pageblock_flags_group — Return the requested group of flags for the pageblock_nr_pages block of pages
unsigned long fsfuncget_pageblock_flags_group ( | page, | |
start_bitidx, | ||
end_bitidx) ; |
struct page * page
;int start_bitidx
;int end_bitidx
;set_pageblock_flags_group — Set the requested group of flags for a pageblock_nr_pages block of pages
void fsfuncset_pageblock_flags_group ( | page, | |
flags, | ||
start_bitidx, | ||
end_bitidx) ; |
struct page * page
;unsigned long flags
;int start_bitidx
;int end_bitidx
;alloc_contig_range — tries to allocate given range of pages
int fsfuncalloc_contig_range ( | start, | |
end, | ||
migratetype) ; |
unsigned long start
;unsigned long end
;unsigned migratetype
;start
start PFN to allocate
end
one-past-the-last PFN to allocate
migratetype
migratetype of the underlying pageblocks (either #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks in range must have the same migratetype and it must be either of the two.
The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES aligned, however it's the caller's responsibility to guarantee that we are the only thread that changes migrate type of pageblocks the pages fall in.
The PFN range must belong to a single zone.
Returns zero on success or negative error code. On success all
pages which PFN is in [start, end) are allocated for the caller and
need to be freed with free_contig_range
.
mempool_destroy — deallocate a memory pool
void fsfuncmempool_destroy ( | pool) ; |
mempool_t * pool
;mempool_create — create a memory pool
mempool_t * fsfuncmempool_create ( | min_nr, | |
alloc_fn, | ||
free_fn, | ||
pool_data) ; |
int min_nr
;mempool_alloc_t * alloc_fn
;mempool_free_t * free_fn
;void * pool_data
;min_nr
the minimum number of elements guaranteed to be allocated for this pool.
alloc_fn
user-defined element-allocation function.
free_fn
user-defined element-freeing function.
pool_data
optional private data available to the user-defined functions.
this function creates and allocates a guaranteed size, preallocated
memory pool. The pool can be used from the mempool_alloc
and mempool_free
functions. This function might sleep. Both the alloc_fn
and the free_fn
functions might sleep - as long as the mempool_alloc
function is not called
from IRQ contexts.
mempool_resize — resize an existing memory pool
int fsfuncmempool_resize ( | pool, | |
new_min_nr, | ||
gfp_mask) ; |
mempool_t * pool
;int new_min_nr
;gfp_t gfp_mask
;pool
pointer to the memory pool which was allocated via
mempool_create
.
new_min_nr
the new minimum number of elements guaranteed to be allocated for this pool.
gfp_mask
the usual allocation bitmask.
This function shrinks/grows the pool. In the case of growing,
it cannot be guaranteed that the pool will be grown to the new
size immediately, but new mempool_free
calls will refill it.
Note, the caller must guarantee that no mempool_destroy is called
while this function is running. mempool_alloc
& mempool_free
might be called (eg. from IRQ contexts) while this function executes.
mempool_alloc — allocate an element from a specific memory pool
void * fsfuncmempool_alloc ( | pool, | |
gfp_mask) ; |
mempool_t * pool
;gfp_t gfp_mask
;mempool_free — return an element to the pool.
void fsfuncmempool_free ( | element, | |
pool) ; |
void * element
;mempool_t * pool
;dma_pool_create — Creates a pool of consistent memory blocks, for dma.
struct dma_pool * fsfuncdma_pool_create ( | name, | |
dev, | ||
size, | ||
align, | ||
boundary) ; |
const char * name
;struct device * dev
;size_t size
;size_t align
;size_t boundary
;name
name of pool, for diagnostics
dev
device that will be doing the DMA
size
size of the blocks in this pool.
align
alignment requirement for blocks; must be a power of two
boundary
returned blocks won't cross this power of two boundary
Returns a dma allocation pool with the requested characteristics, or
null if one can't be created. Given one of these pools, dma_pool_alloc
may be used to allocate memory. Such memory will all have “consistent”
DMA mappings, accessible by the device and its driver without using
cache flushing primitives. The actual size of blocks allocated may be
larger than requested because of alignment.
If boundary
is nonzero, objects returned from dma_pool_alloc
won't
cross that size boundary. This is useful for devices which have
addressing restrictions on individual DMA transfers, such as not crossing
boundaries of 4KBytes.
dma_pool_destroy — destroys a pool of dma memory blocks.
void fsfuncdma_pool_destroy ( | pool) ; |
struct dma_pool * pool
;dma_pool_alloc — get a block of consistent memory
void * fsfuncdma_pool_alloc ( | pool, | |
mem_flags, | ||
handle) ; |
struct dma_pool * pool
;gfp_t mem_flags
;dma_addr_t * handle
;dma_pool_free — put block back into dma pool
void fsfuncdma_pool_free ( | pool, | |
vaddr, | ||
dma) ; |
struct dma_pool * pool
;void * vaddr
;dma_addr_t dma
;dmam_pool_create —
Managed dma_pool_create
struct dma_pool * fsfuncdmam_pool_create ( | name, | |
dev, | ||
size, | ||
align, | ||
allocation) ; |
const char * name
;struct device * dev
;size_t size
;size_t align
;size_t allocation
;dmam_pool_destroy —
Managed dma_pool_destroy
void fsfuncdmam_pool_destroy ( | pool) ; |
struct dma_pool * pool
;balance_dirty_pages_ratelimited — balance dirty memory state
void fsfuncbalance_dirty_pages_ratelimited ( | mapping) ; |
struct address_space * mapping
;Processes which are dirtying memory should call in here once for each page which was newly dirtied. The function will periodically check the system's dirty state and will initiate writeback if needed.
On really big machines, get_writeback_state is expensive, so try to avoid calling it too often (ratelimiting). But once we're over the dirty memory limit we decrease the ratelimiting by a lot, to prevent individual processes from overshooting the limit by (ratelimit_pages) each.
tag_pages_for_writeback — tag pages to be written by write_cache_pages
void fsfunctag_pages_for_writeback ( | mapping, | |
start, | ||
end) ; |
struct address_space * mapping
;pgoff_t start
;pgoff_t end
;mapping
address space structure to write
start
starting page index
end
ending page index (inclusive)
This function scans the page range from start
to end
(inclusive) and tags
all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
that write_cache_pages (or whoever calls this function) will then use
TOWRITE tag to identify pages eligible for writeback. This mechanism is
used to avoid livelocking of writeback by a process steadily creating new
dirty pages in the file (thus it is important for this function to be quick
so that it can tag pages faster than a dirtying process can create them).
write_cache_pages — walk the list of dirty pages of the given address space and write all of them.
int fsfuncwrite_cache_pages ( | mapping, | |
wbc, | ||
writepage, | ||
data) ; |
struct address_space * mapping
;struct writeback_control * wbc
;writepage_t writepage
;void * data
;mapping
address space structure to write
wbc
subtract the number of written pages from *wbc
->nr_to_write
writepage
function called for each page
data
data passed to writepage function
If a page is already under I/O, write_cache_pages
skips it, even
if it's dirty. This is desirable behaviour for memory-cleaning writeback,
but it is INCORRECT for data-integrity system calls such as fsync
. fsync
and msync
need to guarantee that all the data which was dirty at the time
the call was made get new I/O started against them. If wbc->sync_mode is
WB_SYNC_ALL then we were called for data integrity and we must wait for
existing IO to complete.
To avoid livelocks (when other process dirties new pages), we first tag pages which should be written back with TOWRITE tag and only then start writing them. For data-integrity sync we have to be careful so that we do not miss some pages (e.g., because some other process has cleared TOWRITE tag we set). The rule we follow is that TOWRITE tag can be cleared only by the process clearing the DIRTY tag (and submitting the page for IO).
generic_writepages —
walk the list of dirty pages of the given address space and writepage
all of them.
int fsfuncgeneric_writepages ( | mapping, | |
wbc) ; |
struct address_space * mapping
;struct writeback_control * wbc
;write_one_page — write out a single page and optionally wait on I/O
int fsfuncwrite_one_page ( | page, | |
wait) ; |
struct page * page
;int wait
;wait_for_stable_page — wait for writeback to finish, if necessary.
void fsfuncwait_for_stable_page ( | page) ; |
struct page * page
;truncate_inode_pages_range — truncate range of pages specified by start & end byte offsets
void fsfunctruncate_inode_pages_range ( | mapping, | |
lstart, | ||
lend) ; |
struct address_space * mapping
;loff_t lstart
;loff_t lend
;mapping
mapping to truncate
lstart
offset from which to truncate
lend
offset to which to truncate (inclusive)
Truncate the page cache, removing the pages that are between specified offsets (and zeroing out partial pages if lstart or lend + 1 is not page aligned).
Truncate takes two passes - the first pass is nonblocking. It will not block on page locks and it will not block on writeback. The second pass will wait. This is to prevent as much IO as possible in the affected region. The first pass will remove most pages, so the search cost of the second pass is low.
We pass down the cache-hot hint to the page freeing code. Even if the mapping is large, it is probably the case that the final pages are the most recently touched, and freeing happens in ascending file offset order.
Note that since ->invalidatepage
accepts range to invalidate
truncate_inode_pages_range is able to handle cases where lend + 1 is not
page aligned properly.
truncate_inode_pages — truncate *all* the pages from an offset
void fsfunctruncate_inode_pages ( | mapping, | |
lstart) ; |
struct address_space * mapping
;loff_t lstart
;invalidate_mapping_pages — Invalidate all the unlocked pages of one inode
unsigned long fsfuncinvalidate_mapping_pages ( | mapping, | |
start, | ||
end) ; |
struct address_space * mapping
;pgoff_t start
;pgoff_t end
;invalidate_inode_pages2_range — remove range of pages from an address_space
int fsfuncinvalidate_inode_pages2_range ( | mapping, | |
start, | ||
end) ; |
struct address_space * mapping
;pgoff_t start
;pgoff_t end
;invalidate_inode_pages2 — remove all pages from an address_space
int fsfuncinvalidate_inode_pages2 ( | mapping) ; |
struct address_space * mapping
;truncate_pagecache — unmap and remove pagecache that has been truncated
void fsfunctruncate_pagecache ( | inode, | |
oldsize, | ||
newsize) ; |
struct inode * inode
;loff_t oldsize
;loff_t newsize
;inode's new i_size must already be written before truncate_pagecache is called.
This function should typically be called before the filesystem releases resources associated with the freed range (eg. deallocates blocks). This way, pagecache will always stay logically coherent with on-disk format, and the filesystem would not have to deal with situations such as writepage being called for a page that has already had its underlying blocks deallocated.
truncate_setsize — update inode and pagecache for a new file size
void fsfunctruncate_setsize ( | inode, | |
newsize) ; |
struct inode * inode
;loff_t newsize
;
truncate_setsize updates i_size and performs pagecache truncation (if
necessary) to newsize
. It will be typically be called from the filesystem's
setattr function when ATTR_SIZE is passed in.
Must be called with inode_mutex held and before all filesystem specific block truncation has been performed.
truncate_pagecache_range — unmap and remove pagecache that is hole-punched
void fsfunctruncate_pagecache_range ( | inode, | |
lstart, | ||
lend) ; |
struct inode * inode
;loff_t lstart
;loff_t lend
;This function should typically be called before the filesystem releases resources associated with the freed range (eg. deallocates blocks). This way, pagecache will always stay logically coherent with on-disk format, and the filesystem would not have to deal with situations such as writepage being called for a page that has already had its underlying blocks deallocated.
Table of Contents
ipc_init — initialise IPC subsystem
int fsfuncipc_init ( | void) ; |
void
;ipc_init_ids — initialise IPC identifiers
void fsfuncipc_init_ids ( | ids) ; |
struct ipc_ids * ids
;ipc_init_proc_interface — Create a proc interface for sysipc types using a seq_file interface.
void fsfuncipc_init_proc_interface ( | path, | |
header, | ||
ids, | ||
show) ; |
const char * path
;const char * header
;int ids
;int (*show)
(
struct seq_file *, void *)
;ipc_findkey — find a key in an ipc identifier set
struct kern_ipc_perm * fsfuncipc_findkey ( | ids, | |
key) ; |
struct ipc_ids * ids
;key_t key
;ipc_get_maxid — get the last assigned id
int fsfuncipc_get_maxid ( | ids) ; |
struct ipc_ids * ids
;ipc_addid — add an IPC identifier
int fsfuncipc_addid ( | ids, | |
new, | ||
size) ; |
struct ipc_ids * ids
;struct kern_ipc_perm * new
;int size
;Add an entry 'new' to the IPC ids idr. The permissions object is initialised and the first free entry is set up and the id assigned is returned. The 'new' entry is returned in a locked state on success. On failure the entry is not locked and a negative err-code is returned.
Called with writer ipc_ids.rw_mutex held.
ipcget_new — create a new ipc object
int fsfuncipcget_new ( | ns, | |
ids, | ||
ops, | ||
params) ; |
struct ipc_namespace * ns
;struct ipc_ids * ids
;struct ipc_ops * ops
;struct ipc_params * params
;ipc_check_perms — check security and permissions for an IPC
int fsfuncipc_check_perms ( | ns, | |
ipcp, | ||
ops, | ||
params) ; |
struct ipc_namespace * ns
;struct kern_ipc_perm * ipcp
;struct ipc_ops * ops
;struct ipc_params * params
;ipcget_public — get an ipc object or create a new one
int fsfuncipcget_public ( | ns, | |
ids, | ||
ops, | ||
params) ; |
struct ipc_namespace * ns
;struct ipc_ids * ids
;struct ipc_ops * ops
;struct ipc_params * params
;ipc_rmid — remove an IPC identifier
void fsfuncipc_rmid ( | ids, | |
ipcp) ; |
struct ipc_ids * ids
;struct kern_ipc_perm * ipcp
;ipc_schedule_free — free ipc + rcu space
void fsfuncipc_schedule_free ( | head) ; |
struct rcu_head * head
;ipcperms — check IPC permissions
int fsfuncipcperms ( | ns, | |
ipcp, | ||
flag) ; |
struct ipc_namespace * ns
;struct kern_ipc_perm * ipcp
;short flag
;kernel_to_ipc64_perm — convert kernel ipc permissions to user
void fsfunckernel_to_ipc64_perm ( | in, | |
out) ; |
struct kern_ipc_perm * in
;struct ipc64_perm * out
;ipc64_perm_to_ipc_perm — convert new ipc permissions to old
void fsfuncipc64_perm_to_ipc_perm ( | in, | |
out) ; |
struct ipc64_perm * in
;struct ipc_perm * out
;ipc_obtain_object —
struct kern_ipc_perm * fsfuncipc_obtain_object ( | ids, | |
id) ; |
struct ipc_ids * ids
;int id
;ipc_lock — Lock an ipc structure without rw_mutex held
struct kern_ipc_perm * fsfuncipc_lock ( | ids, | |
id) ; |
struct ipc_ids * ids
;int id
;ipc_obtain_object_check —
struct kern_ipc_perm * fsfuncipc_obtain_object_check ( | ids, | |
id) ; |
struct ipc_ids * ids
;int id
;ipcget —
Common sys_*get
code
int fsfuncipcget ( | ns, | |
ids, | ||
ops, | ||
params) ; |
struct ipc_namespace * ns
;struct ipc_ids * ids
;struct ipc_ops * ops
;struct ipc_params * params
;ipc_update_perm — update the permissions of an IPC.
int fsfuncipc_update_perm ( | in, | |
out) ; |
struct ipc64_perm * in
;struct kern_ipc_perm * out
;ipcctl_pre_down — retrieve an ipc and check permissions for some IPC_XXX cmd
struct kern_ipc_perm * fsfuncipcctl_pre_down ( | ns, | |
ids, | ||
id, | ||
cmd, | ||
perm, | ||
extra_perm) ; |
struct ipc_namespace * ns
;struct ipc_ids * ids
;int id
;int cmd
;struct ipc64_perm * perm
;int extra_perm
;ns
the ipc namespace
ids
the table of ids where to look for the ipc
id
the id of the ipc to retrieve
cmd
the cmd to check
perm
the permission to set
extra_perm
one extra permission parameter used by msq
This function does some common audit and permissions check for some IPC_XXX cmd and is called from semctl_down, shmctl_down and msgctl_down. It must be called without any lock held and - retrieves the ipc with the given id in the given table. - performs some audit and permission check, depending on the given cmd - returns the ipc with the ipc lock held in case of success or an err-code without any lock held otherwise.
Call holding the both the rw_mutex and the rcu read lock.
Table of Contents
DECLARE_KFIFO_PTR — macro to declare a fifo pointer object
fsfuncDECLARE_KFIFO_PTR ( | fifo, | |
type) ; |
fifo
; type
;DECLARE_KFIFO — macro to declare a fifo object
fsfuncDECLARE_KFIFO ( | fifo, | |
type, | ||
size) ; |
fifo
; type
; size
;INIT_KFIFO — Initialize a fifo declared by DECLARE_KFIFO
fsfuncINIT_KFIFO ( | fifo) ; |
fifo
;DEFINE_KFIFO — macro to define and initialize a fifo
fsfuncDEFINE_KFIFO ( | fifo, | |
type, | ||
size) ; |
fifo
; type
; size
;kfifo_initialized — Check if the fifo is initialized
fsfunckfifo_initialized ( | fifo) ; |
fifo
;kfifo_esize — returns the size of the element managed by the fifo
fsfunckfifo_esize ( | fifo) ; |
fifo
;kfifo_recsize — returns the size of the record length field
fsfunckfifo_recsize ( | fifo) ; |
fifo
;kfifo_len — returns the number of used elements in the fifo
fsfunckfifo_len ( | fifo) ; |
fifo
;kfifo_avail — returns the number of unused elements in the fifo
fsfunckfifo_avail ( | fifo) ; |
fifo
;kfifo_peek_len — gets the size of the next fifo record
fsfunckfifo_peek_len ( | fifo) ; |
fifo
;kfifo_alloc — dynamically allocates a new fifo buffer
fsfunckfifo_alloc ( | fifo, | |
size, | ||
gfp_mask) ; |
fifo
; size
; gfp_mask
;kfifo_init — initialize a fifo using a preallocated buffer
fsfunckfifo_init ( | fifo, | |
buffer, | ||
size) ; |
fifo
; buffer
; size
;kfifo_peek — get data from the fifo without removing
fsfunckfifo_peek ( | fifo, | |
val) ; |
fifo
; val
;kfifo_in — put data into the fifo
fsfunckfifo_in ( | fifo, | |
buf, | ||
n) ; |
fifo
; buf
; n
;kfifo_in_spinlocked — put data into the fifo using a spinlock for locking
fsfunckfifo_in_spinlocked ( | fifo, | |
buf, | ||
n, | ||
lock) ; |
fifo
; buf
; n
; lock
;kfifo_out — get data from the fifo
fsfunckfifo_out ( | fifo, | |
buf, | ||
n) ; |
fifo
; buf
; n
;kfifo_out_spinlocked — get data from the fifo using a spinlock for locking
fsfunckfifo_out_spinlocked ( | fifo, | |
buf, | ||
n, | ||
lock) ; |
fifo
; buf
; n
; lock
;kfifo_from_user — puts some data from user space into the fifo
fsfunckfifo_from_user ( | fifo, | |
from, | ||
len, | ||
copied) ; |
fifo
; from
; len
; copied
;kfifo_to_user — copies data from the fifo into user space
fsfunckfifo_to_user ( | fifo, | |
to, | ||
len, | ||
copied) ; |
fifo
; to
; len
; copied
;kfifo_dma_in_prepare — setup a scatterlist for DMA input
fsfunckfifo_dma_in_prepare ( | fifo, | |
sgl, | ||
nents, | ||
len) ; |
fifo
; sgl
; nents
; len
;kfifo_dma_in_finish — finish a DMA IN operation
fsfunckfifo_dma_in_finish ( | fifo, | |
len) ; |
fifo
; len
;kfifo_dma_out_prepare — setup a scatterlist for DMA output
fsfunckfifo_dma_out_prepare ( | fifo, | |
sgl, | ||
nents, | ||
len) ; |
fifo
; sgl
; nents
; len
;fifo
address of the fifo to be used
sgl
pointer to the scatterlist array
nents
number of entries in the scatterlist array
len
number of elements to transfer
This macro fills a scatterlist for DMA output, transferring at most len
bytes.
It returns the number of entries in the scatterlist array.
A zero means there is no space available and the scatterlist is not filled.
Note that with only one concurrent reader and one concurrent writer, you don't need extra locking to use these macros.
kfifo_dma_out_finish — finish a DMA OUT operation
fsfunckfifo_dma_out_finish ( | fifo, | |
len) ; |
fifo
; len
;Table of Contents
Relay interface support is designed to provide an efficient mechanism for tools and facilities to relay large amounts of data from kernel space to user space.
relay_buf_full — boolean, is the channel buffer full?
int fsfuncrelay_buf_full ( | buf) ; |
struct rchan_buf * buf
;relay_open — create a new relay channel
struct rchan * fsfuncrelay_open ( | base_filename, | |
parent, | ||
subbuf_size, | ||
n_subbufs, | ||
cb, | ||
private_data) ; |
const char * base_filename
;struct dentry * parent
;size_t subbuf_size
;size_t n_subbufs
;struct rchan_callbacks * cb
;void * private_data
;relay_switch_subbuf — switch to a new sub-buffer
size_t fsfuncrelay_switch_subbuf ( | buf, | |
length) ; |
struct rchan_buf * buf
;size_t length
;relay_subbufs_consumed — update the buffer's sub-buffers-consumed count
void fsfuncrelay_subbufs_consumed ( | chan, | |
cpu, | ||
subbufs_consumed) ; |
struct rchan * chan
;unsigned int cpu
;size_t subbufs_consumed
;relay_mmap_buf — mmap channel buffer to process address space
int fsfuncrelay_mmap_buf ( | buf, | |
vma) ; |
struct rchan_buf * buf
;struct vm_area_struct * vma
;relay_alloc_buf — allocate a channel buffer
void * fsfuncrelay_alloc_buf ( | buf, | |
size) ; |
struct rchan_buf * buf
;size_t * size
;relay_create_buf — allocate and initialize a channel buffer
struct rchan_buf * fsfuncrelay_create_buf ( | chan) ; |
struct rchan * chan
;relay_destroy_channel — free the channel struct
void fsfuncrelay_destroy_channel ( | kref) ; |
struct kref * kref
;relay_destroy_buf — destroy an rchan_buf struct and associated buffer
void fsfuncrelay_destroy_buf ( | buf) ; |
struct rchan_buf * buf
;relay_remove_buf — remove a channel buffer
void fsfuncrelay_remove_buf ( | kref) ; |
struct kref * kref
;relay_buf_empty — boolean, is the channel buffer empty?
int fsfuncrelay_buf_empty ( | buf) ; |
struct rchan_buf * buf
;wakeup_readers — wake up readers waiting on a channel
void fsfuncwakeup_readers ( | data) ; |
unsigned long data
;__relay_reset — reset a channel buffer
void fsfunc__relay_reset ( | buf, | |
init) ; |
struct rchan_buf * buf
;unsigned int init
;relay_close_buf — close a channel buffer
void fsfuncrelay_close_buf ( | buf) ; |
struct rchan_buf * buf
;relay_hotcpu_callback — CPU hotplug callback
int fsfuncrelay_hotcpu_callback ( | nb, | |
action, | ||
hcpu) ; |
struct notifier_block * nb
;unsigned long action
;void * hcpu
;relay_late_setup_files — triggers file creation
int fsfuncrelay_late_setup_files ( | chan, | |
base_filename, | ||
parent) ; |
struct rchan * chan
;const char * base_filename
;struct dentry * parent
;relay_file_open — open file op for relay files
int fsfuncrelay_file_open ( | inode, | |
filp) ; |
struct inode * inode
;struct file * filp
;relay_file_mmap — mmap file op for relay files
int fsfuncrelay_file_mmap ( | filp, | |
vma) ; |
struct file * filp
;struct vm_area_struct * vma
;relay_file_poll — poll file op for relay files
unsigned int fsfuncrelay_file_poll ( | filp, | |
wait) ; |
struct file * filp
;poll_table * wait
;relay_file_release — release file op for relay files
int fsfuncrelay_file_release ( | inode, | |
filp) ; |
struct inode * inode
;struct file * filp
;relay_file_read_subbuf_avail — return bytes available in sub-buffer
size_t fsfuncrelay_file_read_subbuf_avail ( | read_pos, | |
buf) ; |
size_t read_pos
;struct rchan_buf * buf
;Table of Contents
__request_module — try to load a kernel module
int fsfunc__request_module ( | wait, | |
fmt, | ||
...) ; |
bool wait
;const char * fmt
; ...
;wait
wait (or not) for the operation to complete
fmt
printf style format string for the name of the module @...: arguments as specified in the format string
...
variable arguments
Load a module using the user mode module loader. The function returns zero on success or a negative errno code on failure. Note that a successful module load does not mean the module did not then unload and exit on an error of its own. Callers must check that the service they requested is now available, not blindly invoke it.
If module auto-loading support is disabled then this function becomes a no-operation.
call_usermodehelper_setup — prepare to call a usermode helper
struct subprocess_info * fsfunccall_usermodehelper_setup ( | path, | |
argv, | ||
envp, | ||
gfp_mask, | ||
init, | ||
cleanup, | ||
data) ; |
char * path
;char ** argv
;char ** envp
;gfp_t gfp_mask
;int (*init)
(
struct subprocess_info *info, struct cred *new)
;void (*cleanup)
(
struct subprocess_info *info)
;void * data
;path
path to usermode executable
argv
arg vector for process
envp
environment for process
gfp_mask
gfp mask for memory allocation
init
an init function
cleanup
a cleanup function
data
arbitrary context sensitive data
Returns either NULL
on allocation failure, or a subprocess_info
structure. This should be passed to call_usermodehelper_exec to
exec the process and free the structure.
The init function is used to customize the helper process prior to exec. A non-zero return code causes the process to error out, exit, and return the failure to the calling process
The cleanup function is called just before the subprocess_info is about to be freed. This can be used for freeing the argv and envp. The function must be runnable in either a process context or the context in which call_usermodehelper_exec is called.
call_usermodehelper_exec — start a usermode application
int fsfunccall_usermodehelper_exec ( | sub_info, | |
wait) ; |
struct subprocess_info * sub_info
;int wait
;call_usermodehelper — prepare and start a usermode application
int fsfunccall_usermodehelper ( | path, | |
argv, | ||
envp, | ||
wait) ; |
char * path
;char ** argv
;char ** envp
;int wait
;path
path to usermode executable
argv
arg vector for process
envp
environment for process
wait
wait for the application to finish and return status. when UMH_NO_WAIT don't wait at all, but you get no useful error back when the program couldn't be exec'ed. This makes it safe to call from interrupt context.
Table of Contents
synchronize_irq — wait for pending IRQ handlers (on other CPUs)
void fsfuncsynchronize_irq ( | irq) ; |
unsigned int irq
;irq_set_affinity_notifier — control notification of IRQ affinity changes
int fsfuncirq_set_affinity_notifier ( | irq, | |
notify) ; |
unsigned int irq
;struct irq_affinity_notify * notify
;disable_irq_nosync — disable an irq without waiting
void fsfuncdisable_irq_nosync ( | irq) ; |
unsigned int irq
;disable_irq — disable an irq and wait for completion
void fsfuncdisable_irq ( | irq) ; |
unsigned int irq
;Disable the selected interrupt line. Enables and Disables are nested. This function waits for any pending IRQ handlers for this interrupt to complete before returning. If you use this function while holding a resource the IRQ handler may need, you will deadlock.
This function may be called - with care - from IRQ context.
irq_set_irq_wake — control irq power management wakeup
int fsfuncirq_set_irq_wake ( | irq, | |
on) ; |
unsigned int irq
;unsigned int on
;setup_irq — setup an interrupt
int fsfuncsetup_irq ( | irq, | |
act) ; |
unsigned int irq
;struct irqaction * act
;remove_irq — free an interrupt
void fsfuncremove_irq ( | irq, | |
act) ; |
unsigned int irq
;struct irqaction * act
;free_irq — free an interrupt allocated with request_irq
void fsfuncfree_irq ( | irq, | |
dev_id) ; |
unsigned int irq
;void * dev_id
;Remove an interrupt handler. The handler is removed and if the interrupt line is no longer in use by any driver it is disabled. On a shared IRQ the caller must ensure the interrupt is disabled on the card it drives before calling this function. The function does not return until any executing interrupts for this IRQ have completed.
This function must not be called from interrupt context.
request_threaded_irq — allocate an interrupt line
int fsfuncrequest_threaded_irq ( | irq, | |
handler, | ||
thread_fn, | ||
irqflags, | ||
devname, | ||
dev_id) ; |
unsigned int irq
;irq_handler_t handler
;irq_handler_t thread_fn
;unsigned long irqflags
;const char * devname
;void * dev_id
;irq
Interrupt line to allocate
handler
Function to be called when the IRQ occurs. Primary handler for threaded interrupts. If NULL and thread_fn != NULL, the default primary handler is installed.
thread_fn
Function called from the irq handler thread. If NULL, no irq thread is created.
irqflags
Interrupt type flags
devname
An ascii name for the claiming device
dev_id
A cookie passed back to the handler function
This call allocates interrupt resources and enables the interrupt line and IRQ handling. From the point this call is made your handler function may be invoked. Since your handler function must clear any interrupt the board raises, you must take care both to initialise your hardware and to set up the interrupt handler in the right order.
If you want to set up a threaded irq handler for your device
then you need to supply handler
and thread_fn
. handler
is
still called in hard interrupt context and has to check
whether the interrupt originates from the device. If yes it
needs to disable the interrupt on the device and return
IRQ_WAKE_THREAD which will wake up the handler thread and run
thread_fn
. This split handler design is necessary to support
shared interrupts.
Dev_id must be globally unique. Normally the address of the device data structure is used as the cookie. Since the handler receives this value it makes sense to use it.
If your interrupt is shared you must pass a non NULL dev_id as this is required when freeing the interrupt.
request_any_context_irq — allocate an interrupt line
int fsfuncrequest_any_context_irq ( | irq, | |
handler, | ||
flags, | ||
name, | ||
dev_id) ; |
unsigned int irq
;irq_handler_t handler
;unsigned long flags
;const char * name
;void * dev_id
;request_resource_conflict — request and reserve an I/O or memory resource
struct resource * fsfuncrequest_resource_conflict ( | root, | |
new) ; |
struct resource * root
;struct resource * new
;reallocate_resource — allocate a slot in the resource tree given range & alignment. The resource will be relocated if the new size cannot be reallocated in the current location.
int fsfuncreallocate_resource ( | root, | |
old, | ||
newsize, | ||
constraint) ; |
struct resource * root
;struct resource * old
;resource_size_t newsize
;struct resource_constraint * constraint
;lookup_resource — find an existing resource by a resource start address
struct resource * fsfunclookup_resource ( | root, | |
start) ; |
struct resource * root
;resource_size_t start
;insert_resource_conflict — Inserts resource in the resource tree
struct resource * fsfuncinsert_resource_conflict ( | parent, | |
new) ; |
struct resource * parent
;struct resource * new
;Returns 0 on success, conflict resource if the resource can't be inserted.
This function is equivalent to request_resource_conflict when no conflict happens. If a conflict happens, and the conflicting resources entirely fit within the range of the new resource, then the new resource is inserted and the conflicting resources become children of the new resource.
insert_resource — Inserts a resource in the resource tree
int fsfuncinsert_resource ( | parent, | |
new) ; |
struct resource * parent
;struct resource * new
;insert_resource_expand_to_fit — Insert a resource into the resource tree
void fsfuncinsert_resource_expand_to_fit ( | root, | |
new) ; |
struct resource * root
;struct resource * new
;resource_alignment — calculate resource's alignment
resource_size_t fsfuncresource_alignment ( | res) ; |
struct resource * res
;release_mem_region_adjustable — release a previously reserved memory region
int fsfuncrelease_mem_region_adjustable ( | parent, | |
start, | ||
size) ; |
struct resource * parent
;resource_size_t start
;resource_size_t size
;This interface is intended for memory hot-delete. The requested region is released from a currently busy memory resource. The requested region must either match exactly or fit into a single busy resource entry. In the latter case, the remaining resource is adjusted accordingly. Existing children of the busy memory resource must be immutable in the request.
- Additional release conditions, such as overlapping region, can be supported after they are confirmed as valid cases. - When a busy memory resource gets split into two entries, the code assumes that all children remain in the lower address entry for simplicity. Enhance this logic when necessary.
request_resource — request and reserve an I/O or memory resource
int fsfuncrequest_resource ( | root, | |
new) ; |
struct resource * root
;struct resource * new
;release_resource — release a previously reserved resource
int fsfuncrelease_resource ( | old) ; |
struct resource * old
;allocate_resource — allocate empty slot in the resource tree given range & alignment. The resource will be reallocated with a new size if it was already allocated
int fsfuncallocate_resource ( | root, | |
new, | ||
size, | ||
min, | ||
max, | ||
align, | ||
alignf, | ||
alignf_data) ; |
struct resource * root
;struct resource * new
;resource_size_t size
;resource_size_t min
;resource_size_t max
;resource_size_t align
;resource_size_t (*alignf)
(
void *, const struct resource *, resource_size_t, resource_size_t)
;void * alignf_data
;root
root resource descriptor
new
resource descriptor desired by caller
size
requested resource region size
min
minimum boundary to allocate
max
maximum boundary to allocate
align
alignment requested, in bytes
alignf
alignment function, optional, called if not NULL
alignf_data
arbitrary data to pass to the alignf
function
adjust_resource — modify a resource's start and size
int fsfuncadjust_resource ( | res, | |
start, | ||
size) ; |
struct resource * res
;resource_size_t start
;resource_size_t size
;__request_region — create a new busy resource region
struct resource * fsfunc__request_region ( | parent, | |
start, | ||
n, | ||
name, | ||
flags) ; |
struct resource * parent
;resource_size_t start
;resource_size_t n
;const char * name
;int flags
;__check_region — check if a resource region is busy or free
int fsfunc__check_region ( | parent, | |
start, | ||
n) ; |
struct resource * parent
;resource_size_t start
;resource_size_t n
;mtrr_add — Add a memory type region
int fsfuncmtrr_add ( | base, | |
size, | ||
type, | ||
increment) ; |
unsigned long base
;unsigned long size
;unsigned int type
;bool increment
;base
Physical base address of region
size
Physical size of region
type
Type of MTRR desired
increment
If this is true do usage counting on the region
Memory type region registers control the caching on newer Intel and non Intel processors. This function allows drivers to request an MTRR is added. The details and hardware specifics of each processor's implementation are hidden from the caller, but nevertheless the caller should expect to need to provide a power of two size on an equivalent power of two boundary.
If the region cannot be added either because all regions are in use or the CPU cannot support it a negative value is returned. On success the register number for this entry is returned, but should be treated as a cookie only.
On a multiprocessor machine the changes are made to all processors. This is required on x86 by the Intel processors.
The available types are
MTRR_TYPE_UNCACHABLE
- No caching
MTRR_TYPE_WRBACK
- Write data back in bursts whenever
MTRR_TYPE_WRCOMB
- Write data back soon but allow bursts
MTRR_TYPE_WRTHROUGH
- Cache reads but not writes
mtrr_del — delete a memory type region
int fsfuncmtrr_del ( | reg, | |
base, | ||
size) ; |
int reg
;unsigned long base
;unsigned long size
;arch_phys_wc_add — add a WC MTRR and handle errors if PAT is unavailable
int fsfuncarch_phys_wc_add ( | base, | |
size) ; |
unsigned long base
;unsigned long size
;If PAT is available, this does nothing. If PAT is unavailable, it attempts to add a WC MTRR covering size bytes starting at base and logs an error if this fails.
Drivers must store the return value to pass to mtrr_del_wc_if_needed, but drivers should not try to interpret that return value.
pci_bus_max_busnr — returns maximum PCI bus number of given bus' children
unsigned char fsfuncpci_bus_max_busnr ( | bus) ; |
struct pci_bus * bus
;pci_find_capability — query for devices' capabilities
int fsfuncpci_find_capability ( | dev, | |
cap) ; |
struct pci_dev * dev
;int cap
;
Tell if a device supports a given PCI capability.
Returns the address of the requested capability structure within the
device's PCI configuration space or 0 in case the device does not
support it. Possible values for cap
:
PCI_CAP_ID_PM
Power Management
PCI_CAP_ID_AGP
Accelerated Graphics Port
PCI_CAP_ID_VPD
Vital Product Data
PCI_CAP_ID_SLOTID
Slot Identification
PCI_CAP_ID_MSI
Message Signalled Interrupts
PCI_CAP_ID_CHSWP
CompactPCI HotSwap
PCI_CAP_ID_PCIX
PCI-X
PCI_CAP_ID_EXP
PCI Express
pci_bus_find_capability — query for devices' capabilities
int fsfuncpci_bus_find_capability ( | bus, | |
devfn, | ||
cap) ; |
struct pci_bus * bus
;unsigned int devfn
;int cap
;pci_find_next_ext_capability — Find an extended capability
int fsfuncpci_find_next_ext_capability ( | dev, | |
start, | ||
cap) ; |
struct pci_dev * dev
;int start
;int cap
;pci_find_ext_capability — Find an extended capability
int fsfuncpci_find_ext_capability ( | dev, | |
cap) ; |
struct pci_dev * dev
;int cap
;
Returns the address of the requested extended capability structure
within the device's PCI configuration space or 0 if the device does
not support it. Possible values for cap
:
PCI_EXT_CAP_ID_ERR
Advanced Error Reporting
PCI_EXT_CAP_ID_VC
Virtual Channel
PCI_EXT_CAP_ID_DSN
Device Serial Number
PCI_EXT_CAP_ID_PWR
Power Budgeting
pci_find_next_ht_capability — query a device's Hypertransport capabilities
int fsfuncpci_find_next_ht_capability ( | dev, | |
pos, | ||
ht_cap) ; |
struct pci_dev * dev
;int pos
;int ht_cap
;pci_find_ht_capability — query a device's Hypertransport capabilities
int fsfuncpci_find_ht_capability ( | dev, | |
ht_cap) ; |
struct pci_dev * dev
;int ht_cap
;
Tell if a device supports a given Hypertransport capability.
Returns an address within the device's PCI configuration space
or 0 in case the device does not support the requested capability.
The address points to the PCI capability, of type PCI_CAP_ID_HT,
which has a Hypertransport capability matching ht_cap
.
pci_find_parent_resource — return resource region of parent bus of given region
struct resource * fsfuncpci_find_parent_resource ( | dev, | |
res) ; |
const struct pci_dev * dev
;struct resource * res
;__pci_complete_power_transition — Complete power transition of a PCI device
int fsfunc__pci_complete_power_transition ( | dev, | |
state) ; |
struct pci_dev * dev
;pci_power_t state
;pci_set_power_state — Set the power state of a PCI device
int fsfuncpci_set_power_state ( | dev, | |
state) ; |
struct pci_dev * dev
;pci_power_t state
;dev
PCI device to handle.
state
PCI power state (D0, D1, D2, D3hot) to put the device into.
pci_choose_state — Choose the power state of a PCI device
pci_power_t fsfuncpci_choose_state ( | dev, | |
state) ; |
struct pci_dev * dev
;pm_message_t state
;pci_save_state — save the PCI configuration space of a device before suspending
int fsfuncpci_save_state ( | dev) ; |
struct pci_dev * dev
;pci_restore_state — Restore the saved state of a PCI device
void fsfuncpci_restore_state ( | dev) ; |
struct pci_dev * dev
;pci_store_saved_state — Allocate and return an opaque struct containing the device saved state.
struct pci_saved_state * fsfuncpci_store_saved_state ( | dev) ; |
struct pci_dev * dev
;pci_load_saved_state — Reload the provided save state into struct pci_dev.
int fsfuncpci_load_saved_state ( | dev, | |
state) ; |
struct pci_dev * dev
;struct pci_saved_state * state
;pci_load_and_free_saved_state — Reload the save state pointed to by state, and free the memory allocated for it.
int fsfuncpci_load_and_free_saved_state ( | dev, | |
state) ; |
struct pci_dev * dev
;struct pci_saved_state ** state
;pci_reenable_device — Resume abandoned device
int fsfuncpci_reenable_device ( | dev) ; |
struct pci_dev * dev
;pci_enable_device_io — Initialize a device for use with IO space
int fsfuncpci_enable_device_io ( | dev) ; |
struct pci_dev * dev
;pci_enable_device_mem — Initialize a device for use with Memory space
int fsfuncpci_enable_device_mem ( | dev) ; |
struct pci_dev * dev
;pci_enable_device — Initialize device before it's used by a driver.
int fsfuncpci_enable_device ( | dev) ; |
struct pci_dev * dev
;pcim_enable_device —
Managed pci_enable_device
int fsfuncpcim_enable_device ( | pdev) ; |
struct pci_dev * pdev
;pcim_pin_device — Pin managed PCI device
void fsfuncpcim_pin_device ( | pdev) ; |
struct pci_dev * pdev
;pci_disable_device — Disable PCI device after use
void fsfuncpci_disable_device ( | dev) ; |
struct pci_dev * dev
;pci_set_pcie_reset_state — set reset state for device dev
int fsfuncpci_set_pcie_reset_state ( | dev, | |
state) ; |
struct pci_dev * dev
;enum pcie_reset_state state
;pci_pme_capable — check the capability of PCI device to generate PME#
bool fsfuncpci_pme_capable ( | dev, | |
state) ; |
struct pci_dev * dev
;pci_power_t state
;pci_pme_active — enable or disable PCI device's PME# function
void fsfuncpci_pme_active ( | dev, | |
enable) ; |
struct pci_dev * dev
;bool enable
;__pci_enable_wake — enable PCI device as wakeup event source
int fsfunc__pci_enable_wake ( | dev, | |
state, | ||
runtime, | ||
enable) ; |
struct pci_dev * dev
;pci_power_t state
;bool runtime
;bool enable
;dev
PCI device affected
state
PCI state from which device will issue wakeup events
runtime
True if the events are to be generated at run time
enable
True to enable event generation; false to disable
pci_wake_from_d3 — enable/disable device to wake up from D3_hot or D3_cold
int fsfuncpci_wake_from_d3 ( | dev, | |
enable) ; |
struct pci_dev * dev
;bool enable
;
Many drivers want the device to wake up the system from D3_hot or D3_cold
and this function allows them to set that up cleanly - pci_enable_wake
should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
ordering constraints.
This function only returns error code if the device is not capable of generating PME# from both D3_hot and D3_cold, and the platform is unable to enable wake-up power for it.
pci_target_state — find an appropriate low power state for a given PCI dev
pci_power_t fsfuncpci_target_state ( | dev) ; |
struct pci_dev * dev
;pci_prepare_to_sleep — prepare PCI device for system-wide transition into a sleep state
int fsfuncpci_prepare_to_sleep ( | dev) ; |
struct pci_dev * dev
;pci_back_from_sleep — turn PCI device on during system-wide transition into working state
int fsfuncpci_back_from_sleep ( | dev) ; |
struct pci_dev * dev
;pci_dev_run_wake — Check if device can generate run-time wake-up events.
bool fsfuncpci_dev_run_wake ( | dev) ; |
struct pci_dev * dev
;pci_enable_ido — enable ID-based Ordering on a device
void fsfuncpci_enable_ido ( | dev, | |
type) ; |
struct pci_dev * dev
;unsigned long type
;pci_disable_ido — disable ID-based ordering on a device
void fsfuncpci_disable_ido ( | dev, | |
type) ; |
struct pci_dev * dev
;unsigned long type
;pci_enable_obff — enable optimized buffer flush/fill
int fsfuncpci_enable_obff ( | dev, | |
type) ; |
struct pci_dev * dev
;enum pci_obff_signal_type type
;
Try to enable type
OBFF signaling on dev
. It will try using WAKE#
signaling if possible, falling back to message signaling only if
WAKE# isn't supported. type
should indicate whether the PCIe link
be brought out of L0s or L1 to send the message. It should be either
PCI_EXP_OBFF_SIGNAL_ALWAYS
or PCI_OBFF_SIGNAL_L0
.
If your device can benefit from receiving all messages, even at the
power cost of bringing the link back up from a low power state, use
PCI_EXP_OBFF_SIGNAL_ALWAYS
. Otherwise, use PCI_OBFF_SIGNAL_L0
(the
preferred type).
pci_disable_obff — disable optimized buffer flush/fill
void fsfuncpci_disable_obff ( | dev) ; |
struct pci_dev * dev
;pci_enable_ltr — enable latency tolerance reporting
int fsfuncpci_enable_ltr ( | dev) ; |
struct pci_dev * dev
;pci_disable_ltr — disable latency tolerance reporting
void fsfuncpci_disable_ltr ( | dev) ; |
struct pci_dev * dev
;pci_set_ltr — set LTR latency values
int fsfuncpci_set_ltr ( | dev, | |
snoop_lat_ns, | ||
nosnoop_lat_ns) ; |
struct pci_dev * dev
;int snoop_lat_ns
;int nosnoop_lat_ns
;pci_release_region — Release a PCI bar
void fsfuncpci_release_region ( | pdev, | |
bar) ; |
struct pci_dev * pdev
;int bar
;pci_request_region — Reserve PCI I/O and memory resource
int fsfuncpci_request_region ( | pdev, | |
bar, | ||
res_name) ; |
struct pci_dev * pdev
;int bar
;const char * res_name
;pci_request_region_exclusive — Reserved PCI I/O and memory resource
int fsfuncpci_request_region_exclusive ( | pdev, | |
bar, | ||
res_name) ; |
struct pci_dev * pdev
;int bar
;const char * res_name
;pdev
PCI device whose resources are to be reserved
bar
BAR to be reserved
res_name
Name to be associated with resource.
Mark the PCI region associated with PCI device pdev
bar
as
being reserved by owner res_name
. Do not access any
address inside the PCI regions unless this call returns
successfully.
Returns 0 on success, or EBUSY
on error. A warning
message is also printed on failure.
The key difference that _exclusive makes is that userspace is explicitly not allowed to map the resource via /dev/mem or sysfs.
pci_release_selected_regions — Release selected PCI I/O and memory resources
void fsfuncpci_release_selected_regions ( | pdev, | |
bars) ; |
struct pci_dev * pdev
;int bars
;pci_request_selected_regions — Reserve selected PCI I/O and memory resources
int fsfuncpci_request_selected_regions ( | pdev, | |
bars, | ||
res_name) ; |
struct pci_dev * pdev
;int bars
;const char * res_name
;pci_release_regions — Release reserved PCI I/O and memory resources
void fsfuncpci_release_regions ( | pdev) ; |
struct pci_dev * pdev
;pci_request_regions — Reserved PCI I/O and memory resources
int fsfuncpci_request_regions ( | pdev, | |
res_name) ; |
struct pci_dev * pdev
;const char * res_name
;pci_request_regions_exclusive — Reserved PCI I/O and memory resources
int fsfuncpci_request_regions_exclusive ( | pdev, | |
res_name) ; |
struct pci_dev * pdev
;const char * res_name
;pdev
PCI device whose resources are to be reserved
res_name
Name to be associated with resource.
Mark all PCI regions associated with PCI device pdev
as
being reserved by owner res_name
. Do not access any
address inside the PCI regions unless this call returns
successfully.
pci_request_regions_exclusive
will mark the region so that
/dev/mem and the sysfs MMIO access will not be allowed.
Returns 0 on success, or EBUSY
on error. A warning
message is also printed on failure.
pci_set_master — enables bus-mastering for device dev
void fsfuncpci_set_master ( | dev) ; |
struct pci_dev * dev
;pci_clear_master — disables bus-mastering for device dev
void fsfuncpci_clear_master ( | dev) ; |
struct pci_dev * dev
;pci_set_cacheline_size — ensure the CACHE_LINE_SIZE register is programmed
int fsfuncpci_set_cacheline_size ( | dev) ; |
struct pci_dev * dev
;pci_set_mwi — enables memory-write-invalidate PCI transaction
int fsfuncpci_set_mwi ( | dev) ; |
struct pci_dev * dev
;pci_try_set_mwi — enables memory-write-invalidate PCI transaction
int fsfuncpci_try_set_mwi ( | dev) ; |
struct pci_dev * dev
;pci_clear_mwi — disables Memory-Write-Invalidate for device dev
void fsfuncpci_clear_mwi ( | dev) ; |
struct pci_dev * dev
;pci_intx — enables/disables PCI INTx for device dev
void fsfuncpci_intx ( | pdev, | |
enable) ; |
struct pci_dev * pdev
;int enable
;pci_intx_mask_supported — probe for INTx masking support
bool fsfuncpci_intx_mask_supported ( | dev) ; |
struct pci_dev * dev
;pci_check_and_mask_intx — mask INTx on pending interrupt
bool fsfuncpci_check_and_mask_intx ( | dev) ; |
struct pci_dev * dev
;pci_check_and_unmask_intx — unmask INTx if no interrupt is pending
bool fsfuncpci_check_and_unmask_intx ( | dev) ; |
struct pci_dev * dev
;pci_msi_off — disables any MSI or MSI-X capabilities
void fsfuncpci_msi_off ( | dev) ; |
struct pci_dev * dev
;pci_wait_for_pending_transaction — waits for pending transaction
int fsfuncpci_wait_for_pending_transaction ( | dev) ; |
struct pci_dev * dev
;pci_reset_bridge_secondary_bus — Reset the secondary bus on a PCI bridge.
void fsfuncpci_reset_bridge_secondary_bus ( | dev) ; |
struct pci_dev * dev
;__pci_reset_function — reset a PCI device function
int fsfunc__pci_reset_function ( | dev) ; |
struct pci_dev * dev
;Some devices allow an individual function to be reset without affecting other functions in the same device. The PCI device must be responsive to PCI config space in order to use this function.
The device function is presumed to be unused when this function is called. Resetting the device will make the contents of PCI configuration space random, so any caller of this must be prepared to reinitialise the device including MSI, bus mastering, BARs, decoding IO and memory spaces, etc.
Returns 0 if the device function was successfully reset or negative if the device doesn't support resetting a single function.
__pci_reset_function_locked —
reset a PCI device function while holding the dev
mutex lock.
int fsfunc__pci_reset_function_locked ( | dev) ; |
struct pci_dev * dev
;Some devices allow an individual function to be reset without affecting other functions in the same device. The PCI device must be responsive to PCI config space in order to use this function.
The device function is presumed to be unused and the caller is holding the device mutex lock when this function is called. Resetting the device will make the contents of PCI configuration space random, so any caller of this must be prepared to reinitialise the device including MSI, bus mastering, BARs, decoding IO and memory spaces, etc.
Returns 0 if the device function was successfully reset or negative if the device doesn't support resetting a single function.
pci_reset_function — quiesce and reset a PCI device function
int fsfuncpci_reset_function ( | dev) ; |
struct pci_dev * dev
;Some devices allow an individual function to be reset without affecting other functions in the same device. The PCI device must be responsive to PCI config space in order to use this function.
This function does not just reset the PCI portion of a device, but clears all the state associated with the device. This function differs from __pci_reset_function in that it saves and restores device state over the reset.
Returns 0 if the device function was successfully reset or negative if the device doesn't support resetting a single function.
pci_probe_reset_slot — probe whether a PCI slot can be reset
int fsfuncpci_probe_reset_slot ( | slot) ; |
struct pci_slot * slot
;pci_reset_slot — reset a PCI slot
int fsfuncpci_reset_slot ( | slot) ; |
struct pci_slot * slot
;A PCI bus may host multiple slots, each slot may support a reset mechanism independent of other slots. For instance, some slots may support slot power control. In the case of a 1:1 bus to slot architecture, this function may wrap the bus reset to avoid spurious slot related events such as hotplug. Generally a slot reset should be attempted before a bus reset. All of the function of the slot and any subordinate buses behind the slot are reset through this function. PCI config space of all devices in the slot and behind the slot is saved before and restored after reset.
Return 0 on success, non-zero on error.
pci_probe_reset_bus — probe whether a PCI bus can be reset
int fsfuncpci_probe_reset_bus ( | bus) ; |
struct pci_bus * bus
;pcix_get_max_mmrbc — get PCI-X maximum designed memory read byte count
int fsfuncpcix_get_max_mmrbc ( | dev) ; |
struct pci_dev * dev
;pcix_get_mmrbc — get PCI-X maximum memory read byte count
int fsfuncpcix_get_mmrbc ( | dev) ; |
struct pci_dev * dev
;pcix_set_mmrbc — set PCI-X maximum memory read byte count
int fsfuncpcix_set_mmrbc ( | dev, | |
mmrbc) ; |
struct pci_dev * dev
;int mmrbc
;pcie_get_readrq — get PCI Express read request size
int fsfuncpcie_get_readrq ( | dev) ; |
struct pci_dev * dev
;pcie_set_readrq — set PCI Express maximum memory read request
int fsfuncpcie_set_readrq ( | dev, | |
rq) ; |
struct pci_dev * dev
;int rq
;pcie_get_minimum_link — determine minimum link settings of a PCI device
int fsfuncpcie_get_minimum_link ( | dev, | |
speed, | ||
width) ; |
struct pci_dev * dev
;enum pci_bus_speed * speed
;enum pcie_link_width * width
;pci_select_bars — Make BAR mask from the type of resource
int fsfuncpci_select_bars ( | dev, | |
flags) ; |
struct pci_dev * dev
;unsigned long flags
;pci_add_dynid — add a new PCI device ID to this driver and re-probe devices
int fsfuncpci_add_dynid ( | drv, | |
vendor, | ||
device, | ||
subvendor, | ||
subdevice, | ||
class, | ||
class_mask, | ||
driver_data) ; |
struct pci_driver * drv
;unsigned int vendor
;unsigned int device
;unsigned int subvendor
;unsigned int subdevice
;unsigned int class
;unsigned int class_mask
;unsigned long driver_data
;drv
target pci driver
vendor
PCI vendor ID
device
PCI device ID
subvendor
PCI subvendor ID
subdevice
PCI subdevice ID
class
PCI class
class_mask
PCI class mask
driver_data
private driver data
pci_match_id — See if a pci device matches a given pci_id table
const struct pci_device_id * fsfuncpci_match_id ( | ids, | |
dev) ; |
const struct pci_device_id * ids
;struct pci_dev * dev
;__pci_register_driver — register a new pci driver
int fsfunc__pci_register_driver ( | drv, | |
owner, | ||
mod_name) ; |
struct pci_driver * drv
;struct module * owner
;const char * mod_name
;pci_unregister_driver — unregister a pci driver
void fsfuncpci_unregister_driver ( | drv) ; |
struct pci_driver * drv
;pci_dev_driver — get the pci_driver of a device
struct pci_driver * fsfuncpci_dev_driver ( | dev) ; |
const struct pci_dev * dev
;pci_dev_get — increments the reference count of the pci device structure
struct pci_dev * fsfuncpci_dev_get ( | dev) ; |
struct pci_dev * dev
;Each live reference to a device should be refcounted.
Drivers for PCI devices should normally record such references in
their probe
methods, when they bind to a device, and release
them by calling pci_dev_put
, in their disconnect
methods.
A pointer to the device with the incremented reference counter is returned.
pci_dev_put — release a use of the pci device structure
void fsfuncpci_dev_put ( | dev) ; |
struct pci_dev * dev
;pci_stop_and_remove_bus_device — remove a PCI device and any children
void fsfuncpci_stop_and_remove_bus_device ( | dev) ; |
struct pci_dev * dev
;Remove a PCI device from the device lists, informing the drivers that the device has been removed. We also remove any subordinate buses and children in a depth-first manner.
For each device we remove, delete the device structure from the device lists, remove the /proc entry, and notify userspace (/sbin/hotplug).
pci_find_bus — locate PCI bus from a given domain and bus number
struct pci_bus * fsfuncpci_find_bus ( | domain, | |
busnr) ; |
int domain
;int busnr
;pci_find_next_bus — begin or continue searching for a PCI bus
struct pci_bus * fsfuncpci_find_next_bus ( | from) ; |
const struct pci_bus * from
;pci_get_slot — locate PCI device for a given PCI slot
struct pci_dev * fsfuncpci_get_slot ( | bus, | |
devfn) ; |
struct pci_bus * bus
;unsigned int devfn
;bus
PCI bus on which desired PCI device resides
devfn
encodes number of PCI slot in which the desired PCI device resides and the logical device number within that slot in case of multi-function devices.
Given a PCI bus and slot/function number, the desired PCI device
is located in the list of PCI devices.
If the device is found, its reference count is increased and this
function returns a pointer to its data structure. The caller must
decrement the reference count by calling pci_dev_put
.
If no device is found, NULL
is returned.
pci_get_domain_bus_and_slot — locate PCI device for a given PCI domain (segment), bus, and slot
struct pci_dev * fsfuncpci_get_domain_bus_and_slot ( | domain, | |
bus, | ||
devfn) ; |
int domain
;unsigned int bus
;unsigned int devfn
;domain
PCI domain/segment on which the PCI device resides.
bus
PCI bus on which desired PCI device resides
devfn
encodes number of PCI slot in which the desired PCI device resides and the logical device number within that slot in case of multi-function devices.
Given a PCI domain, bus, and slot/function number, the desired PCI
device is located in the list of PCI devices. If the device is
found, its reference count is increased and this function returns a
pointer to its data structure. The caller must decrement the
reference count by calling pci_dev_put
. If no device is found,
NULL
is returned.
pci_get_subsys — begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
struct pci_dev * fsfuncpci_get_subsys ( | vendor, | |
device, | ||
ss_vendor, | ||
ss_device, | ||
from) ; |
unsigned int vendor
;unsigned int device
;unsigned int ss_vendor
;unsigned int ss_device
;struct pci_dev * from
;vendor
PCI vendor id to match, or PCI_ANY_ID
to match all vendor ids
device
PCI device id to match, or PCI_ANY_ID
to match all device ids
ss_vendor
PCI subsystem vendor id to match, or PCI_ANY_ID
to match all vendor ids
ss_device
PCI subsystem device id to match, or PCI_ANY_ID
to match all device ids
from
Previous PCI device found in search, or NULL
for new search.
Iterates through the list of known PCI devices. If a PCI device is found
with a matching vendor
, device
, ss_vendor
and ss_device
, a pointer to its
device structure is returned, and the reference count to the device is
incremented. Otherwise, NULL
is returned. A new search is initiated by
passing NULL
as the from
argument. Otherwise if from
is not NULL
,
searches continue from next device on the global list.
The reference count for from
is always decremented if it is not NULL
.
pci_get_device — begin or continue searching for a PCI device by vendor/device id
struct pci_dev * fsfuncpci_get_device ( | vendor, | |
device, | ||
from) ; |
unsigned int vendor
;unsigned int device
;struct pci_dev * from
;vendor
PCI vendor id to match, or PCI_ANY_ID
to match all vendor ids
device
PCI device id to match, or PCI_ANY_ID
to match all device ids
from
Previous PCI device found in search, or NULL
for new search.
Iterates through the list of known PCI devices. If a PCI device is
found with a matching vendor
and device
, the reference count to the
device is incremented and a pointer to its device structure is returned.
Otherwise, NULL
is returned. A new search is initiated by passing NULL
as the from
argument. Otherwise if from
is not NULL
, searches continue
from next device on the global list. The reference count for from
is
always decremented if it is not NULL
.
pci_get_class — begin or continue searching for a PCI device by class
struct pci_dev * fsfuncpci_get_class ( | class, | |
from) ; |
unsigned int class
;struct pci_dev * from
;class
search for a PCI device with this class designation
from
Previous PCI device found in search, or NULL
for new search.
Iterates through the list of known PCI devices. If a PCI device is
found with a matching class
, the reference count to the device is
incremented and a pointer to its device structure is returned.
Otherwise, NULL
is returned.
A new search is initiated by passing NULL
as the from
argument.
Otherwise if from
is not NULL
, searches continue from next device
on the global list. The reference count for from
is always decremented
if it is not NULL
.
pci_dev_present — Returns 1 if device matching the device list is present, 0 if not.
int fsfuncpci_dev_present ( | ids) ; |
const struct pci_device_id * ids
;ids
A pointer to a null terminated list of struct pci_device_id structures that describe the type of PCI device the caller is trying to find.
You do not have a reference to any device that might be found by this function, so if that device is removed from the system right after this function is finished, the value will be stale. Use this function to find devices that are usually built into a system, or for a general hint as to if another device happens to be present at this specific moment in time.
pci_enable_msi_block — configure device's MSI capability structure
int fsfuncpci_enable_msi_block ( | dev, | |
nvec) ; |
struct pci_dev * dev
;unsigned int nvec
;
Allocate IRQs for a device with the MSI capability.
This function returns a negative errno if an error occurs. If it
is unable to allocate the number of interrupts requested, it returns
the number of interrupts it might be able to allocate. If it successfully
allocates at least the number of interrupts requested, it returns 0 and
updates the dev
's irq member to the lowest new interrupt number; the
other interrupt numbers allocated to this device are consecutive.
pci_enable_msix — configure device's MSI-X capability structure
int fsfuncpci_enable_msix ( | dev, | |
entries, | ||
nvec) ; |
struct pci_dev * dev
;struct msix_entry * entries
;int nvec
;dev
pointer to the pci_dev data structure of MSI-X device function
entries
pointer to an array of MSI-X entries
nvec
number of MSI-X irqs requested for allocation by device driver
Setup the MSI-X capability structure of device function with the number of requested irqs upon its software driver call to request for MSI-X mode enabled on its hardware device function. A return of zero indicates the successful configuration of MSI-X capability structure with new allocated MSI-X irqs. A return of < 0 indicates a failure. Or a return of > 0 indicates that driver request is exceeding the number of irqs or MSI-X vectors available. Driver should use the returned value to re-send its request.
pci_bus_alloc_resource — allocate a resource from a parent bus
int fsfuncpci_bus_alloc_resource ( | bus, | |
res, | ||
size, | ||
align, | ||
min, | ||
type_mask, | ||
alignf, | ||
alignf_data) ; |
struct pci_bus * bus
;struct resource * res
;resource_size_t size
;resource_size_t align
;resource_size_t min
;unsigned int type_mask
;resource_size_t (*alignf)
(
void *, const struct resource *, resource_size_t, resource_size_t)
;void * alignf_data
;pci_bus_add_device — start driver for a single device
int fsfuncpci_bus_add_device ( | dev) ; |
struct pci_dev * dev
;pci_bus_add_devices — start driver for PCI devices
void fsfuncpci_bus_add_devices ( | bus) ; |
const struct pci_bus * bus
;pci_bus_set_ops — Set raw operations of pci bus
struct pci_ops * fsfuncpci_bus_set_ops ( | bus, | |
ops) ; |
struct pci_bus * bus
;struct pci_ops * ops
;pci_read_vpd — Read one entry from Vital Product Data
ssize_t fsfuncpci_read_vpd ( | dev, | |
pos, | ||
count, | ||
buf) ; |
struct pci_dev * dev
;loff_t pos
;size_t count
;void * buf
;pci_write_vpd — Write entry to Vital Product Data
ssize_t fsfuncpci_write_vpd ( | dev, | |
pos, | ||
count, | ||
buf) ; |
struct pci_dev * dev
;loff_t pos
;size_t count
;const void * buf
;pci_vpd_truncate — Set available Vital Product Data size
int fsfuncpci_vpd_truncate ( | dev, | |
size) ; |
struct pci_dev * dev
;size_t size
;pci_cfg_access_lock — Lock PCI config reads/writes
void fsfuncpci_cfg_access_lock ( | dev) ; |
struct pci_dev * dev
;pci_cfg_access_trylock — try to lock PCI config reads/writes
bool fsfuncpci_cfg_access_trylock ( | dev) ; |
struct pci_dev * dev
;pci_cfg_access_unlock — Unlock PCI config reads/writes
void fsfuncpci_cfg_access_unlock ( | dev) ; |
struct pci_dev * dev
;pci_lost_interrupt — reports a lost PCI interrupt
enum pci_lost_interrupt_reason fsfuncpci_lost_interrupt ( | pdev) ; |
struct pci_dev * pdev
;__ht_create_irq — create an irq and attach it to a device.
int fsfunc__ht_create_irq ( | dev, | |
idx, | ||
update) ; |
struct pci_dev * dev
;int idx
;ht_irq_update_t * update
;ht_create_irq — create an irq and attach it to a device.
int fsfuncht_create_irq ( | dev, | |
idx) ; |
struct pci_dev * dev
;int idx
;ht_destroy_irq — destroy an irq created with ht_create_irq
void fsfuncht_destroy_irq ( | irq) ; |
unsigned int irq
;pci_scan_slot — scan a PCI slot on a bus for devices.
int fsfuncpci_scan_slot ( | bus, | |
devfn) ; |
struct pci_bus * bus
;int devfn
;pci_rescan_bus — scan a PCI bus for devices.
unsigned int __ref fsfuncpci_rescan_bus ( | bus) ; |
struct pci_bus * bus
;pci_create_slot — create or increment refcount for physical PCI slot
struct pci_slot * fsfuncpci_create_slot ( | parent, | |
slot_nr, | ||
name, | ||
hotplug) ; |
struct pci_bus * parent
;int slot_nr
;const char * name
;struct hotplug_slot * hotplug
;parent
struct pci_bus of parent bridge
slot_nr
PCI_SLOT(pci_dev->devfn) or -1 for placeholder
name
user visible string presented in /sys/bus/pci/slots/<name>
hotplug
set if caller is hotplug driver, NULL otherwise
PCI slots have first class attributes such as address, speed, width, and a struct pci_slot is used to manage them. This interface will either return a new struct pci_slot to the caller, or if the pci_slot already exists, its refcount will be incremented.
Slots are uniquely identified by a pci_bus
, slot_nr
tuple.
There are known platforms with broken firmware that assign the same name to multiple slots. Workaround these broken platforms by renaming the slots on behalf of the caller. If firmware assigns name N to multiple slots:
The first slot is assigned N, the second slot is assigned N-1, the third slot is assigned N-2, etc.
In most cases, pci_bus
, slot_nr
will be sufficient to uniquely identify
a slot. There is one notable exception - pSeries (rpaphp), where the
slot_nr
cannot be determined until a device is actually inserted into
the slot. In this scenario, the caller may pass -1 for slot_nr
.
The following semantics are imposed when the caller passes slot_nr
==
-1. First, we no longer check for an existing struct
pci_slot, as there
may be many slots with slot_nr
of -1. The other change in semantics is
user-visible, which is the 'address' parameter presented in sysfs will
pci_renumber_slot — update struct pci_slot -> number
void fsfuncpci_renumber_slot ( | slot, | |
slot_nr) ; |
struct pci_slot * slot
;int slot_nr
;pci_destroy_slot — decrement refcount for physical PCI slot
void fsfuncpci_destroy_slot ( | slot) ; |
struct pci_slot * slot
;pci_hp_create_module_link — create symbolic link to the hotplug driver module.
void fsfuncpci_hp_create_module_link ( | pci_slot) ; |
struct pci_slot * pci_slot
;pci_hp_remove_module_link — remove symbolic link to the hotplug driver module.
void fsfuncpci_hp_remove_module_link ( | pci_slot) ; |
struct pci_slot * pci_slot
;pci_enable_rom — enable ROM decoding for a PCI device
int fsfuncpci_enable_rom ( | pdev) ; |
struct pci_dev * pdev
;pci_disable_rom — disable ROM decoding for a PCI device
void fsfuncpci_disable_rom ( | pdev) ; |
struct pci_dev * pdev
;pci_map_rom — map a PCI ROM to kernel space
void __iomem * fsfuncpci_map_rom ( | pdev, | |
size) ; |
struct pci_dev * pdev
;size_t * size
;pci_unmap_rom — unmap the ROM from kernel space
void fsfuncpci_unmap_rom ( | pdev, | |
rom) ; |
struct pci_dev * pdev
;void __iomem * rom
;pci_platform_rom — provides a pointer to any ROM image provided by the platform
void __iomem * fsfuncpci_platform_rom ( | pdev, | |
size) ; |
struct pci_dev * pdev
;size_t * size
;pci_enable_sriov — enable the SR-IOV capability
int fsfuncpci_enable_sriov ( | dev, | |
nr_virtfn) ; |
struct pci_dev * dev
;int nr_virtfn
;pci_disable_sriov — disable the SR-IOV capability
void fsfuncpci_disable_sriov ( | dev) ; |
struct pci_dev * dev
;pci_sriov_migration — notify SR-IOV core of Virtual Function Migration
irqreturn_t fsfuncpci_sriov_migration ( | dev) ; |
struct pci_dev * dev
;pci_num_vf — return number of VFs associated with a PF device_release_driver
int fsfuncpci_num_vf ( | dev) ; |
struct pci_dev * dev
;pci_vfs_assigned — returns the number of VFs assigned to a guest
int fsfuncpci_vfs_assigned ( | dev) ; |
struct pci_dev * dev
;pci_sriov_set_totalvfs — reduce the TotalVFs available
int fsfuncpci_sriov_set_totalvfs ( | dev, | |
numvfs) ; |
struct pci_dev * dev
;u16 numvfs
;pci_sriov_get_totalvfs — get total VFs supported on this device
int fsfuncpci_sriov_get_totalvfs ( | dev) ; |
struct pci_dev * dev
;pci_read_legacy_io — read byte(s) from legacy I/O port space
ssize_t fsfuncpci_read_legacy_io ( | filp, | |
kobj, | ||
bin_attr, | ||
buf, | ||
off, | ||
count) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * bin_attr
;char * buf
;loff_t off
;size_t count
;pci_write_legacy_io — write byte(s) to legacy I/O port space
ssize_t fsfuncpci_write_legacy_io ( | filp, | |
kobj, | ||
bin_attr, | ||
buf, | ||
off, | ||
count) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * bin_attr
;char * buf
;loff_t off
;size_t count
;pci_mmap_legacy_mem — map legacy PCI memory into user memory space
int fsfuncpci_mmap_legacy_mem ( | filp, | |
kobj, | ||
attr, | ||
vma) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * attr
;struct vm_area_struct * vma
;pci_mmap_legacy_io — map legacy PCI IO into user memory space
int fsfuncpci_mmap_legacy_io ( | filp, | |
kobj, | ||
attr, | ||
vma) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * attr
;struct vm_area_struct * vma
;pci_adjust_legacy_attr — adjustment of legacy file attributes
void fsfuncpci_adjust_legacy_attr ( | b, | |
mmap_type) ; |
struct pci_bus * b
;enum pci_mmap_state mmap_type
;pci_create_legacy_files — create legacy I/O port and memory files
void fsfuncpci_create_legacy_files ( | b) ; |
struct pci_bus * b
;Some platforms allow access to legacy I/O port and ISA memory space on a per-bus basis. This routine creates the files and ties them into their associated read, write and mmap files from pci-sysfs.c
On error unwind, but don't propagate the error to the caller as it is ok to set up the PCI bus without these files.
pci_mmap_resource — map a PCI resource into user memory space
int fsfuncpci_mmap_resource ( | kobj, | |
attr, | ||
vma, | ||
write_combine) ; |
struct kobject * kobj
;struct bin_attribute * attr
;struct vm_area_struct * vma
;int write_combine
;pci_remove_resource_files — cleanup resource files
void fsfuncpci_remove_resource_files ( | pdev) ; |
struct pci_dev * pdev
;pci_create_resource_files —
create resource files in sysfs for dev
int fsfuncpci_create_resource_files ( | pdev) ; |
struct pci_dev * pdev
;pci_write_rom — used to enable access to the PCI ROM display
ssize_t fsfuncpci_write_rom ( | filp, | |
kobj, | ||
bin_attr, | ||
buf, | ||
off, | ||
count) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * bin_attr
;char * buf
;loff_t off
;size_t count
;pci_read_rom — read a PCI ROM
ssize_t fsfuncpci_read_rom ( | filp, | |
kobj, | ||
bin_attr, | ||
buf, | ||
off, | ||
count) ; |
struct file * filp
;struct kobject * kobj
;struct bin_attribute * bin_attr
;char * buf
;loff_t off
;size_t count
;__pci_hp_register — register a hotplug_slot with the PCI hotplug subsystem
int fsfunc__pci_hp_register ( | slot, | |
bus, | ||
devnr, | ||
name, | ||
owner, | ||
mod_name) ; |
struct hotplug_slot * slot
;struct pci_bus * bus
;int devnr
;const char * name
;struct module * owner
;const char * mod_name
;pci_hp_deregister — deregister a hotplug_slot with the PCI hotplug subsystem
int fsfuncpci_hp_deregister ( | hotplug) ; |
struct hotplug_slot * hotplug
;pci_hp_change_slot_info — changes the slot's information structure in the core
int fsfuncpci_hp_change_slot_info ( | hotplug, | |
info) ; |
struct hotplug_slot * hotplug
;struct hotplug_slot_info * info
;Table of Contents
dmi_check_system — check system DMI data
int fsfuncdmi_check_system ( | list) ; |
const struct dmi_system_id * list
;dmi_first_match — find dmi_system_id structure matching system DMI data
const struct dmi_system_id * fsfuncdmi_first_match ( | list) ; |
const struct dmi_system_id * list
;dmi_get_system_info — return DMI data value
const char * fsfuncdmi_get_system_info ( | field) ; |
int field
;dmi_name_in_vendors — Check if string is in the DMI system or board vendor name
int fsfuncdmi_name_in_vendors ( | str) ; |
const char * str
;dmi_find_device — find onboard device by type/name
const struct dmi_device * fsfuncdmi_find_device ( | type, | |
name, | ||
from) ; |
int type
;const char * name
;const struct dmi_device * from
;type
device type or DMI_DEV_TYPE_ANY
to match all device types
name
device name string or NULL
to match all
from
previous device found in search, or NULL
for new search.
Iterates through the list of known onboard devices. If a device is
found with a matching vendor
and device
, a pointer to its device
structure is returned. Otherwise, NULL
is returned.
A new search is initiated by passing NULL
as the from
argument.
If from
is not NULL
, searches continue from next device.
dmi_get_date — parse a DMI date
bool fsfuncdmi_get_date ( | field, | |
yearp, | ||
monthp, | ||
dayp) ; |
int field
;int * yearp
;int * monthp
;int * dayp
;field
data index (see enum dmi_field)
yearp
optional out parameter for the year
monthp
optional out parameter for the month
dayp
optional out parameter for the day
The date field is assumed to be in the form resembling [mm[/dd]]/yy[yy] and the result is stored in the out parameters any or all of which can be omitted.
If the field doesn't exist, all out parameters are set to zero and false is returned. Otherwise, true is returned with any invalid part of date set to zero.
On return, year, month and day are guaranteed to be in the range of [0,9999], [0,12] and [0,31] respectively.
edd_show_raw_data — copies raw data to buffer for userspace to parse
ssize_t fsfuncedd_show_raw_data ( | edev, | |
buf) ; |
struct edd_device * edev
;char * buf
;edd_dev_is_type — is this EDD device a 'type' device?
int fsfuncedd_dev_is_type ( | edev, | |
type) ; |
struct edd_device * edev
;const char * type
;Table of Contents
security_init — initializes the security framework
int fsfuncsecurity_init ( | void) ; |
void
;security_module_enable — Load given security module on boot?
int fsfuncsecurity_module_enable ( | ops) ; |
struct security_operations * ops
;register_security — registers a security framework with the kernel
int fsfuncregister_security ( | ops) ; |
struct security_operations * ops
;
This function allows a security module to register itself with the
kernel security subsystem. Some rudimentary checking is done on the ops
value passed to this function. You'll need to check first if your LSM
is allowed to register its ops
by calling security_module_enable(ops
).
If there is already a security module registered with the kernel,
an error will be returned. Otherwise 0
is returned on success.
securityfs_create_file — create a file in the securityfs filesystem
struct dentry * fsfuncsecurityfs_create_file ( | name, | |
mode, | ||
parent, | ||
data, | ||
fops) ; |
const char * name
;umode_t mode
;struct dentry * parent
;void * data
;const struct file_operations * fops
;name
a pointer to a string containing the name of the file to create.
mode
the permission that the file should have
parent
a pointer to the parent dentry for this file. This should be a
directory dentry if set. If this parameter is NULL
, then the
file will be created in the root of the securityfs filesystem.
data
a pointer to something that the caller will want to get to later
on. The inode.i_private pointer will point to this value on
the open
call.
fops
a pointer to a struct file_operations that should be used for this file.
This is the basic “create a file” function for securityfs. It allows for a
wide range of flexibility in creating a file, or a directory (if you
want to create a directory, the securityfs_create_dir
function is
recommended to be used instead).
This function returns a pointer to a dentry if it succeeds. This
pointer must be passed to the securityfs_remove
function when the file is
to be removed (no automatic cleanup happens if your module is unloaded,
you are responsible here). If an error occurs, the function will return
the error value (via ERR_PTR).
If securityfs is not enabled in the kernel, the value -ENODEV
is
returned.
securityfs_create_dir — create a directory in the securityfs filesystem
struct dentry * fsfuncsecurityfs_create_dir ( | name, | |
parent) ; |
const char * name
;struct dentry * parent
;name
a pointer to a string containing the name of the directory to create.
parent
a pointer to the parent dentry for this file. This should be a
directory dentry if set. If this parameter is NULL
, then the
directory will be created in the root of the securityfs filesystem.
This function creates a directory in securityfs with the given name
.
This function returns a pointer to a dentry if it succeeds. This
pointer must be passed to the securityfs_remove
function when the file is
to be removed (no automatic cleanup happens if your module is unloaded,
you are responsible here). If an error occurs, NULL
will be returned.
If securityfs is not enabled in the kernel, the value -ENODEV
is
returned. It is not wise to check for this value, but rather, check for
NULL
or !NULL
instead as to eliminate the need for #ifdef in the calling
code.
securityfs_remove — removes a file or directory from the securityfs filesystem
void fsfuncsecurityfs_remove ( | dentry) ; |
struct dentry * dentry
;
This function removes a file or directory in securityfs that was previously
created with a call to another securityfs function (like
securityfs_create_file
or variants thereof.)
This function is required to be called in order for the file to be removed. No automatic cleanup of files will happen when a module is removed; you are responsible here.
Table of Contents
audit_log_start — obtain an audit buffer
struct audit_buffer * fsfuncaudit_log_start ( | ctx, | |
gfp_mask, | ||
type) ; |
struct audit_context * ctx
;gfp_t gfp_mask
;int type
;Returns audit_buffer pointer on success or NULL on error.
Obtain an audit buffer. This routine does locking to obtain the audit buffer, but then no locking is required for calls to audit_log_*format. If the task (ctx) is a task that is currently in a syscall, then the syscall is marked as auditable and an audit record will be written at syscall exit. If there is no associated task, then task context (ctx) should be NULL.
audit_log_format — format a message into the audit buffer.
void fsfuncaudit_log_format ( | ab, | |
fmt, | ||
...) ; |
struct audit_buffer * ab
;const char * fmt
; ...
;audit_log_end — end one audit record
void fsfuncaudit_log_end ( | ab) ; |
struct audit_buffer * ab
;audit_log — Log an audit record
void fsfuncaudit_log ( | ctx, | |
gfp_mask, | ||
type, | ||
fmt, | ||
...) ; |
struct audit_context * ctx
;gfp_t gfp_mask
;int type
;const char * fmt
; ...
;audit_log_secctx — Converts and logs SELinux context
void fsfuncaudit_log_secctx ( | ab, | |
secid) ; |
struct audit_buffer * ab
;u32 secid
;This is a helper function that calls security_secid_to_secctx to convert secid to secctx and then adds the (converted) SELinux context to the audit log by calling audit_log_format, thus also preventing leak of internal secid to userspace. If secid cannot be converted audit_panic is called.
audit_alloc — allocate an audit context block for a task
int fsfuncaudit_alloc ( | tsk) ; |
struct task_struct * tsk
;__audit_free — free a per-task audit context
void fsfunc__audit_free ( | tsk) ; |
struct task_struct * tsk
;__audit_syscall_entry — fill in an audit record at syscall entry
void fsfunc__audit_syscall_entry ( | arch, | |
major, | ||
a1, | ||
a2, | ||
a3, | ||
a4) ; |
int arch
;int major
;unsigned long a1
;unsigned long a2
;unsigned long a3
;unsigned long a4
;arch
architecture type
major
major syscall type (function)
a1
additional syscall register 1
a2
additional syscall register 2
a3
additional syscall register 3
a4
additional syscall register 4
Fill in audit context at syscall entry. This only happens if the audit context was created when the task was created and the state or filters demand the audit context be built. If the state from the per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, then the record will be written at syscall exit time (otherwise, it will only be written if another part of the kernel requests that it be written).
__audit_syscall_exit — deallocate audit context after a system call
void fsfunc__audit_syscall_exit ( | success, | |
return_code) ; |
int success
;long return_code
;
Tear down after system call. If the audit context has been marked as
auditable (either because of the AUDIT_RECORD_CONTEXT state from
filtering, or because some other part of the kernel wrote an audit
message), then write out the syscall information. In all cases,
free the names stored from getname
.
__audit_reusename — fill out filename with info from existing entry
struct filename * fsfunc__audit_reusename ( | uptr) ; |
const __user char * uptr
;__audit_getname — add a name to the list
void fsfunc__audit_getname ( | name) ; |
struct filename * name
;__audit_inode — store the inode and device from a lookup
void fsfunc__audit_inode ( | name, | |
dentry, | ||
flags) ; |
struct filename * name
;const struct dentry * dentry
;unsigned int flags
;auditsc_get_stamp — get local copies of audit_context values
int fsfuncauditsc_get_stamp ( | ctx, | |
t, | ||
serial) ; |
struct audit_context * ctx
;struct timespec * t
;unsigned int * serial
;audit_set_loginuid — set current task's audit_context loginuid
int fsfuncaudit_set_loginuid ( | loginuid) ; |
kuid_t loginuid
;__audit_mq_open — record audit data for a POSIX MQ open
void fsfunc__audit_mq_open ( | oflag, | |
mode, | ||
attr) ; |
int oflag
;umode_t mode
;struct mq_attr * attr
;__audit_mq_sendrecv — record audit data for a POSIX MQ timed send/receive
void fsfunc__audit_mq_sendrecv ( | mqdes, | |
msg_len, | ||
msg_prio, | ||
abs_timeout) ; |
mqd_t mqdes
;size_t msg_len
;unsigned int msg_prio
;const struct timespec * abs_timeout
;__audit_mq_notify — record audit data for a POSIX MQ notify
void fsfunc__audit_mq_notify ( | mqdes, | |
notification) ; |
mqd_t mqdes
;const struct sigevent * notification
;__audit_mq_getsetattr — record audit data for a POSIX MQ get/set attribute
void fsfunc__audit_mq_getsetattr ( | mqdes, | |
mqstat) ; |
mqd_t mqdes
;struct mq_attr * mqstat
;__audit_ipc_obj — record audit data for ipc object
void fsfunc__audit_ipc_obj ( | ipcp) ; |
struct kern_ipc_perm * ipcp
;__audit_ipc_set_perm — record audit data for new ipc permissions
void fsfunc__audit_ipc_set_perm ( | qbytes, | |
uid, | ||
gid, | ||
mode) ; |
unsigned long qbytes
;uid_t uid
;gid_t gid
;umode_t mode
;__audit_socketcall — record audit data for sys_socketcall
int fsfunc__audit_socketcall ( | nargs, | |
args) ; |
int nargs
;unsigned long * args
;__audit_fd_pair — record audit data for pipe and socketpair
void fsfunc__audit_fd_pair ( | fd1, | |
fd2) ; |
int fd1
;int fd2
;__audit_sockaddr — record audit data for sys_bind, sys_connect, sys_sendto
int fsfunc__audit_sockaddr ( | len, | |
a) ; |
int len
;void * a
;__audit_signal_info — record signal info for shutting down audit subsystem
int fsfunc__audit_signal_info ( | sig, | |
t) ; |
int sig
;struct task_struct * t
;__audit_log_bprm_fcaps — store information about a loading bprm and relevant fcaps
int fsfunc__audit_log_bprm_fcaps ( | bprm, | |
new, | ||
old) ; |
struct linux_binprm * bprm
;const struct cred * new
;const struct cred * old
;__audit_log_capset — store information about the arguments to the capset syscall
void fsfunc__audit_log_capset ( | pid, | |
new, | ||
old) ; |
pid_t pid
;const struct cred * new
;const struct cred * old
;audit_core_dumps — record information about processes that end abnormally
void fsfuncaudit_core_dumps ( | signr) ; |
long signr
;audit_receive_filter — apply all rules to the specified message type
int fsfuncaudit_receive_filter ( | type, | |
pid, | ||
seq, | ||
data, | ||
datasz) ; |
int type
;int pid
;int seq
;void * data
;size_t datasz
;parent_len — find the length of the parent portion of a pathname
int fsfuncparent_len ( | path) ; |
const char * path
;Table of Contents
sys_acct — enable/disable process accounting
long fsfuncsys_acct ( | name) ; |
const char __user * name
;acct_auto_close_mnt — turn off a filesystem's accounting if it is on
void fsfuncacct_auto_close_mnt ( | m) ; |
struct vfsmount * m
;acct_auto_close — turn off a filesystem's accounting if it is on
void fsfuncacct_auto_close ( | sb) ; |
struct super_block * sb
;Table of Contents
current
's plugged list
bdget
by gendisk and partition number
blk_get_backing_dev_info — get the address of a queue's backing_dev_info
struct backing_dev_info * fsfuncblk_get_backing_dev_info ( | bdev) ; |
struct block_device * bdev
;blk_delay_queue — restart queueing after defined interval
void fsfuncblk_delay_queue ( | q, | |
msecs) ; |
struct request_queue * q
;unsigned long msecs
;blk_start_queue — restart a previously stopped queue
void fsfuncblk_start_queue ( | q) ; |
struct request_queue * q
;blk_stop_queue — stop a queue
void fsfuncblk_stop_queue ( | q) ; |
struct request_queue * q
;
The Linux block layer assumes that a block driver will consume all
entries on the request queue when the request_fn strategy is called.
Often this will not happen, because of hardware limitations (queue
depth settings). If a device driver gets a 'queue full' response,
or if it simply chooses not to queue more I/O at one point, it can
call this function to prevent the request_fn from being called until
the driver has signalled it's ready to go again. This happens by calling
blk_start_queue
to restart queue operations. Queue lock must be held.
blk_sync_queue — cancel any pending callbacks on a queue
void fsfuncblk_sync_queue ( | q) ; |
struct request_queue * q
;The block layer may perform asynchronous callback activity on a queue, such as calling the unplug function after a timeout. A block device may call blk_sync_queue to ensure that any such activity is cancelled, thus allowing it to release resources that the callbacks might use. The caller must already have made sure that its ->make_request_fn will not re-add plugging prior to calling this function.
This function does not cancel any asynchronous activity arising
out of elevator or throttling code. That would require elevator_exit
and blkcg_exit_queue
to be called with queue lock initialized.
__blk_run_queue — run a single device queue
void fsfunc__blk_run_queue ( | q) ; |
struct request_queue * q
;blk_run_queue_async — run a single device queue in workqueue context
void fsfuncblk_run_queue_async ( | q) ; |
struct request_queue * q
;blk_run_queue — run a single device queue
void fsfuncblk_run_queue ( | q) ; |
struct request_queue * q
;blk_queue_bypass_start — enter queue bypass mode
void fsfuncblk_queue_bypass_start ( | q) ; |
struct request_queue * q
;
In bypass mode, only the dispatch FIFO queue of q
is used. This
function makes q
enter bypass mode and drains all requests which were
throttled or issued before. On return, it's guaranteed that no request
is being throttled or has ELVPRIV set and blk_queue_bypass
true
inside queue or RCU read lock.
blk_queue_bypass_end — leave queue bypass mode
void fsfuncblk_queue_bypass_end ( | q) ; |
struct request_queue * q
;blk_cleanup_queue — shutdown a request queue
void fsfuncblk_cleanup_queue ( | q) ; |
struct request_queue * q
;blk_init_queue — prepare a request queue for use with a block device
struct request_queue * fsfuncblk_init_queue ( | rfn, | |
lock) ; |
request_fn_proc * rfn
;spinlock_t * lock
;rfn
The function to be called to process requests that have been placed on the queue.
lock
Request queue spin lock
If a block device wishes to use the standard request handling procedures,
which sorts requests and coalesces adjacent requests, then it must
call blk_init_queue
. The function rfn
will be called when there
are requests on the queue that need to be processed. If the device
supports plugging, then rfn
may not be called immediately when requests
are available on the queue, but may be called at some time later instead.
Plugged queues are generally unplugged when a buffer belonging to one
of the requests on the queue is needed, or due to memory pressure.
rfn
is not required, or even expected, to remove all requests off the
queue, but only as many as it can handle at a time. If it does leave
requests on the queue, it is responsible for arranging that the requests
get dealt with eventually.
The queue spin lock must be held while manipulating the requests on the request queue; this lock will be taken also from interrupt context, so irq disabling is needed for it.
Function returns a pointer to the initialized request queue, or NULL
if
it didn't succeed.
blk_make_request — given a bio, allocate a corresponding struct request.
struct request * fsfuncblk_make_request ( | q, | |
bio, | ||
gfp_mask) ; |
struct request_queue * q
;struct bio * bio
;gfp_t gfp_mask
;q
target request queue
bio
The bio describing the memory mappings that will be submitted for IO. It may be a chained-bio properly constructed by block/bio layer.
gfp_mask
gfp flags to be used for memory allocation
blk_make_request is the parallel of generic_make_request for BLOCK_PC type commands. Where the struct request needs to be further initialized by the caller. It is passed a struct bio, which describes the memory info of the I/O transfer.
The caller of blk_make_request must make sure that bi_io_vec
are set to describe the memory buffers. That bio_data_dir
will return
the needed direction of the request. (And all bio's in the passed bio-chain
are properly set accordingly)
If called under non-sleepable conditions, mapped bio buffers must not need bouncing, by calling the appropriate masked or flagged allocator, suitable for the target device. Otherwise the call to blk_queue_bounce will BUG.
When allocating/cloning a bio-chain, careful consideration should be
given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
anything but the first bio in the chain. Otherwise you risk waiting for IO
completion of a bio that hasn't been submitted yet, thus resulting in a
deadlock. Alternatively bios should be allocated using bio_kmalloc
instead
of bio_alloc
, as that avoids the mempool deadlock.
If possible a big IO should be split into smaller parts when allocation
fails. Partial allocation should not be an error, or you risk a live-lock.
blk_requeue_request — put a request back on queue
void fsfuncblk_requeue_request ( | q, | |
rq) ; |
struct request_queue * q
;struct request * rq
;part_round_stats — Round off the performance stats on a struct disk_stats.
void fsfuncpart_round_stats ( | cpu, | |
part) ; |
int cpu
;struct hd_struct * part
;The average IO queue length and utilisation statistics are maintained by observing the current state of the queue length and the amount of time it has been in this state for.
Normally, that accounting is done on IO completion, but that can result in more than a second's worth of IO being accounted for within any one second, leading to >100% utilisation. To deal with that, we call this function to do a round-off before returning the results when reading /proc/diskstats. This accounts immediately for all queue usage up to the current jiffies and restarts the counters again.
blk_add_request_payload — add a payload to a request
void fsfuncblk_add_request_payload ( | rq, | |
page, | ||
len) ; |
struct request * rq
;struct page * page
;unsigned int len
;generic_make_request — hand a buffer to its device driver for I/O
void fsfuncgeneric_make_request ( | bio) ; |
struct bio * bio
;
generic_make_request
is used to make I/O requests of block
devices. It is passed a struct bio, which describes the I/O that needs
to be done.
generic_make_request
does not return any status. The
success/failure status of the request, along with notification of
completion, is delivered asynchronously through the bio->bi_end_io
function described (one day) elsewhere.
The caller of generic_make_request must make sure that bi_io_vec are set to describe the memory buffer, and that bi_dev and bi_sector are set to describe the device address, and the bi_end_io and optionally bi_private are set to describe how completion notification should be signaled.
generic_make_request and the drivers it calls may use bi_next if this bio happens to be merged with someone else, and may resubmit the bio to a lower device by calling into generic_make_request recursively, which means the bio should NOT be touched after the call to ->make_request_fn.
submit_bio — submit a bio to the block device layer for I/O
void fsfuncsubmit_bio ( | rw, | |
bio) ; |
int rw
;struct bio * bio
;blk_rq_check_limits — Helper function to check a request for the queue limit
int fsfuncblk_rq_check_limits ( | q, | |
rq) ; |
struct request_queue * q
;struct request * rq
;
rq
may have been made based on weaker limitations of upper-level queues
in request stacking drivers, and it may violate the limitation of q
.
Since the block layer and the underlying device driver trust rq
after it is inserted to q
, it should be checked against q
before
the insertion using this generic function.
This function should also be useful for request stacking drivers in some cases below, so export this function. Request stacking drivers like request-based dm may change the queue limits while requests are in the queue (e.g. dm's table swapping). Such request stacking drivers should check those requests against the new queue limits again when they dispatch those requests, although such checks are also done against the old queue limits when submitting requests.
blk_insert_cloned_request — Helper for stacking drivers to submit a request
int fsfuncblk_insert_cloned_request ( | q, | |
rq) ; |
struct request_queue * q
;struct request * rq
;blk_rq_err_bytes — determine number of bytes till the next failure boundary
unsigned int fsfuncblk_rq_err_bytes ( | rq) ; |
const struct request * rq
;blk_peek_request — peek at the top of a request queue
struct request * fsfuncblk_peek_request ( | q) ; |
struct request_queue * q
;blk_start_request — start request processing on the driver
void fsfuncblk_start_request ( | req) ; |
struct request * req
;blk_fetch_request — fetch a request from a request queue
struct request * fsfuncblk_fetch_request ( | q) ; |
struct request_queue * q
;blk_update_request — Special helper function for request stacking drivers
bool fsfuncblk_update_request ( | req, | |
error, | ||
nr_bytes) ; |
struct request * req
;int error
;unsigned int nr_bytes
;req
the request being processed
error
0
for success, < 0
for error
nr_bytes
number of bytes to complete req
Ends I/O on a number of bytes attached to req
, but doesn't complete
the request structure even if req
doesn't have leftover.
If req
has leftover, sets it up for the next range of segments.
This special helper function is only for request stacking drivers (e.g. request-based dm) so that they can handle partial completion. Actual device drivers should use blk_end_request instead.
Passing the result of blk_rq_bytes
as nr_bytes
guarantees
false
return from this function.
blk_unprep_request — unprepare a request
void fsfuncblk_unprep_request ( | req) ; |
struct request * req
;This function makes a request ready for complete resubmission (or completion). It happens only after all error handling is complete, so represents the appropriate moment to deallocate any resources that were allocated to the request in the prep_rq_fn. The queue lock is held when calling this.
blk_end_request — Helper function for drivers to complete the request.
bool fsfuncblk_end_request ( | rq, | |
error, | ||
nr_bytes) ; |
struct request * rq
;int error
;unsigned int nr_bytes
;rq
the request being processed
error
0
for success, < 0
for error
nr_bytes
number of bytes to complete
blk_end_request_all — Helper function for drivers to finish the request.
void fsfuncblk_end_request_all ( | rq, | |
error) ; |
struct request * rq
;int error
;blk_end_request_cur — Helper function to finish the current request chunk.
bool fsfuncblk_end_request_cur ( | rq, | |
error) ; |
struct request * rq
;int error
;blk_end_request_err — Finish a request till the next failure boundary.
bool fsfuncblk_end_request_err ( | rq, | |
error) ; |
struct request * rq
;int error
;__blk_end_request — Helper function for drivers to complete the request.
bool fsfunc__blk_end_request ( | rq, | |
error, | ||
nr_bytes) ; |
struct request * rq
;int error
;unsigned int nr_bytes
;__blk_end_request_all — Helper function for drivers to finish the request.
void fsfunc__blk_end_request_all ( | rq, | |
error) ; |
struct request * rq
;int error
;__blk_end_request_cur — Helper function to finish the current request chunk.
bool fsfunc__blk_end_request_cur ( | rq, | |
error) ; |
struct request * rq
;int error
;__blk_end_request_err — Finish a request till the next failure boundary.
bool fsfunc__blk_end_request_err ( | rq, | |
error) ; |
struct request * rq
;int error
;rq_flush_dcache_pages — Helper function to flush all pages in a request
void fsfuncrq_flush_dcache_pages ( | rq) ; |
struct request * rq
;blk_lld_busy — Check if underlying low-level drivers of a device are busy
int fsfuncblk_lld_busy ( | q) ; |
struct request_queue * q
;
Check if underlying low-level drivers of a device are busy.
If the drivers want to export their busy state, they must set own
exporting function using blk_queue_lld_busy
first.
Basically, this function is used only by request stacking drivers to stop dispatching requests to underlying devices when underlying devices are busy. This behavior helps more I/O merging on the queue of the request stacking driver and prevents I/O throughput regression on burst I/O load.
blk_rq_unprep_clone — Helper function to free all bios in a cloned request
void fsfuncblk_rq_unprep_clone ( | rq) ; |
struct request * rq
;blk_rq_prep_clone — Helper function to setup clone request
int fsfuncblk_rq_prep_clone ( | rq, | |
rq_src, | ||
bs, | ||
gfp_mask, | ||
bio_ctr, | ||
data) ; |
struct request * rq
;struct request * rq_src
;struct bio_set * bs
;gfp_t gfp_mask
;int (*bio_ctr)
(
struct bio *, struct bio *, void *)
;void * data
;rq
the request to be setup
rq_src
original request to be cloned
bs
bio_set that bios for clone are allocated from
gfp_mask
memory allocation mask for bio
bio_ctr
setup function to be called for each clone bio.
Returns 0
for success, non 0
for failure.
data
private data to be passed to bio_ctr
Clones bios in rq_src
to rq
, and copies attributes of rq_src
to rq
.
The actual data parts of rq_src
(e.g. ->cmd, ->buffer, ->sense)
are not copied, and copying such parts is the caller's responsibility.
Also, pages which the original bios are pointing to are not copied
and the cloned bios just point same pages.
So cloned bios must be completed before original bios, which means
the caller must complete rq
before rq_src
.
blk_start_plug — initialize blk_plug and track it inside the task_struct
void fsfuncblk_start_plug ( | plug) ; |
struct blk_plug * plug
;
Tracking blk_plug inside the task_struct will help with auto-flushing the
pending I/O should the task end up blocking between blk_start_plug
and
blk_finish_plug
. This is important from a performance perspective, but
also ensures that we don't deadlock. For instance, if the task is blocking
for a memory allocation, memory reclaim could end up wanting to free a
page belonging to that request that is currently residing in our private
plug. By flushing the pending I/O when the process goes to sleep, we avoid
this kind of deadlock.
blk_pm_runtime_init — Block layer runtime PM initialization routine
void fsfuncblk_pm_runtime_init ( | q, | |
dev) ; |
struct request_queue * q
;struct device * dev
;
Initialize runtime-PM-related fields for q
and start auto suspend for
dev
. Drivers that want to take advantage of request-based runtime PM
should call this function after dev
has been initialized, and its
request queue q
has been allocated, and runtime PM for it can not happen
yet (either due to disabled/forbidden or its usage_count > 0). In most
cases, driver should call this function before any I/O has taken place.
This function takes care of setting up using auto suspend for the device, the autosuspend delay is set to -1 to make runtime suspend impossible until an updated value is either set by user or by driver. Drivers do not need to touch other autosuspend settings.
The block layer runtime PM is request based, so only works for drivers that use request as their IO unit instead of those directly use bio's.
blk_pre_runtime_suspend — Pre runtime suspend check
int fsfuncblk_pre_runtime_suspend ( | q) ; |
struct request_queue * q
;This function will check if runtime suspend is allowed for the device by examining if there are any requests pending in the queue. If there are requests pending, the device can not be runtime suspended; otherwise, the queue's status will be updated to SUSPENDING and the driver can proceed to suspend the device.
For the not allowed case, we mark last busy for the device so that runtime PM core will try to autosuspend it some time later.
This function should be called near the start of the device's runtime_suspend callback.
blk_post_runtime_suspend — Post runtime suspend processing
void fsfuncblk_post_runtime_suspend ( | q, | |
err) ; |
struct request_queue * q
;int err
;Update the queue's runtime status according to the return value of the device's runtime suspend function and mark last busy for the device so that PM core will try to auto suspend the device at a later time.
This function should be called near the end of the device's runtime_suspend callback.
blk_pre_runtime_resume — Pre runtime resume processing
void fsfuncblk_pre_runtime_resume ( | q) ; |
struct request_queue * q
;blk_post_runtime_resume — Post runtime resume processing
void fsfuncblk_post_runtime_resume ( | q, | |
err) ; |
struct request_queue * q
;int err
;Update the queue's runtime status according to the return value of the device's runtime_resume function. If it is successfully resumed, process the requests that are queued into the device's queue when it is resuming and then mark last busy and initiate autosuspend for it.
This function should be called near the end of the device's runtime_resume callback.
__blk_run_queue_uncond — run a queue whether or not it has been stopped
void fsfunc__blk_run_queue_uncond ( | q) ; |
struct request_queue * q
;
Invoke request handling on a queue if there are any pending requests.
May be used to restart request handling after a request has completed.
This variant runs the queue whether or not the queue has been
stopped. Must be called with the queue lock held and interrupts
disabled. See also blk_run_queue
.
__blk_drain_queue — drain requests from request_queue
void fsfunc__blk_drain_queue ( | q, | |
drain_all) ; |
struct request_queue * q
;bool drain_all
;rq_ioc — determine io_context for request allocation
struct io_context * fsfuncrq_ioc ( | bio) ; |
struct bio * bio
;__get_request — get a free request
struct request * fsfunc__get_request ( | rl, | |
rw_flags, | ||
bio, | ||
gfp_mask) ; |
struct request_list * rl
;int rw_flags
;struct bio * bio
;gfp_t gfp_mask
;get_request — get a free request
struct request * fsfuncget_request ( | q, | |
rw_flags, | ||
bio, | ||
gfp_mask) ; |
struct request_queue * q
;int rw_flags
;struct bio * bio
;gfp_t gfp_mask
;q
request_queue to allocate request from
rw_flags
RW and SYNC flags
bio
bio to allocate request for (can be NULL
)
gfp_mask
allocation mask
Get a free request from q
. If __GFP_WAIT
is set in gfp_mask
, this
function keeps retrying under memory pressure and fails iff q
is dead.
Must be called with q
->queue_lock held and,
Returns NULL
on failure, with q
->queue_lock held.
Returns !NULL
on success, with q
->queue_lock *not held*.
attempt_plug_merge —
try to merge with current
's plugged list
bool fsfuncattempt_plug_merge ( | q, | |
bio, | ||
request_count) ; |
struct request_queue * q
;struct bio * bio
;unsigned int * request_count
;q
request_queue new bio is being queued at
bio
new bio being queued
request_count
out parameter for number of traversed plugged requests
Determine whether bio
being queued on q
can be merged with a request
on current
's plugged list. Returns true
if merge was successful,
otherwise false
.
Plugging coalesces IOs from the same issuer for the same purpose without
going through q
->queue_lock. As such it's more of an issuing mechanism
than scheduling, and the request, while may have elvpriv data, is not
added on the elevator at this point. In addition, we don't have
reliable access to the elevator outside queue lock. Only check basic
merging parameters without querying the elevator.
blk_end_bidi_request — Complete a bidi request
bool fsfuncblk_end_bidi_request ( | rq, | |
error, | ||
nr_bytes, | ||
bidi_bytes) ; |
struct request * rq
;int error
;unsigned int nr_bytes
;unsigned int bidi_bytes
;rq
the request to complete
error
0
for success, < 0
for error
nr_bytes
number of bytes to complete rq
bidi_bytes
number of bytes to complete rq
->next_rq
__blk_end_bidi_request — Complete a bidi request with queue lock held
bool fsfunc__blk_end_bidi_request ( | rq, | |
error, | ||
nr_bytes, | ||
bidi_bytes) ; |
struct request * rq
;int error
;unsigned int nr_bytes
;unsigned int bidi_bytes
;rq
the request to complete
error
0
for success, < 0
for error
nr_bytes
number of bytes to complete rq
bidi_bytes
number of bytes to complete rq
->next_rq
blk_rq_map_user — map user data to a request, for REQ_TYPE_BLOCK_PC usage
int fsfuncblk_rq_map_user ( | q, | |
rq, | ||
map_data, | ||
ubuf, | ||
len, | ||
gfp_mask) ; |
struct request_queue * q
;struct request * rq
;struct rq_map_data * map_data
;void __user * ubuf
;unsigned long len
;gfp_t gfp_mask
;q
request queue where request should be inserted
rq
request structure to fill
map_data
pointer to the rq_map_data holding pages (if necessary)
ubuf
the user buffer
len
length of user data
gfp_mask
memory allocation flags
blk_rq_map_user_iov — map user data to a request, for REQ_TYPE_BLOCK_PC usage
int fsfuncblk_rq_map_user_iov ( | q, | |
rq, | ||
map_data, | ||
iov, | ||
iov_count, | ||
len, | ||
gfp_mask) ; |
struct request_queue * q
;struct request * rq
;struct rq_map_data * map_data
;struct sg_iovec * iov
;int iov_count
;unsigned int len
;gfp_t gfp_mask
;q
request queue where request should be inserted
rq
request to map data to
map_data
pointer to the rq_map_data holding pages (if necessary)
iov
pointer to the iovec
iov_count
number of elements in the iovec
len
I/O byte count
gfp_mask
memory allocation flags
blk_rq_unmap_user — unmap a request with user data
int fsfuncblk_rq_unmap_user ( | bio) ; |
struct bio * bio
;blk_rq_map_kern — map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
int fsfuncblk_rq_map_kern ( | q, | |
rq, | ||
kbuf, | ||
len, | ||
gfp_mask) ; |
struct request_queue * q
;struct request * rq
;void * kbuf
;unsigned int len
;gfp_t gfp_mask
;blk_release_queue — release a struct request_queue when it is no longer needed
void fsfuncblk_release_queue ( | kobj) ; |
struct kobject * kobj
;
blk_release_queue is the pair to blk_init_queue
or
blk_queue_make_request
. It should be called when a request queue is
being released; typically when a block device is being de-registered.
Currently, its primary task is to free all the struct request
structures that were allocated to the queue and the queue itself.
blk_queue_prep_rq — set a prepare_request function for queue
void fsfuncblk_queue_prep_rq ( | q, | |
pfn) ; |
struct request_queue * q
;prep_rq_fn * pfn
;blk_queue_unprep_rq — set an unprepare_request function for queue
void fsfuncblk_queue_unprep_rq ( | q, | |
ufn) ; |
struct request_queue * q
;unprep_rq_fn * ufn
;blk_queue_merge_bvec — set a merge_bvec function for queue
void fsfuncblk_queue_merge_bvec ( | q, | |
mbfn) ; |
struct request_queue * q
;merge_bvec_fn * mbfn
;
Usually queues have static limitations on the max sectors or segments that
we can put in a request. Stacking drivers may have some settings that
are dynamic, and thus we have to query the queue whether it is ok to
add a new bio_vec to a bio at a given offset or not. If the block device
has such limitations, it needs to register a merge_bvec_fn to control
the size of bio's sent to it. Note that a block device *must* allow a
single page to be added to an empty bio. The block device driver may want
to use the bio_split
function to deal with these bio's. By default
no merge_bvec_fn is defined for a queue, and only the fixed limits are
honored.
blk_set_default_limits — reset limits to default values
void fsfuncblk_set_default_limits ( | lim) ; |
struct queue_limits * lim
;blk_set_stacking_limits — set default limits for stacking devices
void fsfuncblk_set_stacking_limits ( | lim) ; |
struct queue_limits * lim
;blk_queue_make_request — define an alternate make_request function for a device
void fsfuncblk_queue_make_request ( | q, | |
mfn) ; |
struct request_queue * q
;make_request_fn * mfn
;
The normal way for struct bios to be passed to a device
driver is for them to be collected into requests on a request
queue, and then to allow the device driver to select requests
off that queue when it is ready. This works well for many block
devices. However some block devices (typically virtual devices
such as md or lvm) do not benefit from the processing on the
request queue, and are served best by having the requests passed
directly to them. This can be achieved by providing a function
to blk_queue_make_request
.
blk_queue_bounce_limit — set bounce buffer limit for queue
void fsfuncblk_queue_bounce_limit ( | q, | |
dma_mask) ; |
struct request_queue * q
;u64 dma_mask
;blk_limits_max_hw_sectors — set hard and soft limit of max sectors for request
void fsfuncblk_limits_max_hw_sectors ( | limits, | |
max_hw_sectors) ; |
struct queue_limits * limits
;unsigned int max_hw_sectors
;Enables a low level driver to set a hard upper limit, max_hw_sectors, on the size of requests. max_hw_sectors is set by the device driver based upon the combined capabilities of I/O controller and storage device.
max_sectors is a soft limit imposed by the block layer for filesystem type requests. This value can be overridden on a per-device basis in /sys/block/<device>/queue/max_sectors_kb. The soft limit can not exceed max_hw_sectors.
blk_queue_max_hw_sectors — set max sectors for a request for this queue
void fsfuncblk_queue_max_hw_sectors ( | q, | |
max_hw_sectors) ; |
struct request_queue * q
;unsigned int max_hw_sectors
;blk_queue_max_discard_sectors — set max sectors for a single discard
void fsfuncblk_queue_max_discard_sectors ( | q, | |
max_discard_sectors) ; |
struct request_queue * q
;unsigned int max_discard_sectors
;blk_queue_max_write_same_sectors — set max sectors for a single write same
void fsfuncblk_queue_max_write_same_sectors ( | q, | |
max_write_same_sectors) ; |
struct request_queue * q
;unsigned int max_write_same_sectors
;blk_queue_max_segments — set max hw segments for a request for this queue
void fsfuncblk_queue_max_segments ( | q, | |
max_segments) ; |
struct request_queue * q
;unsigned short max_segments
;blk_queue_max_segment_size — set max segment size for blk_rq_map_sg
void fsfuncblk_queue_max_segment_size ( | q, | |
max_size) ; |
struct request_queue * q
;unsigned int max_size
;blk_queue_logical_block_size — set logical block size for the queue
void fsfuncblk_queue_logical_block_size ( | q, | |
size) ; |
struct request_queue * q
;unsigned short size
;blk_queue_physical_block_size — set physical block size for the queue
void fsfuncblk_queue_physical_block_size ( | q, | |
size) ; |
struct request_queue * q
;unsigned int size
;blk_queue_alignment_offset — set physical block alignment offset
void fsfuncblk_queue_alignment_offset ( | q, | |
offset) ; |
struct request_queue * q
;unsigned int offset
;blk_limits_io_min — set minimum request size for a device
void fsfuncblk_limits_io_min ( | limits, | |
min) ; |
struct queue_limits * limits
;unsigned int min
;blk_queue_io_min — set minimum request size for the queue
void fsfuncblk_queue_io_min ( | q, | |
min) ; |
struct request_queue * q
;unsigned int min
;Storage devices may report a granularity or preferred minimum I/O size which is the smallest request the device can perform without incurring a performance penalty. For disk drives this is often the physical block size. For RAID arrays it is often the stripe chunk size. A properly aligned multiple of minimum_io_size is the preferred request size for workloads where a high number of I/O operations is desired.
blk_limits_io_opt — set optimal request size for a device
void fsfuncblk_limits_io_opt ( | limits, | |
opt) ; |
struct queue_limits * limits
;unsigned int opt
;Storage devices may report an optimal I/O size, which is the device's preferred unit for sustained I/O. This is rarely reported for disk drives. For RAID arrays it is usually the stripe width or the internal track size. A properly aligned multiple of optimal_io_size is the preferred request size for workloads where sustained throughput is desired.
blk_queue_io_opt — set optimal request size for the queue
void fsfuncblk_queue_io_opt ( | q, | |
opt) ; |
struct request_queue * q
;unsigned int opt
;Storage devices may report an optimal I/O size, which is the device's preferred unit for sustained I/O. This is rarely reported for disk drives. For RAID arrays it is usually the stripe width or the internal track size. A properly aligned multiple of optimal_io_size is the preferred request size for workloads where sustained throughput is desired.
blk_queue_stack_limits — inherit underlying queue limits for stacked drivers
void fsfuncblk_queue_stack_limits ( | t, | |
b) ; |
struct request_queue * t
;struct request_queue * b
;blk_stack_limits — adjust queue_limits for stacked devices
int fsfuncblk_stack_limits ( | t, | |
b, | ||
start) ; |
struct queue_limits * t
;struct queue_limits * b
;sector_t start
;t
the stacking driver limits (top device)
b
the underlying queue limits (bottom, component device)
start
first data sector within component device
This function is used by stacking drivers like MD and DM to ensure that all component devices have compatible block sizes and alignments. The stacking driver must provide a queue_limits struct (top) and then iteratively call the stacking function for all component (bottom) devices. The stacking function will attempt to combine the values and ensure proper alignment.
Returns 0 if the top and bottom queue_limits are compatible. The top device's block sizes and alignment offsets may be adjusted to ensure alignment with the bottom device. If no compatible sizes and alignments exist, -1 is returned and the resulting top queue_limits will have the misaligned flag set to indicate that the alignment_offset is undefined.
bdev_stack_limits — adjust queue limits for stacked drivers
int fsfuncbdev_stack_limits ( | t, | |
bdev, | ||
start) ; |
struct queue_limits * t
;struct block_device * bdev
;sector_t start
;disk_stack_limits — adjust queue limits for stacked drivers
void fsfuncdisk_stack_limits ( | disk, | |
bdev, | ||
offset) ; |
struct gendisk * disk
;struct block_device * bdev
;sector_t offset
;blk_queue_dma_pad — set pad mask
void fsfuncblk_queue_dma_pad ( | q, | |
mask) ; |
struct request_queue * q
;unsigned int mask
;blk_queue_update_dma_pad — update pad mask
void fsfuncblk_queue_update_dma_pad ( | q, | |
mask) ; |
struct request_queue * q
;unsigned int mask
;blk_queue_dma_drain — Set up a drain buffer for excess dma.
int fsfuncblk_queue_dma_drain ( | q, | |
dma_drain_needed, | ||
buf, | ||
size) ; |
struct request_queue * q
;dma_drain_needed_fn * dma_drain_needed
;void * buf
;unsigned int size
;q
the request queue for the device
dma_drain_needed
fn which returns non-zero if drain is necessary
buf
physically contiguous buffer
size
size of the buffer in bytes
Some devices have excess DMA problems and can't simply discard (or zero fill) the unwanted piece of the transfer. They have to have a real area of memory to transfer it into. The use case for this is ATAPI devices in DMA mode. If the packet command causes a transfer bigger than the transfer size some HBAs will lock up if there aren't DMA elements to contain the excess transfer. What this API does is adjust the queue so that the buf is always appended silently to the scatterlist.
blk_queue_segment_boundary — set boundary rules for segment merging
void fsfuncblk_queue_segment_boundary ( | q, | |
mask) ; |
struct request_queue * q
;unsigned long mask
;blk_queue_dma_alignment — set dma length and memory alignment
void fsfuncblk_queue_dma_alignment ( | q, | |
mask) ; |
struct request_queue * q
;int mask
;blk_queue_update_dma_alignment — update dma length and memory alignment
void fsfuncblk_queue_update_dma_alignment ( | q, | |
mask) ; |
struct request_queue * q
;int mask
;update required memory and length alignment for direct dma transactions. If the requested alignment is larger than the current alignment, then the current queue alignment is updated to the new value, otherwise it is left alone. The design of this is to allow multiple objects (driver, device, transport etc) to set their respective alignments without having them interfere.
blk_queue_flush — configure queue's cache flush capability
void fsfuncblk_queue_flush ( | q, | |
flush) ; |
struct request_queue * q
;unsigned int flush
;blk_execute_rq_nowait — insert a request into queue for execution
void fsfuncblk_execute_rq_nowait ( | q, | |
bd_disk, | ||
rq, | ||
at_head, | ||
done) ; |
struct request_queue * q
;struct gendisk * bd_disk
;struct request * rq
;int at_head
;rq_end_io_fn * done
;q
queue to insert the request in
bd_disk
matching gendisk
rq
request to insert
at_head
insert request at head or tail of queue
done
I/O completion handler
blk_execute_rq — insert a request into queue for execution
int fsfuncblk_execute_rq ( | q, | |
bd_disk, | ||
rq, | ||
at_head) ; |
struct request_queue * q
;struct gendisk * bd_disk
;struct request * rq
;int at_head
;blkdev_issue_flush — queue a flush
int fsfuncblkdev_issue_flush ( | bdev, | |
gfp_mask, | ||
error_sector) ; |
struct block_device * bdev
;gfp_t gfp_mask
;sector_t * error_sector
;blkdev_issue_discard — queue a discard
int fsfuncblkdev_issue_discard ( | bdev, | |
sector, | ||
nr_sects, | ||
gfp_mask, | ||
flags) ; |
struct block_device * bdev
;sector_t sector
;sector_t nr_sects
;gfp_t gfp_mask
;unsigned long flags
;blkdev_issue_write_same — queue a write same operation
int fsfuncblkdev_issue_write_same ( | bdev, | |
sector, | ||
nr_sects, | ||
gfp_mask, | ||
page) ; |
struct block_device * bdev
;sector_t sector
;sector_t nr_sects
;gfp_t gfp_mask
;struct page * page
;blkdev_issue_zeroout — zero-fill a block range
int fsfuncblkdev_issue_zeroout ( | bdev, | |
sector, | ||
nr_sects, | ||
gfp_mask) ; |
struct block_device * bdev
;sector_t sector
;sector_t nr_sects
;gfp_t gfp_mask
blk_queue_find_tag — find a request by its tag and queue

struct request *blk_queue_find_tag(struct request_queue *q, int tag);

blk_free_tags — release a given set of tag maintenance info

void blk_free_tags(struct blk_queue_tag *bqt);

blk_queue_free_tags — release tag maintenance info

void blk_queue_free_tags(struct request_queue *q);

blk_init_tags — initialize the tag info for an external tag map

struct blk_queue_tag *blk_init_tags(int depth);

blk_queue_init_tags — initialize the queue tag info

int blk_queue_init_tags(struct request_queue *q, int depth, struct blk_queue_tag *tags);

blk_queue_resize_tags — change the queueing depth

int blk_queue_resize_tags(struct request_queue *q, int new_depth);

blk_queue_end_tag — end tag operations for a request

void blk_queue_end_tag(struct request_queue *q, struct request *rq);

blk_queue_start_tag — find a free tag and assign it

int blk_queue_start_tag(struct request_queue *q, struct request *rq);
This can either be used as a stand-alone helper, or possibly be assigned as the queue prep_rq_fn (in which case struct request automagically gets a tag assigned). Note that this function assumes that any type of request can be queued! If this is not true for your device, you must check the request type before calling this function. The request will also be removed from the request queue, so it is the driver's responsibility to re-add it if it should need to be restarted for some reason.
blk_queue_invalidate_tags — invalidate all pending tags

void blk_queue_invalidate_tags(struct request_queue *q);

__blk_free_tags — release a given set of tag maintenance info

int __blk_free_tags(struct blk_queue_tag *bqt);

__blk_queue_free_tags — release tag maintenance info

void __blk_queue_free_tags(struct request_queue *q);

blk_rq_count_integrity_sg — Count number of integrity scatterlist elements

int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio);

blk_rq_map_integrity_sg — Map integrity metadata into a scatterlist

int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist);

blk_integrity_compare — Compare integrity profile of two disks

int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2);
blk_integrity_register — Register a gendisk as being integrity-capable

int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template);

disk: struct gendisk pointer to make integrity-aware
template: optional integrity profile to register
When a device needs to advertise itself as being able to send/receive integrity metadata it must use this function to register the capability with the block layer. The template is a blk_integrity struct with values appropriate for the underlying hardware. If template is NULL the new profile is allocated but not filled out. See Documentation/block/data-integrity.txt.
blk_integrity_unregister — Remove block integrity profile

void blk_integrity_unregister(struct gendisk *disk);

blk_trace_ioctl — handle the ioctls associated with tracing

int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg);

blk_trace_shutdown — stop and cleanup trace structures

void blk_trace_shutdown(struct request_queue *q);

blk_add_trace_rq — Add a trace for a request oriented action

void blk_add_trace_rq(struct request_queue *q, struct request *rq, u32 what);

blk_add_trace_bio — Add a trace for a bio oriented action

void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error);

blk_add_trace_bio_remap — Add a trace for a bio-remap operation

void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from);

blk_add_trace_rq_remap — Add a trace for a request-remap operation

void blk_add_trace_rq_remap(void *ignore, struct request_queue *q, struct request *rq, dev_t dev, sector_t from);
blk_mangle_minor — scatter minor numbers apart

int blk_mangle_minor(int minor);

blk_alloc_devt — allocate a dev_t for a partition

int blk_alloc_devt(struct hd_struct *part, dev_t *devt);

disk_replace_part_tbl — replace disk->part_tbl in RCU-safe way

void disk_replace_part_tbl(struct gendisk *disk, struct disk_part_tbl *new_ptbl);

disk_expand_part_tbl — expand disk->part_tbl

int disk_expand_part_tbl(struct gendisk *disk, int partno);

disk_block_events — block and flush disk event checking

void disk_block_events(struct gendisk *disk);

On return from this function, it is guaranteed that event checking is not in progress and will not happen until unblocked by disk_unblock_events(). Event blocking is counted, and the actual unblocking happens after the matching number of unblocks are done. Note that this intentionally does not block event checking from disk_clear_events().
disk_unblock_events — unblock disk event checking

void disk_unblock_events(struct gendisk *disk);

disk_flush_events — schedule immediate event checking and flushing

void disk_flush_events(struct gendisk *disk, unsigned int mask);

disk_clear_events — synchronously check, clear and return pending events

unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);

disk_get_part — get partition

struct hd_struct *disk_get_part(struct gendisk *disk, int partno);

disk_part_iter_init — initialize partition iterator

void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, unsigned int flags);

disk_part_iter_next — proceed iterator to the next partition and return it

struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);

disk_part_iter_exit — finish up partition iteration

void disk_part_iter_exit(struct disk_part_iter *piter);

disk_map_sector_rcu — map sector to partition

struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
register_blkdev — register a new block device

int register_blkdev(unsigned int major, const char *name);

major: the requested major device number [1..255]. If major = 0, try to allocate any unused major number.
name: the name of the new block device as a zero-terminated string. The name must be unique within the system.

The return value depends on the major input parameter:
- if a major device number was requested in range [1..255], the function returns zero on success, or a negative error code
- if any unused major number was requested with the major = 0 parameter, the return value is the allocated major number in range [1..255], or a negative error code otherwise
add_disk — add partitioning information to kernel list

void add_disk(struct gendisk *disk);

get_gendisk — get partitioning information for a given device

struct gendisk *get_gendisk(dev_t devt, int *partno);

Table of Contents
register_chrdev_region — register a range of device numbers

int register_chrdev_region(dev_t from, unsigned count, const char *name);

alloc_chrdev_region — register a range of char device numbers

int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, const char *name);

__register_chrdev — create and register a cdev occupying a range of minors

int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops);

major: major device number or 0 for dynamic allocation
baseminor: first of the requested range of minor numbers
count: the number of minor numbers required
name: name of this range of devices
fops: file operations associated with these devices

If major == 0, this function will dynamically allocate a major and return its number. If major > 0, this function will attempt to reserve a device with the given major number and will return zero on success. Returns a negative errno on failure.
The name of this device has nothing to do with the name of the device in /dev. It only helps to keep track of the different owners of devices. If your module has only one type of device, it is OK to use e.g. the name of the module here.
unregister_chrdev_region — return a range of device numbers

void unregister_chrdev_region(dev_t from, unsigned count);

__unregister_chrdev — unregister and destroy a cdev

void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name);

cdev_add — add a char device to the system

int cdev_add(struct cdev *p, dev_t dev, unsigned count);

Table of Contents
misc_register — register a miscellaneous device

int misc_register(struct miscdevice *misc);

Table of Contents
The clock framework defines programming interfaces to support software management of the system clock tree. This framework is widely used with System-On-Chip (SOC) platforms to support power management and various devices which may need custom clock rates. Note that these "clocks" don't relate to timekeeping or real time clocks (RTCs), each of which have separate frameworks. These struct clk instances may be used to manage for example a 96 MHz signal that is used to shift bits into and out of peripherals or busses, or otherwise trigger synchronous state machine transitions in system hardware.
Power management is supported by explicit software clock gating: unused clocks are disabled, so the system doesn't waste power changing the state of transistors that aren't in active use. On some systems this may be backed by hardware clock gating, where clocks are gated without being disabled in software. Sections of chips that are powered but not clocked may be able to retain their last state. This low power state is often called a retention mode. This mode still incurs leakage currents, especially with finer circuit geometries, but for CMOS circuits power is mostly used by clocked state changes.
Power-aware drivers only enable their clocks when the device they manage is in active use. Also, system sleep states often differ according to which clock domains are active: while a "standby" state may allow wakeup from several active domains, a "mem" (suspend-to-RAM) state may require a more wholesale shutdown of clocks derived from higher speed PLLs and oscillators, limiting the number of possible wakeup event sources. A driver's suspend method may need to be aware of system-specific clock constraints on the target sleep state.
Some platforms support programmable clock generators. These can be used by external chips of various kinds, such as other CPUs, multimedia codecs, and devices with strict requirements for interface clocking.
struct clk_notifier — associate a clk with a notifier

struct clk_notifier {
	struct clk *clk;
	struct srcu_notifier_head notifier_head;
	struct list_head node;
};

struct clk_notifier_data — rate data to pass to the notifier callback

struct clk_notifier_data {
	struct clk *clk;
	unsigned long old_rate;
	unsigned long new_rate;
};
clk_unprepare — undo preparation of a clock source

void clk_unprepare(struct clk *clk);

clk_get — lookup and obtain a reference to a clock producer.

struct clk *clk_get(struct device *dev, const char *id);

Returns a struct clk corresponding to the clock producer, or a valid IS_ERR() condition containing errno. The implementation uses dev and id to determine the clock consumer, and thereby the clock producer. (IOW, id may be identical strings, but clk_get may return different clock producers depending on dev.) Drivers must assume that the clock source is not enabled. clk_get should not be called from within interrupt context.
devm_clk_get — lookup and obtain a managed reference to a clock producer.

struct clk *devm_clk_get(struct device *dev, const char *id);

Returns a struct clk corresponding to the clock producer, or a valid IS_ERR() condition containing errno. The implementation uses dev and id to determine the clock consumer, and thereby the clock producer. (IOW, id may be identical strings, but clk_get may return different clock producers depending on dev.) Drivers must assume that the clock source is not enabled. devm_clk_get should not be called from within interrupt context. The clock will automatically be freed when the device is unbound from the bus.
clk_enable — inform the system when the clock source should be running.

int clk_enable(struct clk *clk);

clk_disable — inform the system when the clock source is no longer required.

void clk_disable(struct clk *clk);

clk_get_rate — obtain the current clock rate (in Hz) for a clock source. This is only valid once the clock source has been enabled.

unsigned long clk_get_rate(struct clk *clk);

devm_clk_put — "free" a managed clock source

void devm_clk_put(struct device *dev, struct clk *clk);

clk_round_rate — adjust a rate to the exact rate a clock can provide

long clk_round_rate(struct clk *clk, unsigned long rate);

clk_set_rate — set the clock rate for a clock source

int clk_set_rate(struct clk *clk, unsigned long rate);

clk_set_parent — set the parent clock source for this clock

int clk_set_parent(struct clk *clk, struct clk *parent);

clk_get_parent — get the parent clock source for this clock

struct clk *clk_get_parent(struct clk *clk);

clk_get_sys — get a clock based upon the device name

struct clk *clk_get_sys(const char *dev_id, const char *con_id);
Returns a struct clk corresponding to the clock producer, or a valid IS_ERR() condition containing errno. The implementation uses dev_id and con_id to determine the clock consumer, and thereby the clock producer. In contrast to clk_get, this function takes the device name instead of the device itself for identification. Drivers must assume that the clock source is not enabled. clk_get_sys should not be called from within interrupt context.