#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
 
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
 
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we do not take a page fault and sleep.
 *
 * Here we special-case 1, 2, 4 and 8-byte copy_*_user invocations.  On a
 * fault we return the initial request size (1, 2, 4 or 8), as copy_*_user
 * should do.  If a store crosses a page boundary and gets a fault, the x86
 * will not write anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
    if (__builtin_constant_p(n)) {
        switch(n) {
        case 1:
            *(u8 __force *)to = *(u8 *)from;
            return 0;
        case 2:
            *(u16 __force *)to = *(u16 *)from;
            return 0;
        case 4:
            *(u32 __force *)to = *(u32 *)from;
            return 0;

        case 8:
            *(u64 __force *)to = *(u64 *)from;
            return 0;

        default:
            break;
        }
    }

    __builtin_memcpy((void __force *)to, from, n);
    return 0;
}
 
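/*
 * Illustrative usage sketch: with a compile-time constant size, the call
 * below reduces to the single 4-byte store special-cased above rather
 * than a memcpy.  The function and variable names are placeholders, not
 * part of this header's API.
 */
#if 0
static void uaccess32_example_put_u32(u32 __user *uptr)
{
	u32 val = 42;

	/* sizeof(val) == 4 is constant, so this hits the "case 4" branch;
	 * a nonzero return means the value did not reach user space. */
	if (__copy_to_user_inatomic(uptr, &val, sizeof(val)))
		return;
}
#endif
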
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
 
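/*
 * Illustrative usage sketch: a typical caller validates the destination
 * with access_ok() and treats a nonzero return as a fault.  The function
 * name and variables are placeholders, and the three-argument
 * access_ok(VERIFY_WRITE, addr, size) form is assumed.
 */
#if 0
static int uaccess32_example_write(void __user *udst, const void *kbuf,
				   unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, udst, len))
		return -EFAULT;

	/* Returns the number of bytes that could not be copied. */
	if (__copy_to_user(udst, kbuf, len))
		return -EFAULT;

	return 0;
}
#endif
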
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
    if (__builtin_constant_p(n)) {
        switch(n) {
        case 1:
            *(u8 *)to = *(u8 __force *)from;
            return 0;
        case 2:
            *(u16 *)to = *(u16 __force *)from;
            return 0;
        case 4:
            *(u32 *)to = *(u32 __force *)from;
            return 0;

        case 8:
            *(u64 *)to = *(u64 __force *)from;
            return 0;

        default:
            break;
        }
    }

    __builtin_memcpy(to, (const void __force *)from, n);
    return 0;
}
 
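/*
 * Illustrative usage sketch: the _inatomic variant must not sleep, so a
 * caller typically disables page faults around it and falls back to a
 * sleeping copy on failure.  The names below, including
 * pagefault_disable()/pagefault_enable(), are assumed helpers rather than
 * anything declared in this file.
 */
#if 0
static unsigned long uaccess32_example_read_atomic(void *kbuf,
						   const void __user *usrc,
						   unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(kbuf, usrc, len);
	pagefault_enable();

	/* On failure the uncopied tail of kbuf is NOT zero-padded. */
	return left;
}
#endif
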
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
    if (__builtin_constant_p(n)) {
        switch(n) {
        case 1:
            *(u8 *)to = *(u8 __force *)from;
            return 0;
        case 2:
            *(u16 *)to = *(u16 __force *)from;
            return 0;
        case 4:
            *(u32 *)to = *(u32 __force *)from;
            return 0;

        case 8:
            *(u64 *)to = *(u64 __force *)from;
            return 0;

        default:
            break;
        }
    }

    __builtin_memcpy(to, (const void __force *)from, n);
    return 0;
}
 
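/*
 * Illustrative usage sketch: per the comment above, a failed
 * __copy_from_user() zero-pads the uncopied tail, so the kernel buffer is
 * fully written even when the copy is short.  The function name and
 * variables are placeholders.
 */
#if 0
static int uaccess32_example_read(void *kbuf, const void __user *usrc,
				  unsigned long len)
{
	if (__copy_from_user(kbuf, usrc, len)) {
		/* kbuf still holds: copied bytes followed by zero padding. */
		return -EFAULT;
	}

	return 0;
}
#endif
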
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
    if (__builtin_constant_p(n)) {
        switch(n) {
        case 1:
            *(u8 *)to = *(u8 __force *)from;
            return 0;
        case 2:
            *(u16 *)to = *(u16 __force *)from;
            return 0;
        case 4:
            *(u32 *)to = *(u32 __force *)from;
            return 0;
        default:
            break;
        }
    }
    __builtin_memcpy(to, (const void __force *)from, n);
    return 0;
}
 
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */