Commit de5cb0d

Merge branch 'address-masking'
Merge user access fast validation using address masking.

This allows architectures to optionally use a data dependent address
masking model instead of a conditional branch for validating user
accesses. That avoids the Spectre-v1 speculation barriers.

Right now only x86-64 takes advantage of this, and not all architectures
will be able to do it. It requires a guard region between the user and
kernel address spaces (so that you can't overflow from one to the other),
and an easy way to generate a guaranteed-to-fault address for invalid
user pointers.

Also note that this currently assumes that there is no difference between
user read and write accesses. If extended to architectures like powerpc,
we'll also need to separate out the user read-vs-write cases.

* address-masking:
  x86: make the masked_user_access_begin() macro use its argument only once
  x86: do the user address masking outside the user access area
  x86: support user address masking instead of non-speculative conditional
2 parents af9c191 + 533ab22 commit de5cb0d
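For context, the two validation models described in the merge message can be contrasted in a few lines of plain C. This is only an illustrative sketch: validate_by_branch(), validate_by_mask() and barrier_nospec_stub() are made-up names, not kernel APIs, and the sketch assumes the x86-64 layout where valid user addresses have bit 63 clear.

/* sketch.c - illustrative only, not kernel code. */
#include <stddef.h>

/* Placeholder standing in for the real speculation barrier (LFENCE on x86-64). */
static inline void barrier_nospec_stub(void) { /* no-op stand-in */ }

/* Conditional model: a branch the CPU can speculate past, so a speculation
 * barrier is needed before the pointer may be dereferenced. */
static const void *validate_by_branch(const void *ptr)
{
	if ((long)ptr < 0)		/* bit 63 set: not a user address */
		return NULL;
	barrier_nospec_stub();		/* Spectre-v1 fence */
	return ptr;
}

/* Masking model: no branch at all.  Sign-extending bit 63 leaves a valid
 * user address unchanged and turns anything else into all-ones, a
 * guaranteed-to-fault address.  The result is a pure data dependency of
 * the input, so there is nothing to mispredict and no fence is needed.
 * (Relies on arithmetic right shift of signed values, as the kernel does.) */
static const void *validate_by_mask(const void *ptr)
{
	return (const void *)((long)ptr | ((long)ptr >> 63));
}

Right now only x86-64 defines the masking form; every other architecture keeps the conditional path through the can_do_masked_user_access() fallback added to include/linux/uaccess.h below.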

File tree

5 files changed, +39 -1 lines changed

arch/x86/include/asm/uaccess_64.h (+11)

@@ -53,6 +53,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
  */
 #define valid_user_address(x) ((__force long)(x) >= 0)
 
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+#define masked_user_access_begin(x) ({				\
+	__auto_type __masked_ptr = (x);				\
+	__masked_ptr = mask_user_address(__masked_ptr);		\
+	__uaccess_begin(); __masked_ptr; })
+
 /*
  * User pointers can have tag bits on x86-64. This scheme tolerates
  * arbitrary values in those bits rather then masking them off.
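To see what mask_user_address() in the hunk above actually does to a pointer value, here is a tiny user-space sketch. It assumes an LP64 target and GCC/Clang (for typeof and arithmetic right shift of negative values), and the two addresses are made up purely for illustration.

/* mask_demo.c - user-space sanity check of the masking arithmetic. */
#include <stdio.h>

/* Same arithmetic as the kernel macro above, minus the __force annotation. */
#define mask_user_address(x) ((typeof(x))((long)(x) | ((long)(x) >> 63)))

int main(void)
{
	void *user_ptr   = (void *)0x00007f1234560000UL;	/* bit 63 clear */
	void *kernel_ptr = (void *)0xffff888000001000UL;	/* bit 63 set   */

	/* A valid user address passes through unchanged. */
	printf("%p -> %p\n", user_ptr, mask_user_address(user_ptr));

	/* Anything with bit 63 set collapses to 0xffffffffffffffff, an
	 * address that is guaranteed to fault when accessed, so no
	 * conditional branch (and no fence) is needed before using it. */
	printf("%p -> %p\n", kernel_ptr, mask_user_address(kernel_ptr));

	return 0;
}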

fs/select.c (+3, -1)

@@ -777,7 +777,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
 {
 	// the path is hot enough for overhead of copy_from_user() to matter
 	if (from) {
-		if (!user_read_access_begin(from, sizeof(*from)))
+		if (can_do_masked_user_access())
+			from = masked_user_access_begin(from);
+		else if (!user_read_access_begin(from, sizeof(*from)))
 			return -EFAULT;
 		unsafe_get_user(to->p, &from->p, Efault);
 		unsafe_get_user(to->size, &from->size, Efault);

include/linux/uaccess.h (+7)

@@ -33,6 +33,13 @@
 })
 #endif
 
+#ifdef masked_user_access_begin
+#define can_do_masked_user_access() 1
+#else
+#define can_do_masked_user_access() 0
+#define masked_user_access_begin(src) NULL
+#endif
+
 /*
  * Architectures should provide two primitives (raw_copy_{to,from}_user())
  * and get rid of their private instances of copy_{to,from}_user() and

lib/strncpy_from_user.c (+9)

@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	if (unlikely(count <= 0))
 		return 0;
 
+	if (can_do_masked_user_access()) {
+		long retval;
+
+		src = masked_user_access_begin(src);
+		retval = do_strncpy_from_user(dst, src, count, count);
+		user_read_access_end();
+		return retval;
+	}
+
 	max_addr = TASK_SIZE_MAX;
 	src_addr = (unsigned long)untagged_addr(src);
 	if (likely(src_addr < max_addr)) {

lib/strnlen_user.c (+9)

@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
 	if (unlikely(count <= 0))
 		return 0;
 
+	if (can_do_masked_user_access()) {
+		long retval;
+
+		str = masked_user_access_begin(str);
+		retval = do_strnlen_user(str, count, count);
+		user_read_access_end();
+		return retval;
+	}
+
 	max_addr = TASK_SIZE_MAX;
 	src_addr = (unsigned long)untagged_addr(str);
 	if (likely(src_addr < max_addr)) {
