blob: c1019a736ff13579cab4afd60b3b2ec1dc225374 [file] [log] [blame]
Thomas Gleixner08dbd0f2019-05-29 07:12:41 -07001/* SPDX-License-Identifier: GPL-2.0-only */
Richard Kuo75677462011-10-31 18:48:07 -05002/*
3 * User memory access support for Hexagon
4 *
Richard Kuoe1858b22012-09-19 16:22:02 -05005 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
Richard Kuo75677462011-10-31 18:48:07 -05006 */
7
8#ifndef _ASM_UACCESS_H
9#define _ASM_UACCESS_H
10/*
11 * User space memory access functions
12 */
Richard Kuo75677462011-10-31 18:48:07 -050013#include <asm/sections.h>
14
15/*
16 * access_ok: - Checks if a user space pointer is valid
Richard Kuo75677462011-10-31 18:48:07 -050017 * @addr: User space pointer to start of block to check
18 * @size: Size of block to check
19 *
David Hildenbrandb3c395ef2015-05-11 17:52:08 +020020 * Context: User context only. This function may sleep if pagefaults are
21 * enabled.
Richard Kuo75677462011-10-31 18:48:07 -050022 *
23 * Checks if a pointer to a block of memory in user space is valid.
24 *
25 * Returns true (nonzero) if the memory block *may* be valid, false (zero)
26 * if it is definitely invalid.
27 *
28 * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
29 * simple MSB-based tests used by MIPS won't work. Some further
30 * optimization is probably possible here, but for now, keep it
31 * reasonably simple and not *too* slow. After all, we've got the
32 * MMU for backup.
33 */
Richard Kuo75677462011-10-31 18:48:07 -050034
/*
 * __access_ok() - range-check a user pointer against the current
 * address limit.
 *
 * True when the thread runs with KERNEL_DS (no limit applies), or when
 * the window [addr, addr + size) lies entirely below get_fs().seg.
 * Comparing "size < seg - addr" instead of "addr + size <= seg" avoids
 * wrapping on addr + size.
 *
 * Fix: parenthesize the macro arguments — previously an expression
 * argument such as "base + off" was cast as "(unsigned long)base + off",
 * i.e. the cast bound only to the first operand.
 */
#define __access_ok(addr, size) \
	((get_fs().seg == KERNEL_DS.seg) || \
	(((unsigned long)(addr) < get_fs().seg) && \
	  (unsigned long)(size) < (get_fs().seg - (unsigned long)(addr))))
39
40/*
41 * When a kernel-mode page fault is taken, the faulting instruction
42 * address is checked against a table of exception_table_entries.
43 * Each entry is a tuple of the address of an instruction that may
44 * be authorized to fault, and the address at which execution should
45 * be resumed instead of the faulting instruction, so as to effect
46 * a workaround.
47 */
48
49/* Assembly somewhat optimized copy routines */
Al Viroac4691f2017-03-28 01:28:09 -040050unsigned long raw_copy_from_user(void *to, const void __user *from,
Richard Kuo75677462011-10-31 18:48:07 -050051 unsigned long n);
Al Viroac4691f2017-03-28 01:28:09 -040052unsigned long raw_copy_to_user(void __user *to, const void *from,
Richard Kuo75677462011-10-31 18:48:07 -050053 unsigned long n);
Al Viroac4691f2017-03-28 01:28:09 -040054#define INLINE_COPY_FROM_USER
55#define INLINE_COPY_TO_USER
Richard Kuo75677462011-10-31 18:48:07 -050056
57__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
58#define __clear_user(a, s) __clear_user_hexagon((a), (s))
59
60#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
61
62/* get around the ifndef in asm-generic/uaccess.h */
63#define __strnlen_user __strnlen_user
64
65extern long __strnlen_user(const char __user *src, long n);
66
67static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
68 long n);
69
70#include <asm-generic/uaccess.h>
71
72/* Todo: an actual accelerated version of this. */
73static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
74 long n)
75{
76 long res = __strnlen_user(src, n);
77
Al Virof35c1e02016-08-18 21:16:49 -040078 if (unlikely(!res))
79 return -EFAULT;
Richard Kuo75677462011-10-31 18:48:07 -050080
81 if (res > n) {
Al Viroac4691f2017-03-28 01:28:09 -040082 long left = raw_copy_from_user(dst, src, n);
83 if (unlikely(left))
84 memset(dst + (n - left), 0, left);
Richard Kuo75677462011-10-31 18:48:07 -050085 return n;
86 } else {
Al Viroac4691f2017-03-28 01:28:09 -040087 long left = raw_copy_from_user(dst, src, res);
88 if (unlikely(left))
89 memset(dst + (res - left), 0, left);
Richard Kuo75677462011-10-31 18:48:07 -050090 return res-1;
91 }
92}
93
94#endif