LCOV - code coverage report
Current view: top level - include/linux - uaccess.h
Test: combined.info
Date: 2022-03-28 15:32:58
Coverage:   Lines: 35 / 55 (63.6 %)   Functions: 0 / 0 (-)   Branches: 73 / 390 (18.7 %)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to
 * bother with that.  They should not be used directly; they are used to
 * implement the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user(),
 * __copy_{to,from}_user_inatomic()) that are used instead.  Out of those,
 * the __... ones are inlined.  Plain copy_{to,from}_user() might or might
 * not be inlined.  If you want them inlined, have asm/uaccess.h define
 * INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
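
/*
 * Illustrative caller (hypothetical, not part of this header): the
 * non-zero return value of the copy routines is the number of bytes
 * that could NOT be copied, which callers conventionally fold into
 * -EFAULT rather than reporting a partial copy.
 */
static inline int example_fetch_arg(void *karg, const void __user *uarg,
                                    unsigned long size)
{
        if (copy_from_user(karg, uarg, size))   /* bytes left uncopied */
                return -EFAULT;
        return 0;
}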

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;
        might_fault();
        if (likely(access_ok(from, n))) {
                kasan_check_write(to, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
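
/*
 * Hypothetical read()-style helper showing the usual copy_to_user()
 * pattern: any bytes left uncopied become -EFAULT, otherwise the number
 * of bytes handed to user space is returned. (Name and data are made up.)
 */
static inline ssize_t example_read_greeting(char __user *buf, size_t count)
{
        static const char msg[] = "hello\n";
        size_t len = count < sizeof(msg) ? count : sizeof(msg);

        if (copy_to_user(buf, msg, len))
                return -EFAULT;
        return len;
}
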
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n) && access_ok(from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif
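
/*
 * Hypothetical compat-path sketch (presumes CONFIG_COMPAT): relay an
 * opaque blob from one user buffer to another, e.g. while translating
 * a 32-bit ioctl, without staging it through a kernel buffer.
 */
static inline int example_relay_blob(void __user *dst,
                                     const void __user *src,
                                     unsigned long n)
{
        if (copy_in_user(dst, src, n))
                return -EFAULT;
        return 0;
}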

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}
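
/*
 * Sketch of the canonical pairing (hypothetical helper): with page
 * faults disabled, __copy_from_user_inatomic() cannot sleep; a missing
 * page shows up as a non-zero "bytes left" return instead of a fault-in.
 */
static inline unsigned long example_peek_user(void *dst,
                                              const void __user *src,
                                              unsigned long n)
{
        unsigned long left;

        pagefault_disable();
        left = __copy_from_user_inatomic(dst, src, n);
        pagefault_enable();

        return left;    /* 0 on success, bytes not copied on fault */
}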

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
        return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP, so the handler won't be
 * reliably disabled.  in_atomic() will report different values based on
 * !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif          /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
                      size_t usize)
{
        size_t size = min(ksize, usize);
        size_t rest = max(ksize, usize) - size;

        /* Deal with trailing bytes. */
        if (usize < ksize) {
                memset(dst + size, 0, rest);
        } else if (usize > ksize) {
                int ret = check_zeroed_user(src + size, rest);
                if (ret <= 0)
                        return ret ?: -E2BIG;
        }
        /* Copy the interoperable parts of the struct. */
        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
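
/*
 * Illustrative use (hypothetical): chase a possibly-corrupt pointer
 * without oopsing; probe_kernel_read() turns a bad address into -EFAULT
 * instead of a crash. Assumes the next pointer is the node's first field.
 */
static inline long example_follow_next(const void *node, void **next)
{
        return probe_kernel_read(next, node, sizeof(*next));
}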

/*
 * probe_user_read(): safely attempt to read from a location in user space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_user_read(void *dst, const void __user *src, size_t size);
extern long __probe_user_read(void *dst, const void __user *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

/*
 * probe_user_write(): safely attempt to write to a location in user space
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
                                       long count);
extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
                                     long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)              \
        probe_kernel_read(&retval, addr, sizeof(retval))
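
/*
 * Sketch (hypothetical caller): dereference one word through a pointer
 * of dubious provenance; on fault the macro yields -EFAULT instead of
 * an oops.
 */
static inline int example_deref_word(const unsigned long *ptr,
                                     unsigned long *out)
{
        unsigned long val;

        if (probe_kernel_address(ptr, val))
                return -EFAULT;

        *out = val;
        return 0;
}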

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
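
/*
 * Minimal sketch of the user_access_begin()/user_access_end() protocol
 * (hypothetical helper): access_ok() is hoisted out once so that the
 * unsafe_*_user() accessors can skip per-access checks; every exit path,
 * including the fault label, must pass through user_access_end().
 */
static inline int example_get_pair(u32 *a, u32 *b, const u32 __user *uptr)
{
        if (!user_access_begin(uptr, 2 * sizeof(u32)))
                return -EFAULT;

        unsafe_get_user(*a, uptr, Efault);
        unsafe_get_user(*b, uptr + 1, Efault);
        user_access_end();
        return 0;

Efault:
        user_access_end();
        return -EFAULT;
}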

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
                   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len);
#endif

#endif          /* __LINUX_UACCESS_H__ */

Generated by: LCOV version 1.14