LCOV - code coverage report
Current view: top level - arch/arm/include/asm - cacheflush.h (source / functions)
Test:         Real
Date:         2020-10-17 15:46:43
Legend:       Neither, QEMU, Real, Both

                        Hit    Total   Coverage
      Lines:             14       16     87.5 %
      Functions:          0        2      0.0 %
      Branches:           0        0         -

           Branch data     Line data    Source code
       1                 :            : /* SPDX-License-Identifier: GPL-2.0-only */
       2                 :            : /*
       3                 :            :  *  arch/arm/include/asm/cacheflush.h
       4                 :            :  *
       5                 :            :  *  Copyright (C) 1999-2002 Russell King
       6                 :            :  */
       7                 :            : #ifndef _ASMARM_CACHEFLUSH_H
       8                 :            : #define _ASMARM_CACHEFLUSH_H
       9                 :            : 
      10                 :            : #include <linux/mm.h>
      11                 :            : 
      12                 :            : #include <asm/glue-cache.h>
      13                 :            : #include <asm/shmparam.h>
      14                 :            : #include <asm/cachetype.h>
      15                 :            : #include <asm/outercache.h>
      16                 :            : 
      17                 :            : #define CACHE_COLOUR(vaddr)     ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
      18                 :            : 
      19                 :            : /*
      20                 :            :  * This flag is used to indicate that the page pointed to by a pte is clean
      21                 :            :  * and does not require cleaning before returning it to the user.
      22                 :            :  */
      23                 :            : #define PG_dcache_clean PG_arch_1
      24                 :            : 
      25                 :            : /*
      26                 :            :  *      MM Cache Management
      27                 :            :  *      ===================
      28                 :            :  *
      29                 :            :  *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
      30                 :            :  *      implement these methods.
      31                 :            :  *
      32                 :            :  *      Start addresses are inclusive and end addresses are exclusive;
      33                 :            :  *      start addresses should be rounded down, end addresses up.
      34                 :            :  *
      35                 :            :  *      See Documentation/core-api/cachetlb.rst for more information.
      36                 :            :  *      Please note that the implementation of these, and the required
      37                 :            :  *      effects are cache-type (VIVT/VIPT/PIPT) specific.
      38                 :            :  *
      39                 :            :  *      flush_icache_all()
      40                 :            :  *
      41                 :            :  *              Unconditionally clean and invalidate the entire icache.
      42                 :            :  *              Currently only needed for cache-v6.S and cache-v7.S, see
      43                 :            :  *              __flush_icache_all for the generic implementation.
      44                 :            :  *
      45                 :            :  *      flush_kern_all()
      46                 :            :  *
      47                 :            :  *              Unconditionally clean and invalidate the entire cache.
      48                 :            :  *
       49                 :            :  *      flush_kern_louis()
       50                 :            :  *
       51                 :            :  *              Flush data cache levels up to the Level of Unification
       52                 :            :  *              Inner Shareable and invalidate the I-cache.
       53                 :            :  *              Only needed from v7 onwards, falls back to flush_cache_all()
       54                 :            :  *              for all other processor versions.
      55                 :            :  *
      56                 :            :  *      flush_user_all()
      57                 :            :  *
      58                 :            :  *              Clean and invalidate all user space cache entries
      59                 :            :  *              before a change of page tables.
      60                 :            :  *
      61                 :            :  *      flush_user_range(start, end, flags)
      62                 :            :  *
      63                 :            :  *              Clean and invalidate a range of cache entries in the
      64                 :            :  *              specified address space before a change of page tables.
      65                 :            :  *              - start - user start address (inclusive, page aligned)
      66                 :            :  *              - end   - user end address   (exclusive, page aligned)
      67                 :            :  *              - flags - vma->vm_flags field
      68                 :            :  *
      69                 :            :  *      coherent_kern_range(start, end)
      70                 :            :  *
      71                 :            :  *              Ensure coherency between the Icache and the Dcache in the
      72                 :            :  *              region described by start, end.  If you have non-snooping
      73                 :            :  *              Harvard caches, you need to implement this function.
      74                 :            :  *              - start  - virtual start address
      75                 :            :  *              - end    - virtual end address
      76                 :            :  *
      77                 :            :  *      coherent_user_range(start, end)
      78                 :            :  *
      79                 :            :  *              Ensure coherency between the Icache and the Dcache in the
      80                 :            :  *              region described by start, end.  If you have non-snooping
      81                 :            :  *              Harvard caches, you need to implement this function.
      82                 :            :  *              - start  - virtual start address
      83                 :            :  *              - end    - virtual end address
      84                 :            :  *
      85                 :            :  *      flush_kern_dcache_area(kaddr, size)
      86                 :            :  *
      87                 :            :  *              Ensure that the data held in page is written back.
      88                 :            :  *              - kaddr  - page address
      89                 :            :  *              - size   - region size
      90                 :            :  *
      91                 :            :  *      DMA Cache Coherency
      92                 :            :  *      ===================
      93                 :            :  *
      94                 :            :  *      dma_inv_range(start, end)
      95                 :            :  *
      96                 :            :  *              Invalidate (discard) the specified virtual address range.
      97                 :            :  *              May not write back any entries.  If 'start' or 'end'
      98                 :            :  *              are not cache line aligned, those lines must be written
      99                 :            :  *              back.
     100                 :            :  *              - start  - virtual start address
     101                 :            :  *              - end    - virtual end address
     102                 :            :  *
     103                 :            :  *      dma_clean_range(start, end)
     104                 :            :  *
     105                 :            :  *              Clean (write back) the specified virtual address range.
     106                 :            :  *              - start  - virtual start address
     107                 :            :  *              - end    - virtual end address
     108                 :            :  *
     109                 :            :  *      dma_flush_range(start, end)
     110                 :            :  *
     111                 :            :  *              Clean and invalidate the specified virtual address range.
     112                 :            :  *              - start  - virtual start address
     113                 :            :  *              - end    - virtual end address
     114                 :            :  */
     115                 :            : 
     116                 :            : struct cpu_cache_fns {
     117                 :            :         void (*flush_icache_all)(void);
     118                 :            :         void (*flush_kern_all)(void);
     119                 :            :         void (*flush_kern_louis)(void);
     120                 :            :         void (*flush_user_all)(void);
     121                 :            :         void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
     122                 :            : 
     123                 :            :         void (*coherent_kern_range)(unsigned long, unsigned long);
     124                 :            :         int  (*coherent_user_range)(unsigned long, unsigned long);
     125                 :            :         void (*flush_kern_dcache_area)(void *, size_t);
     126                 :            : 
     127                 :            :         void (*dma_map_area)(const void *, size_t, int);
     128                 :            :         void (*dma_unmap_area)(const void *, size_t, int);
     129                 :            : 
     130                 :            :         void (*dma_inv_range)(const void *, const void *);
     131                 :            :         void (*dma_clean_range)(const void *, const void *);
     132                 :            :         void (*dma_flush_range)(const void *, const void *);
     133                 :            : } __no_randomize_layout;
     134                 :            : 
     135                 :            : /*
     136                 :            :  * Select the calling method
     137                 :            :  */
     138                 :            : #ifdef MULTI_CACHE
     139                 :            : 
     140                 :            : extern struct cpu_cache_fns cpu_cache;
     141                 :            : 
     142                 :            : #define __cpuc_flush_icache_all         cpu_cache.flush_icache_all
     143                 :            : #define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
     144                 :            : #define __cpuc_flush_kern_louis         cpu_cache.flush_kern_louis
     145                 :            : #define __cpuc_flush_user_all           cpu_cache.flush_user_all
     146                 :            : #define __cpuc_flush_user_range         cpu_cache.flush_user_range
     147                 :            : #define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
     148                 :            : #define __cpuc_coherent_user_range      cpu_cache.coherent_user_range
     149                 :            : #define __cpuc_flush_dcache_area        cpu_cache.flush_kern_dcache_area
     150                 :            : 
     151                 :            : /*
     152                 :            :  * These are private to the dma-mapping API.  Do not use directly.
     153                 :            :  * Their sole purpose is to ensure that data held in the cache
     154                 :            :  * is visible to DMA, or data written by DMA to system memory is
     155                 :            :  * visible to the CPU.
     156                 :            :  */
     157                 :            : #define dmac_inv_range                  cpu_cache.dma_inv_range
     158                 :            : #define dmac_clean_range                cpu_cache.dma_clean_range
     159                 :            : #define dmac_flush_range                cpu_cache.dma_flush_range
     160                 :            : 
     161                 :            : #else
     162                 :            : 
     163                 :            : extern void __cpuc_flush_icache_all(void);
     164                 :            : extern void __cpuc_flush_kern_all(void);
     165                 :            : extern void __cpuc_flush_kern_louis(void);
     166                 :            : extern void __cpuc_flush_user_all(void);
     167                 :            : extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
     168                 :            : extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
     169                 :            : extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
     170                 :            : extern void __cpuc_flush_dcache_area(void *, size_t);
     171                 :            : 
     172                 :            : /*
     173                 :            :  * These are private to the dma-mapping API.  Do not use directly.
     174                 :            :  * Their sole purpose is to ensure that data held in the cache
     175                 :            :  * is visible to DMA, or data written by DMA to system memory is
     176                 :            :  * visible to the CPU.
     177                 :            :  */
     178                 :            : extern void dmac_inv_range(const void *, const void *);
     179                 :            : extern void dmac_clean_range(const void *, const void *);
     180                 :            : extern void dmac_flush_range(const void *, const void *);
     181                 :            : 
     182                 :            : #endif
     183                 :            : 
     184                 :            : /*
     185                 :            :  * Copy user data from/to a page which is mapped into a different
     186                 :            :  * processes address space.  Really, we want to allow our "user
      187                 :            :  * process's address space.  Really, we want to allow our "user
     188                 :            :  */
     189                 :            : extern void copy_to_user_page(struct vm_area_struct *, struct page *,
     190                 :            :         unsigned long, void *, const void *, unsigned long);
     191                 :            : #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
     192                 :            :         do {                                                    \
     193                 :            :                 memcpy(dst, src, len);                          \
     194                 :            :         } while (0)
     195                 :            : 
     196                 :            : /*
     197                 :            :  * Convert calls to our calling convention.
     198                 :            :  */
     199                 :            : 
     200                 :            : /* Invalidate I-cache */
     201                 :            : #define __flush_icache_all_generic()                                    \
     202                 :            :         asm("mcr   p15, 0, %0, c7, c5, 0"                             \
     203                 :            :             : : "r" (0));
     204                 :            : 
     205                 :            : /* Invalidate I-cache inner shareable */
     206                 :            : #define __flush_icache_all_v7_smp()                                     \
     207                 :            :         asm("mcr   p15, 0, %0, c7, c1, 0"                             \
     208                 :            :             : : "r" (0));
     209                 :            : 
     210                 :            : /*
     211                 :            :  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
     212                 :            :  * will fall through to use __flush_icache_all_generic.
     213                 :            :  */
     214                 :            : #if (defined(CONFIG_CPU_V7) && \
     215                 :            :      (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
     216                 :            :         defined(CONFIG_SMP_ON_UP)
     217                 :            : #define __flush_icache_preferred        __cpuc_flush_icache_all
     218                 :            : #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
     219                 :            : #define __flush_icache_preferred        __flush_icache_all_v7_smp
     220                 :            : #elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
     221                 :            : #define __flush_icache_preferred        __cpuc_flush_icache_all
     222                 :            : #else
     223                 :            : #define __flush_icache_preferred        __flush_icache_all_generic
     224                 :            : #endif
     225                 :            : 
     226                 :            : static inline void __flush_icache_all(void)
     227                 :            : {
     228                 :          3 :         __flush_icache_preferred();
     229                 :          3 :         dsb(ishst);
     230                 :            : }
     231                 :            : 
     232                 :            : /*
     233                 :            :  * Flush caches up to Level of Unification Inner Shareable
     234                 :            :  */
     235                 :            : #define flush_cache_louis()             __cpuc_flush_kern_louis()
     236                 :            : 
     237                 :            : #define flush_cache_all()               __cpuc_flush_kern_all()
     238                 :            : 
     239                 :            : static inline void vivt_flush_cache_mm(struct mm_struct *mm)
     240                 :            : {
     241                 :            :         if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
     242                 :            :                 __cpuc_flush_user_all();
     243                 :            : }
     244                 :            : 
     245                 :            : static inline void
     246                 :            : vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
     247                 :            : {
     248                 :            :         struct mm_struct *mm = vma->vm_mm;
     249                 :            : 
     250                 :            :         if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
     251                 :            :                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
     252                 :            :                                         vma->vm_flags);
     253                 :            : }
     254                 :            : 
     255                 :            : static inline void
     256                 :            : vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
     257                 :            : {
     258                 :            :         struct mm_struct *mm = vma->vm_mm;
     259                 :            : 
     260                 :            :         if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
     261                 :            :                 unsigned long addr = user_addr & PAGE_MASK;
     262                 :            :                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
     263                 :            :         }
     264                 :            : }
     265                 :            : 
     266                 :            : #ifndef CONFIG_CPU_CACHE_VIPT
     267                 :            : #define flush_cache_mm(mm) \
     268                 :            :                 vivt_flush_cache_mm(mm)
     269                 :            : #define flush_cache_range(vma,start,end) \
     270                 :            :                 vivt_flush_cache_range(vma,start,end)
     271                 :            : #define flush_cache_page(vma,addr,pfn) \
     272                 :            :                 vivt_flush_cache_page(vma,addr,pfn)
     273                 :            : #else
     274                 :            : extern void flush_cache_mm(struct mm_struct *mm);
     275                 :            : extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
     276                 :            : extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
     277                 :            : #endif
     278                 :            : 
     279                 :            : #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
     280                 :            : 
     281                 :            : /*
     282                 :            :  * flush_cache_user_range is used when we want to ensure that the
     283                 :            :  * Harvard caches are synchronised for the user space address range.
     284                 :            :  * This is used for the ARM private sys_cacheflush system call.
     285                 :            :  */
     286                 :            : #define flush_cache_user_range(s,e)     __cpuc_coherent_user_range(s,e)
     287                 :            : 
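A minimal userspace-side sketch of how this path is reached, assuming a JIT-style caller (the helper name and buffer are hypothetical, not part of this header): the compiler builtin __builtin___clear_cache() is the portable way to issue the ARM-private sys_cacheflush call mentioned above, which the kernel services with flush_cache_user_range().

        /* hypothetical userspace JIT helper, not part of this header */
        #include <string.h>

        static void jit_commit(void *code_buf, const void *insns, size_t len)
        {
                memcpy(code_buf, insns, len);
                /* On ARM Linux this emits the private cacheflush system call,
                 * synchronising the I- and D-caches before the code is run. */
                __builtin___clear_cache((char *)code_buf, (char *)code_buf + len);
        }
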
     288                 :            : /*
     289                 :            :  * Perform necessary cache operations to ensure that data previously
     290                 :            :  * stored within this range of addresses can be executed by the CPU.
     291                 :            :  */
     292                 :            : #define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)
     293                 :            : 
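A minimal kernel-side sketch of the flush_icache_range() contract just stated, assuming a code-patching caller (the helper name is hypothetical): the new instruction is stored through the D-side, then the range is made coherent so the CPU may fetch and execute it.

        /* hypothetical code-patching helper, not part of this header */
        static void example_patch_insn(u32 *site, u32 insn)
        {
                *site = insn;                           /* store the new instruction (D-cache) */
                flush_icache_range((unsigned long)site,
                                   (unsigned long)(site + 1));  /* sync I/D before execution */
        }
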
     294                 :            : /*
     295                 :            :  * Perform necessary cache operations to ensure that the TLB will
     296                 :            :  * see data written in the specified area.
     297                 :            :  */
     298                 :            : #define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)
     299                 :            : 
     300                 :            : /*
     301                 :            :  * flush_dcache_page is used when the kernel has written to the page
     302                 :            :  * cache page at virtual address page->virtual.
     303                 :            :  *
     304                 :            :  * If this page isn't mapped (ie, page_mapping == NULL), or it might
     305                 :            :  * have userspace mappings, then we _must_ always clean + invalidate
     306                 :            :  * the dcache entries associated with the kernel mapping.
     307                 :            :  *
     308                 :            :  * Otherwise we can defer the operation, and clean the cache when we are
     309                 :            :  * about to change to user space.  This is the same method as used on SPARC64.
     310                 :            :  * See update_mmu_cache for the user space part.
     311                 :            :  */
     312                 :            : #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
     313                 :            : extern void flush_dcache_page(struct page *);
     314                 :            : 
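A minimal driver-side sketch of the usage this comment describes (the helper is hypothetical and assumes <linux/highmem.h> and <linux/string.h>): after writing into a page cache page through a kernel mapping, flush_dcache_page() lets the architecture either clean immediately or defer via PG_dcache_clean.

        /* hypothetical: fill a page cache page through a kernel mapping */
        static void example_fill_page(struct page *page, const void *src, size_t len)
        {
                void *dst = kmap_atomic(page);

                memcpy(dst, src, len);
                kunmap_atomic(dst);
                flush_dcache_page(page);        /* make the data visible to user mappings */
        }
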
     315                 :            : static inline void flush_kernel_vmap_range(void *addr, int size)
     316                 :            : {
     317                 :            :         if ((cache_is_vivt() || cache_is_vipt_aliasing()))
     318                 :            :           __cpuc_flush_dcache_area(addr, (size_t)size);
     319                 :            : }
     320                 :            : static inline void invalidate_kernel_vmap_range(void *addr, int size)
     321                 :            : {
     322                 :            :         if ((cache_is_vivt() || cache_is_vipt_aliasing()))
     323                 :            :           __cpuc_flush_dcache_area(addr, (size_t)size);
     324                 :            : }
     325                 :            : 
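A minimal sketch of how the two helpers above bracket I/O on pages that the kernel also touches through a vmap/vmalloc alias, following Documentation/core-api/cachetlb.rst (the helper names are hypothetical and the I/O submission is elided): flush before a device reads data that was written through the alias, invalidate before the CPU reads back through the alias what a device wrote.

        /* hypothetical: vmap_buf aliases pages that the I/O layer will
         * access through the kernel linear mapping */
        static void example_vmap_write_io(void *vmap_buf, int len)
        {
                /* data was stored through the vmap alias; push it out so
                 * I/O reading via the linear mapping sees it */
                flush_kernel_vmap_range(vmap_buf, len);
                /* ... submit the write I/O here ... */
        }

        static void example_vmap_read_io(void *vmap_buf, int len)
        {
                /* ... read I/O has filled the pages via the linear mapping ... */
                invalidate_kernel_vmap_range(vmap_buf, len);
                /* reads through the vmap alias now see the new data */
        }
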
     326                 :            : #define ARCH_HAS_FLUSH_ANON_PAGE
     327                 :          3 : static inline void flush_anon_page(struct vm_area_struct *vma,
     328                 :            :                          struct page *page, unsigned long vmaddr)
     329                 :            : {
     330                 :            :         extern void __flush_anon_page(struct vm_area_struct *vma,
     331                 :            :                                 struct page *, unsigned long);
     332                 :          3 :         if (PageAnon(page))
     333                 :          3 :                 __flush_anon_page(vma, page, vmaddr);
     334                 :          3 : }
     335                 :            : 
     336                 :            : #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
     337                 :            : extern void flush_kernel_dcache_page(struct page *);
     338                 :            : 
     339                 :            : #define flush_dcache_mmap_lock(mapping)         xa_lock_irq(&mapping->i_pages)
     340                 :            : #define flush_dcache_mmap_unlock(mapping)       xa_unlock_irq(&mapping->i_pages)
     341                 :            : 
     342                 :            : #define flush_icache_user_range(vma,page,addr,len) \
     343                 :            :         flush_dcache_page(page)
     344                 :            : 
     345                 :            : /*
     346                 :            :  * We don't appear to need to do anything here.  In fact, if we did, we'd
     347                 :            :  * duplicate cache flushing elsewhere performed by flush_dcache_page().
     348                 :            :  */
     349                 :            : #define flush_icache_page(vma,page)     do { } while (0)
     350                 :            : 
     351                 :            : /*
     352                 :            :  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
     353                 :            :  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
     354                 :            :  * caches, since the direct-mappings of these pages may contain cached
     355                 :            :  * data, we need to do a full cache flush to ensure that writebacks
     356                 :            :  * don't corrupt data placed into these pages via the new mappings.
     357                 :            :  */
     358                 :            : static inline void flush_cache_vmap(unsigned long start, unsigned long end)
     359                 :            : {
     360                 :          3 :         if (!cache_is_vipt_nonaliasing())
     361                 :          0 :                 flush_cache_all();
     362                 :            :         else
     363                 :            :                 /*
     364                 :            :                  * set_pte_at() called from vmap_pte_range() does not
     365                 :            :                  * have a DSB after cleaning the cache line.
     366                 :            :                  */
     367                 :          3 :                 dsb(ishst);
     368                 :            : }
     369                 :            : 
     370                 :            : static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
     371                 :            : {
     372                 :          3 :         if (!cache_is_vipt_nonaliasing())
     373                 :          0 :                 flush_cache_all();
     374                 :            : }
     375                 :            : 
     376                 :            : /*
     377                 :            :  * Memory synchronization helpers for mixed cached vs non cached accesses.
     378                 :            :  *
     379                 :            :  * Some synchronization algorithms have to set states in memory with the
     380                 :            :  * cache enabled or disabled depending on the code path.  It is crucial
     381                 :            :  * to always ensure proper cache maintenance to update main memory right
     382                 :            :  * away in that case.
     383                 :            :  *
     384                 :            :  * Any cached write must be followed by a cache clean operation.
     385                 :            :  * Any cached read must be preceded by a cache invalidate operation.
     386                 :            :  * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
     387                 :            :  * operation is needed to avoid discarding possible concurrent writes to the
     388                 :            :  * accessed memory.
     389                 :            :  *
     390                 :            :  * Also, in order to prevent a cached writer from interfering with an
      391                 :            :  * adjacent non-cached writer, each state variable must be located in
     392                 :            :  * a separate cache line.
     393                 :            :  */
     394                 :            : 
     395                 :            : /*
     396                 :            :  * This needs to be >= the max cache writeback size of all
     397                 :            :  * supported platforms included in the current kernel configuration.
     398                 :            :  * This is used to align state variables to their own cache lines.
     399                 :            :  */
     400                 :            : #define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
     401                 :            : #define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
     402                 :            : 
     403                 :            : /*
     404                 :            :  * There is no __cpuc_clean_dcache_area but we use it anyway for
     405                 :            :  * code intent clarity, and alias it to __cpuc_flush_dcache_area.
     406                 :            :  */
     407                 :            : #define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
     408                 :            : 
     409                 :            : /*
     410                 :            :  * Ensure preceding writes to *p by this CPU are visible to
     411                 :            :  * subsequent reads by other CPUs:
     412                 :            :  */
     413                 :          3 : static inline void __sync_cache_range_w(volatile void *p, size_t size)
     414                 :            : {
     415                 :            :         char *_p = (char *)p;
     416                 :            : 
     417                 :          3 :         __cpuc_clean_dcache_area(_p, size);
     418                 :          3 :         outer_clean_range(__pa(_p), __pa(_p + size));
     419                 :          3 : }
     420                 :            : 
     421                 :            : /*
     422                 :            :  * Ensure preceding writes to *p by other CPUs are visible to
     423                 :            :  * subsequent reads by this CPU.  We must be careful not to
     424                 :            :  * discard data simultaneously written by another CPU, hence the
     425                 :            :  * usage of flush rather than invalidate operations.
     426                 :            :  */
     427                 :            : static inline void __sync_cache_range_r(volatile void *p, size_t size)
     428                 :            : {
     429                 :            :         char *_p = (char *)p;
     430                 :            : 
     431                 :            : #ifdef CONFIG_OUTER_CACHE
     432                 :            :         if (outer_cache.flush_range) {
     433                 :            :                 /*
     434                 :            :                  * Ensure dirty data migrated from other CPUs into our cache
     435                 :            :                  * are cleaned out safely before the outer cache is cleaned:
     436                 :            :                  */
     437                 :            :                 __cpuc_clean_dcache_area(_p, size);
     438                 :            : 
     439                 :            :                 /* Clean and invalidate stale data for *p from outer ... */
     440                 :            :                 outer_flush_range(__pa(_p), __pa(_p + size));
     441                 :            :         }
     442                 :            : #endif
     443                 :            : 
     444                 :            :         /* ... and inner cache: */
     445                 :          3 :         __cpuc_flush_dcache_area(_p, size);
     446                 :            : }
     447                 :            : 
     448                 :            : #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
     449                 :            : #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
     450                 :            : 
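A minimal sketch of the pattern described in the "Memory synchronization helpers" comment above, with hypothetical names: the state variable owns a full writeback granule, a cached writer publishes it with sync_cache_w(), and a cached reader that may race a non-cached writer fetches it with sync_cache_r().

        /* hypothetical state shared between cached and non-cached observers;
         * the aligned attribute pads the struct to a whole writeback granule
         * so nothing else shares its cache line */
        struct example_flag {
                int state;
        } __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));

        static struct example_flag example_flag;

        static void example_publish(int state)          /* cached writer */
        {
                example_flag.state = state;
                sync_cache_w(&example_flag.state);      /* clean inner + outer caches */
        }

        static int example_observe(void)                /* cached reader */
        {
                sync_cache_r(&example_flag.state);      /* flush, not a plain invalidate */
                return example_flag.state;
        }
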
     451                 :            : /*
     452                 :            :  * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
     453                 :            :  * To do so we must:
     454                 :            :  *
     455                 :            :  * - Clear the SCTLR.C bit to prevent further cache allocations
     456                 :            :  * - Flush the desired level of cache
     457                 :            :  * - Clear the ACTLR "SMP" bit to disable local coherency
     458                 :            :  *
     459                 :            :  * ... and so without any intervening memory access in between those steps,
     460                 :            :  * not even to the stack.
     461                 :            :  *
     462                 :            :  * WARNING -- After this has been called:
     463                 :            :  *
     464                 :            :  * - No ldrex/strex (and similar) instructions must be used.
     465                 :            :  * - The CPU is obviously no longer coherent with the other CPUs.
     466                 :            :  * - This is unlikely to work as expected if Linux is running non-secure.
     467                 :            :  *
     468                 :            :  * Note:
     469                 :            :  *
     470                 :            :  * - This is known to apply to several ARMv7 processor implementations,
     471                 :            :  *   however some exceptions may exist.  Caveat emptor.
     472                 :            :  *
     473                 :            :  * - The clobber list is dictated by the call to v7_flush_dcache_*.
      474                 :            :  *   fp is preserved to the stack explicitly prior to disabling the cache
     475                 :            :  *   since adding it to the clobber list is incompatible with having
     476                 :            :  *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
      477                 :            :  *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
     478                 :            :  */
     479                 :            : #define v7_exit_coherency_flush(level) \
     480                 :            :         asm volatile( \
     481                 :            :         ".arch     armv7-a \n\t" \
     482                 :            :         "stmfd     sp!, {fp, ip} \n\t" \
     483                 :            :         "mrc       p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
     484                 :            :         "bic       r0, r0, #"__stringify(CR_C)" \n\t" \
     485                 :            :         "mcr       p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
     486                 :            :         "isb       \n\t" \
     487                 :            :         "bl        v7_flush_dcache_"__stringify(level)" \n\t" \
     488                 :            :         "mrc       p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
     489                 :            :         "bic       r0, r0, #(1 << 6) @ disable local coherency \n\t" \
     490                 :            :         "mcr       p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
     491                 :            :         "isb       \n\t" \
     492                 :            :         "dsb       \n\t" \
     493                 :            :         "ldmfd     sp!, {fp, ip}" \
     494                 :            :         : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
     495                 :            :               "r9","r10","lr","memory" )
     496                 :            : 
     497                 :            : void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
     498                 :            :                              void *kaddr, unsigned long len);
     499                 :            : 
     500                 :            : 
     501                 :            : #ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
     502                 :            : void check_cpu_icache_size(int cpuid);
     503                 :            : #else
     504                 :            : static inline void check_cpu_icache_size(int cpuid) { }
     505                 :            : #endif
     506                 :            : 
     507                 :            : #endif
    

Generated by: LCOV version 1.14