
TOMOYO Linux Cross Reference
Linux/arch/sparc/include/asm/mmu_context_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/spitfire.h>
#include <asm/adi_64.h>
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

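/* The mm most recently given the secondary MMU context on each cpu;
 * switch_mm() below records it before loading the new context.
 */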
DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);

#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);

void __tsb_context_switch(unsigned long pgd_pa,
                          struct tsb_config *tsb_base,
                          struct tsb_config *tsb_huge,
                          unsigned long tsb_descr_pa,
                          unsigned long secondary_ctx);

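/* Hand the assembly helper everything it needs for this mm: the
 * physical address of the page tables, the base (and, if one has been
 * allocated, huge-page) TSB configuration, and the hardware context
 * value to install.
 */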
static inline void tsb_context_switch_ctx(struct mm_struct *mm,
                                          unsigned long ctx)
{
        __tsb_context_switch(__pa(mm->pgd),
                             &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                             (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
                              &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
#else
                             NULL
#endif
                             , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
                             ctx);
}

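/* Plain TSB switch, passing a hardware context value of 0; switch_mm()
 * below uses tsb_context_switch_ctx() directly to install the real
 * context bits.
 */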
#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)

void tsb_grow(struct mm_struct *mm,
              unsigned long tsb_index,
              unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
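/* The ASI_DMMU store below is patched at boot into an equivalent
 * ASI_MMU store on sun4v, via the .sun4v_1insn_patch section.
 */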
#define load_secondary_context(__mm) \
        __asm__ __volatile__( \
        "\n661: stxa            %0, [%1] %2\n" \
        "       .section        .sun4v_1insn_patch, \"ax\"\n" \
        "       .word           661b\n" \
        "       stxa            %0, [%1] %3\n" \
        "       .previous\n" \
        "       flush           %%g6\n" \
        : /* No outputs */ \
        : "r" (CTX_HWBITS((__mm)->context)), \
          "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long ctx_valid, flags;
        int cpu = smp_processor_id();

        per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);
        ctx_valid = CTX_VALID(mm->context);
        if (!ctx_valid)
                get_new_mmu_context(mm);

        /* We have to be extremely careful here or else we will miss
         * a TSB grow if we switch back and forth between a kernel
         * thread and an address space which has its TSB size increased
         * on another processor.
         *
         * It is possible to play some games in order to optimize the
         * switch, but the safest thing to do is to unconditionally
         * perform the secondary context load and the TSB context switch.
         *
         * For reference the bad case is, for address space "A":
         *
         *              CPU 0                   CPU 1
         *      run address space A
         *      set cpu0's bits in cpu_vm_mask
         *      switch to kernel thread, borrow
         *      address space A via entry_lazy_tlb
         *                                      run address space A
         *                                      set cpu1's bit in cpu_vm_mask
         *                                      flush_tlb_pending()
         *                                      reset cpu_vm_mask to just cpu1
         *                                      TSB grow
         *      run address space A
         *      context was valid, so skip
         *      TSB context switch
         *
         * At that point cpu0 continues to use a stale TSB, the one from
         * before the TSB grow performed on cpu1.  cpu1 did not cross-call
         * cpu0 to update its TSB because at that point the cpu_vm_mask
         * only had cpu1 set in it.
         */
        tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

        /* Any time a processor runs a context on an address space
         * for the first time, we must flush that context out of the
         * local TLB.
         */
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
                               SECONDARY_CONTEXT);
        }
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)

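/* The ADI control registers below are accessed through raw .word
 * opcode encodings (the intended instructions appear in the adjacent
 * comments), presumably so the file assembles even with toolchains
 * that do not know the %mcdper/%pmcdper register names.
 */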
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        /* Save the current state of MCDPER register for the process
         * we are switching from
         */
        if (adi_capable()) {
                register unsigned long tmp_mcdper;

                __asm__ __volatile__(
                        ".word 0x83438000\n\t"  /* rd  %mcdper, %g1 */
                        "mov %%g1, %0\n\t"
                        : "=r" (tmp_mcdper)
                        :
                        : "g1");
                if (tmp_mcdper)
                        set_tsk_thread_flag(prev, TIF_MCDPER);
                else
                        clear_tsk_thread_flag(prev, TIF_MCDPER);
        }
}

#define finish_arch_post_lock_switch    finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        /* Restore the state of MCDPER register for the new process
         * just switched to.
         */
        if (adi_capable()) {
                register unsigned long tmp_mcdper;

                tmp_mcdper = test_thread_flag(TIF_MCDPER);
                __asm__ __volatile__(
                        "mov %0, %%g1\n\t"
174                         ".word 0x9d800001\n\t"  /* wr %g0, %g1, %mcdper" */
175                         ".word 0xaf902001\n\t"  /* wrpr %g0, 1, %pmcdper */
176                         :
177                         : "ir" (tmp_mcdper)
178                         : "g1");
179                 if (current && current->mm && current->mm->context.adi) {
180                         struct pt_regs *regs;
181 
182                         regs = task_pt_regs(current);
183                         regs->tstate |= TSTATE_MCDE;
184                 }
185         }
186 }
187 
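/* ADI keeps its version tags in the uppermost adi_nbits() bits of a
 * virtual address, so the untag mask preserves everything below them.
 */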
#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
        return -1UL >> adi_nbits();
}

#include <asm-generic/mmu_context.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */
