
TOMOYO Linux Cross Reference
Linux/arch/arm64/kvm/va_layout.c




// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
        addr &= va_mask;
        addr |= tag_val << tag_lsb;
        return addr;
}
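
/*
 * Worked example with invented values: assume tag_lsb == 38 (so
 * va_mask == GENMASK_ULL(37, 0)) and tag_val == 0x1ff. A kernel linear
 * address such as 0xffff800012345678 is masked down to its low 38 bits
 * (0x12345678) and tagged with 0x1ff << 38, giving the hyp VA
 * 0x7fc012345678.
 */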

/*
 * Store a hyp VA <-> PA offset into an EL2-owned variable.
 */
static void init_hyp_physvirt_offset(void)
{
        u64 kern_va, hyp_va;

        /* Compute the offset from the hyp VA and PA of a random symbol. */
        kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
        hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
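
/*
 * Example with invented numbers: if __hyp_text_start sits at PA
 * 0x40010000 and its hyp VA comes out as 0x7fc000010000, the offset is
 * 0x40010000 - 0x7fc000010000 (a negative value), and adding it to any
 * hyp VA recovers the corresponding PA, which is what the nVHE
 * __hyp_pa() helper relies on.
 */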

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
        phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
        u64 hyp_va_msb;

        /* Where is my RAM region? */
        hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
        hyp_va_msb ^= BIT(vabits_actual - 1);

        tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
                        (u64)(high_memory - 1));

        va_mask = GENMASK_ULL(tag_lsb - 1, 0);
        tag_val = hyp_va_msb;

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
                /* We have some free bits to insert a random tag. */
                tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
        }
        tag_val >>= tag_lsb;

        init_hyp_physvirt_offset();
}
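
/*
 * Sketch with assumed numbers: if the linear map of RAM spans
 * 0xffff000000000000 .. 0xffff00007fffffff (2 GiB of DRAM), the XOR of
 * the first and last linear addresses is at most 0x7fffffff, fls64()
 * returns 31, and tag_lsb == 31: every VA bit from 31 up to
 * vabits_actual - 2 is then free to carry the random tag.
 */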

/*
 * The .hyp.reloc ELF section contains a list of positions in hyp code/data that
 * contain kimg VAs but will be accessed only in hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
        int32_t *rel;
        int32_t *begin = (int32_t *)__hyp_reloc_begin;
        int32_t *end = (int32_t *)__hyp_reloc_end;

        for (rel = begin; rel < end; ++rel) {
                uintptr_t *ptr, kimg_va;

                /*
                 * Each entry contains a 32-bit relative offset from itself
                 * to a kimg VA position.
                 */
                ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

                /* Read the kimg VA value at the relocation address. */
                kimg_va = *ptr;

                /* Convert to hyp VA and store value back. */
                *ptr = __early_kern_hyp_va((uintptr_t)kimg_va);
        }
}
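
/*
 * Illustrative entry (numbers invented): an entry holding 0x1000 at
 * address A designates a kimg VA stored at A + 0x1000. The write goes
 * through lm_alias() since the kernel-image mapping of hyp code/data
 * may be read-only by the time this runs, while the linear-map alias
 * of the same page remains writable.
 */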

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
        u32 insn = AARCH64_BREAK_FAULT;

        switch (n) {
        case 0:
                insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
                                                          AARCH64_INSN_VARIANT_64BIT,
                                                          rn, rd, va_mask);
                break;

        case 1:
                /* ROR is a variant of EXTR with Rm = Rn */
                insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
                                             rn, rn, rd,
                                             tag_lsb);
                break;

        case 2:
                insn = aarch64_insn_gen_add_sub_imm(rd, rn,
                                                    tag_val & GENMASK(11, 0),
                                                    AARCH64_INSN_VARIANT_64BIT,
                                                    AARCH64_INSN_ADSB_ADD);
                break;

        case 3:
                insn = aarch64_insn_gen_add_sub_imm(rd, rn,
                                                    tag_val & GENMASK(23, 12),
                                                    AARCH64_INSN_VARIANT_64BIT,
                                                    AARCH64_INSN_ADSB_ADD);
                break;

        case 4:
                /* ROR is a variant of EXTR with Rm = Rn */
                insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
                                             rn, rn, rd, 64 - tag_lsb);
                break;
        }

        return insn;
}
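
/*
 * Taken together, the five cases above emit (modulo register
 * allocation) a sequence of the shape:
 *
 *      and  rd, rn, #va_mask
 *      ror  rd, rn, #tag_lsb
 *      add  rd, rn, #(tag_val & GENMASK(11, 0))
 *      add  rd, rn, #(tag_val & GENMASK(23, 12))
 *      ror  rd, rn, #(64 - tag_lsb)
 *
 * The first rotate brings the tag position down to bit 0 so the two
 * 12-bit immediate adds can insert tag_val, and the second rotate puts
 * the address back together.
 */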

void __init kvm_update_va_mask(struct alt_instr *alt,
                               __le32 *origptr, __le32 *updptr, int nr_inst)
{
        int i;

        BUG_ON(nr_inst != 5);

        for (i = 0; i < nr_inst; i++) {
                u32 rd, rn, insn, oinsn;

                /*
                 * VHE doesn't need any address translation, let's NOP
                 * everything.
                 *
                 * Alternatively, if the tag is zero (because the layout
                 * dictates it and we don't have any spare bits in the
                 * address), NOP everything after masking the kernel VA.
                 */
                if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) {
                        updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
                        continue;
                }

                oinsn = le32_to_cpu(origptr[i]);
                rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
                rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

                insn = compute_instruction(i, rd, rn);
                BUG_ON(insn == AARCH64_BREAK_FAULT);

                updptr[i] = cpu_to_le32(insn);
        }
}
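
/*
 * The five instruction slots patched here are reserved by the
 * kern_hyp_va assembly macro in asm/kvm_mmu.h, whose placeholder
 * and/ror/add/add/ror sequence sits in an alternative_cb block that
 * names kvm_update_va_mask as its callback.
 */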

void kvm_patch_vector_branch(struct alt_instr *alt,
                             __le32 *origptr, __le32 *updptr, int nr_inst)
{
        u64 addr;
        u32 insn;

        BUG_ON(nr_inst != 4);

        if (!cpus_have_cap(ARM64_SPECTRE_V3A) ||
            WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)))
                return;

        /*
         * Compute HYP VA by using the same computation as kern_hyp_va().
         */
        addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

        /* Use PC[10:7] to branch to the same vector in KVM */
        addr |= ((u64)origptr & GENMASK_ULL(10, 7));

        /*
         * Branch over the preamble in order to avoid the initial store on
         * the stack (which we already perform in the hardening vectors).
         */
        addr += KVM_VECTOR_PREAMBLE;

        /* movz x0, #(addr & 0xffff) */
        insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                                         (u16)addr,
                                         0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_ZERO);
        *updptr++ = cpu_to_le32(insn);

        /* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
        insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                                         (u16)(addr >> 16),
                                         16,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        *updptr++ = cpu_to_le32(insn);

        /* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
        insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                                         (u16)(addr >> 32),
                                         32,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        *updptr++ = cpu_to_le32(insn);

        /* br x0 */
        insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
                                           AARCH64_INSN_BRANCH_NOLINK);
        *updptr++ = cpu_to_le32(insn);
}
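
/*
 * The patched sequence therefore reads, per the comments above:
 *
 *      movz x0, #(addr & 0xffff)
 *      movk x0, #((addr >> 16) & 0xffff), lsl #16
 *      movk x0, #((addr >> 32) & 0xffff), lsl #32
 *      br   x0
 *
 * Note that only the low 48 bits of the vector address are
 * materialised before the indirect branch.
 */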

static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
        u32 insn, oinsn, rd;

        BUG_ON(nr_inst != 4);

        /* Compute target register */
        oinsn = le32_to_cpu(*origptr);
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

        /* movz rd, #(val & 0xffff) */
        insn = aarch64_insn_gen_movewide(rd,
                                         (u16)val,
                                         0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_ZERO);
        *updptr++ = cpu_to_le32(insn);

        /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
        insn = aarch64_insn_gen_movewide(rd,
                                         (u16)(val >> 16),
                                         16,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        *updptr++ = cpu_to_le32(insn);

        /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
        insn = aarch64_insn_gen_movewide(rd,
                                         (u16)(val >> 32),
                                         32,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        *updptr++ = cpu_to_le32(insn);

        /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
        insn = aarch64_insn_gen_movewide(rd,
                                         (u16)(val >> 48),
                                         48,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_KEEP);
        *updptr++ = cpu_to_le32(insn);
}
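
/*
 * Worked example with an invented value: for val == 0x0123456789abcdef
 * the generated sequence is
 *
 *      movz rd, #0xcdef
 *      movk rd, #0x89ab, lsl #16
 *      movk rd, #0x4567, lsl #32
 *      movk rd, #0x0123, lsl #48
 *
 * i.e. a full 64-bit immediate load into the register used by the
 * original (patched-over) instruction.
 */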

void kvm_get_kimage_voffset(struct alt_instr *alt,
                            __le32 *origptr, __le32 *updptr, int nr_inst)
{
        generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}

void kvm_compute_final_ctr_el0(struct alt_instr *alt,
                               __le32 *origptr, __le32 *updptr, int nr_inst)
{
        generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
                       origptr, updptr, nr_inst);
}
