
TOMOYO Linux Cross Reference
Linux/arch/loongarch/include/asm/kvm_mmu.h


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_MMU_H__
#define __ASM_LOONGARCH_KVM_MMU_H__

#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)

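/*
 * Flags carried in kvm_ptw_ctx.flag: _KVM_FLUSH_PGTABLE requests that the
 * walk flush what it tears down, while _KVM_HAS_PGMASK indicates that
 * ctx->mask and ctx->gfn are valid (dirty-log write protection, see
 * kvm_arch_mmu_enable_log_dirty_pt_masked).
 */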
#define _KVM_FLUSH_PGTABLE      0x1
#define _KVM_HAS_PGMASK         0x2
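/*
 * Build a PTE value from a page frame number and protection bits, and
 * extract the page frame number back out of a PTE.
 */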
#define kvm_pfn_pte(pfn, prot)  (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define kvm_pte_pfn(x)          ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT))

typedef unsigned long kvm_pte_t;
typedef struct kvm_ptw_ctx kvm_ptw_ctx;
typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);

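/*
 * Context passed through a guest (GPA) page table walk: ops is the
 * callback applied to the entries that are visited, flag holds the
 * _KVM_* bits defined above.
 */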
struct kvm_ptw_ctx {
        kvm_pte_ops     ops;
        unsigned long   flag;

        /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */
        unsigned long   mask;
        unsigned long   gfn;

        /* page walk mmu info */
        unsigned int    level;
        unsigned long   pgtable_shift;
        unsigned long   invalid_entry;
        unsigned long   *invalid_ptes;
        unsigned int    *pte_shifts;
        void            *opaque;

        /* free pte table page list */
        struct list_head list;
};

kvm_pte_t *kvm_pgd_alloc(void);

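/*
 * Install a PTE with WRITE_ONCE() so that concurrent lockless walkers
 * never observe a torn value.
 */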
static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
{
        WRITE_ONCE(*ptep, val);
}

static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }

static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
{
        return pte | _PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
{
        return pte & ~_PAGE_ACCESSED;
}

static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
{
        return pte | _PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
{
        return pte & ~_PAGE_DIRTY;
}

static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
{
        return pte | _PAGE_HUGE;
}

static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
{
        return pte & ~_PAGE_HUGE;
}

static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
{
        return ctx->flag & _KVM_FLUSH_PGTABLE;
}

static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
                                        phys_addr_t addr)
{
        return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
}

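/*
 * Return the end of the address range covered by the current entry,
 * clamped to @end.  Comparing "boundary - 1 < end - 1" keeps the test
 * correct even if the boundary wraps past the top of the address space.
 */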
static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
                                phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t boundary, size;

        size = 0x1UL << ctx->pgtable_shift;
        boundary = (addr + size) & ~(size - 1);
        return (boundary - 1 < end - 1) ? boundary : end;
}

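/*
 * At the leaf level (level 0) presence is tracked with _PAGE_PRESENT;
 * at directory levels an entry is present whenever it differs from the
 * level's invalid table pointer.
 */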
static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
        if (!ctx || ctx->level == 0)
                return !!(*entry & _PAGE_PRESENT);

        return *entry != ctx->invalid_entry;
}

static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
{
        return *entry == ctx->invalid_entry;
}

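/*
 * Step down one level into a child table (enter) or back up to the
 * parent (exit), switching to the shift and invalid-entry value that
 * belong to the new level.
 */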
static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
{
        ctx->level--;
        ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
        ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
{
        ctx->level++;
        ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
        ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
}

#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */
