/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H

#include <linux/radix-tree.h>
#include <linux/refcount.h>

/* Generic bits for GMAP notification on DAT table entry changes. */
#define GMAP_NOTIFY_SHADOW	0x2
#define GMAP_NOTIFY_MPROT	0x1

/* Status bits only for huge segment entries */
#define _SEGMENT_ENTRY_GMAP_IN		0x8000	/* invalidation notify bit */
#define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* dirty (migration) */

/**
 * struct gmap - guest address space
 * @list: list head for the mm->context gmap list
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @ref_count: reference counter for the gmap structure
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: end of the guest address space covered by @asce
 * @private: private data pointer, set by the gmap user (KVM)
 * @pfault_enabled: defines if pfaults are applicable for the guest
 * @guest_handle: protected virtual machine handle for the ultravisor
 * @host_to_rmap: radix tree with gmap_rmap lists
 * @children: list of shadow gmap structures
 * @pt_list: list of all page tables used in the shadow guest address space
 * @shadow_lock: spinlock to protect the shadow gmap list
 * @parent: pointer to the parent gmap for shadow guest address spaces
 * @orig_asce: ASCE for which the shadow page table has been created
 * @edat_level: edat level to be used for the shadow translation
 * @removed: flag to indicate if a shadow guest address space has been removed
 * @initialized: flag to indicate if a shadow guest address space can be used
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	refcount_t ref_count;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
	/* only set for protected virtual machines */
	unsigned long guest_handle;
	/* Additional data for shadow guest address spaces */
	struct radix_tree_root host_to_rmap;
	struct list_head children;
	struct list_head pt_list;
	spinlock_t shadow_lock;
	struct gmap *parent;
	unsigned long orig_asce;
	int edat_level;
	bool removed;
	bool initialized;
};

/**
 * struct gmap_rmap - reverse mapping for shadow page table entries
 * @next: pointer to next rmap in the list
 * @raddr: virtual rmap address in the shadow guest address space
 */
struct gmap_rmap {
	struct gmap_rmap *next;
	unsigned long raddr;
};

#define gmap_for_each_rmap(pos, head) \
	for (pos = (head); pos; pos = pos->next)

#define gmap_for_each_rmap_safe(pos, n, head) \
	for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
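/*
 * Illustrative usage sketch (not part of the upstream file): the _safe
 * variant caches pos->next before the loop body runs, so the current
 * entry may be freed while walking a list taken from host_to_rmap:
 *
 *	struct gmap_rmap *rmap, *rnext;
 *
 *	gmap_for_each_rmap_safe(rmap, rnext, head)
 *		kfree(rmap);
 *
 * The plain gmap_for_each_rmap() would dereference freed memory here.
 */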
/**
 * struct gmap_notifier - notify function block for page invalidation
 * @list: list head for the notifier list
 * @rcu: rcu head for RCU-safe removal from the notifier list
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	struct rcu_head rcu;
	void (*notifier_call)(struct gmap *gmap, unsigned long start,
			      unsigned long end);
};

static inline int gmap_is_shadow(struct gmap *gmap)
{
	return !!gmap->parent;
}

struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);

void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
struct gmap *gmap_get_enabled(void);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *gmap, unsigned long gaddr);
void gmap_unlink(struct mm_struct *mm, unsigned long *table, unsigned long vmaddr);

int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);

struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level);
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake);
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake);
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake);
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake);
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection, int *fake);
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);

void gmap_register_pte_notifier(struct gmap_notifier *nb);
void gmap_unregister_pte_notifier(struct gmap_notifier *nb);

int gmap_mprotect_notify(struct gmap *gmap, unsigned long start,
			 unsigned long len, int prot);

void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr);
int s390_disable_cow_sharing(void);
void s390_unlist_old_asce(struct gmap *gmap);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, bool interruptible);
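/*
 * Illustrative usage sketch (hypothetical callback, not part of the
 * upstream file): a pte notifier block is registered once; its callback
 * then runs when a guest DAT entry that was protected with notification
 * (e.g. via gmap_mprotect_notify()) is invalidated:
 *
 *	static void my_pte_notifier(struct gmap *gmap, unsigned long start,
 *				    unsigned long end)
 *	{
 *		// react to the invalidated guest range [start, end]
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_pte_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */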
/**
 * s390_uv_destroy_range - Destroy a range of pages in the given mm.
 * @mm: the mm to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function calls cond_resched(), so it should not generate stalls, but
 * it will otherwise only return once it has completed.
 */
static inline void s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
					 unsigned long end)
{
	(void)__s390_uv_destroy_range(mm, start, end, false);
}

/**
 * s390_uv_destroy_range_interruptible - Destroy a range of pages in the
 * given mm, but stop when a fatal signal is received.
 * @mm: the mm to operate on
 * @start: the start of the range
 * @end: the end of the range
 *
 * This function calls cond_resched(), so it should not generate stalls. If
 * a fatal signal is received, it returns -EINTR immediately, without
 * finishing the destruction of the whole range. Upon successful completion,
 * 0 is returned.
 */
static inline int s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsigned long start,
						      unsigned long end)
{
	return __s390_uv_destroy_range(mm, start, end, true);
}
#endif /* _ASM_S390_GMAP_H */
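/*
 * Illustrative usage sketch (not part of the upstream file): callers that
 * must stay killable should prefer the interruptible variant and propagate
 * its result:
 *
 *	rc = s390_uv_destroy_range_interruptible(mm, start, end);
 *	if (rc == -EINTR)
 *		return rc;	// fatal signal pending, range only partially done
 */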