# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2023 MediaTek Inc.
#
# Authors:
# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
#

import gdb
import math
from linux import utils, constants

def DIV_ROUND_UP(n, d):
    return (n + d - 1) // d

def test_bit(nr, addr):
    if addr.dereference() & (0x1 << nr):
        return True
    else:
        return False

class page_ops():
    ops = None
    def __init__(self):
        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
        if constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64'):
            self.ops = aarch64_page_ops()
        else:
            raise gdb.GdbError('Only support aarch64 now')

class aarch64_page_ops():
    def __init__(self):
        self.SUBSECTION_SHIFT = 21
        self.SUBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024

        if constants.LX_CONFIG_ARM64_64K_PAGES:
            self.SECTION_SIZE_BITS = 29
        else:
            self.SECTION_SIZE_BITS = 27
        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
        if self.VA_BITS > 48:
            if constants.LX_CONFIG_ARM64_16K_PAGES:
                self.VA_BITS_MIN = 47
            else:
                self.VA_BITS_MIN = 48
            # Read the live VA size from TCR_EL1.T1SZ (bits [21:16]).
            tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True)
            tcr_el1 = int(tcr_el1.split()[1], 16)
            self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63)
        else:
            self.VA_BITS_MIN = self.VA_BITS
            self.vabits_actual = self.VA_BITS
        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS

        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
        else:
            self.MAX_ORDER = 10
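        # Illustrative arithmetic (not read from the target): with 4K pages
        # (PAGE_SHIFT = 12) and SECTION_SIZE_BITS = 27, one sparsemem section
        # spans 128 MiB, i.e. 1 << (27 - 12) = 32768 pages, and MAX_ORDER = 10
        # caps a buddy-allocator block at 1 << 10 = 1024 pages (4 MiB).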

        self.MAX_ORDER_NR_PAGES = 1 << self.MAX_ORDER
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT

        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))

        self.struct_page_size = utils.get_page_type().sizeof
        self.STRUCT_PAGE_MAX_SHIFT = int(math.log(self.struct_page_size, 2))

        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE

        self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET
        self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size
        self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff
        self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE

        self.VMALLOC_START = self.MODULES_END
        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024

        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
        self.PHYS_OFFSET = self.memstart_addr
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")
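        # KASAN shadow scaling, mirroring arch/arm64/include/asm/memory.h:
        # generic KASAN maps 8 bytes of kernel VA per shadow byte (shift 3),
        # software tags map 16 bytes per shadow byte (shift 4); PAGE_END is
        # then derived from where the shadow region must end.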
        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
            if constants.LX_CONFIG_KASAN_GENERIC:
                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
                self.KASAN_SHADOW_SCALE_SHIFT = 4
            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

    def SECTION_NR_TO_ROOT(self, sec):
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        return (pfn & ~self.PAGE_SECTION_MASK) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False
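    # Worked example (hypothetical numbers): with SPARSEMEM_EXTREME and
    # SECTIONS_PER_ROOT = 128, section number 300 resolves through
    # __nr_to_section() as mem_section[300 // 128][300 & 127], i.e.
    # mem_section[2][44] -- the same two-level lookup pfn_valid() relies on.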
    def pfn_valid(self, pfn):
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def _PAGE_OFFSET(self, va):
        return (-(1 << va)) & 0xffffffffffffffff

    def _PAGE_END(self, va):
        return (-(1 << (va - 1))) & 0xffffffffffffffff

    def kasan_reset_tag(self, addr):
        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
            return int(addr) | (0xff << 56)
        else:
            return addr

    def __is_lm_address(self, addr):
        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
            return True
        else:
            return False

    def __lm_to_phys(self, addr):
        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET

    def __kimg_to_phys(self, addr):
        return addr - self.kimage_voffset

    def __virt_to_phys_nodebug(self, va):
        untagged_va = self.kasan_reset_tag(va)
        if self.__is_lm_address(untagged_va):
            return self.__lm_to_phys(untagged_va)
        else:
            return self.__kimg_to_phys(untagged_va)

    def __virt_to_phys(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if not self.__is_lm_address(self.kasan_reset_tag(va)):
                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
        return self.__virt_to_phys_nodebug(va)

    def virt_to_phys(self, va):
        return self.__virt_to_phys(va)

    def PFN_PHYS(self, pfn):
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET
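    # Round-trip sketch (illustrative): for a linear-map address va,
    # __virt_to_phys(va) yields va - PAGE_OFFSET + PHYS_OFFSET and
    # __phys_to_virt() reverses it by OR-ing PAGE_OFFSET back in, so
    # __phys_to_virt(__virt_to_phys(va)) == va.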
    def __phys_to_pfn(self, pa):
        return self.PHYS_PFN(pa)

    def __pfn_to_phys(self, pfn):
        return self.PFN_PHYS(pfn)

    def __pa_symbol_nodebug(self, x):
        return self.__kimg_to_phys(x)

    def __phys_addr_symbol(self, x):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if x < self.KERNEL_START or x > self.KERNEL_END:
                raise gdb.GdbError("0x%x exceeds kernel range" % x)
        return self.__pa_symbol_nodebug(x)

    def __pa_symbol(self, x):
        return self.__phys_addr_symbol(x)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        return self.__phys_to_pfn(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        return self.__phys_to_pfn(self.__pa_symbol(x))

    def page_to_pfn(self, page):
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))

    def page_to_phys(self, page):
        return self.__pfn_to_phys(self.page_to_pfn(page))

    def pfn_to_page(self, pfn):
        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())

    def page_to_virt(self, page):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.__va(self.page_to_phys(page))
        else:
            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)

    def virt_to_page(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.pfn_to_page(self.virt_to_pfn(va))
        else:
            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
            return gdb.Value(addr).cast(utils.get_page_type().pointer())
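    # Note on the non-DEBUG_VIRTUAL branches above: page_to_virt() and
    # virt_to_page() skip the physical-address round trip and instead
    # translate an index directly between the vmemmap array (scaled by
    # struct_page_size) and the linear map (scaled by PAGE_SIZE).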
    def page_address(self, page):
        return self.page_to_virt(page)

    def folio_address(self, folio):
        return self.page_address(folio['page'].address)

class LxPFN2Page(gdb.Command):
    """PFN to struct page"""

    def __init__(self):
        super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        pfn = int(argv[0])
        page = page_ops().ops.pfn_to_page(pfn)
        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

LxPFN2Page()

class LxPage2PFN(gdb.Command):
    """struct page to PFN"""

    def __init__(self):
        super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        pfn = page_ops().ops.page_to_pfn(page)
        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))

LxPage2PFN()

class LxPageAddress(gdb.Command):
    """struct page to linear mapping address"""

    def __init__(self):
        super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        addr = page_ops().ops.page_address(page)
        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))

LxPageAddress()
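# Example session (illustrative; actual values depend on the target):
#   (gdb) lx-pfn_to_page 524288
#   pfn_to_page(0x80000) = 0x...
#   (gdb) lx-page_address fffffc0000000000
#   page_address(0xfffffc0000000000) = 0x...
# Note that lx-pfn_to_page takes a decimal PFN, while the address-taking
# commands parse their argument as hexadecimal.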
class LxPage2Phys(gdb.Command):
    """struct page to physical address"""

    def __init__(self):
        super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        struct_page_addr = int(argv[0], 16)
        page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
        phys_addr = page_ops().ops.page_to_phys(page)
        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))

LxPage2Phys()

class LxVirt2Phys(gdb.Command):
    """virtual address to physical address"""

    def __init__(self):
        super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        linear_addr = int(argv[0], 16)
        phys_addr = page_ops().ops.virt_to_phys(linear_addr)
        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (linear_addr, phys_addr))

LxVirt2Phys()

class LxVirt2Page(gdb.Command):
    """virtual address to struct page"""

    def __init__(self):
        super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        linear_addr = int(argv[0], 16)
        page = page_ops().ops.virt_to_page(linear_addr)
        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (linear_addr, page))

LxVirt2Page()

class LxSym2PFN(gdb.Command):
    """symbol address to PFN"""

    def __init__(self):
        super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        sym_addr = int(argv[0], 16)
        pfn = page_ops().ops.sym_to_pfn(sym_addr)
        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))

LxSym2PFN()

class LxPFN2Kaddr(gdb.Command):
    """PFN to kernel address"""

    def __init__(self):
        super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        pfn = int(argv[0])
        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))

LxPFN2Kaddr()
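# The command classes above register themselves with GDB when this module is
# imported; per Documentation/dev-tools/gdb-kernel-debugging.rst they become
# available once the kernel's vmlinux-gdb.py helpers are loaded.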