# SPDX-License-Identifier: GPL-2.0-only
#
# gdb helper commands and functions for Linux kernel debugging
#
#  routines to introspect page table
#
# Authors:
#  Dmitrii Bundin <dmitrii.bundin.a@gmail.com>
#

import gdb

from linux import utils

PHYSICAL_ADDRESS_MASK = gdb.parse_and_eval('0xfffffffffffff')


def page_mask(level=1):
    # 4KB
    if level == 1:
        return gdb.parse_and_eval('(u64) ~0xfff')
    # 2MB
    elif level == 2:
        return gdb.parse_and_eval('(u64) ~0x1fffff')
    # 1GB
    elif level == 3:
        return gdb.parse_and_eval('(u64) ~0x3fffffff')
    else:
        raise Exception(f'Unknown page level: {level}')


# page_offset_base in case CONFIG_DYNAMIC_MEMORY_LAYOUT is disabled
POB_NO_DYNAMIC_MEM_LAYOUT = '0xffff888000000000'


def _page_offset_base():
    pob_symbol = gdb.lookup_global_symbol('page_offset_base')
    pob = pob_symbol.name if pob_symbol else POB_NO_DYNAMIC_MEM_LAYOUT
    return gdb.parse_and_eval(pob)
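
# A minimal sketch, not part of the original helpers: _page_offset_base()
# returns the base of the kernel's direct physical map on x86-64, so the
# debugger can read memory at a physical address P through the virtual
# address _page_offset_base() + P. This is exactly how entry_va() below
# locates page-table entries. Assumes a live x86-64 target whose direct map
# covers the address.
def _read_phys_u64(phys_addr):
    va = _page_offset_base() + phys_addr
    return int.from_bytes(
        memoryview(gdb.selected_inferior().read_memory(va, 8)),
        "little")
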

def is_bit_defined_tupled(data, offset):
    return offset, bool(data >> offset & 1)

def content_tupled(data, bit_start, bit_end):
    return (bit_start, bit_end), data >> bit_start & ((1 << (1 + bit_end - bit_start)) - 1)

def entry_va(level, phys_addr, translating_va):
    def start_bit(level):
        if level == 5:
            return 48
        elif level == 4:
            return 39
        elif level == 3:
            return 30
        elif level == 2:
            return 21
        elif level == 1:
            return 12
        else:
            raise Exception(f'Unknown level {level}')

    entry_offset = ((translating_va >> start_bit(level)) & 511) * 8
    entry_va = _page_offset_base() + phys_addr + entry_offset
    return entry_va
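
# Worked example (illustrative only): for va = 0xffff888000000000 at level 4
# (the PML4 of a 4-level x86-64 configuration), start_bit(4) is 39, so the
# entry index is (va >> 39) & 511 == 273 and the entry sits 273 * 8 == 2184
# bytes into the table, which entry_va() then reads through the direct map.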

class Cr3():
    def __init__(self, cr3, page_levels):
        self.cr3 = cr3
        self.page_levels = page_levels
        self.page_level_write_through = is_bit_defined_tupled(cr3, 3)
        self.page_level_cache_disabled = is_bit_defined_tupled(cr3, 4)
        self.next_entry_physical_address = cr3 & PHYSICAL_ADDRESS_MASK & page_mask()

    def next_entry(self, va):
        next_level = self.page_levels
        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)

    def mk_string(self):
        return f"""\
cr3:
    {'cr3 binary data': <30} {hex(self.cr3)}
    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
    ---
    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
"""


class PageHierarchyEntry():
    def __init__(self, address, level):
        data = int.from_bytes(
            memoryview(gdb.selected_inferior().read_memory(address, 8)),
            "little"
        )
        if level == 1:
            self.is_page = True
            self.entry_present = is_bit_defined_tupled(data, 0)
            self.read_write = is_bit_defined_tupled(data, 1)
            self.user_access_allowed = is_bit_defined_tupled(data, 2)
            self.page_level_write_through = is_bit_defined_tupled(data, 3)
            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
            self.dirty = is_bit_defined_tupled(data, 6)
            self.pat = is_bit_defined_tupled(data, 7)
            self.global_translation = is_bit_defined_tupled(data, 8)
            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level)
            self.next_entry_physical_address = None
            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
            self.protection_key = content_tupled(data, 59, 62)
            self.executed_disable = is_bit_defined_tupled(data, 63)
        else:
            page_size = is_bit_defined_tupled(data, 7)
            page_size_bit = page_size[1]
            self.is_page = page_size_bit
            self.entry_present = is_bit_defined_tupled(data, 0)
            self.read_write = is_bit_defined_tupled(data, 1)
            self.user_access_allowed = is_bit_defined_tupled(data, 2)
            self.page_level_write_through = is_bit_defined_tupled(data, 3)
            self.page_level_cache_disabled = is_bit_defined_tupled(data, 4)
            self.entry_was_accessed = is_bit_defined_tupled(data, 5)
            self.page_size = page_size
            # The dirty, global, PAT, physical-address and protection-key
            # fields are only meaningful when the entry maps a page.
            self.dirty = is_bit_defined_tupled(data, 6) if page_size_bit else None
            self.global_translation = is_bit_defined_tupled(data, 8) if page_size_bit else None
            self.pat = is_bit_defined_tupled(data, 12) if page_size_bit else None
            self.page_physical_address = data & PHYSICAL_ADDRESS_MASK & page_mask(level) if page_size_bit else None
            self.next_entry_physical_address = None if page_size_bit else data & PHYSICAL_ADDRESS_MASK & page_mask()
            self.hlat_restart_with_ordinary = is_bit_defined_tupled(data, 11)
            self.protection_key = content_tupled(data, 59, 62) if page_size_bit else None
            self.executed_disable = is_bit_defined_tupled(data, 63)
        self.address = address
        self.page_entry_binary_data = data
        self.page_hierarchy_level = level

    def next_entry(self, va):
        if self.is_page or not self.entry_present[1]:
            return None

        next_level = self.page_hierarchy_level - 1
        return PageHierarchyEntry(entry_va(next_level, self.next_entry_physical_address, va), next_level)

    def mk_string(self):
        if not self.entry_present[1]:
            return f"""\
level {self.page_hierarchy_level}:
    {'entry address': <30} {hex(self.address)}
    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
    ---
    PAGE ENTRY IS NOT PRESENT!
"""
        elif self.is_page:
            return f"""\
level {self.page_hierarchy_level}:
    {'entry address': <30} {hex(self.address)}
    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
    {'page size': <30} {'1GB' if self.page_hierarchy_level == 3 else '2MB' if self.page_hierarchy_level == 2 else '4KB' if self.page_hierarchy_level == 1 else f'Unknown page size for level: {self.page_hierarchy_level}'}
    {'page physical address': <30} {hex(self.page_physical_address)}
    ---
    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
    {"" if self.page_hierarchy_level == 1 else f"{'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}"}
    {'bit': <4} {self.dirty[0]: <10} {'page dirty': <30} {self.dirty[1]}
    {'bit': <4} {self.global_translation[0]: <10} {'global translation': <30} {self.global_translation[1]}
    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
    {'bit': <4} {self.pat[0]: <10} {'pat': <30} {self.pat[1]}
    {'bits': <4} {str(self.protection_key[0]): <10} {'protection key': <30} {self.protection_key[1]}
    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
"""
        else:
            return f"""\
level {self.page_hierarchy_level}:
    {'entry address': <30} {hex(self.address)}
    {'page entry binary data': <30} {hex(self.page_entry_binary_data)}
    {'next entry physical address': <30} {hex(self.next_entry_physical_address)}
    ---
    {'bit': <4} {self.entry_present[0]: <10} {'entry present': <30} {self.entry_present[1]}
    {'bit': <4} {self.read_write[0]: <10} {'read/write access allowed': <30} {self.read_write[1]}
    {'bit': <4} {self.user_access_allowed[0]: <10} {'user access allowed': <30} {self.user_access_allowed[1]}
    {'bit': <4} {self.page_level_write_through[0]: <10} {'page level write through': <30} {self.page_level_write_through[1]}
    {'bit': <4} {self.page_level_cache_disabled[0]: <10} {'page level cache disabled': <30} {self.page_level_cache_disabled[1]}
    {'bit': <4} {self.entry_was_accessed[0]: <10} {'entry has been accessed': <30} {self.entry_was_accessed[1]}
    {'bit': <4} {self.page_size[0]: <10} {'page size': <30} {self.page_size[1]}
    {'bit': <4} {self.hlat_restart_with_ordinary[0]: <10} {'restart to ordinary': <30} {self.hlat_restart_with_ordinary[1]}
    {'bit': <4} {self.executed_disable[0]: <10} {'execute disable': <30} {self.executed_disable[1]}
"""


class TranslateVM(gdb.Command):
    """Prints the entire paging structure used to translate a given virtual address.

Using the address space of the currently executing process, translates the
virtual address and prints detailed information on every paging-structure
level used for the translation.
Currently supported arch: x86"""

    def __init__(self):
        super(TranslateVM, self).__init__('translate-vm', gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        if utils.is_target_arch("x86"):
            vm_address = gdb.parse_and_eval(arg)
            cr3_data = gdb.parse_and_eval('$cr3')
            cr4 = gdb.parse_and_eval('$cr4')
            # CR4.LA57 (bit 12) selects 5-level paging.
            page_levels = 5 if cr4 & (1 << 12) else 4
            page_entry = Cr3(cr3_data, page_levels)
            while page_entry:
                gdb.write(page_entry.mk_string())
                page_entry = page_entry.next_entry(vm_address)
        else:
            raise gdb.GdbError("Virtual address translation is not "
                               "supported for this arch")


TranslateVM()
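
# Usage sketch (assumes the kernel gdb helpers are loaded, e.g. through
# vmlinux-gdb.py, against a live x86 target such as QEMU's gdbstub):
#   (gdb) translate-vm 0xffff888000000000
# This prints cr3 first, then one block per paging-structure level visited
# while translating the given virtual address.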