// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to it */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;
void register_vmcore_cb(struct vmcore_cb *cb)
{
        INIT_LIST_HEAD(&cb->next);
        spin_lock(&vmcore_cb_lock);
        list_add_tail(&cb->next, &vmcore_cb_list);
        /*
         * Registering a vmcore callback after the vmcore was opened is
         * very unusual (e.g., manual driver loading).
         */
        if (vmcore_opened)
                pr_warn_once("Unexpected vmcore callback registration\n");
        spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
        spin_lock(&vmcore_cb_lock);
        list_del_rcu(&cb->next);
        /*
         * Unregistering a vmcore callback after the vmcore was opened is
         * very unusual (e.g., forced driver removal), but we cannot stop
         * unregistering.
         */
        if (vmcore_opened)
                pr_warn_once("Unexpected vmcore callback unregistration\n");
        spin_unlock(&vmcore_cb_lock);

        synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

static bool pfn_is_ram(unsigned long pfn)
{
        struct vmcore_cb *cb;
        bool ret = true;

        list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
                                 srcu_read_lock_held(&vmcore_cb_srcu)) {
                if (unlikely(!cb->pfn_is_ram))
                        continue;
                ret = cb->pfn_is_ram(cb, pfn);
                if (!ret)
                        break;
        }

        return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
        spin_lock(&vmcore_cb_lock);
        vmcore_opened = true;
        spin_unlock(&vmcore_cb_lock);

        return 0;
}
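/*
 * Illustrative sketch (not part of this file): how a hypervisor balloon
 * driver loaded in the kdump kernel might use the callback interface
 * above so that ballooned-out pages read back as zeroes. struct vmcore_cb
 * is declared in <linux/crash_dump.h>; "hyp_balloon_pfn_is_backed()" and
 * the init hook are hypothetical names used only for this example.
 */
#if 0
static bool hyp_balloon_vmcore_pfn_is_ram(struct vmcore_cb *cb,
                                          unsigned long pfn)
{
        /* Report only pages that are actually backed by the hypervisor. */
        return hyp_balloon_pfn_is_backed(pfn);
}

static struct vmcore_cb hyp_balloon_vmcore_cb = {
        .pfn_is_ram = hyp_balloon_vmcore_pfn_is_ram,
};

static int __init hyp_balloon_kdump_init(void)
{
        register_vmcore_cb(&hyp_balloon_vmcore_cb);
        return 0;
}
#endif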
/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
                         u64 *ppos, bool encrypted)
{
        unsigned long pfn, offset;
        ssize_t nr_bytes;
        ssize_t read = 0, tmp;
        int idx;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        idx = srcu_read_lock(&vmcore_cb_srcu);
        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (!pfn_is_ram(pfn)) {
                        tmp = iov_iter_zero(nr_bytes, iter);
                } else {
                        if (encrypted)
                                tmp = copy_oldmem_page_encrypted(iter, pfn,
                                                                 nr_bytes,
                                                                 offset);
                        else
                                tmp = copy_oldmem_page(iter, pfn, nr_bytes,
                                                       offset);
                }
                if (tmp < nr_bytes) {
                        srcu_read_unlock(&vmcore_cb_srcu, idx);
                        return -EFAULT;
                }

                *ppos += nr_bytes;
                count -= nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);
        srcu_read_unlock(&vmcore_cb_srcu, idx);

        return read;
}
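/*
 * Worked example for the copy loop above, assuming 4 KiB pages: a read of
 * 8192 bytes at *ppos == 6144 touches three pages -- the last 2048 bytes
 * of pfn 1, all 4096 bytes of pfn 2 and the first 2048 bytes of pfn 3.
 * Any chunk whose pfn is reported as not-RAM by pfn_is_ram() is
 * zero-filled instead of copied from old memory.
 */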
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        struct kvec kvec = { .iov_base = buf, .iov_len = count };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

        return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        struct kvec kvec = { .iov_base = buf, .iov_len = count };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

        return read_from_oldmem(&iter, count, ppos,
                        cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        prot = pgprot_encrypted(prot);
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
                unsigned long pfn, size_t csize, unsigned long offset)
{
        return copy_oldmem_page(iter, pfn, csize, offset);
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to_iter(buf, tsz, iter) < tsz) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, 0,
                                                        tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
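/*
 * For reference, the resulting /proc/vmcore file layout that the read and
 * mmap paths below both follow (device dumps exist only with
 * CONFIG_PROC_VMCORE_DEVICE_DUMP and occupy the head of elfnotes_buf):
 *
 *   [0, elfcorebuf_sz)                     ELF header + merged program headers
 *   [.., + vmcoredd_orig_sz)               device dump notes, if any
 *   [.., elfcorebuf_sz + elfnotes_sz)      remaining (crash) ELF notes
 *   [.., vmcore_size)                      PT_LOAD memory chunks (old memory)
 */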
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (!iov_iter_count(iter) || *fpos >= vmcore_size)
                return 0;

        iov_iter_truncate(iter, vmcore_size - *fpos);

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
                if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
                        return -EFAULT;
                *fpos += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (!iov_iter_count(iter))
                        return acc;
        }

        /* Read ELF note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the memory contents. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, iov_iter_count(iter));
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(iter, start, tsz))
                                return -EFAULT;

                        *fpos += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!iov_iter_count(iter))
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
                          iov_iter_count(iter));
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
                if (copy_to_iter(kaddr, tsz, iter) < tsz)
                        return -EFAULT;

                *fpos += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (!iov_iter_count(iter))
                        return acc;

                cond_resched();
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            iov_iter_count(iter));
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(iter, tsz, &start,
                                        cc_platform_has(CC_ATTR_MEM_ENCRYPT));
                        if (tmp < 0)
                                return tmp;
                        *fpos += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!iov_iter_count(iter))
                                return acc;
                }
        }

        return acc;
}

static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
        return __read_vmcore(iter, &iocb->ki_pos);
}
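/*
 * Illustrative userspace sketch (not kernel code): dump tools such as
 * makedumpfile consume the read path above by fetching the ELF header
 * first and then following the merged program headers it describes.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>

static int read_vmcore_ehdr(Elf64_Ehdr *ehdr)
{
        int fd = open("/proc/vmcore", O_RDONLY);

        if (fd < 0)
                return -1;
        /* The first sizeof(*ehdr) bytes come straight from elfcorebuf. */
        if (pread(fd, ehdr, sizeof(*ehdr), 0) != (ssize_t)sizeof(*ehdr)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;        /* ehdr->e_phoff now locates the program headers */
}
#endif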
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct iov_iter iter;
        struct kvec kvec;
        struct page *page;
        loff_t offset;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                kvec.iov_base = page_address(page);
                kvec.iov_len = PAGE_SIZE;
                iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);

                rc = __read_vmcore(&iter, &offset);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return vmf_error(rc);
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}
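/*
 * Note on the choice above: vmalloc_user() (rather than plain vmalloc())
 * returns zeroed memory with VM_USERMAP set on the vmalloc area, which is
 * what remap_vmalloc_range_partial() requires when the notes buffer is
 * later mapped into user space by mmap_vmcore().
 */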
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        int ret, idx;

        /*
         * Check if a callback was registered to avoid looping over all
         * pages without a reason.
         */
        idx = srcu_read_lock(&vmcore_cb_srcu);
        if (!list_empty(&vmcore_cb_list))
                ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
        srcu_read_unlock(&vmcore_cb_srcu, idx);
        return ret;
}
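/*
 * Illustrative userspace sketch (not kernel code): mmap_vmcore() below lets
 * a dump tool map a PT_LOAD region straight into its address space instead
 * of read()ing it. "fd" is an open /proc/vmcore and "phdr" one of the
 * merged program headers read earlier; both names are assumptions of this
 * example. mmap offsets must be page aligned, so the intra-page skew that
 * p_offset may carry is kept in the returned pointer.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>
#include <elf.h>

static void *map_load_segment(int fd, const Elf64_Phdr *phdr)
{
        long  pagesize = sysconf(_SC_PAGESIZE);
        off_t aligned  = phdr->p_offset & ~((off_t)pagesize - 1);
        char *base = mmap(NULL, phdr->p_filesz + (phdr->p_offset - aligned),
                          PROT_READ, MAP_PRIVATE, fd, aligned);

        return base == MAP_FAILED ? NULL : base + (phdr->p_offset - aligned);
}
#endif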
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the memory contents. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data can be
                 * avoided. This also ensures that the device dumps and
                 * other elf notes can be properly mmaped at page aligned
                 * address.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, 0, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif
static const struct proc_ops vmcore_proc_ops = {
        .proc_open      = open_vmcore,
        .proc_read_iter = read_vmcore,
        .proc_lseek     = default_llseek,
        .proc_mmap      = mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}
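/*
 * Worked example for the 4-byte alignment arithmetic above, with
 * hypothetical sizes: a note named "CORE\0" (n_namesz == 5) has its name
 * padded to (5 + 3) & ~3 == 8 bytes; with n_descsz == 333 the descriptor
 * is padded to 336, so sz == sizeof(Elf64_Nhdr) + 8 + 336 == 356 bytes.
 */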
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 4;

        /* Add merged PT_NOTE program header*/
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
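/*
 * After the merge above, a header block that started out as
 *
 *   Elf64_Ehdr | PT_NOTE #0 | PT_NOTE #1 | ... | PT_LOAD ...
 *
 * has been rewritten in place as
 *
 *   Elf64_Ehdr | merged PT_NOTE | PT_LOAD ... | zero padding
 *
 * with the single PT_NOTE's p_offset pointing at the page-aligned notes
 * buffer and e_phnum reduced accordingly.
 */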
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 4;

        /* Add merged PT_NOTE program header*/
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
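/*
 * Worked example for the PT_LOAD rounding above (4 KiB pages, hypothetical
 * values): a header with p_offset == 0x2001200 and p_memsz == 0x2400 gives
 * start == 0x2001000 and end == 0x2004000, so a 0x3000-byte chunk is added
 * to the vmcore list while the exported p_offset keeps the intra-page skew
 * of 0x200.
 */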
/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc=0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read ELF header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
1252 rc = merge_note_headers_elf64(elfcore 1043 rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1253 &elfnot 1044 &elfnotes_buf, &elfnotes_sz);
1254 if (rc) 1045 if (rc)
1255 goto fail; 1046 goto fail;
1256 rc = process_ptload_program_headers_e 1047 rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1257 1048 elfnotes_sz, &vmcore_list);
1258 if (rc) 1049 if (rc)
1259 goto fail; 1050 goto fail;
1260 set_vmcore_list_offsets(elfcorebuf_sz 1051 set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1261 return 0; 1052 return 0;
1262 fail: 1053 fail:
1263 free_elfcorebuf(); 1054 free_elfcorebuf();
1264 return rc; 1055 return rc;
1265 } 1056 }
1266 1057
1267 static int __init parse_crash_elf32_headers(v 1058 static int __init parse_crash_elf32_headers(void)
1268 { 1059 {
1269 int rc=0; 1060 int rc=0;
1270 Elf32_Ehdr ehdr; 1061 Elf32_Ehdr ehdr;
1271 u64 addr; 1062 u64 addr;
1272 1063
1273 addr = elfcorehdr_addr; 1064 addr = elfcorehdr_addr;
1274 1065
1275 /* Read ELF header */ !! 1066 /* Read Elf header */
1276 rc = elfcorehdr_read((char *)&ehdr, s 1067 rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1277 if (rc < 0) 1068 if (rc < 0)
1278 return rc; 1069 return rc;
1279 1070
1280 /* Do some basic Verification. */ 1071 /* Do some basic Verification. */
1281 if (memcmp(ehdr.e_ident, ELFMAG, SELF 1072 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1282 (ehdr.e_type != ET_CORE) || 1073 (ehdr.e_type != ET_CORE) ||
1283 !vmcore_elf32_check_arch(&ehd 1074 !vmcore_elf32_check_arch(&ehdr) ||
1284 ehdr.e_ident[EI_CLASS] != ELF 1075 ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1285 ehdr.e_ident[EI_VERSION] != E 1076 ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1286 ehdr.e_version != EV_CURRENT 1077 ehdr.e_version != EV_CURRENT ||
1287 ehdr.e_ehsize != sizeof(Elf32 1078 ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1288 ehdr.e_phentsize != sizeof(El 1079 ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1289 ehdr.e_phnum == 0) { 1080 ehdr.e_phnum == 0) {
1290 pr_warn("Warning: Core image 1081 pr_warn("Warning: Core image elf header is not sane\n");
1291 return -EINVAL; 1082 return -EINVAL;
1292 } 1083 }
1293 1084
1294 /* Read in all elf headers. */ 1085 /* Read in all elf headers. */
1295 elfcorebuf_sz_orig = sizeof(Elf32_Ehd 1086 elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1296 elfcorebuf_sz = elfcorebuf_sz_orig; 1087 elfcorebuf_sz = elfcorebuf_sz_orig;
1297 elfcorebuf = (void *)__get_free_pages 1088 elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1298 1089 get_order(elfcorebuf_sz_orig));
1299 if (!elfcorebuf) 1090 if (!elfcorebuf)
1300 return -ENOMEM; 1091 return -ENOMEM;
1301 addr = elfcorehdr_addr; 1092 addr = elfcorehdr_addr;
1302 rc = elfcorehdr_read(elfcorebuf, elfc 1093 rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1303 if (rc < 0) 1094 if (rc < 0)
1304 goto fail; 1095 goto fail;
1305 1096
1306 /* Merge all PT_NOTE headers into one 1097 /* Merge all PT_NOTE headers into one. */
1307 rc = merge_note_headers_elf32(elfcore 1098 rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1308 &elfnot 1099 &elfnotes_buf, &elfnotes_sz);
1309 if (rc) 1100 if (rc)
1310 goto fail; 1101 goto fail;
1311 rc = process_ptload_program_headers_e 1102 rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1312 1103 elfnotes_sz, &vmcore_list);
1313 if (rc) 1104 if (rc)
1314 goto fail; 1105 goto fail;
1315 set_vmcore_list_offsets(elfcorebuf_sz 1106 set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1316 return 0; 1107 return 0;
1317 fail: 1108 fail:
1318 free_elfcorebuf(); 1109 free_elfcorebuf();
1319 return rc; 1110 return rc;
1320 } 1111 }
1321 1112
1322 static int __init parse_crash_elf_headers(voi 1113 static int __init parse_crash_elf_headers(void)
1323 { 1114 {
1324 unsigned char e_ident[EI_NIDENT]; 1115 unsigned char e_ident[EI_NIDENT];
1325 u64 addr; 1116 u64 addr;
1326 int rc=0; 1117 int rc=0;
1327 1118
1328 addr = elfcorehdr_addr; 1119 addr = elfcorehdr_addr;
1329 rc = elfcorehdr_read(e_ident, EI_NIDE 1120 rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1330 if (rc < 0) 1121 if (rc < 0)
1331 return rc; 1122 return rc;
1332 if (memcmp(e_ident, ELFMAG, SELFMAG) 1123 if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1333 pr_warn("Warning: Core image 1124 pr_warn("Warning: Core image elf header not found\n");
1334 return -EINVAL; 1125 return -EINVAL;
1335 } 1126 }
1336 1127
1337 if (e_ident[EI_CLASS] == ELFCLASS64) 1128 if (e_ident[EI_CLASS] == ELFCLASS64) {
1338 rc = parse_crash_elf64_header 1129 rc = parse_crash_elf64_headers();
1339 if (rc) 1130 if (rc)
1340 return rc; 1131 return rc;
1341 } else if (e_ident[EI_CLASS] == ELFCL 1132 } else if (e_ident[EI_CLASS] == ELFCLASS32) {
1342 rc = parse_crash_elf32_header 1133 rc = parse_crash_elf32_headers();
1343 if (rc) 1134 if (rc)
1344 return rc; 1135 return rc;
1345 } else { 1136 } else {
1346 pr_warn("Warning: Core image 1137 pr_warn("Warning: Core image elf header is not sane\n");
1347 return -EINVAL; 1138 return -EINVAL;
1348 } 1139 }
1349 1140
1350 /* Determine vmcore size. */ 1141 /* Determine vmcore size. */
1351 vmcore_size = get_vmcore_size(elfcore 1142 vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1352 &vmcore 1143 &vmcore_list);
1353 1144
1354 return 0; 1145 return 0;
1355 } 1146 }
1356 1147
1357 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP <<
1358 /** <<
1359 * vmcoredd_write_header - Write vmcore devic <<
1360 * beginning of the dump's buffer. <<
1361 * @buf: Output buffer where the note is writ <<
1362 * @data: Dump info <<
1363 * @size: Size of the dump <<
1364 * <<
1365 * Fills beginning of the dump's buffer with <<
1366 */ <<
1367 static void vmcoredd_write_header(void *buf, <<
1368 u32 size) <<
1369 { <<
1370 struct vmcoredd_header *vdd_hdr = (st <<
1371 <<
1372 vdd_hdr->n_namesz = sizeof(vdd_hdr->n <<
1373 vdd_hdr->n_descsz = size + sizeof(vdd <<
1374 vdd_hdr->n_type = NT_VMCOREDD; <<
1375 <<
1376 strscpy_pad(vdd_hdr->name, VMCOREDD_N <<
1377 strscpy_pad(vdd_hdr->dump_name, data- <<
1378 } <<
1379 <<
1380 /** <<
1381 * vmcoredd_update_program_headers - Update a <<
1382 * @elfptr: Pointer to elf header <<
1383 * @elfnotesz: Size of elf notes aligned to p <<
1384 * @vmcoreddsz: Size of device dumps to be ad <<
1385 * <<
1386 * Determine type of ELF header (Elf64 or Elf <<
1387 * Also update the offsets of all the program <<
1388 */ <<
1389 static void vmcoredd_update_program_headers(c <<
1390 s <<
1391 { <<
1392 unsigned char *e_ident = (unsigned ch <<
1393 u64 start, end, size; <<
1394 loff_t vmcore_off; <<
1395 u32 i; <<
1396 <<
1397 vmcore_off = elfcorebuf_sz + elfnotes <<
1398 <<
1399 if (e_ident[EI_CLASS] == ELFCLASS64) <<
1400 Elf64_Ehdr *ehdr = (Elf64_Ehd <<
1401 Elf64_Phdr *phdr = (Elf64_Phd <<
1402 <<
1403 /* Update all program headers <<
1404 for (i = 0; i < ehdr->e_phnum <<
1405 if (phdr->p_type == P <<
1406 /* Update not <<
1407 phdr->p_memsz <<
1408 phdr->p_files <<
1409 continue; <<
1410 } <<
1411 <<
1412 start = rounddown(phd <<
1413 end = roundup(phdr->p <<
1414 PAGE_SI <<
1415 size = end - start; <<
1416 phdr->p_offset = vmco <<
1417 vmcore_off += size; <<
1418 } <<
1419 } else { <<
1420 Elf32_Ehdr *ehdr = (Elf32_Ehd <<
1421 Elf32_Phdr *phdr = (Elf32_Phd <<
1422 <<
1423 /* Update all program headers <<
1424 for (i = 0; i < ehdr->e_phnum <<
1425 if (phdr->p_type == P <<
1426 /* Update not <<
1427 phdr->p_memsz <<
1428 phdr->p_files <<
1429 continue; <<
1430 } <<
1431 <<
1432 start = rounddown(phd <<
1433 end = roundup(phdr->p <<
1434 PAGE_SI <<
1435 size = end - start; <<
1436 phdr->p_offset = vmco <<
1437 vmcore_off += size; <<
1438 } <<
1439 } <<
1440 } <<
1441 <<
1442 /** <<
1443 * vmcoredd_update_size - Update the total si <<
1444 * ELF header <<
1445 * @dump_size: Size of the current device dum <<
1446 * <<
1447 * Update the total size of all the device du <<
1448 * headers. Calculate the new offsets for the <<
1449 * total vmcore size. <<
1450 */ <<
1451 static void vmcoredd_update_size(size_t dump_ <<
1452 { <<
1453 vmcoredd_orig_sz += dump_size; <<
1454 elfnotes_sz = roundup(elfnotes_orig_s <<
1455 vmcoredd_update_program_headers(elfco <<
1456 vmcor <<
1457 <<
1458 /* Update vmcore list offsets */ <<
1459 set_vmcore_list_offsets(elfcorebuf_sz <<
1460 <<
1461 vmcore_size = get_vmcore_size(elfcore <<
1462 &vmcore <<
1463 proc_vmcore->size = vmcore_size; <<
1464 } <<
1465 <<
1466 /** <<
1467 * vmcore_add_device_dump - Add a buffer cont <<
1468 * @data: dump info. <<
1469 * <<
1470 * Allocate a buffer and invoke the calling d <<
1471 * Write ELF note at the beginning of the buf <<
1472 * dump and add the dump to global list. <<
1473 */ <<
1474 int vmcore_add_device_dump(struct vmcoredd_da <<
1475 { <<
1476 struct vmcoredd_node *dump; <<
1477 void *buf = NULL; <<
1478 size_t data_size; <<
1479 int ret; <<
1480 <<
1481 if (vmcoredd_disabled) { <<
1482 pr_err_once("Device dump is d <<
1483 return -EINVAL; <<
1484 } <<
1485 <<
1486 if (!data || !strlen(data->dump_name) <<
1487 !data->vmcoredd_callback || !data <<
1488 return -EINVAL; <<
1489 <<
1490 dump = vzalloc(sizeof(*dump)); <<
1491 if (!dump) { <<
1492 ret = -ENOMEM; <<
1493 goto out_err; <<
1494 } <<
1495 <<
1496 /* Keep size of the buffer page align <<
1497 data_size = roundup(sizeof(struct vmc <<
1498 PAGE_SIZE); <<
1499 <<
1500 /* Allocate buffer for driver's to wr <<
1501 buf = vmcore_alloc_buf(data_size); <<
1502 if (!buf) { <<
1503 ret = -ENOMEM; <<
1504 goto out_err; <<
1505 } <<
1506 <<
1507 vmcoredd_write_header(buf, data, data <<
1508 sizeof(struct v <<
1509 <<
1510 /* Invoke the driver's dump collectio <<
1511 ret = data->vmcoredd_callback(data, b <<
1512 sizeof( <<
1513 if (ret) <<
1514 goto out_err; <<
1515 <<
1516 dump->buf = buf; <<
1517 dump->size = data_size; <<
1518 <<
1519 /* Add the dump to driver sysfs list <<
1520 mutex_lock(&vmcoredd_mutex); <<
1521 list_add_tail(&dump->list, &vmcoredd_ <<
1522 mutex_unlock(&vmcoredd_mutex); <<
1523 <<
1524 vmcoredd_update_size(data_size); <<
1525 return 0; <<
1526 <<
1527 out_err: <<
1528 vfree(buf); <<
1529 vfree(dump); <<
1530 <<
1531 return ret; <<
1532 } <<
1533 EXPORT_SYMBOL(vmcore_add_device_dump); <<
1534 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ <<
1535 <<
1536 /* Free all dumps in vmcore device dump list <<
1537 static void vmcore_free_device_dumps(void) <<
1538 { <<
1539 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP <<
1540 mutex_lock(&vmcoredd_mutex); <<
1541 while (!list_empty(&vmcoredd_list)) { <<
1542 struct vmcoredd_node *dump; <<
1543 <<
1544 dump = list_first_entry(&vmco <<
1545 list) <<
1546 list_del(&dump->list); <<
1547 vfree(dump->buf); <<
1548 vfree(dump); <<
1549 } <<
1550 mutex_unlock(&vmcoredd_mutex); <<
1551 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ <<
1552 } <<
1553 <<
1554 /* Init function for vmcore module. */ 1148 /* Init function for vmcore module. */
1555 static int __init vmcore_init(void) 1149 static int __init vmcore_init(void)
1556 { 1150 {
1557 int rc = 0; 1151 int rc = 0;
1558 1152
1559 /* Allow architectures to allocate EL 1153 /* Allow architectures to allocate ELF header in 2nd kernel */
1560 rc = elfcorehdr_alloc(&elfcorehdr_add 1154 rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1561 if (rc) 1155 if (rc)
1562 return rc; 1156 return rc;
1563 /* 1157 /*
1564 * If elfcorehdr= has been passed in 1158 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
1565 * then capture the dump. 1159 * then capture the dump.
1566 */ 1160 */
1567 if (!(is_vmcore_usable())) 1161 if (!(is_vmcore_usable()))
1568 return rc; 1162 return rc;
1569 rc = parse_crash_elf_headers(); 1163 rc = parse_crash_elf_headers();
1570 if (rc) { 1164 if (rc) {
1571 elfcorehdr_free(elfcorehdr_ad <<
1572 pr_warn("Kdump: vmcore not in 1165 pr_warn("Kdump: vmcore not initialized\n");
1573 return rc; 1166 return rc;
1574 } 1167 }
1575 elfcorehdr_free(elfcorehdr_addr); 1168 elfcorehdr_free(elfcorehdr_addr);
1576 elfcorehdr_addr = ELFCORE_ADDR_ERR; 1169 elfcorehdr_addr = ELFCORE_ADDR_ERR;
1577 1170
1578 proc_vmcore = proc_create("vmcore", S !! 1171 proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
1579 if (proc_vmcore) 1172 if (proc_vmcore)
1580 proc_vmcore->size = vmcore_si 1173 proc_vmcore->size = vmcore_size;
1581 return 0; 1174 return 0;
1582 } 1175 }
1583 fs_initcall(vmcore_init); 1176 fs_initcall(vmcore_init);
1584 1177
1585 /* Cleanup function for vmcore module. */ 1178 /* Cleanup function for vmcore module. */
1586 void vmcore_cleanup(void) 1179 void vmcore_cleanup(void)
1587 { 1180 {
>> 1181 struct list_head *pos, *next;
>> 1182
1588 if (proc_vmcore) { 1183 if (proc_vmcore) {
1589 proc_remove(proc_vmcore); 1184 proc_remove(proc_vmcore);
1590 proc_vmcore = NULL; 1185 proc_vmcore = NULL;
1591 } 1186 }
1592 1187
1593 /* clear the vmcore list. */ 1188 /* clear the vmcore list. */
1594 while (!list_empty(&vmcore_list)) { !! 1189 list_for_each_safe(pos, next, &vmcore_list) {
1595 struct vmcore *m; 1190 struct vmcore *m;
1596 1191
1597 m = list_first_entry(&vmcore_ !! 1192 m = list_entry(pos, struct vmcore, list);
1598 list_del(&m->list); 1193 list_del(&m->list);
1599 kfree(m); 1194 kfree(m);
1600 } 1195 }
1601 free_elfcorebuf(); 1196 free_elfcorebuf();
1602 <<
1603 /* clear vmcore device dump list */ <<
1604 vmcore_free_device_dumps(); <<
1605 } 1197 }
1606 1198
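To make the PT_LOAD bookkeeping in process_ptload_program_headers_elf32/elf64 above concrete, here is a small userspace sketch with invented numbers; PAGE_SIZE, rounddown() and roundup() are stand-ins for the kernel's definitions. Each segment is expanded to page boundaries before being placed in the file, and the header's p_offset keeps the sub-page displacement of the original physical address.

        /*
         * Worked example of the PT_LOAD offset remapping (illustrative
         * only; the input values are made up).
         */
        #include <stdio.h>

        #define PAGE_SIZE 4096UL
        #define rounddown(x, y) ((x) - ((x) % (y)))
        #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

        int main(void)
        {
                unsigned long paddr = 0x10000234UL;     /* phdr->p_offset: physical start */
                unsigned long memsz = 0x2000UL;         /* phdr->p_memsz */
                unsigned long vmcore_off = 0x3000UL;    /* elfsz + elfnotes_sz, say */

                unsigned long start = rounddown(paddr, PAGE_SIZE);      /* 0x10000000 */
                unsigned long end = roundup(paddr + memsz, PAGE_SIZE);  /* 0x10003000 */
                unsigned long size = end - start;                       /* 0x3000 */

                /* The chunk [start, end) lands at vmcore_off in the file; the
                 * remapped p_offset keeps the displacement (paddr - start). */
                printf("p_offset = %#lx\n", vmcore_off + (paddr - start)); /* 0x3234 */
                return 0;
        }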
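The dispatch in parse_crash_elf_headers() above can also be sketched in userspace: read the ELF ident bytes and pick the 64-bit or 32-bit parser by EI_CLASS. This is illustrative only; in the kernel the bytes come from the old memory via elfcorehdr_read(), not from a file.

        /* Userspace analogue of the e_ident check and class dispatch. */
        #include <elf.h>
        #include <stdio.h>
        #include <string.h>

        static int sniff_elf_class(FILE *f)
        {
                unsigned char e_ident[EI_NIDENT];

                if (fread(e_ident, 1, EI_NIDENT, f) != EI_NIDENT)
                        return -1;
                if (memcmp(e_ident, ELFMAG, SELFMAG) != 0)
                        return -1;      /* no ELF core header found */
                if (e_ident[EI_CLASS] == ELFCLASS64)
                        return 64;      /* would take the Elf64 path */
                if (e_ident[EI_CLASS] == ELFCLASS32)
                        return 32;      /* would take the Elf32 path */
                return -1;              /* "not sane", as the kernel warns */
        }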
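Finally, a hypothetical driver-side use of vmcore_add_device_dump(), exported above. This is a sketch, not code from this file: the function and dump names are invented, and the vmcoredd_data fields are filled to satisfy the checks the function makes (non-empty dump_name, non-zero size, and a collect callback). Note that the callback runs immediately, inside vmcore_add_device_dump(), before the dump is added to the list.

        /* Hypothetical crash-time hook in some device driver. */
        #include <linux/crash_dump.h>
        #include <linux/sizes.h>
        #include <linux/string.h>

        static int my_dev_dump_collect(struct vmcoredd_data *data, void *buf)
        {
                /* Fill buf with up to data->size bytes of device state. */
                memset(buf, 0, data->size);     /* placeholder for real hardware reads */
                return 0;
        }

        static int my_dev_add_dump(void)
        {
                struct vmcoredd_data data = {};

                strscpy(data.dump_name, "my_dev_fw_state", sizeof(data.dump_name));
                data.size = SZ_64K;
                data.vmcoredd_callback = my_dev_dump_collect;

                /* Returns 0 on success; fails with -EINVAL if novmcoredd was set. */
                return vmcore_add_device_dump(&data);
        }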