// SPDX-License-Identifier: GPL-2.0-only			<<
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>				!! #include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>				<<
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>					!! #include <linux/uaccess.h>
#include <linux/cc_platform.h>				<<
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;				<<
core_param(novmcoredd, vmcoredd_disabled, bool, 0);	<<
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);			!!
59 /* 66 DEFINE_STATIC_SRCU(vmcore_cb_srcu); !! 60 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error 67 /* List of registered vmcore callbacks. */ !! 61 * The called function has to take care of module refcounting. 68 static LIST_HEAD(vmcore_cb_list); !! 62 */ 69 /* Whether the vmcore has been opened once. */ !! 63 static int (*oldmem_pfn_is_ram)(unsigned long pfn); 70 static bool vmcore_opened; << 71 << 72 void register_vmcore_cb(struct vmcore_cb *cb) << 73 { << 74 INIT_LIST_HEAD(&cb->next); << 75 spin_lock(&vmcore_cb_lock); << 76 list_add_tail(&cb->next, &vmcore_cb_li << 77 /* << 78 * Registering a vmcore callback after << 79 * very unusual (e.g., manual driver l << 80 */ << 81 if (vmcore_opened) << 82 pr_warn_once("Unexpected vmcor << 83 spin_unlock(&vmcore_cb_lock); << 84 } << 85 EXPORT_SYMBOL_GPL(register_vmcore_cb); << 86 64 87 void unregister_vmcore_cb(struct vmcore_cb *cb !! 65 int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)) 88 { 66 { 89 spin_lock(&vmcore_cb_lock); !! 67 if (oldmem_pfn_is_ram) 90 list_del_rcu(&cb->next); !! 68 return -EBUSY; 91 /* !! 69 oldmem_pfn_is_ram = fn; 92 * Unregistering a vmcore callback aft !! 70 return 0; 93 * very unusual (e.g., forced driver r << 94 * unregistering. << 95 */ << 96 if (vmcore_opened) << 97 pr_warn_once("Unexpected vmcor << 98 spin_unlock(&vmcore_cb_lock); << 99 << 100 synchronize_srcu(&vmcore_cb_srcu); << 101 } 71 } 102 EXPORT_SYMBOL_GPL(unregister_vmcore_cb); !! 72 EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram); 103 73 104 static bool pfn_is_ram(unsigned long pfn) !! 74 void unregister_oldmem_pfn_is_ram(void) 105 { 75 { 106 struct vmcore_cb *cb; !! 76 oldmem_pfn_is_ram = NULL; 107 bool ret = true; !! 77 wmb(); 108 << 109 list_for_each_entry_srcu(cb, &vmcore_c << 110 srcu_read_loc << 111 if (unlikely(!cb->pfn_is_ram)) << 112 continue; << 113 ret = cb->pfn_is_ram(cb, pfn); << 114 if (!ret) << 115 break; << 116 } << 117 << 118 return ret; << 119 } 78 } >> 79 EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram); 120 80 121 static int open_vmcore(struct inode *inode, st !! 81 static int pfn_is_ram(unsigned long pfn) 122 { 82 { 123 spin_lock(&vmcore_cb_lock); !! 83 int (*fn)(unsigned long pfn); 124 vmcore_opened = true; !! 84 /* pfn is ram unless fn() checks pagetype */ 125 spin_unlock(&vmcore_cb_lock); !! 85 int ret = 1; 126 86 127 return 0; !! 87 /* >> 88 * Ask hypervisor if the pfn is really ram. >> 89 * A ballooned page contains no data and reading from such a page >> 90 * will cause high load in the hypervisor. >> 91 */ >> 92 fn = oldmem_pfn_is_ram; >> 93 if (fn) >> 94 ret = fn(pfn); >> 95 >> 96 return ret; 128 } 97 } 129 98 130 /* Reads a page from the oldmem device from gi 99 /* Reads a page from the oldmem device from given offset. */ 131 ssize_t read_from_oldmem(struct iov_iter *iter !! 100 static ssize_t read_from_oldmem(char *buf, size_t count, 132 u64 *ppos, bool encry !! 101 u64 *ppos, int userbuf) 133 { 102 { 134 unsigned long pfn, offset; 103 unsigned long pfn, offset; 135 ssize_t nr_bytes; !! 
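
The old single oldmem_pfn_is_ram hook (right column) allowed exactly one registered function; the vmcore_cb list introduced on the left allows several callers and protects readers with SRCU. A minimal sketch of how a driver might plug into the new interface follows; register_vmcore_cb(), unregister_vmcore_cb(), struct vmcore_cb and is_kdump_kernel() are the real API shown above, while all mydrv_* names are hypothetical:

	#include <linux/module.h>
	#include <linux/crash_dump.h>

	/* Hypothetical device query: does this pfn back an inflated page? */
	static bool mydrv_pfn_is_ballooned(unsigned long pfn)
	{
		return false;	/* stub for the sketch */
	}

	static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
	{
		/* Report false for pages the device knows carry no data. */
		return !mydrv_pfn_is_ballooned(pfn);
	}

	static struct vmcore_cb mydrv_vmcore_cb = {
		.pfn_is_ram = mydrv_pfn_is_ram,
	};

	static int __init mydrv_init(void)
	{
		/* Only meaningful in the kdump ("second") kernel. */
		if (is_kdump_kernel())
			register_vmcore_cb(&mydrv_vmcore_cb);
		return 0;
	}
	module_init(mydrv_init);

	static void __exit mydrv_exit(void)
	{
		if (is_kdump_kernel())
			unregister_vmcore_cb(&mydrv_vmcore_cb);
	}
	module_exit(mydrv_exit);

	MODULE_LICENSE("GPL");

Note that read_from_oldmem() holds vmcore_cb_srcu across the copy loop, which is why unregister_vmcore_cb() ends with synchronize_srcu(): after it returns, no reader can still be executing the callback.
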
104 size_t nr_bytes; 136 ssize_t read = 0, tmp; 105 ssize_t read = 0, tmp; 137 int idx; << 138 106 139 if (!count) 107 if (!count) 140 return 0; 108 return 0; 141 109 142 offset = (unsigned long)(*ppos % PAGE_ 110 offset = (unsigned long)(*ppos % PAGE_SIZE); 143 pfn = (unsigned long)(*ppos / PAGE_SIZ 111 pfn = (unsigned long)(*ppos / PAGE_SIZE); 144 112 145 idx = srcu_read_lock(&vmcore_cb_srcu); << 146 do { 113 do { 147 if (count > (PAGE_SIZE - offse 114 if (count > (PAGE_SIZE - offset)) 148 nr_bytes = PAGE_SIZE - 115 nr_bytes = PAGE_SIZE - offset; 149 else 116 else 150 nr_bytes = count; 117 nr_bytes = count; 151 118 152 /* If pfn is not ram, return z 119 /* If pfn is not ram, return zeros for sparse dump files */ 153 if (!pfn_is_ram(pfn)) { !! 120 if (pfn_is_ram(pfn) == 0) { 154 tmp = iov_iter_zero(nr !! 121 tmp = 0; >> 122 if (!userbuf) >> 123 memset(buf, 0, nr_bytes); >> 124 else if (clear_user(buf, nr_bytes)) >> 125 tmp = -EFAULT; 155 } else { 126 } else { 156 if (encrypted) !! 127 tmp = copy_oldmem_page(pfn, buf, nr_bytes, 157 tmp = copy_old !! 128 offset, userbuf); 158 << 159 << 160 else << 161 tmp = copy_old << 162 << 163 } << 164 if (tmp < nr_bytes) { << 165 srcu_read_unlock(&vmco << 166 return -EFAULT; << 167 } 129 } >> 130 if (tmp < 0) >> 131 return tmp; 168 132 169 *ppos += nr_bytes; 133 *ppos += nr_bytes; 170 count -= nr_bytes; 134 count -= nr_bytes; >> 135 buf += nr_bytes; 171 read += nr_bytes; 136 read += nr_bytes; 172 ++pfn; 137 ++pfn; 173 offset = 0; 138 offset = 0; 174 } while (count); 139 } while (count); 175 srcu_read_unlock(&vmcore_cb_srcu, idx) << 176 140 177 return read; 141 return read; 178 } 142 } 179 143 180 /* 144 /* 181 * Architectures may override this function to 145 * Architectures may override this function to allocate ELF header in 2nd kernel 182 */ 146 */ 183 int __weak elfcorehdr_alloc(unsigned long long 147 int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) 184 { 148 { 185 return 0; 149 return 0; 186 } 150 } 187 151 188 /* 152 /* 189 * Architectures may override this function to 153 * Architectures may override this function to free header 190 */ 154 */ 191 void __weak elfcorehdr_free(unsigned long long 155 void __weak elfcorehdr_free(unsigned long long addr) 192 {} 156 {} 193 157 194 /* 158 /* 195 * Architectures may override this function to 159 * Architectures may override this function to read from ELF header 196 */ 160 */ 197 ssize_t __weak elfcorehdr_read(char *buf, size 161 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos) 198 { 162 { 199 struct kvec kvec = { .iov_base = buf, !! 163 return read_from_oldmem(buf, count, ppos, 0); 200 struct iov_iter iter; << 201 << 202 iov_iter_kvec(&iter, ITER_DEST, &kvec, << 203 << 204 return read_from_oldmem(&iter, count, << 205 } 164 } 206 165 207 /* 166 /* 208 * Architectures may override this function to 167 * Architectures may override this function to read from notes sections 209 */ 168 */ 210 ssize_t __weak elfcorehdr_read_notes(char *buf 169 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos) 211 { 170 { 212 struct kvec kvec = { .iov_base = buf, !! 
171 return read_from_oldmem(buf, count, ppos, 0); 213 struct iov_iter iter; << 214 << 215 iov_iter_kvec(&iter, ITER_DEST, &kvec, << 216 << 217 return read_from_oldmem(&iter, count, << 218 cc_platform_has(CC_ATT << 219 } 172 } 220 173 221 /* 174 /* 222 * Architectures may override this function to 175 * Architectures may override this function to map oldmem 223 */ 176 */ 224 int __weak remap_oldmem_pfn_range(struct vm_ar 177 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, 225 unsigned lon 178 unsigned long from, unsigned long pfn, 226 unsigned lon 179 unsigned long size, pgprot_t prot) 227 { 180 { 228 prot = pgprot_encrypted(prot); << 229 return remap_pfn_range(vma, from, pfn, 181 return remap_pfn_range(vma, from, pfn, size, prot); 230 } 182 } 231 183 232 /* 184 /* 233 * Architectures which support memory encrypti 185 * Architectures which support memory encryption override this. 234 */ 186 */ 235 ssize_t __weak copy_oldmem_page_encrypted(stru !! 187 ssize_t __weak 236 unsigned long pfn, size_t csiz !! 188 copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, >> 189 unsigned long offset, int userbuf) >> 190 { >> 191 return copy_oldmem_page(pfn, buf, csize, offset, userbuf); >> 192 } >> 193 >> 194 /* >> 195 * Copy to either kernel or user space >> 196 */ >> 197 static int copy_to(void *target, void *src, size_t size, int userbuf) 237 { 198 { 238 return copy_oldmem_page(iter, pfn, csi !! 199 if (userbuf) { >> 200 if (copy_to_user((char __user *) target, src, size)) >> 201 return -EFAULT; >> 202 } else { >> 203 memcpy(target, src, size); >> 204 } >> 205 return 0; 239 } 206 } 240 207 241 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 208 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 242 static int vmcoredd_copy_dumps(struct iov_iter !! 209 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf) 243 { 210 { 244 struct vmcoredd_node *dump; 211 struct vmcoredd_node *dump; 245 u64 offset = 0; 212 u64 offset = 0; 246 int ret = 0; 213 int ret = 0; 247 size_t tsz; 214 size_t tsz; 248 char *buf; 215 char *buf; 249 216 250 mutex_lock(&vmcoredd_mutex); 217 mutex_lock(&vmcoredd_mutex); 251 list_for_each_entry(dump, &vmcoredd_li 218 list_for_each_entry(dump, &vmcoredd_list, list) { 252 if (start < offset + dump->siz 219 if (start < offset + dump->size) { 253 tsz = min(offset + (u6 220 tsz = min(offset + (u64)dump->size - start, (u64)size); 254 buf = dump->buf + star 221 buf = dump->buf + start - offset; 255 if (copy_to_iter(buf, !! 
222 if (copy_to(dst, buf, tsz, userbuf)) { 256 ret = -EFAULT; 223 ret = -EFAULT; 257 goto out_unloc 224 goto out_unlock; 258 } 225 } 259 226 260 size -= tsz; 227 size -= tsz; 261 start += tsz; 228 start += tsz; >> 229 dst += tsz; 262 230 263 /* Leave now if buffer 231 /* Leave now if buffer filled already */ 264 if (!size) 232 if (!size) 265 goto out_unloc 233 goto out_unlock; 266 } 234 } 267 offset += dump->size; 235 offset += dump->size; 268 } 236 } 269 237 270 out_unlock: 238 out_unlock: 271 mutex_unlock(&vmcoredd_mutex); 239 mutex_unlock(&vmcoredd_mutex); 272 return ret; 240 return ret; 273 } 241 } 274 242 275 #ifdef CONFIG_MMU 243 #ifdef CONFIG_MMU 276 static int vmcoredd_mmap_dumps(struct vm_area_ 244 static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst, 277 u64 start, size 245 u64 start, size_t size) 278 { 246 { 279 struct vmcoredd_node *dump; 247 struct vmcoredd_node *dump; 280 u64 offset = 0; 248 u64 offset = 0; 281 int ret = 0; 249 int ret = 0; 282 size_t tsz; 250 size_t tsz; 283 char *buf; 251 char *buf; 284 252 285 mutex_lock(&vmcoredd_mutex); 253 mutex_lock(&vmcoredd_mutex); 286 list_for_each_entry(dump, &vmcoredd_li 254 list_for_each_entry(dump, &vmcoredd_list, list) { 287 if (start < offset + dump->siz 255 if (start < offset + dump->size) { 288 tsz = min(offset + (u6 256 tsz = min(offset + (u64)dump->size - start, (u64)size); 289 buf = dump->buf + star 257 buf = dump->buf + start - offset; 290 if (remap_vmalloc_rang 258 if (remap_vmalloc_range_partial(vma, dst, buf, 0, 291 259 tsz)) { 292 ret = -EFAULT; 260 ret = -EFAULT; 293 goto out_unloc 261 goto out_unlock; 294 } 262 } 295 263 296 size -= tsz; 264 size -= tsz; 297 start += tsz; 265 start += tsz; 298 dst += tsz; 266 dst += tsz; 299 267 300 /* Leave now if buffer 268 /* Leave now if buffer filled already */ 301 if (!size) 269 if (!size) 302 goto out_unloc 270 goto out_unlock; 303 } 271 } 304 offset += dump->size; 272 offset += dump->size; 305 } 273 } 306 274 307 out_unlock: 275 out_unlock: 308 mutex_unlock(&vmcoredd_mutex); 276 mutex_unlock(&vmcoredd_mutex); 309 return ret; 277 return ret; 310 } 278 } 311 #endif /* CONFIG_MMU */ 279 #endif /* CONFIG_MMU */ 312 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 280 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 313 281 314 /* Read from the ELF header and then the crash 282 /* Read from the ELF header and then the crash dump. On error, negative value is 315 * returned otherwise number of bytes read are 283 * returned otherwise number of bytes read are returned. 316 */ 284 */ 317 static ssize_t __read_vmcore(struct iov_iter * !! 285 static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, >> 286 int userbuf) 318 { 287 { 319 ssize_t acc = 0, tmp; 288 ssize_t acc = 0, tmp; 320 size_t tsz; 289 size_t tsz; 321 u64 start; 290 u64 start; 322 struct vmcore *m = NULL; 291 struct vmcore *m = NULL; 323 292 324 if (!iov_iter_count(iter) || *fpos >= !! 293 if (buflen == 0 || *fpos >= vmcore_size) 325 return 0; 294 return 0; 326 295 327 iov_iter_truncate(iter, vmcore_size - !! 296 /* trim buflen to not go beyond EOF */ >> 297 if (buflen > vmcore_size - *fpos) >> 298 buflen = vmcore_size - *fpos; 328 299 329 /* Read ELF core header */ 300 /* Read ELF core header */ 330 if (*fpos < elfcorebuf_sz) { 301 if (*fpos < elfcorebuf_sz) { 331 tsz = min(elfcorebuf_sz - (siz !! 302 tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); 332 if (copy_to_iter(elfcorebuf + !! 
303 if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf)) 333 return -EFAULT; 304 return -EFAULT; >> 305 buflen -= tsz; 334 *fpos += tsz; 306 *fpos += tsz; >> 307 buffer += tsz; 335 acc += tsz; 308 acc += tsz; 336 309 337 /* leave now if filled buffer 310 /* leave now if filled buffer already */ 338 if (!iov_iter_count(iter)) !! 311 if (buflen == 0) 339 return acc; 312 return acc; 340 } 313 } 341 314 342 /* Read ELF note segment */ !! 315 /* Read Elf note segment */ 343 if (*fpos < elfcorebuf_sz + elfnotes_s 316 if (*fpos < elfcorebuf_sz + elfnotes_sz) { 344 void *kaddr; 317 void *kaddr; 345 318 346 /* We add device dumps before 319 /* We add device dumps before other elf notes because the 347 * other elf notes may not fil 320 * other elf notes may not fill the elf notes buffer 348 * completely and we will end 321 * completely and we will end up with zero-filled data 349 * between the elf notes and t 322 * between the elf notes and the device dumps. Tools will 350 * then try to decode this zer 323 * then try to decode this zero-filled data as valid notes 351 * and we don't want that. Hen 324 * and we don't want that. Hence, adding device dumps before 352 * the other elf notes ensure 325 * the other elf notes ensure that zero-filled data can be 353 * avoided. 326 * avoided. 354 */ 327 */ 355 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 328 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 356 /* Read device dumps */ 329 /* Read device dumps */ 357 if (*fpos < elfcorebuf_sz + vm 330 if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) { 358 tsz = min(elfcorebuf_s 331 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - 359 (size_t)*fpo !! 332 (size_t)*fpos, buflen); 360 start = *fpos - elfcor 333 start = *fpos - elfcorebuf_sz; 361 if (vmcoredd_copy_dump !! 334 if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf)) 362 return -EFAULT 335 return -EFAULT; 363 336 >> 337 buflen -= tsz; 364 *fpos += tsz; 338 *fpos += tsz; >> 339 buffer += tsz; 365 acc += tsz; 340 acc += tsz; 366 341 367 /* leave now if filled 342 /* leave now if filled buffer already */ 368 if (!iov_iter_count(it !! 343 if (!buflen) 369 return acc; 344 return acc; 370 } 345 } 371 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 346 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 372 347 373 /* Read remaining elf notes */ 348 /* Read remaining elf notes */ 374 tsz = min(elfcorebuf_sz + elfn !! 349 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); 375 iov_iter_count(iter) << 376 kaddr = elfnotes_buf + *fpos - 350 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz; 377 if (copy_to_iter(kaddr, tsz, i !! 351 if (copy_to(buffer, kaddr, tsz, userbuf)) 378 return -EFAULT; 352 return -EFAULT; 379 353 >> 354 buflen -= tsz; 380 *fpos += tsz; 355 *fpos += tsz; >> 356 buffer += tsz; 381 acc += tsz; 357 acc += tsz; 382 358 383 /* leave now if filled buffer 359 /* leave now if filled buffer already */ 384 if (!iov_iter_count(iter)) !! 360 if (buflen == 0) 385 return acc; 361 return acc; 386 362 387 cond_resched(); 363 cond_resched(); 388 } 364 } 389 365 390 list_for_each_entry(m, &vmcore_list, l 366 list_for_each_entry(m, &vmcore_list, list) { 391 if (*fpos < m->offset + m->siz 367 if (*fpos < m->offset + m->size) { 392 tsz = (size_t)min_t(un 368 tsz = (size_t)min_t(unsigned long long, 393 m- 369 m->offset + m->size - *fpos, 394 io !! 370 buflen); 395 start = m->paddr + *fp 371 start = m->paddr + *fpos - m->offset; 396 tmp = read_from_oldmem !! 
372 tmp = read_from_oldmem(buffer, tsz, &start, userbuf); 397 cc_pla << 398 if (tmp < 0) 373 if (tmp < 0) 399 return tmp; 374 return tmp; >> 375 buflen -= tsz; 400 *fpos += tsz; 376 *fpos += tsz; >> 377 buffer += tsz; 401 acc += tsz; 378 acc += tsz; 402 379 403 /* leave now if filled 380 /* leave now if filled buffer already */ 404 if (!iov_iter_count(it !! 381 if (buflen == 0) 405 return acc; 382 return acc; 406 } 383 } 407 } 384 } 408 385 409 return acc; 386 return acc; 410 } 387 } 411 388 412 static ssize_t read_vmcore(struct kiocb *iocb, !! 389 static ssize_t read_vmcore(struct file *file, char __user *buffer, >> 390 size_t buflen, loff_t *fpos) 413 { 391 { 414 return __read_vmcore(iter, &iocb->ki_p !! 392 return __read_vmcore((__force char *) buffer, buflen, fpos, 1); 415 } 393 } 416 394 417 /* 395 /* 418 * The vmcore fault handler uses the page cach 396 * The vmcore fault handler uses the page cache and fills data using the 419 * standard __read_vmcore() function. !! 397 * standard __vmcore_read() function. 420 * 398 * 421 * On s390 the fault handler is used for memor 399 * On s390 the fault handler is used for memory regions that can't be mapped 422 * directly with remap_pfn_range(). 400 * directly with remap_pfn_range(). 423 */ 401 */ 424 static vm_fault_t mmap_vmcore_fault(struct vm_ 402 static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf) 425 { 403 { 426 #ifdef CONFIG_S390 404 #ifdef CONFIG_S390 427 struct address_space *mapping = vmf->v 405 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 428 pgoff_t index = vmf->pgoff; 406 pgoff_t index = vmf->pgoff; 429 struct iov_iter iter; << 430 struct kvec kvec; << 431 struct page *page; 407 struct page *page; 432 loff_t offset; 408 loff_t offset; >> 409 char *buf; 433 int rc; 410 int rc; 434 411 435 page = find_or_create_page(mapping, in 412 page = find_or_create_page(mapping, index, GFP_KERNEL); 436 if (!page) 413 if (!page) 437 return VM_FAULT_OOM; 414 return VM_FAULT_OOM; 438 if (!PageUptodate(page)) { 415 if (!PageUptodate(page)) { 439 offset = (loff_t) index << PAG 416 offset = (loff_t) index << PAGE_SHIFT; 440 kvec.iov_base = page_address(p !! 417 buf = __va((page_to_pfn(page) << PAGE_SHIFT)); 441 kvec.iov_len = PAGE_SIZE; !! 418 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); 442 iov_iter_kvec(&iter, ITER_DEST << 443 << 444 rc = __read_vmcore(&iter, &off << 445 if (rc < 0) { 419 if (rc < 0) { 446 unlock_page(page); 420 unlock_page(page); 447 put_page(page); 421 put_page(page); 448 return vmf_error(rc); !! 422 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 449 } 423 } 450 SetPageUptodate(page); 424 SetPageUptodate(page); 451 } 425 } 452 unlock_page(page); 426 unlock_page(page); 453 vmf->page = page; 427 vmf->page = page; 454 return 0; 428 return 0; 455 #else 429 #else 456 return VM_FAULT_SIGBUS; 430 return VM_FAULT_SIGBUS; 457 #endif 431 #endif 458 } 432 } 459 433 >> 434 static const struct vm_operations_struct vmcore_mmap_ops = { >> 435 .fault = mmap_vmcore_fault, >> 436 }; >> 437 460 /** 438 /** 461 * vmcore_alloc_buf - allocate buffer in vmall 439 * vmcore_alloc_buf - allocate buffer in vmalloc memory 462 * @size: size of buffer !! 440 * @sizez: size of buffer 463 * 441 * 464 * If CONFIG_MMU is defined, use vmalloc_user( 442 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap 465 * the buffer to user-space by means of remap_ 443 * the buffer to user-space by means of remap_vmalloc_range(). 
466 * 444 * 467 * If CONFIG_MMU is not defined, use vzalloc() 445 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is 468 * disabled and there's no need to allow users 446 * disabled and there's no need to allow users to mmap the buffer. 469 */ 447 */ 470 static inline char *vmcore_alloc_buf(size_t si 448 static inline char *vmcore_alloc_buf(size_t size) 471 { 449 { 472 #ifdef CONFIG_MMU 450 #ifdef CONFIG_MMU 473 return vmalloc_user(size); 451 return vmalloc_user(size); 474 #else 452 #else 475 return vzalloc(size); 453 return vzalloc(size); 476 #endif 454 #endif 477 } 455 } 478 456 479 /* 457 /* 480 * Disable mmap_vmcore() if CONFIG_MMU is not 458 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is 481 * essential for mmap_vmcore() in order to map 459 * essential for mmap_vmcore() in order to map physically 482 * non-contiguous objects (ELF header, ELF not 460 * non-contiguous objects (ELF header, ELF note segment and memory 483 * regions in the 1st kernel pointed to by PT_ 461 * regions in the 1st kernel pointed to by PT_LOAD entries) into 484 * virtually contiguous user-space in ELF layo 462 * virtually contiguous user-space in ELF layout. 485 */ 463 */ 486 #ifdef CONFIG_MMU 464 #ifdef CONFIG_MMU 487 << 488 static const struct vm_operations_struct vmcor << 489 .fault = mmap_vmcore_fault, << 490 }; << 491 << 492 /* 465 /* 493 * remap_oldmem_pfn_checked - do remap_oldmem_ 466 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages 494 * reported as not being ram with the zero pag 467 * reported as not being ram with the zero page. 495 * 468 * 496 * @vma: vm_area_struct describing requested m 469 * @vma: vm_area_struct describing requested mapping 497 * @from: start remapping from 470 * @from: start remapping from 498 * @pfn: page frame number to start remapping 471 * @pfn: page frame number to start remapping to 499 * @size: remapping size 472 * @size: remapping size 500 * @prot: protection bits 473 * @prot: protection bits 501 * 474 * 502 * Returns zero on success, -EAGAIN on failure 475 * Returns zero on success, -EAGAIN on failure. 503 */ 476 */ 504 static int remap_oldmem_pfn_checked(struct vm_ 477 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma, 505 unsigned l 478 unsigned long from, unsigned long pfn, 506 unsigned l 479 unsigned long size, pgprot_t prot) 507 { 480 { 508 unsigned long map_size; 481 unsigned long map_size; 509 unsigned long pos_start, pos_end, pos; 482 unsigned long pos_start, pos_end, pos; 510 unsigned long zeropage_pfn = my_zero_p 483 unsigned long zeropage_pfn = my_zero_pfn(0); 511 size_t len = 0; 484 size_t len = 0; 512 485 513 pos_start = pfn; 486 pos_start = pfn; 514 pos_end = pfn + (size >> PAGE_SHIFT); 487 pos_end = pfn + (size >> PAGE_SHIFT); 515 488 516 for (pos = pos_start; pos < pos_end; + 489 for (pos = pos_start; pos < pos_end; ++pos) { 517 if (!pfn_is_ram(pos)) { 490 if (!pfn_is_ram(pos)) { 518 /* 491 /* 519 * We hit a page which 492 * We hit a page which is not ram. Remap the continuous 520 * region between pos_ 493 * region between pos_start and pos-1 and replace 521 * the non-ram page at 494 * the non-ram page at pos with the zero page. 
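
The CONFIG_MMU split in vmcore_alloc_buf() matters because remap_vmalloc_range() refuses vmalloc areas that were not allocated with VM_USERMAP set. A minimal sketch of the pairing, assuming a hypothetical mmap handler (the mybuf_* names are invented; only vmalloc_user() and remap_vmalloc_range() are real kernel API):

	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	static char *mybuf;	/* must come from vmalloc_user() */
	static size_t mybuf_sz;

	static int __init mybuf_init(void)
	{
		mybuf_sz = 4 * PAGE_SIZE;
		mybuf = vmalloc_user(mybuf_sz);	/* zeroed, VM_USERMAP set */
		return mybuf ? 0 : -ENOMEM;
	}

	static int mybuf_mmap(struct file *file, struct vm_area_struct *vma)
	{
		size_t size = vma->vm_end - vma->vm_start;

		if (size > mybuf_sz)
			return -EINVAL;
		/*
		 * Works only because vmalloc_user() set VM_USERMAP and
		 * pre-zeroed the pages; vzalloc() memory would be rejected.
		 */
		return remap_vmalloc_range(vma, mybuf, vma->vm_pgoff);
	}

This is exactly why the !CONFIG_MMU branch can fall back to plain vzalloc(): with mmap_vmcore() compiled out, nothing ever tries to map the buffer to user space.
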
522 */ 495 */ 523 if (pos > pos_start) { 496 if (pos > pos_start) { 524 /* Remap conti 497 /* Remap continuous region */ 525 map_size = (po 498 map_size = (pos - pos_start) << PAGE_SHIFT; 526 if (remap_oldm 499 if (remap_oldmem_pfn_range(vma, from + len, 527 500 pos_start, map_size, 528 501 prot)) 529 goto f 502 goto fail; 530 len += map_siz 503 len += map_size; 531 } 504 } 532 /* Remap the zero page 505 /* Remap the zero page */ 533 if (remap_oldmem_pfn_r 506 if (remap_oldmem_pfn_range(vma, from + len, 534 507 zeropage_pfn, 535 508 PAGE_SIZE, prot)) 536 goto fail; 509 goto fail; 537 len += PAGE_SIZE; 510 len += PAGE_SIZE; 538 pos_start = pos + 1; 511 pos_start = pos + 1; 539 } 512 } 540 } 513 } 541 if (pos > pos_start) { 514 if (pos > pos_start) { 542 /* Remap the rest */ 515 /* Remap the rest */ 543 map_size = (pos - pos_start) < 516 map_size = (pos - pos_start) << PAGE_SHIFT; 544 if (remap_oldmem_pfn_range(vma 517 if (remap_oldmem_pfn_range(vma, from + len, pos_start, 545 map 518 map_size, prot)) 546 goto fail; 519 goto fail; 547 } 520 } 548 return 0; 521 return 0; 549 fail: 522 fail: 550 do_munmap(vma->vm_mm, from, len, NULL) 523 do_munmap(vma->vm_mm, from, len, NULL); 551 return -EAGAIN; 524 return -EAGAIN; 552 } 525 } 553 526 554 static int vmcore_remap_oldmem_pfn(struct vm_a 527 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma, 555 unsigned long from 528 unsigned long from, unsigned long pfn, 556 unsigned long size 529 unsigned long size, pgprot_t prot) 557 { 530 { 558 int ret, idx; << 559 << 560 /* 531 /* 561 * Check if a callback was registered !! 532 * Check if oldmem_pfn_is_ram was registered to avoid 562 * pages without a reason. !! 533 * looping over all pages without a reason. 563 */ 534 */ 564 idx = srcu_read_lock(&vmcore_cb_srcu); !! 535 if (oldmem_pfn_is_ram) 565 if (!list_empty(&vmcore_cb_list)) !! 536 return remap_oldmem_pfn_checked(vma, from, pfn, size, prot); 566 ret = remap_oldmem_pfn_checked << 567 else 537 else 568 ret = remap_oldmem_pfn_range(v !! 538 return remap_oldmem_pfn_range(vma, from, pfn, size, prot); 569 srcu_read_unlock(&vmcore_cb_srcu, idx) << 570 return ret; << 571 } 539 } 572 540 573 static int mmap_vmcore(struct file *file, stru 541 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) 574 { 542 { 575 size_t size = vma->vm_end - vma->vm_st 543 size_t size = vma->vm_end - vma->vm_start; 576 u64 start, end, len, tsz; 544 u64 start, end, len, tsz; 577 struct vmcore *m; 545 struct vmcore *m; 578 546 579 start = (u64)vma->vm_pgoff << PAGE_SHI 547 start = (u64)vma->vm_pgoff << PAGE_SHIFT; 580 end = start + size; 548 end = start + size; 581 549 582 if (size > vmcore_size || end > vmcore 550 if (size > vmcore_size || end > vmcore_size) 583 return -EINVAL; 551 return -EINVAL; 584 552 585 if (vma->vm_flags & (VM_WRITE | VM_EXE 553 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 586 return -EPERM; 554 return -EPERM; 587 555 588 vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYW !! 
556 vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); >> 557 vma->vm_flags |= VM_MIXEDMAP; 589 vma->vm_ops = &vmcore_mmap_ops; 558 vma->vm_ops = &vmcore_mmap_ops; 590 559 591 len = 0; 560 len = 0; 592 561 593 if (start < elfcorebuf_sz) { 562 if (start < elfcorebuf_sz) { 594 u64 pfn; 563 u64 pfn; 595 564 596 tsz = min(elfcorebuf_sz - (siz 565 tsz = min(elfcorebuf_sz - (size_t)start, size); 597 pfn = __pa(elfcorebuf + start) 566 pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT; 598 if (remap_pfn_range(vma, vma-> 567 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, 599 vma->vm_pa 568 vma->vm_page_prot)) 600 return -EAGAIN; 569 return -EAGAIN; 601 size -= tsz; 570 size -= tsz; 602 start += tsz; 571 start += tsz; 603 len += tsz; 572 len += tsz; 604 573 605 if (size == 0) 574 if (size == 0) 606 return 0; 575 return 0; 607 } 576 } 608 577 609 if (start < elfcorebuf_sz + elfnotes_s 578 if (start < elfcorebuf_sz + elfnotes_sz) { 610 void *kaddr; 579 void *kaddr; 611 580 612 /* We add device dumps before 581 /* We add device dumps before other elf notes because the 613 * other elf notes may not fil 582 * other elf notes may not fill the elf notes buffer 614 * completely and we will end 583 * completely and we will end up with zero-filled data 615 * between the elf notes and t 584 * between the elf notes and the device dumps. Tools will 616 * then try to decode this zer 585 * then try to decode this zero-filled data as valid notes 617 * and we don't want that. Hen 586 * and we don't want that. Hence, adding device dumps before 618 * the other elf notes ensure 587 * the other elf notes ensure that zero-filled data can be 619 * avoided. This also ensures 588 * avoided. This also ensures that the device dumps and 620 * other elf notes can be prop 589 * other elf notes can be properly mmaped at page aligned 621 * address. 590 * address. 
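
The ordering rationale spelled out in the comment above fixes the overall layout of the /proc/vmcore file. Sketched, with the page-aligned sizes as computed elsewhere in this file (offsets are assigned later by set_vmcore_list_offsets()):

	+--------------------------------+  offset 0
	| ELF header + program headers   |  elfcorebuf_sz (page aligned)
	+--------------------------------+
	| device dump notes              |  vmcoredd_orig_sz
	| remaining (merged) ELF notes   |  elfnotes_sz total (page aligned)
	+--------------------------------+
	| PT_LOAD memory chunks          |  one vmcore_list entry per chunk
	+--------------------------------+

Because the device dumps sit at the front of the notes buffer, any zero padding up to the page-aligned elfnotes_sz lands after the last real note rather than between two notes, so tools never parse the padding as a (bogus) note header.
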
622 */ 591 */ 623 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 592 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 624 /* Read device dumps */ 593 /* Read device dumps */ 625 if (start < elfcorebuf_sz + vm 594 if (start < elfcorebuf_sz + vmcoredd_orig_sz) { 626 u64 start_off; 595 u64 start_off; 627 596 628 tsz = min(elfcorebuf_s 597 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - 629 (size_t)star 598 (size_t)start, size); 630 start_off = start - el 599 start_off = start - elfcorebuf_sz; 631 if (vmcoredd_mmap_dump 600 if (vmcoredd_mmap_dumps(vma, vma->vm_start + len, 632 601 start_off, tsz)) 633 goto fail; 602 goto fail; 634 603 635 size -= tsz; 604 size -= tsz; 636 start += tsz; 605 start += tsz; 637 len += tsz; 606 len += tsz; 638 607 639 /* leave now if filled 608 /* leave now if filled buffer already */ 640 if (!size) 609 if (!size) 641 return 0; 610 return 0; 642 } 611 } 643 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 612 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 644 613 645 /* Read remaining elf notes */ 614 /* Read remaining elf notes */ 646 tsz = min(elfcorebuf_sz + elfn 615 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); 647 kaddr = elfnotes_buf + start - 616 kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; 648 if (remap_vmalloc_range_partia 617 if (remap_vmalloc_range_partial(vma, vma->vm_start + len, 649 618 kaddr, 0, tsz)) 650 goto fail; 619 goto fail; 651 620 652 size -= tsz; 621 size -= tsz; 653 start += tsz; 622 start += tsz; 654 len += tsz; 623 len += tsz; 655 624 656 if (size == 0) 625 if (size == 0) 657 return 0; 626 return 0; 658 } 627 } 659 628 660 list_for_each_entry(m, &vmcore_list, l 629 list_for_each_entry(m, &vmcore_list, list) { 661 if (start < m->offset + m->siz 630 if (start < m->offset + m->size) { 662 u64 paddr = 0; 631 u64 paddr = 0; 663 632 664 tsz = (size_t)min_t(un 633 tsz = (size_t)min_t(unsigned long long, 665 m- 634 m->offset + m->size - start, size); 666 paddr = m->paddr + sta 635 paddr = m->paddr + start - m->offset; 667 if (vmcore_remap_oldme 636 if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len, 668 637 paddr >> PAGE_SHIFT, tsz, 669 638 vma->vm_page_prot)) 670 goto fail; 639 goto fail; 671 size -= tsz; 640 size -= tsz; 672 start += tsz; 641 start += tsz; 673 len += tsz; 642 len += tsz; 674 643 675 if (size == 0) 644 if (size == 0) 676 return 0; 645 return 0; 677 } 646 } 678 } 647 } 679 648 680 return 0; 649 return 0; 681 fail: 650 fail: 682 do_munmap(vma->vm_mm, vma->vm_start, l 651 do_munmap(vma->vm_mm, vma->vm_start, len, NULL); 683 return -EAGAIN; 652 return -EAGAIN; 684 } 653 } 685 #else 654 #else 686 static int mmap_vmcore(struct file *file, stru 655 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) 687 { 656 { 688 return -ENOSYS; 657 return -ENOSYS; 689 } 658 } 690 #endif 659 #endif 691 660 692 static const struct proc_ops vmcore_proc_ops = !! 661 static const struct file_operations proc_vmcore_operations = { 693 .proc_open = open_vmcore, !! 662 .read = read_vmcore, 694 .proc_read_iter = read_vmcore, !! 663 .llseek = default_llseek, 695 .proc_lseek = default_llseek, !! 
664 .mmap = mmap_vmcore, 696 .proc_mmap = mmap_vmcore, << 697 }; 665 }; 698 666 699 static struct vmcore* __init get_new_element(v 667 static struct vmcore* __init get_new_element(void) 700 { 668 { 701 return kzalloc(sizeof(struct vmcore), 669 return kzalloc(sizeof(struct vmcore), GFP_KERNEL); 702 } 670 } 703 671 704 static u64 get_vmcore_size(size_t elfsz, size_ 672 static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz, 705 struct list_head *v 673 struct list_head *vc_list) 706 { 674 { 707 u64 size; 675 u64 size; 708 struct vmcore *m; 676 struct vmcore *m; 709 677 710 size = elfsz + elfnotesegsz; 678 size = elfsz + elfnotesegsz; 711 list_for_each_entry(m, vc_list, list) 679 list_for_each_entry(m, vc_list, list) { 712 size += m->size; 680 size += m->size; 713 } 681 } 714 return size; 682 return size; 715 } 683 } 716 684 717 /** 685 /** 718 * update_note_header_size_elf64 - update p_me 686 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry 719 * 687 * 720 * @ehdr_ptr: ELF header 688 * @ehdr_ptr: ELF header 721 * 689 * 722 * This function updates p_memsz member of eac 690 * This function updates p_memsz member of each PT_NOTE entry in the 723 * program header table pointed to by @ehdr_pt 691 * program header table pointed to by @ehdr_ptr to real size of ELF 724 * note segment. 692 * note segment. 725 */ 693 */ 726 static int __init update_note_header_size_elf6 694 static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr) 727 { 695 { 728 int i, rc=0; 696 int i, rc=0; 729 Elf64_Phdr *phdr_ptr; 697 Elf64_Phdr *phdr_ptr; 730 Elf64_Nhdr *nhdr_ptr; 698 Elf64_Nhdr *nhdr_ptr; 731 699 732 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1 700 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); 733 for (i = 0; i < ehdr_ptr->e_phnum; i++ 701 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 734 void *notes_section; 702 void *notes_section; 735 u64 offset, max_sz, sz, real_s 703 u64 offset, max_sz, sz, real_sz = 0; 736 if (phdr_ptr->p_type != PT_NOT 704 if (phdr_ptr->p_type != PT_NOTE) 737 continue; 705 continue; 738 max_sz = phdr_ptr->p_memsz; 706 max_sz = phdr_ptr->p_memsz; 739 offset = phdr_ptr->p_offset; 707 offset = phdr_ptr->p_offset; 740 notes_section = kmalloc(max_sz 708 notes_section = kmalloc(max_sz, GFP_KERNEL); 741 if (!notes_section) 709 if (!notes_section) 742 return -ENOMEM; 710 return -ENOMEM; 743 rc = elfcorehdr_read_notes(not 711 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset); 744 if (rc < 0) { 712 if (rc < 0) { 745 kfree(notes_section); 713 kfree(notes_section); 746 return rc; 714 return rc; 747 } 715 } 748 nhdr_ptr = notes_section; 716 nhdr_ptr = notes_section; 749 while (nhdr_ptr->n_namesz != 0 717 while (nhdr_ptr->n_namesz != 0) { 750 sz = sizeof(Elf64_Nhdr 718 sz = sizeof(Elf64_Nhdr) + 751 (((u64)nhdr_pt 719 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + 752 (((u64)nhdr_pt 720 (((u64)nhdr_ptr->n_descsz + 3) & ~3); 753 if ((real_sz + sz) > m 721 if ((real_sz + sz) > max_sz) { 754 pr_warn("Warni 722 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n", 755 nhdr_p 723 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); 756 break; 724 break; 757 } 725 } 758 real_sz += sz; 726 real_sz += sz; 759 nhdr_ptr = (Elf64_Nhdr 727 nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz); 760 } 728 } 761 kfree(notes_section); 729 kfree(notes_section); 762 phdr_ptr->p_memsz = real_sz; 730 phdr_ptr->p_memsz = real_sz; 763 if (real_sz == 0) { 731 if (real_sz == 0) { 764 pr_warn("Warning: Zero 732 pr_warn("Warning: Zero PT_NOTE entries found\n"); 
765 } 733 } 766 } 734 } 767 735 768 return 0; 736 return 0; 769 } 737 } 770 738 771 /** 739 /** 772 * get_note_number_and_size_elf64 - get the nu 740 * get_note_number_and_size_elf64 - get the number of PT_NOTE program 773 * headers and sum of real size of their ELF n 741 * headers and sum of real size of their ELF note segment headers and 774 * data. 742 * data. 775 * 743 * 776 * @ehdr_ptr: ELF header 744 * @ehdr_ptr: ELF header 777 * @nr_ptnote: buffer for the number of PT_NOT 745 * @nr_ptnote: buffer for the number of PT_NOTE program headers 778 * @sz_ptnote: buffer for size of unique PT_NO 746 * @sz_ptnote: buffer for size of unique PT_NOTE program header 779 * 747 * 780 * This function is used to merge multiple PT_ 748 * This function is used to merge multiple PT_NOTE program headers 781 * into a unique single one. The resulting uni 749 * into a unique single one. The resulting unique entry will have 782 * @sz_ptnote in its phdr->p_mem. 750 * @sz_ptnote in its phdr->p_mem. 783 * 751 * 784 * It is assumed that program headers with PT_ 752 * It is assumed that program headers with PT_NOTE type pointed to by 785 * @ehdr_ptr has already been updated by updat 753 * @ehdr_ptr has already been updated by update_note_header_size_elf64 786 * and each of PT_NOTE program headers has act 754 * and each of PT_NOTE program headers has actual ELF note segment 787 * size in its p_memsz member. 755 * size in its p_memsz member. 788 */ 756 */ 789 static int __init get_note_number_and_size_elf 757 static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr, 790 758 int *nr_ptnote, u64 *sz_ptnote) 791 { 759 { 792 int i; 760 int i; 793 Elf64_Phdr *phdr_ptr; 761 Elf64_Phdr *phdr_ptr; 794 762 795 *nr_ptnote = *sz_ptnote = 0; 763 *nr_ptnote = *sz_ptnote = 0; 796 764 797 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1 765 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1); 798 for (i = 0; i < ehdr_ptr->e_phnum; i++ 766 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 799 if (phdr_ptr->p_type != PT_NOT 767 if (phdr_ptr->p_type != PT_NOTE) 800 continue; 768 continue; 801 *nr_ptnote += 1; 769 *nr_ptnote += 1; 802 *sz_ptnote += phdr_ptr->p_mems 770 *sz_ptnote += phdr_ptr->p_memsz; 803 } 771 } 804 772 805 return 0; 773 return 0; 806 } 774 } 807 775 808 /** 776 /** 809 * copy_notes_elf64 - copy ELF note segments i 777 * copy_notes_elf64 - copy ELF note segments in a given buffer 810 * 778 * 811 * @ehdr_ptr: ELF header 779 * @ehdr_ptr: ELF header 812 * @notes_buf: buffer into which ELF note segm 780 * @notes_buf: buffer into which ELF note segments are copied 813 * 781 * 814 * This function is used to copy ELF note segm 782 * This function is used to copy ELF note segment in the 1st kernel 815 * into the buffer @notes_buf in the 2nd kerne 783 * into the buffer @notes_buf in the 2nd kernel. It is assumed that 816 * size of the buffer @notes_buf is equal to o 784 * size of the buffer @notes_buf is equal to or larger than sum of the 817 * real ELF note segment headers and data. 785 * real ELF note segment headers and data. 818 * 786 * 819 * It is assumed that program headers with PT_ 787 * It is assumed that program headers with PT_NOTE type pointed to by 820 * @ehdr_ptr has already been updated by updat 788 * @ehdr_ptr has already been updated by update_note_header_size_elf64 821 * and each of PT_NOTE program headers has act 789 * and each of PT_NOTE program headers has actual ELF note segment 822 * size in its p_memsz member. 790 * size in its p_memsz member. 
823 */ 791 */ 824 static int __init copy_notes_elf64(const Elf64 792 static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf) 825 { 793 { 826 int i, rc=0; 794 int i, rc=0; 827 Elf64_Phdr *phdr_ptr; 795 Elf64_Phdr *phdr_ptr; 828 796 829 phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1) 797 phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1); 830 798 831 for (i = 0; i < ehdr_ptr->e_phnum; i++ 799 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 832 u64 offset; 800 u64 offset; 833 if (phdr_ptr->p_type != PT_NOT 801 if (phdr_ptr->p_type != PT_NOTE) 834 continue; 802 continue; 835 offset = phdr_ptr->p_offset; 803 offset = phdr_ptr->p_offset; 836 rc = elfcorehdr_read_notes(not 804 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, 837 &of 805 &offset); 838 if (rc < 0) 806 if (rc < 0) 839 return rc; 807 return rc; 840 notes_buf += phdr_ptr->p_memsz 808 notes_buf += phdr_ptr->p_memsz; 841 } 809 } 842 810 843 return 0; 811 return 0; 844 } 812 } 845 813 846 /* Merges all the PT_NOTE headers into one. */ 814 /* Merges all the PT_NOTE headers into one. */ 847 static int __init merge_note_headers_elf64(cha 815 static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz, 848 cha 816 char **notes_buf, size_t *notes_sz) 849 { 817 { 850 int i, nr_ptnote=0, rc=0; 818 int i, nr_ptnote=0, rc=0; 851 char *tmp; 819 char *tmp; 852 Elf64_Ehdr *ehdr_ptr; 820 Elf64_Ehdr *ehdr_ptr; 853 Elf64_Phdr phdr; 821 Elf64_Phdr phdr; 854 u64 phdr_sz = 0, note_off; 822 u64 phdr_sz = 0, note_off; 855 823 856 ehdr_ptr = (Elf64_Ehdr *)elfptr; 824 ehdr_ptr = (Elf64_Ehdr *)elfptr; 857 825 858 rc = update_note_header_size_elf64(ehd 826 rc = update_note_header_size_elf64(ehdr_ptr); 859 if (rc < 0) 827 if (rc < 0) 860 return rc; 828 return rc; 861 829 862 rc = get_note_number_and_size_elf64(eh 830 rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz); 863 if (rc < 0) 831 if (rc < 0) 864 return rc; 832 return rc; 865 833 866 *notes_sz = roundup(phdr_sz, PAGE_SIZE 834 *notes_sz = roundup(phdr_sz, PAGE_SIZE); 867 *notes_buf = vmcore_alloc_buf(*notes_s 835 *notes_buf = vmcore_alloc_buf(*notes_sz); 868 if (!*notes_buf) 836 if (!*notes_buf) 869 return -ENOMEM; 837 return -ENOMEM; 870 838 871 rc = copy_notes_elf64(ehdr_ptr, *notes 839 rc = copy_notes_elf64(ehdr_ptr, *notes_buf); 872 if (rc < 0) 840 if (rc < 0) 873 return rc; 841 return rc; 874 842 875 /* Prepare merged PT_NOTE program head 843 /* Prepare merged PT_NOTE program header. */ 876 phdr.p_type = PT_NOTE; 844 phdr.p_type = PT_NOTE; 877 phdr.p_flags = 0; 845 phdr.p_flags = 0; 878 note_off = sizeof(Elf64_Ehdr) + 846 note_off = sizeof(Elf64_Ehdr) + 879 (ehdr_ptr->e_phnum - n 847 (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr); 880 phdr.p_offset = roundup(note_off, PAG 848 phdr.p_offset = roundup(note_off, PAGE_SIZE); 881 phdr.p_vaddr = phdr.p_paddr = 0; 849 phdr.p_vaddr = phdr.p_paddr = 0; 882 phdr.p_filesz = phdr.p_memsz = phdr_s 850 phdr.p_filesz = phdr.p_memsz = phdr_sz; 883 phdr.p_align = 4; !! 851 phdr.p_align = 0; 884 852 885 /* Add merged PT_NOTE program header*/ 853 /* Add merged PT_NOTE program header*/ 886 tmp = elfptr + sizeof(Elf64_Ehdr); 854 tmp = elfptr + sizeof(Elf64_Ehdr); 887 memcpy(tmp, &phdr, sizeof(phdr)); 855 memcpy(tmp, &phdr, sizeof(phdr)); 888 tmp += sizeof(phdr); 856 tmp += sizeof(phdr); 889 857 890 /* Remove unwanted PT_NOTE program hea 858 /* Remove unwanted PT_NOTE program headers. 
*/ 891 i = (nr_ptnote - 1) * sizeof(Elf64_Phd 859 i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); 892 *elfsz = *elfsz - i; 860 *elfsz = *elfsz - i; 893 memmove(tmp, tmp+i, ((*elfsz)-sizeof(E 861 memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr))); 894 memset(elfptr + *elfsz, 0, i); 862 memset(elfptr + *elfsz, 0, i); 895 *elfsz = roundup(*elfsz, PAGE_SIZE); 863 *elfsz = roundup(*elfsz, PAGE_SIZE); 896 864 897 /* Modify e_phnum to reflect merged he 865 /* Modify e_phnum to reflect merged headers. */ 898 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum 866 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; 899 867 900 /* Store the size of all notes. We ne 868 /* Store the size of all notes. We need this to update the note 901 * header when the device dumps will b 869 * header when the device dumps will be added. 902 */ 870 */ 903 elfnotes_orig_sz = phdr.p_memsz; 871 elfnotes_orig_sz = phdr.p_memsz; 904 872 905 return 0; 873 return 0; 906 } 874 } 907 875 908 /** 876 /** 909 * update_note_header_size_elf32 - update p_me 877 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry 910 * 878 * 911 * @ehdr_ptr: ELF header 879 * @ehdr_ptr: ELF header 912 * 880 * 913 * This function updates p_memsz member of eac 881 * This function updates p_memsz member of each PT_NOTE entry in the 914 * program header table pointed to by @ehdr_pt 882 * program header table pointed to by @ehdr_ptr to real size of ELF 915 * note segment. 883 * note segment. 916 */ 884 */ 917 static int __init update_note_header_size_elf3 885 static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr) 918 { 886 { 919 int i, rc=0; 887 int i, rc=0; 920 Elf32_Phdr *phdr_ptr; 888 Elf32_Phdr *phdr_ptr; 921 Elf32_Nhdr *nhdr_ptr; 889 Elf32_Nhdr *nhdr_ptr; 922 890 923 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1 891 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); 924 for (i = 0; i < ehdr_ptr->e_phnum; i++ 892 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 925 void *notes_section; 893 void *notes_section; 926 u64 offset, max_sz, sz, real_s 894 u64 offset, max_sz, sz, real_sz = 0; 927 if (phdr_ptr->p_type != PT_NOT 895 if (phdr_ptr->p_type != PT_NOTE) 928 continue; 896 continue; 929 max_sz = phdr_ptr->p_memsz; 897 max_sz = phdr_ptr->p_memsz; 930 offset = phdr_ptr->p_offset; 898 offset = phdr_ptr->p_offset; 931 notes_section = kmalloc(max_sz 899 notes_section = kmalloc(max_sz, GFP_KERNEL); 932 if (!notes_section) 900 if (!notes_section) 933 return -ENOMEM; 901 return -ENOMEM; 934 rc = elfcorehdr_read_notes(not 902 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset); 935 if (rc < 0) { 903 if (rc < 0) { 936 kfree(notes_section); 904 kfree(notes_section); 937 return rc; 905 return rc; 938 } 906 } 939 nhdr_ptr = notes_section; 907 nhdr_ptr = notes_section; 940 while (nhdr_ptr->n_namesz != 0 908 while (nhdr_ptr->n_namesz != 0) { 941 sz = sizeof(Elf32_Nhdr 909 sz = sizeof(Elf32_Nhdr) + 942 (((u64)nhdr_pt 910 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + 943 (((u64)nhdr_pt 911 (((u64)nhdr_ptr->n_descsz + 3) & ~3); 944 if ((real_sz + sz) > m 912 if ((real_sz + sz) > max_sz) { 945 pr_warn("Warni 913 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n", 946 nhdr_p 914 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); 947 break; 915 break; 948 } 916 } 949 real_sz += sz; 917 real_sz += sz; 950 nhdr_ptr = (Elf32_Nhdr 918 nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz); 951 } 919 } 952 kfree(notes_section); 920 kfree(notes_section); 953 phdr_ptr->p_memsz = real_sz; 921 phdr_ptr->p_memsz = 
real_sz; 954 if (real_sz == 0) { 922 if (real_sz == 0) { 955 pr_warn("Warning: Zero 923 pr_warn("Warning: Zero PT_NOTE entries found\n"); 956 } 924 } 957 } 925 } 958 926 959 return 0; 927 return 0; 960 } 928 } 961 929 962 /** 930 /** 963 * get_note_number_and_size_elf32 - get the nu 931 * get_note_number_and_size_elf32 - get the number of PT_NOTE program 964 * headers and sum of real size of their ELF n 932 * headers and sum of real size of their ELF note segment headers and 965 * data. 933 * data. 966 * 934 * 967 * @ehdr_ptr: ELF header 935 * @ehdr_ptr: ELF header 968 * @nr_ptnote: buffer for the number of PT_NOT 936 * @nr_ptnote: buffer for the number of PT_NOTE program headers 969 * @sz_ptnote: buffer for size of unique PT_NO 937 * @sz_ptnote: buffer for size of unique PT_NOTE program header 970 * 938 * 971 * This function is used to merge multiple PT_ 939 * This function is used to merge multiple PT_NOTE program headers 972 * into a unique single one. The resulting uni 940 * into a unique single one. The resulting unique entry will have 973 * @sz_ptnote in its phdr->p_mem. 941 * @sz_ptnote in its phdr->p_mem. 974 * 942 * 975 * It is assumed that program headers with PT_ 943 * It is assumed that program headers with PT_NOTE type pointed to by 976 * @ehdr_ptr has already been updated by updat 944 * @ehdr_ptr has already been updated by update_note_header_size_elf32 977 * and each of PT_NOTE program headers has act 945 * and each of PT_NOTE program headers has actual ELF note segment 978 * size in its p_memsz member. 946 * size in its p_memsz member. 979 */ 947 */ 980 static int __init get_note_number_and_size_elf 948 static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr, 981 949 int *nr_ptnote, u64 *sz_ptnote) 982 { 950 { 983 int i; 951 int i; 984 Elf32_Phdr *phdr_ptr; 952 Elf32_Phdr *phdr_ptr; 985 953 986 *nr_ptnote = *sz_ptnote = 0; 954 *nr_ptnote = *sz_ptnote = 0; 987 955 988 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1 956 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1); 989 for (i = 0; i < ehdr_ptr->e_phnum; i++ 957 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 990 if (phdr_ptr->p_type != PT_NOT 958 if (phdr_ptr->p_type != PT_NOTE) 991 continue; 959 continue; 992 *nr_ptnote += 1; 960 *nr_ptnote += 1; 993 *sz_ptnote += phdr_ptr->p_mems 961 *sz_ptnote += phdr_ptr->p_memsz; 994 } 962 } 995 963 996 return 0; 964 return 0; 997 } 965 } 998 966 999 /** 967 /** 1000 * copy_notes_elf32 - copy ELF note segments 968 * copy_notes_elf32 - copy ELF note segments in a given buffer 1001 * 969 * 1002 * @ehdr_ptr: ELF header 970 * @ehdr_ptr: ELF header 1003 * @notes_buf: buffer into which ELF note seg 971 * @notes_buf: buffer into which ELF note segments are copied 1004 * 972 * 1005 * This function is used to copy ELF note seg 973 * This function is used to copy ELF note segment in the 1st kernel 1006 * into the buffer @notes_buf in the 2nd kern 974 * into the buffer @notes_buf in the 2nd kernel. It is assumed that 1007 * size of the buffer @notes_buf is equal to 975 * size of the buffer @notes_buf is equal to or larger than sum of the 1008 * real ELF note segment headers and data. 976 * real ELF note segment headers and data. 
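
The 4-byte rounding in the walks above is what makes the recomputed p_memsz match the on-disk note layout: each entry is the fixed note header plus its name and descriptor, each padded to a 4-byte boundary. A small helper mirroring the arithmetic (hypothetical, for illustration only):

	/* Size one ELF note entry occupies, per the walk above. */
	static u64 note_entry_size(const Elf64_Nhdr *n)
	{
		return sizeof(Elf64_Nhdr) +
		       (((u64)n->n_namesz + 3) & ~3) +
		       (((u64)n->n_descsz + 3) & ~3);
	}

For example, an NT_PRSTATUS note named "CORE" has n_namesz = 5 (the name includes its NUL terminator), which rounds up to 8, so the entry occupies 12 + 8 + roundup(n_descsz, 4) bytes.
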
1009 * 977 * 1010 * It is assumed that program headers with PT 978 * It is assumed that program headers with PT_NOTE type pointed to by 1011 * @ehdr_ptr has already been updated by upda 979 * @ehdr_ptr has already been updated by update_note_header_size_elf32 1012 * and each of PT_NOTE program headers has ac 980 * and each of PT_NOTE program headers has actual ELF note segment 1013 * size in its p_memsz member. 981 * size in its p_memsz member. 1014 */ 982 */ 1015 static int __init copy_notes_elf32(const Elf3 983 static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf) 1016 { 984 { 1017 int i, rc=0; 985 int i, rc=0; 1018 Elf32_Phdr *phdr_ptr; 986 Elf32_Phdr *phdr_ptr; 1019 987 1020 phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1 988 phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1); 1021 989 1022 for (i = 0; i < ehdr_ptr->e_phnum; i+ 990 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 1023 u64 offset; 991 u64 offset; 1024 if (phdr_ptr->p_type != PT_NO 992 if (phdr_ptr->p_type != PT_NOTE) 1025 continue; 993 continue; 1026 offset = phdr_ptr->p_offset; 994 offset = phdr_ptr->p_offset; 1027 rc = elfcorehdr_read_notes(no 995 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, 1028 &o 996 &offset); 1029 if (rc < 0) 997 if (rc < 0) 1030 return rc; 998 return rc; 1031 notes_buf += phdr_ptr->p_mems 999 notes_buf += phdr_ptr->p_memsz; 1032 } 1000 } 1033 1001 1034 return 0; 1002 return 0; 1035 } 1003 } 1036 1004 1037 /* Merges all the PT_NOTE headers into one. * 1005 /* Merges all the PT_NOTE headers into one. */ 1038 static int __init merge_note_headers_elf32(ch 1006 static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz, 1039 ch 1007 char **notes_buf, size_t *notes_sz) 1040 { 1008 { 1041 int i, nr_ptnote=0, rc=0; 1009 int i, nr_ptnote=0, rc=0; 1042 char *tmp; 1010 char *tmp; 1043 Elf32_Ehdr *ehdr_ptr; 1011 Elf32_Ehdr *ehdr_ptr; 1044 Elf32_Phdr phdr; 1012 Elf32_Phdr phdr; 1045 u64 phdr_sz = 0, note_off; 1013 u64 phdr_sz = 0, note_off; 1046 1014 1047 ehdr_ptr = (Elf32_Ehdr *)elfptr; 1015 ehdr_ptr = (Elf32_Ehdr *)elfptr; 1048 1016 1049 rc = update_note_header_size_elf32(eh 1017 rc = update_note_header_size_elf32(ehdr_ptr); 1050 if (rc < 0) 1018 if (rc < 0) 1051 return rc; 1019 return rc; 1052 1020 1053 rc = get_note_number_and_size_elf32(e 1021 rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz); 1054 if (rc < 0) 1022 if (rc < 0) 1055 return rc; 1023 return rc; 1056 1024 1057 *notes_sz = roundup(phdr_sz, PAGE_SIZ 1025 *notes_sz = roundup(phdr_sz, PAGE_SIZE); 1058 *notes_buf = vmcore_alloc_buf(*notes_ 1026 *notes_buf = vmcore_alloc_buf(*notes_sz); 1059 if (!*notes_buf) 1027 if (!*notes_buf) 1060 return -ENOMEM; 1028 return -ENOMEM; 1061 1029 1062 rc = copy_notes_elf32(ehdr_ptr, *note 1030 rc = copy_notes_elf32(ehdr_ptr, *notes_buf); 1063 if (rc < 0) 1031 if (rc < 0) 1064 return rc; 1032 return rc; 1065 1033 1066 /* Prepare merged PT_NOTE program hea 1034 /* Prepare merged PT_NOTE program header. */ 1067 phdr.p_type = PT_NOTE; 1035 phdr.p_type = PT_NOTE; 1068 phdr.p_flags = 0; 1036 phdr.p_flags = 0; 1069 note_off = sizeof(Elf32_Ehdr) + 1037 note_off = sizeof(Elf32_Ehdr) + 1070 (ehdr_ptr->e_phnum - 1038 (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr); 1071 phdr.p_offset = roundup(note_off, PA 1039 phdr.p_offset = roundup(note_off, PAGE_SIZE); 1072 phdr.p_vaddr = phdr.p_paddr = 0; 1040 phdr.p_vaddr = phdr.p_paddr = 0; 1073 phdr.p_filesz = phdr.p_memsz = phdr_ 1041 phdr.p_filesz = phdr.p_memsz = phdr_sz; 1074 phdr.p_align = 4; !! 
1042 phdr.p_align = 0; 1075 1043 1076 /* Add merged PT_NOTE program header* 1044 /* Add merged PT_NOTE program header*/ 1077 tmp = elfptr + sizeof(Elf32_Ehdr); 1045 tmp = elfptr + sizeof(Elf32_Ehdr); 1078 memcpy(tmp, &phdr, sizeof(phdr)); 1046 memcpy(tmp, &phdr, sizeof(phdr)); 1079 tmp += sizeof(phdr); 1047 tmp += sizeof(phdr); 1080 1048 1081 /* Remove unwanted PT_NOTE program he 1049 /* Remove unwanted PT_NOTE program headers. */ 1082 i = (nr_ptnote - 1) * sizeof(Elf32_Ph 1050 i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); 1083 *elfsz = *elfsz - i; 1051 *elfsz = *elfsz - i; 1084 memmove(tmp, tmp+i, ((*elfsz)-sizeof( 1052 memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr))); 1085 memset(elfptr + *elfsz, 0, i); 1053 memset(elfptr + *elfsz, 0, i); 1086 *elfsz = roundup(*elfsz, PAGE_SIZE); 1054 *elfsz = roundup(*elfsz, PAGE_SIZE); 1087 1055 1088 /* Modify e_phnum to reflect merged h 1056 /* Modify e_phnum to reflect merged headers. */ 1089 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum 1057 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; 1090 1058 1091 /* Store the size of all notes. We n 1059 /* Store the size of all notes. We need this to update the note 1092 * header when the device dumps will 1060 * header when the device dumps will be added. 1093 */ 1061 */ 1094 elfnotes_orig_sz = phdr.p_memsz; 1062 elfnotes_orig_sz = phdr.p_memsz; 1095 1063 1096 return 0; 1064 return 0; 1097 } 1065 } 1098 1066 1099 /* Add memory chunks represented by program h 1067 /* Add memory chunks represented by program headers to vmcore list. Also update 1100 * the new offset fields of exported program 1068 * the new offset fields of exported program headers. */ 1101 static int __init process_ptload_program_head 1069 static int __init process_ptload_program_headers_elf64(char *elfptr, 1102 1070 size_t elfsz, 1103 1071 size_t elfnotes_sz, 1104 1072 struct list_head *vc_list) 1105 { 1073 { 1106 int i; 1074 int i; 1107 Elf64_Ehdr *ehdr_ptr; 1075 Elf64_Ehdr *ehdr_ptr; 1108 Elf64_Phdr *phdr_ptr; 1076 Elf64_Phdr *phdr_ptr; 1109 loff_t vmcore_off; 1077 loff_t vmcore_off; 1110 struct vmcore *new; 1078 struct vmcore *new; 1111 1079 1112 ehdr_ptr = (Elf64_Ehdr *)elfptr; 1080 ehdr_ptr = (Elf64_Ehdr *)elfptr; 1113 phdr_ptr = (Elf64_Phdr*)(elfptr + siz 1081 phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */ 1114 1082 1115 /* Skip ELF header, program headers a !! 1083 /* Skip Elf header, program headers and Elf note segment. 
*/ 1116 vmcore_off = elfsz + elfnotes_sz; 1084 vmcore_off = elfsz + elfnotes_sz; 1117 1085 1118 for (i = 0; i < ehdr_ptr->e_phnum; i+ 1086 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 1119 u64 paddr, start, end, size; 1087 u64 paddr, start, end, size; 1120 1088 1121 if (phdr_ptr->p_type != PT_LO 1089 if (phdr_ptr->p_type != PT_LOAD) 1122 continue; 1090 continue; 1123 1091 1124 paddr = phdr_ptr->p_offset; 1092 paddr = phdr_ptr->p_offset; 1125 start = rounddown(paddr, PAGE 1093 start = rounddown(paddr, PAGE_SIZE); 1126 end = roundup(paddr + phdr_pt 1094 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); 1127 size = end - start; 1095 size = end - start; 1128 1096 1129 /* Add this contiguous chunk 1097 /* Add this contiguous chunk of memory to vmcore list.*/ 1130 new = get_new_element(); 1098 new = get_new_element(); 1131 if (!new) 1099 if (!new) 1132 return -ENOMEM; 1100 return -ENOMEM; 1133 new->paddr = start; 1101 new->paddr = start; 1134 new->size = size; 1102 new->size = size; 1135 list_add_tail(&new->list, vc_ 1103 list_add_tail(&new->list, vc_list); 1136 1104 1137 /* Update the program header 1105 /* Update the program header offset. */ 1138 phdr_ptr->p_offset = vmcore_o 1106 phdr_ptr->p_offset = vmcore_off + (paddr - start); 1139 vmcore_off = vmcore_off + siz 1107 vmcore_off = vmcore_off + size; 1140 } 1108 } 1141 return 0; 1109 return 0; 1142 } 1110 } 1143 1111 1144 static int __init process_ptload_program_head 1112 static int __init process_ptload_program_headers_elf32(char *elfptr, 1145 1113 size_t elfsz, 1146 1114 size_t elfnotes_sz, 1147 1115 struct list_head *vc_list) 1148 { 1116 { 1149 int i; 1117 int i; 1150 Elf32_Ehdr *ehdr_ptr; 1118 Elf32_Ehdr *ehdr_ptr; 1151 Elf32_Phdr *phdr_ptr; 1119 Elf32_Phdr *phdr_ptr; 1152 loff_t vmcore_off; 1120 loff_t vmcore_off; 1153 struct vmcore *new; 1121 struct vmcore *new; 1154 1122 1155 ehdr_ptr = (Elf32_Ehdr *)elfptr; 1123 ehdr_ptr = (Elf32_Ehdr *)elfptr; 1156 phdr_ptr = (Elf32_Phdr*)(elfptr + siz 1124 phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */ 1157 1125 1158 /* Skip ELF header, program headers a !! 1126 /* Skip Elf header, program headers and Elf note segment. 
*/ 1159 vmcore_off = elfsz + elfnotes_sz; 1127 vmcore_off = elfsz + elfnotes_sz; 1160 1128 1161 for (i = 0; i < ehdr_ptr->e_phnum; i+ 1129 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { 1162 u64 paddr, start, end, size; 1130 u64 paddr, start, end, size; 1163 1131 1164 if (phdr_ptr->p_type != PT_LO 1132 if (phdr_ptr->p_type != PT_LOAD) 1165 continue; 1133 continue; 1166 1134 1167 paddr = phdr_ptr->p_offset; 1135 paddr = phdr_ptr->p_offset; 1168 start = rounddown(paddr, PAGE 1136 start = rounddown(paddr, PAGE_SIZE); 1169 end = roundup(paddr + phdr_pt 1137 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); 1170 size = end - start; 1138 size = end - start; 1171 1139 1172 /* Add this contiguous chunk 1140 /* Add this contiguous chunk of memory to vmcore list.*/ 1173 new = get_new_element(); 1141 new = get_new_element(); 1174 if (!new) 1142 if (!new) 1175 return -ENOMEM; 1143 return -ENOMEM; 1176 new->paddr = start; 1144 new->paddr = start; 1177 new->size = size; 1145 new->size = size; 1178 list_add_tail(&new->list, vc_ 1146 list_add_tail(&new->list, vc_list); 1179 1147 1180 /* Update the program header 1148 /* Update the program header offset */ 1181 phdr_ptr->p_offset = vmcore_o 1149 phdr_ptr->p_offset = vmcore_off + (paddr - start); 1182 vmcore_off = vmcore_off + siz 1150 vmcore_off = vmcore_off + size; 1183 } 1151 } 1184 return 0; 1152 return 0; 1185 } 1153 } 1186 1154 1187 /* Sets offset fields of vmcore elements. */ 1155 /* Sets offset fields of vmcore elements. */ 1188 static void set_vmcore_list_offsets(size_t el 1156 static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz, 1189 struct li 1157 struct list_head *vc_list) 1190 { 1158 { 1191 loff_t vmcore_off; 1159 loff_t vmcore_off; 1192 struct vmcore *m; 1160 struct vmcore *m; 1193 1161 1194 /* Skip ELF header, program headers a !! 1162 /* Skip Elf header, program headers and Elf note segment. */ 1195 vmcore_off = elfsz + elfnotes_sz; 1163 vmcore_off = elfsz + elfnotes_sz; 1196 1164 1197 list_for_each_entry(m, vc_list, list) 1165 list_for_each_entry(m, vc_list, list) { 1198 m->offset = vmcore_off; 1166 m->offset = vmcore_off; 1199 vmcore_off += m->size; 1167 vmcore_off += m->size; 1200 } 1168 } 1201 } 1169 } 1202 1170 1203 static void free_elfcorebuf(void) 1171 static void free_elfcorebuf(void) 1204 { 1172 { 1205 free_pages((unsigned long)elfcorebuf, 1173 free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig)); 1206 elfcorebuf = NULL; 1174 elfcorebuf = NULL; 1207 vfree(elfnotes_buf); 1175 vfree(elfnotes_buf); 1208 elfnotes_buf = NULL; 1176 elfnotes_buf = NULL; 1209 } 1177 } 1210 1178 1211 static int __init parse_crash_elf64_headers(v 1179 static int __init parse_crash_elf64_headers(void) 1212 { 1180 { 1213 int rc=0; 1181 int rc=0; 1214 Elf64_Ehdr ehdr; 1182 Elf64_Ehdr ehdr; 1215 u64 addr; 1183 u64 addr; 1216 1184 1217 addr = elfcorehdr_addr; 1185 addr = elfcorehdr_addr; 1218 1186 1219 /* Read ELF header */ !! 1187 /* Read Elf header */ 1220 rc = elfcorehdr_read((char *)&ehdr, s 1188 rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr); 1221 if (rc < 0) 1189 if (rc < 0) 1222 return rc; 1190 return rc; 1223 1191 1224 /* Do some basic Verification. */ 1192 /* Do some basic Verification. 
*/ 1225 if (memcmp(ehdr.e_ident, ELFMAG, SELF 1193 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || 1226 (ehdr.e_type != ET_CORE) || 1194 (ehdr.e_type != ET_CORE) || 1227 !vmcore_elf64_check_arch(&ehd 1195 !vmcore_elf64_check_arch(&ehdr) || 1228 ehdr.e_ident[EI_CLASS] != ELF 1196 ehdr.e_ident[EI_CLASS] != ELFCLASS64 || 1229 ehdr.e_ident[EI_VERSION] != E 1197 ehdr.e_ident[EI_VERSION] != EV_CURRENT || 1230 ehdr.e_version != EV_CURRENT 1198 ehdr.e_version != EV_CURRENT || 1231 ehdr.e_ehsize != sizeof(Elf64 1199 ehdr.e_ehsize != sizeof(Elf64_Ehdr) || 1232 ehdr.e_phentsize != sizeof(El 1200 ehdr.e_phentsize != sizeof(Elf64_Phdr) || 1233 ehdr.e_phnum == 0) { 1201 ehdr.e_phnum == 0) { 1234 pr_warn("Warning: Core image 1202 pr_warn("Warning: Core image elf header is not sane\n"); 1235 return -EINVAL; 1203 return -EINVAL; 1236 } 1204 } 1237 1205 1238 /* Read in all elf headers. */ 1206 /* Read in all elf headers. */ 1239 elfcorebuf_sz_orig = sizeof(Elf64_Ehd 1207 elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) + 1240 ehdr.e_phnum 1208 ehdr.e_phnum * sizeof(Elf64_Phdr); 1241 elfcorebuf_sz = elfcorebuf_sz_orig; 1209 elfcorebuf_sz = elfcorebuf_sz_orig; 1242 elfcorebuf = (void *)__get_free_pages 1210 elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1243 1211 get_order(elfcorebuf_sz_orig)); 1244 if (!elfcorebuf) 1212 if (!elfcorebuf) 1245 return -ENOMEM; 1213 return -ENOMEM; 1246 addr = elfcorehdr_addr; 1214 addr = elfcorehdr_addr; 1247 rc = elfcorehdr_read(elfcorebuf, elfc 1215 rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr); 1248 if (rc < 0) 1216 if (rc < 0) 1249 goto fail; 1217 goto fail; 1250 1218 1251 /* Merge all PT_NOTE headers into one 1219 /* Merge all PT_NOTE headers into one. */ 1252 rc = merge_note_headers_elf64(elfcore 1220 rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, 1253 &elfnot 1221 &elfnotes_buf, &elfnotes_sz); 1254 if (rc) 1222 if (rc) 1255 goto fail; 1223 goto fail; 1256 rc = process_ptload_program_headers_e 1224 rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, 1257 1225 elfnotes_sz, &vmcore_list); 1258 if (rc) 1226 if (rc) 1259 goto fail; 1227 goto fail; 1260 set_vmcore_list_offsets(elfcorebuf_sz 1228 set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); 1261 return 0; 1229 return 0; 1262 fail: 1230 fail: 1263 free_elfcorebuf(); 1231 free_elfcorebuf(); 1264 return rc; 1232 return rc; 1265 } 1233 } 1266 1234 1267 static int __init parse_crash_elf32_headers(v 1235 static int __init parse_crash_elf32_headers(void) 1268 { 1236 { 1269 int rc=0; 1237 int rc=0; 1270 Elf32_Ehdr ehdr; 1238 Elf32_Ehdr ehdr; 1271 u64 addr; 1239 u64 addr; 1272 1240 1273 addr = elfcorehdr_addr; 1241 addr = elfcorehdr_addr; 1274 1242 1275 /* Read ELF header */ !! 1243 /* Read Elf header */ 1276 rc = elfcorehdr_read((char *)&ehdr, s 1244 rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr); 1277 if (rc < 0) 1245 if (rc < 0) 1278 return rc; 1246 return rc; 1279 1247 1280 /* Do some basic Verification. */ 1248 /* Do some basic Verification. 
*/ 1281 if (memcmp(ehdr.e_ident, ELFMAG, SELF 1249 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || 1282 (ehdr.e_type != ET_CORE) || 1250 (ehdr.e_type != ET_CORE) || 1283 !vmcore_elf32_check_arch(&ehd 1251 !vmcore_elf32_check_arch(&ehdr) || 1284 ehdr.e_ident[EI_CLASS] != ELF 1252 ehdr.e_ident[EI_CLASS] != ELFCLASS32|| 1285 ehdr.e_ident[EI_VERSION] != E 1253 ehdr.e_ident[EI_VERSION] != EV_CURRENT || 1286 ehdr.e_version != EV_CURRENT 1254 ehdr.e_version != EV_CURRENT || 1287 ehdr.e_ehsize != sizeof(Elf32 1255 ehdr.e_ehsize != sizeof(Elf32_Ehdr) || 1288 ehdr.e_phentsize != sizeof(El 1256 ehdr.e_phentsize != sizeof(Elf32_Phdr) || 1289 ehdr.e_phnum == 0) { 1257 ehdr.e_phnum == 0) { 1290 pr_warn("Warning: Core image 1258 pr_warn("Warning: Core image elf header is not sane\n"); 1291 return -EINVAL; 1259 return -EINVAL; 1292 } 1260 } 1293 1261 1294 /* Read in all elf headers. */ 1262 /* Read in all elf headers. */ 1295 elfcorebuf_sz_orig = sizeof(Elf32_Ehd 1263 elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr); 1296 elfcorebuf_sz = elfcorebuf_sz_orig; 1264 elfcorebuf_sz = elfcorebuf_sz_orig; 1297 elfcorebuf = (void *)__get_free_pages 1265 elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1298 1266 get_order(elfcorebuf_sz_orig)); 1299 if (!elfcorebuf) 1267 if (!elfcorebuf) 1300 return -ENOMEM; 1268 return -ENOMEM; 1301 addr = elfcorehdr_addr; 1269 addr = elfcorehdr_addr; 1302 rc = elfcorehdr_read(elfcorebuf, elfc 1270 rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr); 1303 if (rc < 0) 1271 if (rc < 0) 1304 goto fail; 1272 goto fail; 1305 1273 1306 /* Merge all PT_NOTE headers into one 1274 /* Merge all PT_NOTE headers into one. */ 1307 rc = merge_note_headers_elf32(elfcore 1275 rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, 1308 &elfnot 1276 &elfnotes_buf, &elfnotes_sz); 1309 if (rc) 1277 if (rc) 1310 goto fail; 1278 goto fail; 1311 rc = process_ptload_program_headers_e 1279 rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, 1312 1280 elfnotes_sz, &vmcore_list); 1313 if (rc) 1281 if (rc) 1314 goto fail; 1282 goto fail; 1315 set_vmcore_list_offsets(elfcorebuf_sz 1283 set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); 1316 return 0; 1284 return 0; 1317 fail: 1285 fail: 1318 free_elfcorebuf(); 1286 free_elfcorebuf(); 1319 return rc; 1287 return rc; 1320 } 1288 } 1321 1289 1322 static int __init parse_crash_elf_headers(voi 1290 static int __init parse_crash_elf_headers(void) 1323 { 1291 { 1324 unsigned char e_ident[EI_NIDENT]; 1292 unsigned char e_ident[EI_NIDENT]; 1325 u64 addr; 1293 u64 addr; 1326 int rc=0; 1294 int rc=0; 1327 1295 1328 addr = elfcorehdr_addr; 1296 addr = elfcorehdr_addr; 1329 rc = elfcorehdr_read(e_ident, EI_NIDE 1297 rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr); 1330 if (rc < 0) 1298 if (rc < 0) 1331 return rc; 1299 return rc; 1332 if (memcmp(e_ident, ELFMAG, SELFMAG) 1300 if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) { 1333 pr_warn("Warning: Core image 1301 pr_warn("Warning: Core image elf header not found\n"); 1334 return -EINVAL; 1302 return -EINVAL; 1335 } 1303 } 1336 1304 1337 if (e_ident[EI_CLASS] == ELFCLASS64) 1305 if (e_ident[EI_CLASS] == ELFCLASS64) { 1338 rc = parse_crash_elf64_header 1306 rc = parse_crash_elf64_headers(); 1339 if (rc) 1307 if (rc) 1340 return rc; 1308 return rc; 1341 } else if (e_ident[EI_CLASS] == ELFCL 1309 } else if (e_ident[EI_CLASS] == ELFCLASS32) { 1342 rc = parse_crash_elf32_header 1310 rc = parse_crash_elf32_headers(); 1343 if (rc) 1311 if (rc) 1344 
return rc; 1312 return rc; 1345 } else { 1313 } else { 1346 pr_warn("Warning: Core image 1314 pr_warn("Warning: Core image elf header is not sane\n"); 1347 return -EINVAL; 1315 return -EINVAL; 1348 } 1316 } 1349 1317 1350 /* Determine vmcore size. */ 1318 /* Determine vmcore size. */ 1351 vmcore_size = get_vmcore_size(elfcore 1319 vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz, 1352 &vmcore 1320 &vmcore_list); 1353 1321 1354 return 0; 1322 return 0; 1355 } 1323 } 1356 1324 1357 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 1325 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 1358 /** 1326 /** 1359 * vmcoredd_write_header - Write vmcore devic 1327 * vmcoredd_write_header - Write vmcore device dump header at the 1360 * beginning of the dump's buffer. 1328 * beginning of the dump's buffer. 1361 * @buf: Output buffer where the note is writ 1329 * @buf: Output buffer where the note is written 1362 * @data: Dump info 1330 * @data: Dump info 1363 * @size: Size of the dump 1331 * @size: Size of the dump 1364 * 1332 * 1365 * Fills beginning of the dump's buffer with 1333 * Fills beginning of the dump's buffer with vmcore device dump header. 1366 */ 1334 */ 1367 static void vmcoredd_write_header(void *buf, 1335 static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data, 1368 u32 size) 1336 u32 size) 1369 { 1337 { 1370 struct vmcoredd_header *vdd_hdr = (st 1338 struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf; 1371 1339 1372 vdd_hdr->n_namesz = sizeof(vdd_hdr->n 1340 vdd_hdr->n_namesz = sizeof(vdd_hdr->name); 1373 vdd_hdr->n_descsz = size + sizeof(vdd 1341 vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name); 1374 vdd_hdr->n_type = NT_VMCOREDD; 1342 vdd_hdr->n_type = NT_VMCOREDD; 1375 1343 1376 strscpy_pad(vdd_hdr->name, VMCOREDD_N !! 1344 strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME, 1377 strscpy_pad(vdd_hdr->dump_name, data- !! 1345 sizeof(vdd_hdr->name)); >> 1346 memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name)); 1378 } 1347 } 1379 1348 1380 /** 1349 /** 1381 * vmcoredd_update_program_headers - Update a !! 1350 * vmcoredd_update_program_headers - Update all Elf program headers 1382 * @elfptr: Pointer to elf header 1351 * @elfptr: Pointer to elf header 1383 * @elfnotesz: Size of elf notes aligned to p 1352 * @elfnotesz: Size of elf notes aligned to page size 1384 * @vmcoreddsz: Size of device dumps to be ad 1353 * @vmcoreddsz: Size of device dumps to be added to elf note header 1385 * 1354 * 1386 * Determine type of ELF header (Elf64 or Elf !! 1355 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size. 1387 * Also update the offsets of all the program 1356 * Also update the offsets of all the program headers after the elf note header. 
1388 */ 1357 */ 1389 static void vmcoredd_update_program_headers(c 1358 static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz, 1390 s 1359 size_t vmcoreddsz) 1391 { 1360 { 1392 unsigned char *e_ident = (unsigned ch 1361 unsigned char *e_ident = (unsigned char *)elfptr; 1393 u64 start, end, size; 1362 u64 start, end, size; 1394 loff_t vmcore_off; 1363 loff_t vmcore_off; 1395 u32 i; 1364 u32 i; 1396 1365 1397 vmcore_off = elfcorebuf_sz + elfnotes 1366 vmcore_off = elfcorebuf_sz + elfnotesz; 1398 1367 1399 if (e_ident[EI_CLASS] == ELFCLASS64) 1368 if (e_ident[EI_CLASS] == ELFCLASS64) { 1400 Elf64_Ehdr *ehdr = (Elf64_Ehd 1369 Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr; 1401 Elf64_Phdr *phdr = (Elf64_Phd 1370 Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); 1402 1371 1403 /* Update all program headers 1372 /* Update all program headers */ 1404 for (i = 0; i < ehdr->e_phnum 1373 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 1405 if (phdr->p_type == P 1374 if (phdr->p_type == PT_NOTE) { 1406 /* Update not 1375 /* Update note size */ 1407 phdr->p_memsz 1376 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; 1408 phdr->p_files 1377 phdr->p_filesz = phdr->p_memsz; 1409 continue; 1378 continue; 1410 } 1379 } 1411 1380 1412 start = rounddown(phd 1381 start = rounddown(phdr->p_offset, PAGE_SIZE); 1413 end = roundup(phdr->p 1382 end = roundup(phdr->p_offset + phdr->p_memsz, 1414 PAGE_SI 1383 PAGE_SIZE); 1415 size = end - start; 1384 size = end - start; 1416 phdr->p_offset = vmco 1385 phdr->p_offset = vmcore_off + (phdr->p_offset - start); 1417 vmcore_off += size; 1386 vmcore_off += size; 1418 } 1387 } 1419 } else { 1388 } else { 1420 Elf32_Ehdr *ehdr = (Elf32_Ehd 1389 Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr; 1421 Elf32_Phdr *phdr = (Elf32_Phd 1390 Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); 1422 1391 1423 /* Update all program headers 1392 /* Update all program headers */ 1424 for (i = 0; i < ehdr->e_phnum 1393 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 1425 if (phdr->p_type == P 1394 if (phdr->p_type == PT_NOTE) { 1426 /* Update not 1395 /* Update note size */ 1427 phdr->p_memsz 1396 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; 1428 phdr->p_files 1397 phdr->p_filesz = phdr->p_memsz; 1429 continue; 1398 continue; 1430 } 1399 } 1431 1400 1432 start = rounddown(phd 1401 start = rounddown(phdr->p_offset, PAGE_SIZE); 1433 end = roundup(phdr->p 1402 end = roundup(phdr->p_offset + phdr->p_memsz, 1434 PAGE_SI 1403 PAGE_SIZE); 1435 size = end - start; 1404 size = end - start; 1436 phdr->p_offset = vmco 1405 phdr->p_offset = vmcore_off + (phdr->p_offset - start); 1437 vmcore_off += size; 1406 vmcore_off += size; 1438 } 1407 } 1439 } 1408 } 1440 } 1409 } 1441 1410 1442 /** 1411 /** 1443 * vmcoredd_update_size - Update the total si 1412 * vmcoredd_update_size - Update the total size of the device dumps and update 1444 * ELF header !! 1413 * Elf header 1445 * @dump_size: Size of the current device dum 1414 * @dump_size: Size of the current device dump to be added to total size 1446 * 1415 * 1447 * Update the total size of all the device du !! 1416 * Update the total size of all the device dumps and update the Elf program 1448 * headers. Calculate the new offsets for the 1417 * headers. Calculate the new offsets for the vmcore list and update the 1449 * total vmcore size. 1418 * total vmcore size. 
1450 */ 1419 */ 1451 static void vmcoredd_update_size(size_t dump_ 1420 static void vmcoredd_update_size(size_t dump_size) 1452 { 1421 { 1453 vmcoredd_orig_sz += dump_size; 1422 vmcoredd_orig_sz += dump_size; 1454 elfnotes_sz = roundup(elfnotes_orig_s 1423 elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz; 1455 vmcoredd_update_program_headers(elfco 1424 vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz, 1456 vmcor 1425 vmcoredd_orig_sz); 1457 1426 1458 /* Update vmcore list offsets */ 1427 /* Update vmcore list offsets */ 1459 set_vmcore_list_offsets(elfcorebuf_sz 1428 set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); 1460 1429 1461 vmcore_size = get_vmcore_size(elfcore 1430 vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz, 1462 &vmcore 1431 &vmcore_list); 1463 proc_vmcore->size = vmcore_size; 1432 proc_vmcore->size = vmcore_size; 1464 } 1433 } 1465 1434 1466 /** 1435 /** 1467 * vmcore_add_device_dump - Add a buffer cont 1436 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore 1468 * @data: dump info. 1437 * @data: dump info. 1469 * 1438 * 1470 * Allocate a buffer and invoke the calling d 1439 * Allocate a buffer and invoke the calling driver's dump collect routine. 1471 * Write ELF note at the beginning of the buf !! 1440 * Write Elf note at the beginning of the buffer to indicate vmcore device 1472 * dump and add the dump to global list. 1441 * dump and add the dump to global list. 1473 */ 1442 */ 1474 int vmcore_add_device_dump(struct vmcoredd_da 1443 int vmcore_add_device_dump(struct vmcoredd_data *data) 1475 { 1444 { 1476 struct vmcoredd_node *dump; 1445 struct vmcoredd_node *dump; 1477 void *buf = NULL; 1446 void *buf = NULL; 1478 size_t data_size; 1447 size_t data_size; 1479 int ret; 1448 int ret; 1480 1449 1481 if (vmcoredd_disabled) { << 1482 pr_err_once("Device dump is d << 1483 return -EINVAL; << 1484 } << 1485 << 1486 if (!data || !strlen(data->dump_name) 1450 if (!data || !strlen(data->dump_name) || 1487 !data->vmcoredd_callback || !data 1451 !data->vmcoredd_callback || !data->size) 1488 return -EINVAL; 1452 return -EINVAL; 1489 1453 1490 dump = vzalloc(sizeof(*dump)); 1454 dump = vzalloc(sizeof(*dump)); 1491 if (!dump) { 1455 if (!dump) { 1492 ret = -ENOMEM; 1456 ret = -ENOMEM; 1493 goto out_err; 1457 goto out_err; 1494 } 1458 } 1495 1459 1496 /* Keep size of the buffer page align 1460 /* Keep size of the buffer page aligned so that it can be mmaped */ 1497 data_size = roundup(sizeof(struct vmc 1461 data_size = roundup(sizeof(struct vmcoredd_header) + data->size, 1498 PAGE_SIZE); 1462 PAGE_SIZE); 1499 1463 1500 /* Allocate buffer for driver's to wr 1464 /* Allocate buffer for driver's to write their dumps */ 1501 buf = vmcore_alloc_buf(data_size); 1465 buf = vmcore_alloc_buf(data_size); 1502 if (!buf) { 1466 if (!buf) { 1503 ret = -ENOMEM; 1467 ret = -ENOMEM; 1504 goto out_err; 1468 goto out_err; 1505 } 1469 } 1506 1470 1507 vmcoredd_write_header(buf, data, data 1471 vmcoredd_write_header(buf, data, data_size - 1508 sizeof(struct v 1472 sizeof(struct vmcoredd_header)); 1509 1473 1510 /* Invoke the driver's dump collectio 1474 /* Invoke the driver's dump collection routing */ 1511 ret = data->vmcoredd_callback(data, b 1475 ret = data->vmcoredd_callback(data, buf + 1512 sizeof( 1476 sizeof(struct vmcoredd_header)); 1513 if (ret) 1477 if (ret) 1514 goto out_err; 1478 goto out_err; 1515 1479 1516 dump->buf = buf; 1480 dump->buf = buf; 1517 dump->size = data_size; 1481 dump->size = data_size; 1518 1482 1519 /* 
Add the dump to driver sysfs list 1483 /* Add the dump to driver sysfs list */ 1520 mutex_lock(&vmcoredd_mutex); 1484 mutex_lock(&vmcoredd_mutex); 1521 list_add_tail(&dump->list, &vmcoredd_ 1485 list_add_tail(&dump->list, &vmcoredd_list); 1522 mutex_unlock(&vmcoredd_mutex); 1486 mutex_unlock(&vmcoredd_mutex); 1523 1487 1524 vmcoredd_update_size(data_size); 1488 vmcoredd_update_size(data_size); 1525 return 0; 1489 return 0; 1526 1490 1527 out_err: 1491 out_err: 1528 vfree(buf); !! 1492 if (buf) 1529 vfree(dump); !! 1493 vfree(buf); >> 1494 >> 1495 if (dump) >> 1496 vfree(dump); 1530 1497 1531 return ret; 1498 return ret; 1532 } 1499 } 1533 EXPORT_SYMBOL(vmcore_add_device_dump); 1500 EXPORT_SYMBOL(vmcore_add_device_dump); 1534 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 1501 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 1535 1502 1536 /* Free all dumps in vmcore device dump list 1503 /* Free all dumps in vmcore device dump list */ 1537 static void vmcore_free_device_dumps(void) 1504 static void vmcore_free_device_dumps(void) 1538 { 1505 { 1539 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 1506 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 1540 mutex_lock(&vmcoredd_mutex); 1507 mutex_lock(&vmcoredd_mutex); 1541 while (!list_empty(&vmcoredd_list)) { 1508 while (!list_empty(&vmcoredd_list)) { 1542 struct vmcoredd_node *dump; 1509 struct vmcoredd_node *dump; 1543 1510 1544 dump = list_first_entry(&vmco 1511 dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node, 1545 list) 1512 list); 1546 list_del(&dump->list); 1513 list_del(&dump->list); 1547 vfree(dump->buf); 1514 vfree(dump->buf); 1548 vfree(dump); 1515 vfree(dump); 1549 } 1516 } 1550 mutex_unlock(&vmcoredd_mutex); 1517 mutex_unlock(&vmcoredd_mutex); 1551 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 1518 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 1552 } 1519 } 1553 1520 1554 /* Init function for vmcore module. */ 1521 /* Init function for vmcore module. */ 1555 static int __init vmcore_init(void) 1522 static int __init vmcore_init(void) 1556 { 1523 { 1557 int rc = 0; 1524 int rc = 0; 1558 1525 1559 /* Allow architectures to allocate EL 1526 /* Allow architectures to allocate ELF header in 2nd kernel */ 1560 rc = elfcorehdr_alloc(&elfcorehdr_add 1527 rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size); 1561 if (rc) 1528 if (rc) 1562 return rc; 1529 return rc; 1563 /* 1530 /* 1564 * If elfcorehdr= has been passed in 1531 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel, 1565 * then capture the dump. 1532 * then capture the dump. 1566 */ 1533 */ 1567 if (!(is_vmcore_usable())) 1534 if (!(is_vmcore_usable())) 1568 return rc; 1535 return rc; 1569 rc = parse_crash_elf_headers(); 1536 rc = parse_crash_elf_headers(); 1570 if (rc) { 1537 if (rc) { 1571 elfcorehdr_free(elfcorehdr_ad << 1572 pr_warn("Kdump: vmcore not in 1538 pr_warn("Kdump: vmcore not initialized\n"); 1573 return rc; 1539 return rc; 1574 } 1540 } 1575 elfcorehdr_free(elfcorehdr_addr); 1541 elfcorehdr_free(elfcorehdr_addr); 1576 elfcorehdr_addr = ELFCORE_ADDR_ERR; 1542 elfcorehdr_addr = ELFCORE_ADDR_ERR; 1577 1543 1578 proc_vmcore = proc_create("vmcore", S !! 1544 proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations); 1579 if (proc_vmcore) 1545 if (proc_vmcore) 1580 proc_vmcore->size = vmcore_si 1546 proc_vmcore->size = vmcore_size; 1581 return 0; 1547 return 0; 1582 } 1548 } 1583 fs_initcall(vmcore_init); 1549 fs_initcall(vmcore_init); 1584 1550 1585 /* Cleanup function for vmcore module. */ 1551 /* Cleanup function for vmcore module. 
*/ 1586 void vmcore_cleanup(void) 1552 void vmcore_cleanup(void) 1587 { 1553 { 1588 if (proc_vmcore) { 1554 if (proc_vmcore) { 1589 proc_remove(proc_vmcore); 1555 proc_remove(proc_vmcore); 1590 proc_vmcore = NULL; 1556 proc_vmcore = NULL; 1591 } 1557 } 1592 1558 1593 /* clear the vmcore list. */ 1559 /* clear the vmcore list. */ 1594 while (!list_empty(&vmcore_list)) { 1560 while (!list_empty(&vmcore_list)) { 1595 struct vmcore *m; 1561 struct vmcore *m; 1596 1562 1597 m = list_first_entry(&vmcore_ 1563 m = list_first_entry(&vmcore_list, struct vmcore, list); 1598 list_del(&m->list); 1564 list_del(&m->list); 1599 kfree(m); 1565 kfree(m); 1600 } 1566 } 1601 free_elfcorebuf(); 1567 free_elfcorebuf(); 1602 1568 1603 /* clear vmcore device dump list */ 1569 /* clear vmcore device dump list */ 1604 vmcore_free_device_dumps(); 1570 vmcore_free_device_dumps(); 1605 } 1571 } 1606 1572
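For context, a minimal driver-side sketch of how a device dump can be fed into /proc/vmcore through vmcore_add_device_dump() above. The vmcoredd_data layout is the one declared in include/linux/crash_dump.h; the mydrv_* names and the 64 KiB dump size are illustrative assumptions, not taken from this file.

#include <linux/crash_dump.h>
#include <linux/string.h>

/* Hypothetical collector: may write at most data->size bytes into buf. */
static int mydrv_collect_dump(struct vmcoredd_data *data, void *buf)
{
	memset(buf, 0, data->size);	/* stand-in for copying device state */
	return 0;
}

static int mydrv_register_dump(void)
{
	static struct vmcoredd_data data;

	strscpy(data.dump_name, "mydrv_fw_state", sizeof(data.dump_name));
	data.size = 64 * 1024;			/* bytes the callback may fill */
	data.vmcoredd_callback = mydrv_collect_dump;

	/*
	 * vmcore_add_device_dump() rejects empty names, missing callbacks
	 * and zero sizes, allocates a page-aligned buffer, writes the
	 * NT_VMCOREDD note header, invokes the callback just past that
	 * header, then queues the dump on vmcoredd_list and grows the
	 * reported vmcore size via vmcoredd_update_size().
	 */
	return vmcore_add_device_dump(&data);
}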
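The note emitted by vmcoredd_write_header() is an ordinary ELF note. Below is a sketch of the header it fills in, assuming the definition from include/uapi/linux/vmcore.h; the trailing comment spells out the size bookkeeping used in vmcore_add_device_dump().

#include <linux/types.h>

#define VMCOREDD_MAX_NAME_BYTES 44

/* Repeated here for illustration only; the kernel's own definition wins. */
struct vmcoredd_header {
	__u32 n_namesz;		/* sizeof(name): the note name is "LINUX" */
	__u32 n_descsz;		/* dump payload + sizeof(dump_name) */
	__u32 n_type;		/* NT_VMCOREDD */
	__u8 name[8];		/* VMCOREDD_NOTE_NAME, NUL padded */
	__u8 dump_name[VMCOREDD_MAX_NAME_BYTES];	/* per-dump name */
};

/*
 * In vmcore_add_device_dump(), the buffer is sized as
 * roundup(sizeof(struct vmcoredd_header) + data->size, PAGE_SIZE), and
 * vmcoredd_write_header() receives data_size - sizeof(vmcoredd_header)
 * as the payload size, so n_descsz covers dump_name plus the (padded)
 * payload that the driver callback writes right after the header.
 */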
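Lastly, a self-contained sketch (plain C with illustrative numbers, not code from this file) of the PT_LOAD offset arithmetic performed by process_ptload_program_headers_elf64()/elf32() and set_vmcore_list_offsets(): each segment's physical range is widened to page boundaries, appended to the file after the ELF headers and the merged note segment, and p_offset is rewritten so the sub-page displacement survives.

#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define ROUNDDOWN(x)	((x) & ~(PAGE_SIZE - 1))
#define ROUNDUP(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Assumed sizes: page-aligned ELF headers plus merged notes. */
	unsigned long long elfsz = 2 * PAGE_SIZE;
	unsigned long long elfnotes_sz = 3 * PAGE_SIZE;
	unsigned long long vmcore_off = elfsz + elfnotes_sz;	/* 0x5000 */

	/* One PT_LOAD: p_offset carries the old kernel's physical address. */
	unsigned long long paddr = 0x100123ULL;
	unsigned long long memsz = 0x2000ULL;

	unsigned long long start = ROUNDDOWN(paddr);		/* 0x100000 */
	unsigned long long end   = ROUNDUP(paddr + memsz);	/* 0x103000 */
	unsigned long long size  = end - start;			/* 0x3000 */

	/* The rewritten p_offset keeps the displacement paddr - start. */
	printf("chunk paddr=%#llx size=%#llx file_off=%#llx p_offset=%#llx\n",
	       start, size, vmcore_off, vmcore_off + (paddr - start));
	return 0;
}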