
TOMOYO Linux Cross Reference
Linux/fs/proc/vmcore.c


Diff markup

Differences between /fs/proc/vmcore.c (Version linux-6.12-rc7) and /fs/proc/vmcore.c (Version linux-4.19.322)



// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to them */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

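/*
 * Returns false only if a registered callback reports the pfn as not being
 * RAM (and therefore not safe to read from the old kernel); defaults to true
 * when no callback objects.
 */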
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

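/* open() handler: record that /proc/vmcore has been opened at least once. */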
static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	ssize_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}

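/*
 * Device dump support (CONFIG_PROC_VMCORE_DEVICE_DUMP): helpers that copy or
 * map driver-provided dump data appended to the ELF note segment.
 */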
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

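/*
 * vmcoredd_mmap_dumps() remaps the vmalloc'ed device dump buffers directly
 * into a user vma; only needed when /proc/vmcore can be mmap'ed.
 */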
#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;

		cond_resched();
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
			    unsigned long from, unsigned long pfn,
			    unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

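/*
 * mmap handler for /proc/vmcore: lay out the ELF header, the merged note
 * segment (including device dumps) and the old-memory regions contiguously
 * in the user mapping.
 */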
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

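/* File operations backing /proc/vmcore. */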
static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

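/* Allocate one vmcore list element describing a chunk of old memory. */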
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 4;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 4;

        /* Add merged PT_NOTE program header*/
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}

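/*
 * Illustration (not from the upstream file): after the merge, the copied
 * crash headers and the separate note buffer look roughly like this for
 * the usual one-PT_NOTE-per-CPU layout written by the crashed kernel:
 *
 *   elfcorebuf: [ Elf32_Ehdr ][ merged PT_NOTE phdr ][ PT_LOAD phdrs ... ]
 *   notes_buf:  [ CPU0 notes ][ CPU1 notes ] ...  (phdr_sz bytes of data,
 *               allocated rounded up to PAGE_SIZE)
 *
 * e_phnum shrinks from (nr_ptnote + number of PT_LOAD) to
 * (1 + number of PT_LOAD), and the merged header's p_offset points just
 * past the page-aligned header area, where the notes are exported.
 */
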
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

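/*
 * Illustration (not from the upstream file): the PT_LOAD rewrite above
 * preserves the sub-page offset of the original physical address.  With
 * 4 KiB pages, a segment with p_offset (old paddr) 0x10000500 and
 * p_memsz 0x2000 becomes the page-aligned chunk [0x10000000, 0x10003000)
 * of size 0x3000 on vc_list, and the exported p_offset is rewritten to
 * vmcore_off + 0x500, so reads of /proc/vmcore land on the right byte.
 */
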
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

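/*
 * Illustration (not from the upstream file): taken together, the helpers
 * above give /proc/vmcore this file layout, every region page aligned:
 *
 *   [ ELF headers, elfcorebuf_sz ][ merged notes, elfnotes_sz ]
 *   [ chunk 0 ][ chunk 1 ] ...                (as listed on vmcore_list)
 *
 * set_vmcore_list_offsets() only records each chunk's starting offset;
 * the chunk data itself is fetched from old memory when the file is read
 * or mmaped.
 */
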
static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc=0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read ELF header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic Verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

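/*
 * Overview (not from the upstream file): the 64-bit parser above reads and
 * sanity-checks the Elf64_Ehdr exported by the crashed kernel, copies the
 * full header table (Ehdr plus e_phnum Phdrs) into page-backed elfcorebuf,
 * merges every PT_NOTE segment into one, turns the PT_LOAD segments into
 * vmcore_list chunks, and finally assigns each chunk its file offset.
 * parse_crash_elf32_headers() below is the same flow with 32-bit ELF types.
 */
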
static int __init parse_crash_elf32_headers(void)
{
        int rc=0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read ELF header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic Verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf32_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc=0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
        strscpy_pad(vdd_hdr->dump_name, data->dump_name);
}

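/*
 * Illustration (field layout recalled from include/uapi/linux/vmcore.h;
 * verify against your tree): struct vmcoredd_header is shaped like an
 * ordinary ELF note, so note-walking tools see one NT_VMCOREDD entry per
 * device dump:
 *
 *   __u32 n_namesz;   set to sizeof(name), i.e. 8
 *   __u32 n_descsz;   driver dump size + sizeof(dump_name)
 *   __u32 n_type;     NT_VMCOREDD
 *   __u8  name[8];    "LINUX"
 *   __u8  dump_name[VMCOREDD_MAX_NAME_BYTES];
 *
 * The driver data written after this header forms the tail of the note's
 * descriptor.
 */
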
/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        }
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write ELF note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (vmcoredd_disabled) {
                pr_err_once("Device dump is disabled\n");
                return -EINVAL;
        }

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmaped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for driver's to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routing */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to driver sysfs list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        vfree(buf);
        vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

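/*
 * Usage sketch (hypothetical driver, not part of this file): only
 * struct vmcoredd_data and vmcore_add_device_dump() are taken from the
 * code above; the "foo" names and FOO_DUMP_SIZE are stand-ins.  A real
 * user such as cxgb4 does the equivalent while probing in the kdump
 * kernel:
 *
 *      static int foo_collect_dump(struct vmcoredd_data *data, void *buf)
 *      {
 *              // copy up to data->size bytes of device state into buf
 *              return foo_read_fw_state(buf, data->size);
 *      }
 *
 *      static int foo_register_dump(void)
 *      {
 *              struct vmcoredd_data data = { 0 };
 *
 *              strscpy(data.dump_name, "foo_fw", sizeof(data.dump_name));
 *              data.size = FOO_DUMP_SIZE;
 *              data.vmcoredd_callback = foo_collect_dump;
 *              return vmcore_add_device_dump(&data);
 *      }
 */
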
/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                elfcorehdr_free(elfcorehdr_addr);
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcore device dump list */
        vmcore_free_device_dumps();
}
